query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
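Each row pairs a natural-language query with a positive code document and 30 scored hard negatives, which is what the `triplet` objective in the metadata column declares. Below is a minimal sketch, not part of the dump itself, of how such a row could be unpacked into training triplets; the abbreviated row literal is taken from the first row of the table, and the helper name `to_triplets` is illustrative rather than from the source.

```python
# Minimal sketch: expand one row of this dump into (query, positive, negative) triplets
# for the "triplet" objective declared in the metadata column. The row literal is
# abbreviated from the first row of the table; `to_triplets` is a hypothetical helper.

row = {
    "query": "Add one or more links to the menu links collection.",
    "document": "def add_links(self, *args):\n    for link in args:\n        self.add_link(link)",
    "negatives": [
        "def add_link(self, **kwgs):\n    self.links.append(kwgs)",
        "def links(self, links):\n\n    self.container['links'] = links",
    ],
    "negative_scores": [0.73592496, 0.72956747],
}

def to_triplets(row):
    """Pair the query and its positive document with each negative, hardest first."""
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(row["query"], row["document"], neg) for neg, _score in ranked]

for query, positive, negative in to_triplets(row):
    # Print only the first line of each negative to keep the output readable.
    print(query, "->", negative.splitlines()[0])
```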
Add one or more links to the menu links collection. | def add_links(self, *args):
for link in args:
self.add_link(link) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_link(self, **kwgs):\n self.links.append(kwgs)",
"def links(self, links):\n\n self.container['links'] = links",
"def links(self, links):\n self._links = links",
"def links(self, links):\n if links is None:\n raise ValueError(\"Invalid value for `links`, must not be `None`\")\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def links(self, links):\n\n self._links = links",
"def add_link(self, link):\n if link.category:\n self._add_menu_item(link, link.category)\n else:\n self._menu_links.append(link)",
"def add_navigation_links(self, navigation_tuples):\n self._add_tuples(navigation_tuples, SubNavigationLink)",
"def update_links(self, new_link):\r\n self.__links = new_link",
"def links(links_title, links=None, **kwargs):\n\n l = links or []\n for url, title in kwargs.items():\n l.append({\"title\": title, \"url\": url})\n\n return {\n \"class\": \"links\",\n \"title\": links_title,\n \"links\": l\n }",
"def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value",
"def add_admin_links(sender, **kwds):\n\n priv_check = kwds['request'].user.has_privilege\n\n entries = [('list', url_for('admin/news'), _(u'Overview'))]\n\n if priv_check(NEWS_CREATE) or priv_check(NEWS_EDIT):\n entries.append(('edit', url_for('admin/news/new'), _(u'Write')))\n\n kwds['navbar'].insert(1,(('news', url_for('admin/news'), _(u'News'), entries)))",
"def add_link(self, link):\n raise NotImplementedError",
"def hyperlinks(self, hyperlinks):\n\n self.container['hyperlinks'] = hyperlinks",
"def _add_link_to_targets(self, link):\n for target in self._selected_data():\n target.add_component_link(link)",
"def append_links(self, lines, lang):\n lines.append(\"verbatim \")\n lines.append(\"section Links\")\n lines.append(\"external http://polcasaglia.blogspot.com Blog\")\n lines.append(\"external http://www.uisp-fe.it/calcio.php UISP\" )\n lines.append(\"verbatim \")\n return lines",
"def add_link(self, from_doc_id, to_doc_id):\n # Insert the doc_id to doc_id combination as a tuple and append to list\n # of links\n if (from_doc_id,to_doc_id) not in self._visited_links:\n self._links.append((from_doc_id,to_doc_id))",
"def loadLinks(self):\n\t\tmainPageFile = open(self.mainPageFilePath, 'r')\n\t\ttry:\n\t\t\tlines = mainPageFile.readlines()\n\t\t\tif len(lines) > 0:\n\t\t\t\tself.mainPage = lines[0]\n\t\t\telse:\n\t\t\t\tprint(\"No link in : \", self.mainPageFilePath)\n\t\tfinally:\n\t\t mainPageFile.close()\n\n\t\tlinkFile = open(self.linkFilePath, 'r')\n\t\ttry:\n\t\t\t# add all links to a list\n\t\t\tfor line in linkFile.readlines():\n\t\t\t\tif \"http\" in line:\n\t\t\t\t\tsubPageName = line.split(\".de\")[1].replace(\"/\", \"\")\n\t\t\t\t\tself.subpages.append( Subpage(line.strip(' \\t\\n\\r'),subPageName,self.mainPage) )\n\t\tfinally:\n\t\t linkFile.close()",
"def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)",
"def add_pressed(self):\n new_link = self.link_field.text\n self.links_list.add_widget(LinkIconListItem(self, text=new_link))\n self.link_field.text = \"\"\n self.add_link_button.disabled = True\n self.link_field.focus = False\n self.link_field.helper_text = \"Please enter a valid url\"\n self.links.append(new_link)\n utils.update_data()\n utils.data[self.parent_screen.name][\"links\"] = self.links\n utils.save_project_data(utils.data[self.parent_screen.name],\n f\"{utils.data[self.parent_screen.name]['proj_path']}/project_data.json\")",
"def add(self, posts):\n li_html = []\n for post in posts:\n li_html.append(\n u'<li><a href=\"{route}\">{title}</a></li>'.format(\n route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts"
]
| [
"0.73592496",
"0.72956747",
"0.6941168",
"0.69123816",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.69015694",
"0.67823446",
"0.6446638",
"0.63709253",
"0.6358212",
"0.6212924",
"0.6092794",
"0.6058775",
"0.5981178",
"0.59340745",
"0.593203",
"0.5923319",
"0.5877436",
"0.5862687",
"0.58512",
"0.5849874"
]
| 0.8036747 | 0 |
Add a view to the menu tree | def _add_view_to_menu(self, view):
self._add_menu_item(MenuView(view.name, view), view.category) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)",
"def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return",
"def initViewMenu(self):\n menu = QMenu(QCoreApplication.translate('ViewManager', '&View'),\n self.ui)\n menu.setTearOffEnabled(True)\n menu.addActions(self.viewActGrp.actions())\n menu.addSeparator()\n menu.addActions(self.viewFoldActGrp.actions())\n menu.addSeparator()\n menu.addAction(self.previewAct)\n menu.addAction(self.astViewerAct)\n menu.addSeparator()\n menu.addAction(self.unhighlightAct)\n menu.addSeparator()\n menu.addAction(self.newDocumentViewAct)\n if self.canSplit():\n menu.addAction(self.newDocumentSplitViewAct)\n menu.addSeparator()\n menu.addAction(self.splitViewAct)\n menu.addAction(self.splitOrientationAct)\n menu.addAction(self.splitRemoveAct)\n menu.addAction(self.nextSplitAct)\n menu.addAction(self.prevSplitAct)\n \n return menu",
"def add_view( *args, **kwargs ):",
"def create(self, parent):\n self.widget = QtGui.QTreeView(parent)",
"def treeView(*args, addItem: Union[List[AnyStr, AnyStr], List[List[AnyStr, AnyStr]]]=None,\n allowDragAndDrop: bool=True, allowHiddenParents: bool=True, allowMultiSelection:\n bool=True, allowReparenting: bool=True, annotation: Union[AnyStr, bool]=\"\",\n attachButtonRight: int=0, backgroundColor: Union[List[float, float, float],\n bool]=None, borderHighlite: List[AnyStr, bool]=None, borderHighliteColor:\n List[AnyStr, float, float, float]=None, buttonErase: Union[List[AnyStr, bool],\n List[List[AnyStr, bool]]]=None, buttonState: Union[List[AnyStr, int, AnyStr],\n List[List[AnyStr, int, AnyStr]]]=None, buttonStyle: Union[List[AnyStr, int,\n AnyStr], List[List[AnyStr, int, AnyStr]]]=None, buttonTextIcon: Union[List[AnyStr,\n int, AnyStr], List[List[AnyStr, int, AnyStr]]]=None, buttonTooltip:\n Union[List[AnyStr, int, AnyStr], List[List[AnyStr, int, AnyStr]]]=None,\n buttonTransparencyColor: Union[List[AnyStr, int, float, float, float],\n List[List[AnyStr, int, float, float, float]]]=None, buttonTransparencyOverride:\n Union[List[AnyStr, int, bool], List[List[AnyStr, int, bool]]]=None, buttonVisible:\n Union[List[AnyStr, int, bool], List[List[AnyStr, int, bool]]]=None, children:\n Union[AnyStr, bool]=\"\", clearSelection: bool=True, contextMenuCommand: Script=None,\n defineTemplate: AnyStr=\"\", displayLabel: Union[List[AnyStr, AnyStr],\n List[List[AnyStr, AnyStr]]]=None, displayLabelSuffix: Union[List[AnyStr, AnyStr],\n List[List[AnyStr, AnyStr]]]=None, docTag: Union[AnyStr, bool]=\"\",\n dragAndDropCommand: Script=None, dragCallback: Script=None, dropCallback:\n Script=None, editLabelCommand: Script=None, enable: bool=True, enableBackground:\n bool=True, enableButton: Union[List[AnyStr, int, int], List[List[AnyStr, int,\n int]]]=None, enableKeyboardFocus: bool=True, enableKeys: bool=True, enableLabel:\n List[AnyStr, int]=None, exists: bool=True, expandCollapseCommand: Script=None,\n expandItem: List[AnyStr, bool]=None, flatButton: Union[int, bool]=0, font:\n Union[List[AnyStr, AnyStr], bool]=None, fontFace: List[AnyStr, int]=None,\n fullPathName: bool=True, height: Union[int, bool]=0, hideButtons: bool=True,\n highlightColor: Union[List[float, float, float], bool]=None, highlite: List[AnyStr,\n bool]=None, highliteColor: List[AnyStr, float, float, float]=None,\n ignoreButtonClick: Union[List[AnyStr, int, int], List[List[AnyStr, int,\n int]]]=None, image: Union[List[AnyStr, int, AnyStr], List[List[AnyStr, int,\n AnyStr]]]=None, insertItem: Union[List[AnyStr, AnyStr, int], List[List[AnyStr,\n AnyStr, int]]]=None, isItemExpanded: Union[AnyStr, bool]=\"\", isLeaf: Union[AnyStr,\n bool]=\"\", isObscured: bool=True, item: Union[AnyStr, bool]=\"\", itemAnnotation:\n Union[List[AnyStr, AnyStr], bool]=None, itemDblClickCommand: Script=None,\n itemDblClickCommand2: Script=None, itemExists: Union[AnyStr, bool]=\"\", itemIndex:\n Union[AnyStr, bool]=\"\", itemParent: Union[AnyStr, bool]=\"\", itemRenamedCommand:\n Script=None, itemSelected: Union[AnyStr, bool]=\"\", itemVisible: List[AnyStr,\n bool]=None, labelBackgroundColor: List[AnyStr, float, float, float]=None, manage:\n bool=True, noBackground: bool=True, numberOfButtons: int=0, numberOfPopupMenus:\n bool=True, ornament: List[AnyStr, int, int, int]=None, ornamentColor: List[AnyStr,\n float, float, float]=None, parent: Union[AnyStr, bool]=\"\", popupMenuArray:\n bool=True, pressCommand: Union[List[int, Script], List[List[int, Script]]]=None,\n preventOverride: bool=True, removeAll: bool=True, removeItem: AnyStr=\"\",\n reverseTreeOrder: 
bool=True, rightPressCommand: Union[List[int, Script],\n List[List[int, Script]]]=None, select: List[AnyStr, int]=None, selectCommand:\n Script=None, selectItem: List[AnyStr, bool]=None, selectionChangedCommand:\n Script=None, selectionColor: Union[List[AnyStr, float, float, float], bool]=None,\n showItem: AnyStr=\"\", statusBarMessage: AnyStr=\"\", textColor: List[AnyStr, float,\n float, float]=None, useTemplate: AnyStr=\"\", visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int, bool]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def add_tree_view(self):\n self.data_view = QTreeView()\n self.data_view.setRootIsDecorated(False)\n self.data_view.setAlternatingRowColors(True)\n self.mbox.addWidget(self.data_view)\n\n self.data_layout = QHBoxLayout()\n self.data_layout.addWidget(self.data_view)\n\n self.model = self.create_track_model(self)\n self.data_view.setModel(self.model)",
"def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)",
"def add_to_menu ( self, menu_item ):\r\n pass",
"def create_menu():",
"def create_menus( self ):",
"def addMenu():\n mb.addAction(actionAccessories)\n actionAccessories.setVisible(True)",
"def on_view_menu_after_insert(\n self, mapper: Mapper, connection: Connection, target: ViewMenu\n ) -> None:",
"def addMenu(self):\n menu = self.interface.getPulldownMenu(0)\n actionBefore = menu.actions()[8]\n menu.insertAction(actionBefore, self.action)",
"def _add_level_to_view(self, level):\n key = Level.key(self.sorting)(level)\n index = bisect.bisect(self.view_keys, key)\n self.view_keys[index:index] = [key]\n\n # If sorting is reversed, the key list and view are in different orders\n if(self.sorting & Sorting.Reversed):\n index = len(self.view_list) - index\n\n\n self.list_lock.acquire()\n\n self.beginInsertRows(QModelIndex(), index, index)\n self.view_list[index:index] = [level]\n\n self.endInsertRows()\n\n self.list_lock.release()",
"def add_menu(self, menu):\n name = menu.get_name()\n self.__menus[name] = menu",
"def _CreateView(self, stage, role):\n # type: (Usd.Stage, Union[Type[OutlinerRole], OutlinerRole]) -> QtWidgets.QAbstractItemView\n return OutlinerTreeView(\n contextMenuActions=role.GetContextMenuActions(self),\n contextProvider=self,\n parent=self)",
"def add_views(self, *args):\n for view in args:\n self.add_view(view)",
"def addView(self, dataView):\n hooks = self.getHooks()\n if hooks is not None:\n dataView.setHooks(hooks)\n self.__views.append(dataView)",
"def inserir(self):\n self.new_window = tk.Toplevel(self.menu)\n Inserir(self.new_window)",
"def add_explorer_view(\n config: Configurator,\n route: str = \"/docs/\",\n route_name: str = \"pyramid_openapi3.explorer\",\n template: str = \"static/index.html\",\n ui_version: str = \"4.18.3\",\n permission: str = NO_PERMISSION_REQUIRED,\n apiname: str = \"pyramid_openapi3\",\n proto_port: t.Optional[t.Tuple[str, int]] = None,\n) -> None:\n\n def register() -> None:\n resolved_template = AssetResolver().resolve(template)\n\n def explorer_view(request: Request) -> Response:\n settings = config.registry.settings\n if settings.get(apiname) is None:\n raise ConfigurationError(\n \"You need to call config.pyramid_openapi3_spec for the explorer \"\n \"to work.\"\n )\n with open(resolved_template.abspath()) as f:\n if proto_port:\n spec_url = request.route_url(\n settings[apiname][\"spec_route_name\"],\n _scheme=proto_port[0],\n _port=proto_port[1],\n )\n else:\n spec_url = request.route_url(settings[apiname][\"spec_route_name\"])\n\n template = Template(f.read())\n html = template.safe_substitute(\n ui_version=ui_version,\n spec_url=spec_url,\n )\n return Response(html)\n\n config.add_route(route_name, route)\n config.add_view(\n route_name=route_name, permission=permission, view=explorer_view\n )\n\n config.action((f\"{apiname}_add_explorer\",), register, order=PHASE0_CONFIG)",
"def add_view(self, *args, **kwargs):\r\n if not kwargs.get(\"extra_context\"):\r\n kwargs[\"extra_context\"] = {}\r\n kwargs[\"extra_context\"].update({\r\n \"insert_classes\": self.admin_site.insert_classes,\r\n \"form_url\": \"herp\"\r\n })\r\n return super(ServeeModelAdmin, self).add_view(*args, **kwargs)",
"def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)",
"def show_menus(self, type_):\n if type_ == self._current:\n # do nothing\n pass\n else:\n if self._current == self.TYPE_VOIGT:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n elif self._current == self.TYPE_GISO:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n\n # Rebuild the view menu by deleting everything from it and then \n # reappending the appropriate items.\n while self.view_menu.GetMenuItemCount():\n #self.view_menu.DeleteItem(self.view_menu.FindItemByPosition(0))\n self.view_menu.Delete(self.view_menu.FindItemByPosition(0))\n\n _append_items(self._main, self.view_menu, self._menu_data[type_])\n\n if type_ == self.TYPE_VOIGT:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n elif type_ == self.TYPE_GISO:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n\n\n self._current = type_",
"def add_view_pl_button(self):\n self.view_pl = QPushButton(\"View Playlist\")\n self.view_pl.clicked.connect(self.view_pl_btn_push)\n self.hbtnbox.addWidget(self.view_pl)",
"def View_Tree_1(self):\r\n self.system.Set_Tree_View_Mode(1)",
"def add_specific_menu(self, menu, event, lat, lon): \n add_item = Gtk.MenuItem()\n add_item.show()\n menu.append(add_item)\n add_item = Gtk.MenuItem(label=_(\"Choose and bookmark the new reference family\"))\n add_item.connect(\"activate\", self.selectFamily)\n add_item.show()\n menu.append(add_item)\n return",
"def addMenu():\n toolsMenu = mb.findChild(QtGui.QMenu, \"&Tools\")\n if toolsMenu:\n toolsMenu.addAction(action)",
"def add_menu(menu_name, parent_menu=None, tear_off=True, icon='', **kwargs):\n\n pass",
"def add_menu():\n\n def _(*args, **kwargs):\n args = (cast_str(i) if isinstance(i, six.text_type) else i for i in args)\n kwargs = tuple(\n {\n k: cast_str(v) if isinstance(v, six.text_type) else v\n for k, v in kwargs.items()\n }.items()\n )\n return (args, kwargs)\n\n def _auto_comp():\n try:\n comp.Comp().create_nodes()\n except comp.FootageError:\n nuke.message(cast_str(\"请先导入素材\"))\n\n all_menu = [\n {\n _(\"工具\"): [\n {\n _(\"按素材名组装\"): [\n _(\"对当前工程执行\", _auto_comp, icon=\"autocomp.png\"),\n _(\n \"批量执行\",\n lambda: comp.panels.BatchCompPanel().showModalDialog(),\n icon=\"autocomp.png\",\n ),\n _(\n \"设置\",\n lambda: comp.panels.CompConfigPanel().showModalDialog(),\n icon=\"autocomp.png\",\n ),\n ],\n },\n {\n _(\"转换为序列工程\"): [\n _(\"对当前工程执行\", edit.script_use_seq.execute),\n _(\n \"批量执行\",\n lambda: edit.script_use_seq.panels.BatchPanel().showModalDialog(),\n ),\n _(\n \"设置\",\n lambda: edit.script_use_seq.panels.ConfigPanel().showModalDialog(),\n ),\n ]\n },\n ]\n }\n ]\n\n # Add all menu.\n def _add_menu(menu, parent=nuke.menu(cast_str(\"Nuke\"))):\n # type: (..., nuke.Menu) -> None\n assert isinstance(menu, dict)\n\n for k, v in menu.items():\n m = parent.addMenu(*k[0], **dict(k[1]))\n for i in v:\n if i is None:\n _ = m.addSeparator()\n elif isinstance(i, dict):\n _add_menu(i, m)\n elif isinstance(i, tuple):\n _ = m.addCommand(*i[0], **dict(i[1]))\n\n for menu in all_menu:\n _add_menu(menu)"
]
| [
"0.72809106",
"0.7083871",
"0.6713048",
"0.67065996",
"0.665997",
"0.656862",
"0.6568591",
"0.650373",
"0.64664364",
"0.6376501",
"0.63577974",
"0.6311282",
"0.6241752",
"0.6241142",
"0.61784154",
"0.6142204",
"0.61220807",
"0.6109262",
"0.61089903",
"0.609636",
"0.60494685",
"0.6033075",
"0.5996338",
"0.59956336",
"0.59764296",
"0.59758574",
"0.5942441",
"0.58674383",
"0.58171785",
"0.5803952"
]
| 0.8479969 | 0 |
! Renders the user page. Gets the currently authenticated user to display and add his/her competences. Displays the form for adding a competence. form The form for adding the user's competence. | def users_page(request):
if request.method == 'POST':
user = request.user
form = CompetenceForm(request.POST)
if form.is_valid():
form.instance.person = request.user
form.save()
# return redirect('user-page')
# competence = Competence.objects.create_competence(user, form.title_of_competence, form.level_of_competence)
else:
form = CompetenceForm()
return render(request, 'core/user-page.html', {'form': form}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_new_user_form():\r\n return render_template('user-form.html')",
"def add_user_form():\n\n return render_template(\"add_user.html\", headline=\"Add New Blogly User\")",
"def add_user():\n\n return render_template('register-form.html')",
"def show_user_detail_form():\n\n return render_template(\"add-user-details.html\")",
"def get(self, request):\n self.context[\"form\"] = AddUserForm()\n return render(request, \"dbkeeper/add.html\", self.context)",
"def make_new_user():\n return render_template('users/new_user_form.html')",
"def add_new_user():\n return render_template('new.html')",
"def show_new_user_page():\n\n return render_template(\"new_user.html\")",
"def create_user_form():\n template_name = \"create_user.html\"\n users = []\n print request.form\n\n flash(request.form['username'])\n flash(request.form['email'])\n\n return render_template(template_name, users=users)",
"def cassh_add(current_user=None):\n return render_template('add.html', username=current_user['name'], \\\n logged_in=current_user['is_authenticated'])",
"def get(self, request, *args, **kwargs):\n organization_form = organization.forms.OrganizationForm()\n user_form = organization.forms.UserForm()\n # print(pet_form, pet_video_form)\n context = {'organization_form': organization_form,'user_form': user_form}\n context.update(django.core.context_processors.csrf(request))\n return django.shortcuts.render_to_response('organization/organization_insert.html', context)",
"def add_recipe():\r\n if \"user\" in session:\r\n cuisine, course, allergens = Helpers.dropdowns(coll_cuisines, coll_courses, coll_allergens)\r\n return render_template(\r\n \"addrecipe.html\",\r\n cuisine=sorted(cuisine),\r\n course=course,\r\n allergens=allergens)\r\n else:\r\n flash(\"You must be logged in to view this page!\")\r\n return redirect(url_for(\"users.login\"))",
"def signup():\n return render_template(\"new_user.html\")",
"def render_create_user_page():\n\n return render_template(\"create_user.html\")",
"def display_form():\n\n return render_template('add_new_student.html')",
"def management():\n if request.method == 'POST' and request.form['submit'] == 'Add User':\n Storage.save_user(request.form['username'], request.form['password'])\n if request.method == 'POST' and request.form['submit'] == 'Add Client':\n Storage.generate_client()\n return render_template('management.html', users=Storage.all_users(),\n clients=Storage.all_clients())",
"def add_user():\n\n roles = Role.query.all()\n\n user_form = UserForm(request.form)\n user_form.roles.choices = [(i.name,i.name) for i in roles]\n\n if user_form.validate_on_submit():\n\n if not request.form['username'] or request.form['username'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n if not request.form['email'] or request.form['email'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n\n hashed_password = user_manager.hash_password(user_form.password.data)\n new_user = User(\n username=user_form.username.data,\n email=user_form.email.data,\n password=hashed_password,\n confirmed_at=datetime.datetime.utcnow(),\n is_enabled=user_form.is_enabled.data,\n first_name=user_form.first_name.data,\n last_name=user_form.last_name.data,\n locale=user_form.locale.data,\n timezone=user_form.timezone.data\n )\n\n # Si existe la lista de roles que hemos elegido se anadira al usuario\n if user_form.roles.data:\n for rol in roles:\n if rol.name in user_form.roles.data:\n new_user.roles.add(rol)\n try:\n correct = True\n db.session.add(new_user)\n db.session.commit()\n\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error creating user, make sure username and email are unique','error')\n\n else:\n flash('Congratulations, you have created a new user!','success')\n return redirect(url_for('user_ksat.manage_user'))\n\n\n return render_template('user/add_edit_user.html', title='Add User',add=True,user_form=user_form)",
"def new_users():\n\n return render_template(\"new_user.html\")",
"def signup():\n if request.method == 'GET':\n form = SignUpForm(obj=current_user)\n\n else:\n form = SignUpForm(request.form)\n if request.method == 'POST' and form.validate():\n email = form.email.data\n password = form.password.data\n\n # Check if they they exist already\n user = Users.get_one(email = email)\n if not user:\n email = form.email.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n user = User(**{'email':email, 'first_name':first_name, 'last_name':last_name})\n user.set_password(password)\n user.provider_type = form.provider_type.data\n user.practice_name = form.practice_name.data\n user.practice_type = form.practice_type.data\n try:\n user.save()\n except Exception as e:\n log.exception(f\"Exception trying to save user {email}\")\n else:\n return redirect('/')\n else:\n form.errors = \"User already exists\"\n \n context = {'form':form}\n content = render_template( 'signup.html', **context )\n return content",
"def add():\n form = RegisterForm(request.form)\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n email = form.email.data\n \n save_result = add_user(username, password, email)\n if save_result['status']:\n # create a starter project\n save_result_project = add_project(save_result['entry'], 'my first project', [], 'an example project', True, 2)\n save_result_task = add_task(save_result['entry'].id, 'this is an example task', save_result_project['entry'].id, 'you can edit these notes', False, False, 2, '1970-01-01')\n flash(u'thanks for joining, %s. please login!' % username, 'success')\n else:\n flash(u'cannot register \"%s\". try a different username or email.' % username, 'error')\n return redirect(url_for('.add'))\n\n return redirect(url_for('login'))\n\n return render_template('users/register.html'\n ,form=form\n ,t=t\n ,m=m)",
"def tiny_organics():\n x = request.form.keys()\n # print(f\"keys: {x}\")\n fname = request.form.get(\"fname\")\n # print(f\"FIRST NAME: {fname}\")\n lname = request.form.get(\"lname\")\n # print(f\"LAST NAME: {lname}\")\n email = request.form.get(\"email\")\n # print(f\"EMAIL: {email}\")\n baby_fname = request.form.get(\"baby_fname\")\n # print(f\"BABY FIRST NAME {baby_fname}\")\n baby_lname = request.form.get(\"baby_lname\")\n # print(f\"BABY LAST NAME: {baby_lname}\")\n allergies = request.form.getlist(\"allergen\")\n # print(f\"ALLERGIES: {allergies}\")\n\n\n\n new_user = crud.create_user(fname,lname,email,baby_fname,baby_lname,allergies)\n\n #adding recipes that fit user's selections into their result after sumbit.\n users_recipes = []\n for r in recipes:\n matched_allergy = False\n for name in allergies:\n if name in r[\"allergens\"]:\n matched_allergy = True\n break\n if matched_allergy == False:\n users_recipes.append(r)\n \n return render_template('user_recipe.HTML', new_user=new_user, users_recipes=users_recipes)",
"def register():\r\n form = RegistrationUserForm()\r\n if form.validate_on_submit():\r\n user = User(\r\n first_name=form.first_name.data,\r\n last_name=form.last_name.data,\r\n email=form.email.data,\r\n password=form.password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n professor = Professor(\r\n user_id=user.id,\r\n first_name=form.first_name.data,\r\n last_name=form.last_name.data,\r\n university=form.university.data\r\n )\r\n db.session.add(professor)\r\n db.session.commit()\r\n token = user.generate_confirmation_token()\r\n confirm_link = url_for('account.confirm', token=token, _external=True)\r\n get_queue().enqueue(\r\n send_email,\r\n recipient=user.email,\r\n subject='Confirm Your Account',\r\n template='account/email/confirm',\r\n user=user,\r\n confirm_link=confirm_link)\r\n flash('A confirmation link has been sent to {}.'.format(user.email),\r\n 'warning')\r\n return redirect(url_for('main.index'))\r\n return render_template('professor/signup.html', form=form)",
"def add_profile():\n \n form=ProfileForm() \n if 'username' in session:\n user = mongo.db.user.find_one({'username': session['username']})\n pro = mongo.db.profiles.find_one({'user_id': user['_id']})\n if pro:\n flash('Sorry, only one profile per user permitted. You can update your profile on your dashboard under the profile tab.', 'info')\n return redirect(url_for('dashboard'))\n \n if request.method == 'POST':\n if form.validate_on_submit():\n \n mongo.db.profiles.insert_one({'user_id': user['_id'],\n 'headline': form.headline.data,\n 'bio': form.bio.data,\n 'username': session['username'],\n 'date': datetime.utcnow(),\n 'xp': form.xp.data,\n 'interests': form.interests.data,\n 'stack': form.stack.data,\n 'languages': form.languages.data,\n 'frameworks': form.frameworks.data,\n 'github': form.github.data,\n 'linkedin': form.linkedin.data\n })\n flash('Your profile has been created.', 'success')\n return redirect('profiles')\n \n return render_template('pages/addprofile.html', title='Post',\n form=form, legend='Create your profile')\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))",
"def add_user():\n\n if request.method == 'POST':\n add_new_user_schema = AddNewUser()\n\n errors = add_new_user_schema.validate(data=request.form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_user_schema.dump(request.form)\n\n user = User(connection=connection, cursor=cursor)\n user.add_user(\n first_name=args['first_name'],\n second_name=args['second_name'],\n is_internal=args['is_internal'],\n\n position=args['position'],\n email=args['email'],\n phone_number=args['phone_number']\n )\n\n return redirect(url_for('documentation.home'))\n\n return render_template('pages/inputs/add_user.html')",
"def get(self,request,*args,**kwargs):\n\n\t\tsucursal = Sucursal.objects.get(id=kwargs['spk'])\n\n\t\tuser_form = UserForm()\n\t\templeado_form = EmpleadoForm( initial={'sucursal':sucursal.id} )\n\n\t\tforms = [user_form,empleado_form]\n\t\tcontext = {\n\t\t'section_title':'Nuevo Empleado',\n\t\t'button_text':'Crear',\n\t\t'sucursal':sucursal,\n\t\t'user_form':user_form,\n\t\t'empleado_form':empleado_form }\n\n\t\treturn render_to_response(\n\t\t\t'empleado/empleado_form.html',\n\t\t\tcontext,\n\t\t\tcontext_instance=RequestContext(request))",
"def user():\r\n return render_base_template(\"user.html\", user=current_user)",
"def register_page():\n form = addUser()\n\n if form.validate_on_submit():\n username=form.username.data\n password=form.password.data\n email=form.email.data\n first_name=form.first_name.data\n last_name=form.last_name.data\n \n new_user = User.register(username=username, password=password, email=email, first_name=first_name, last_name=last_name)\n\n db.session.add(new_user)\n db.session.commit()\n\n session[\"user\"] = new_user.username\n return redirect(f'/users/{username}')\n else:\n return render_template(\"reg_form.html\", form=form)",
"def goto_make_new_user():\n\n return render_template('users/new.html')",
"def register():\n form = RegistrationForm()\n if form.validate_on_submit():\n expert_data = Expert(first_name=form.first_name.data,\n last_name=form.last_name.data,\n username=form.username.data,\n title_id=form.title_id.data.id,\n affiliation_id=form.affiliation_id.data.id,\n discipline=form.discipline.data,\n uni_work=form.uni_work.data,\n country=form.country.data,\n specialization=form.specialization.data,\n personal_descr=form.personal_descr.data,\n permission_mention=form.permission_mention.data.name,\n permission_add_question=form.permission_add_question.data.name,\n email=form.email.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(expert_data)\n db.session.commit()\n flash('You have successfully registered! You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')",
"def show_add_student_form():\n\n return render_template(\"add_student_form.html\")"
]
| [
"0.66302705",
"0.6523123",
"0.6281098",
"0.62157583",
"0.61718553",
"0.61372375",
"0.59278387",
"0.5913561",
"0.5878333",
"0.58747965",
"0.5868361",
"0.5863985",
"0.5855513",
"0.576994",
"0.5756267",
"0.5705283",
"0.56708217",
"0.5656304",
"0.5601829",
"0.5593466",
"0.55738395",
"0.55544704",
"0.5547943",
"0.55402195",
"0.5533129",
"0.5514565",
"0.5512145",
"0.5498693",
"0.54831976",
"0.54824424"
]
| 0.7923422 | 0 |
! Render main page with search results. Displays the main page with search results, which are the vacancy cards with all the required info. add_list The list which gets the vacancies from other services. competence_list The competence list. percent The percent parameter which shows how well the vacancy fits you. vacs The param for creating the Elasticsearch index. graph_dict The dictionary for Neo4j graph creation. | def search_results(request):
competencies = Competence.objects.all()
comp_user = request.user
comp_list_filer = []
comp_num = 0
for competence in competencies:
if competence.person == comp_user:
comp_list_filer.append((competence.title_of_competence, competence.level_of_competence))
comp_num = comp_num + 1
q = request.GET.get('q')
add_list = get_vac(q)
competence_list = []
for vacancy_dict in add_list:
if Vacancy.objects.filter(title_of_vacancy=vacancy_dict['name']):
percent = get_percent(comp_list_filer, comp_num, vacancy_dict['name'])
obj = Vacancy.objects.get(title_of_vacancy=vacancy_dict['name'])
obj.percent = percent
obj.save()
continue
else:
percent = 0
if vacancy_dict['salary'] == 0:
vacancy = Vacancy.objects.create_vacancy(vacancy_dict['name'], vacancy_dict['description'],
vacancy_dict['city'],
0, vacancy_dict['webSite'], percent)
else:
vacancy = Vacancy.objects.create_vacancy(vacancy_dict['name'], vacancy_dict['description'],
vacancy_dict['city'],
vacancy_dict['salary'], vacancy_dict['webSite'], percent)
for i in vacancy_dict['description'].split(" "):
competence_list.append(i)
graph_dict = {"vac_name": vacancy_dict['name'], "com_name": competence_list}
graph_add(graph_dict)
percent = get_percent(comp_list_filer, comp_num, vacancy_dict['name'])
obj = Vacancy.objects.get(title_of_vacancy=vacancy_dict['name'])
obj.percent = percent
obj.save()
competence_list = []
if q:
vacs = VacDocument.search().query("match", title_of_vacancy=q)
vacs = vacs.sort({"percent": {"order": "desc"}})
else:
vacs = ''
return render(request, 'core/search-results.html', {'vacs': vacs, 'competencies': competencies}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_results():\n\n\tuser_query = request.args.get(\"search\")\n\tsearch_activity = SearchActivity(user_id=session.get('user_id'), search_query=user_query, datetime = datetime.now())\n\n\tdb.session.add(search_activity)\n\tdb.session.commit()\n\tsearch_items_not_filtered_list = user_search(user_query)\n\tfound_items = []\n\t\n\tfor item in search_items_not_filtered_list:\n\t\tTaxonomy_obj = db.session.query(Taxonomy).filter(Taxonomy.path.like(\"%Food%\")).filter_by(category_node=item[u'categoryNode']).all()\n\t\tfor obj in Taxonomy_obj:\n\t\t\tif item[u'categoryNode'] == obj.category_node:\t\n\t\t\t\tfound_items.append({\n\t\t\t\t\t\"name\": item.get(u'name', \"\"), \n\t\t\t\t\t\"item_id\": item.get(u'itemId', \"\"),\n\t\t\t\t\t\"category\": item.get(u'categoryPath', \"\"), \n\t\t\t\t\t\"sale_price\": format(item.get(u'salePrice', \"\"), \".2f\"), \n\t\t\t\t\t\"description\": unescape(item.get(u'shortDescription', \"\")), \n\t\t\t\t\t\"customer_rating_img\": item.get(u'customerRatingImage', \"\"),\n\t\t\t\t\t\"thumbnail_image\": item.get(u'thumbnailImage', \"\")\n\t\t\t\t\t})\n\t\t\t\t\n\treturn render_template(\"searchresults.html\", found_items=found_items)",
"def cat_results():\n\n cats = petfinder.search_data_map()\n cats = list(cats.values())\n\n return render_template('search_results.html',\n cats=cats)",
"def vis_results():\n try:\n uuid = session['uuid']\n dm = DatabaseManager()\n genes, diseases, uuiddb, query, genpanel, date \\\n = dm.retreieve_zoekopdracht(uuid)\n\n return render_template(\"results.html\", genes=genes, diseases=diseases,\n uuid=uuid, query=query, genpanel=genpanel,\n date=date)\n except KeyError:\n flash(\"Please run a search or retrieve one from the archived \"\n \"searches before visiting this page!\")\n return redirect(url_for('search'))",
"def search_results():\n skip = int(flask.request.args.get(\"skip\", \"0\"))\n limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n obj = {}\n\n # query : will be event kit in case of triage information\n uidstr = flask.request.args.get(\"query\", None)\n\n if uidstr == None:\n obj[\"error\"] = \"Missing search ID\"\n\n uidstr = json.loads(uidstr)\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uidstr\n obj[\"clips\"] = []\n states = backend.get_search_sessions()\n obj[\"sessions\"] = []\n for astate in states:\n obj[\"sessions\"].append(str(astate))\n try:\n uid = uuid.UUID(uidstr)\n state = backend.get_iqr_search_state(uid)\n # use the uid of the state and get the information from the database\n col = str(state.uuid)\n obj[\"collection\"] = col\n searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n # Force probabilities\n obj[\"positives\"] = list(state.positives)\n obj[\"negatives\"] = list(state.negatives)\n log = \"\"\n for id in state.positives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 1.0001\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n\n for id in state.negatives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 0.0\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n obj[\"log\"] = log\n\n allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n rank = skip + 1\n for one in allres:\n aclip = {}\n aclip[\"score\"] = one[\"probability\"]\n aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n aclip[\"duration\"] = clipobj[\"duration\"]\n aclip[\"rank\"] = rank\n rank = rank + 1\n obj[\"clips\"].append(aclip)\n obj[\"count\"] = len(obj[\"clips\"])\n\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)",
"def search(request):\n title = \"Voices search\"\n search_term = request.params.get('search_term','')\n form = Form(request)\n searchstring = u'%%%s%%' % search_term\n\n # generic_filter can be applied to all Node (and subclassed) objects\n\n generic_filter = or_(\n Content.title.like(searchstring),\n Content.body.like(searchstring),\n )\n\n results = DBSession.query(Content).filter(Content.type !='listing').filter(generic_filter).\\\n order_by(Content.title.asc()).all()\n\n\n page_url = PageURL_WebOb(request)\n page = int(request.params.get(\"page\", 1))\n paginator = Page(results,\n page=page,\n items_per_page=10,\n url=page_url)\n\n return render_to_response(\"buddy:templates/home/searchresult.mako\",\n dict(paginator=paginator,title=title,\n form=FormRenderer(form)),request=request)",
"def search_form(request): \n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\ttitle= \"Please search by:\"\n\t# opening files for plotting stat\n\torganismName=overallSumresult['organism']\n\tspeciesName=overallSumresult['species']\n\tspeciesstat=overallSumresult['speciesstat'][0:10]\n\tspeciesName=list(set(speciesName))\n\tspeciesName=sorted(speciesName)\n\tspeciesstat.insert(0,['Species','Unique protein','Unique peptide'])\n\tgostat=overallSumresult['gostat'][:10]\n\tgostat.insert(0,['Go Term','Unique proteins in various species'])\n\tkeggstat=overallSumresult['keggstat'][:10]\n\tkeggstat.insert(0,['Pathway Name', 'Unique proteins in various species', 'PeptideTracker', 'CPTAC', 'PASSEL', 'SRMAtlas', 'PanoramaWeb'])\n\tpepseqdic=finalresult['pepseqdic']\n\tprodic=finalresult['prodic']\n\tpepdatavalues=finalresult['pepdatavalues']\n\tprodatavalues=finalresult['prodatavalues']\n\tmrmdatabase=finalresult['mrmdatabase']\n\tallpepassay=totalpepassay['totalassayNonValid']\n\tallvalidpepassay=totalpepassay['totalassayValid']\n\tallunqStripPep=totalpepassay['totalstripPep']\n\tuqpep=len(pepseqdic)\n\tuqpro=len(prodic)\n\tkeggstat=[i[:2] for i in keggstat]\n\tspeciesstat=[i[:2] for i in speciesstat]\n\tcontextindex ={\"title\": title,\"uqpro\":uqpro, \"uqpep\":uqpep,\\\n\t\t\t\t\t\"speciesName\":speciesName,\"speciesnumber\":len(speciesName)-1,\\\n\t\t\t\t\t\"speciesstat\":json.dumps(speciesstat),\\\n\t\t\t\t\t\"gostat\":json.dumps(gostat),\"keggstat\":json.dumps(keggstat),\\\n\t\t\t\t\t'allpepassay':allpepassay,\\\n\t\t\t\t\t'allvalidpepassay':allvalidpepassay,\\\n\t\t\t\t\t'allunqStripPep':len(allunqStripPep),\\\n\t\t\t\t\t'jvennpep':json.dumps(pepdatavalues),\\\n\t\t\t\t\t'jvennprot':json.dumps(prodatavalues),\\\n\t\t\t\t\t'jvennmrmdb':json.dumps(mrmdatabase)\\\n\t\t\t\t\t}\n\treturn render(request, 'index.html', contextindex)",
"def covid19(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\n\tcontextres =[]\n\t#build elasticsearch query to search data\n\tquery={\"query\": {\n\t\t\"bool\": {\n\t\t\t\"must\": [\n\t\t\t\t{\"match\": {\"Associated with COVID-19\": \"Yes\"}},\n\t\t\t\t{\"match\": {\"UniprotKb entry status\": \"Yes\"}}\n\t\t\t]\n\t\t}\n\t}\n\t}\n\t#generate random file name to store search result in json format\n\tnameFIle=names.get_first_name()\n\tjsonfilename=nameFIle+'_basic_search_covid19.json'\n\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'results', jsonfilename)\n\tjsonfileoutput= open(jsonfilepath,'w')\n\tjfinaldata=[]\n\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t#elasticsearch will search data\n\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\tjfinaldata=[]\n\tpepSeqList=[]\n\tproteinList=[]\n\t#if data is valid based on uniprotkb release then it will display\n\tfor i in res:\n\t\tjdic=i['_source']\n\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\tjdic[\"sel\"] =\"\"\n\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\n\t\t\tif str(jdic[\"Associated with COVID-19\"]).strip().upper() =='YES':\n\t\t\t\tpepSeqList.append(jdic[\"Peptide Sequence\"].strip())\n\t\t\t\tproteinList.append(jdic[\"UniProtKB Accession\"].strip().split('-')[0])\n\t\t\tjfinaldata.append(jdic)\n\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t#checking any result generated by database\n\tfoundHits=len(jfinaldata)\n\t#storing only 10000 rows in json format\n\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\tjsonfileoutput.close()\n\t# if result found then do other job\n\tif foundHits >0:\n\t\tstatsummary=summaryStatcal(jfinaldata) # sent data to this funcation for generating stat\n\t\tpathwaychart=statsummary['pathwaychart']\n\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\tspecieslist=statsummary['specieslist']\n\t\ttotallist=statsummary['total']\n\t\tsubcell=statsummary['subcell']\n\t\tgodic=statsummary['godic']\n\t\tjvennprot=statsummary['jevennstat'][0]\n\t\tjvennpep=statsummary['jevennstat'][1]\n\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1])) # sorting GO data\n\t\tupdatedgodic=dict(list(sortedgodic.items()))\n\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries'])) #dumping data into json 
format\n\t\tprodataseries=statsummary['prodataseries']\n\t\tunqisostat=statsummary['unqisostat']\n\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'statsummary', jsonfilename) #storing stat result in json format\n\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\tjsonfileoutputStat.close()\n\t\turlname=\"'/resultFile/jsonData/resultJson/basicsearch/results/\"+jsonfilename+\"'\"\n\n\t\tcontextindex={\n\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),'foundHits':foundHits,\n\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t'uniquePepSeq':len(set(pepSeqList)),'uniqueProtein':len(set(proteinList)),\n\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t}\n\t\treturn render(request,'covid19.html',contextindex)\n\telse:\n\t\treturn render(request,'covid19.html',{'foundHits':foundHits})",
"def search_venues_submission():\n # seach for Hop should return \"The Musical Hop\".\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n keyword = request.form.get('keyword', '')\n \n # data:\n shows_subq = Show.query.with_entities(\n Show.venue_id,\n func.count(Show.venue_id).label('num_upcoming_shows')\n ).filter(\n Show.start_time > datetime.utcnow()\n ).group_by(\n Show.venue_id\n ).subquery()\n\n venues_subq = Venue.query.with_entities(\n Venue.id,\n Venue.name\n ).filter(\n Venue.name.contains(keyword)\n ).subquery()\n\n data = db.session.query(\n venues_subq.c.id,\n venues_subq.c.name,\n shows_subq.c.num_upcoming_shows\n ).join(\n shows_subq, venues_subq.c.id == shows_subq.c.venue_id\n ).all()\n\n results={\n \"count\": len(data),\n \"data\": [\n {\n \"id\": id,\n \"name\": name,\n \"num_upcoming_shows\": num_upcoming_shows,\n } for (id, name, num_upcoming_shows) in data\n ]\n }\n\n return render_template(\n 'pages/search_venues.html', \n results=results, keyword=keyword\n )",
"def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? 
Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)",
"def results():\n\n queryName = request.form['query']\n queryStars = request.form['stars']\n \n datasource = DataSource()\n listOfRestaurantNames = datasource.searchRestaurantsByNameAndMinimumStars(queryName, queryStars)\n restaurants = datasource.generateRestaurantObjects(listOfRestaurantNames[:15])\n\n return render_template('results.html', restaurants=restaurants)",
"def search_convo_show_result(update, context):\n chat = Chat.get(update.message.chat_id)\n user_data = context.user_data\n query = user_data['query']\n price = float(update.message.text)\n user_data['price'] = price\n\n ses = Session()\n offers = ses.search_all(query, chat.lat, chat.lon, chat.radius)\n too_expensive = 0\n total_offers = 0\n for offer in offers:\n total_offers += 1\n if offer.price > price:\n too_expensive += 1\n continue\n\n update.message.reply_text(offer_text(offer))\n\n if total_offers == 0:\n update.message.reply_text(\n f'Der blev ikke fundet nogen tilbud lige nu.')\n if too_expensive > 0:\n update.message.reply_text(f'{too_expensive} tilbud blev frasorteret, '\n 'fordi de var for dyre.')\n\n keyboard = [[\n InlineKeyboardButton(text='💾 Gem søgning', callback_data='save'),\n InlineKeyboardButton(text='🌟 Ny søgning', callback_data='new'),\n InlineKeyboardButton(text='🚪️ Færdig', callback_data='done')\n ]]\n markup = InlineKeyboardMarkup(keyboard)\n\n update.message.reply_text('❓ Vil du gemme søgningen?', reply_markup=markup)\n\n return SEARCH_DONE",
"def index(request):\n form = SearchForm()\n \n form2 = SearchFormMini(request=request)\n objects = Apartament.objects.filter(show_on_start_page=True, is_published=True).all()\n\n return direct_to_template(request, 'start_page.html', {\n 'form': form,\n 'form2': form2,\n 'objects':objects,\n 'data': {'main_page': True},\n\t\t'main_text': get_object_or_404(StaticPage, pk=10)\n })",
"def search_results():\n search = False\n if session['patron']:\n search = False\n try:\n page = int(request.args.get('page', 1))\n except ValueError:\n page = 1\n\n search_criteria = request.args.get('search')\n patron_id = session['patron']\n session['search_criteria'] = search_criteria\n\n if search_criteria != '':\n print \"do a search\"\n list_of_books = booksearch.search(search_criteria, patron_id)\n pagination = Pagination(page=page, \n total=len(list_of_books), \n search=search, \n record_name='list_of_books')\n return render_template('book_list.html', search=search_criteria,\n list_of_books=list_of_books,\n pagination=pagination,\n )\n else:\n flash(\"Please enter an author or a title.\")\n return render_template('index.html')",
"def search_results(request):\n #key\n\n user_input = request.GET['q']\n\n people_objs = Person.objects.filter(Q(last__contains=user_input) | Q(\n first__contains=user_input))\n document_objs = Document.objects.filter(title__contains=user_input)\n folder_objs = Folder.objects.filter(full__contains=user_input)\n organization_objs = Organization.objects.filter(Q(name__contains=user_input)|Q(\n location__contains=user_input))\n obj_dict = {\n 'people_objs': people_objs,\n 'document_objs': document_objs,\n 'folder_objs': folder_objs,\n 'organization_objs': organization_objs,\n 'query': user_input,\n }\n response = render(request, 'search_results.jinja2', obj_dict)\n return response",
"def search_helper():\n\n if request.args.get(\"movie_name\"):\n movie_name = request.args.get(\"movie_name\")\n movie = Movie.query.filter(Movie.name == movie_name).one()\n session['movie'] = movie.name\n\n else:\n print 'RANDOMLY PICKING A MOVIE'\n movie = random.choice(Movie.query.all())\n\n color_list = get_colors_from_movie(movie)\n print 'Originally got colors %s from Movie %s' % (sorted(color_list), movie.name)\n\n result_dict = etsy.get_listing_items(color_list)\n\n print 'Colors returned %s' % (sorted(result_dict['colors']))\n \n best_dict = etsy.get_image_urls(result_dict, movie.id)\n \n (top_listing, bottom_listing, accessory_listing, dress_listing,\n shoe_listing, bag_listing) = etsy.get_listing_urls(best_dict)\n\n print 'returning ' , result_dict['colors']\n return (result_dict['colors'], movie, best_dict, top_listing, bottom_listing, accessory_listing, dress_listing,\n shoe_listing, bag_listing)",
"def make_searches(vase):\n params = {\n 'ch': vase.trendall_ch,\n 'no': vase.trendall_no,\n 'city': vase.location.city_name,\n 'col': vase.location.collection_name,\n 'id': vase.location.collection_id,\n }\n return [\n 'trendall {ch}.{no}'.format(**params),\n 'trendall {ch}/{no}'.format(**params),\n '{city} {id}'.format(**params),\n '{col} {id}'.format(**params),\n ]",
"def page_body():\r\n st.header(\"Search\")\r\n st.subheader(\"Search For SMEs With A Few Different Options\")\r\n\r\n search_mode_selection = st.radio(\r\n help=\"Search For SMEs That Have Particular Connections, Titles, Or Names...\",\r\n label=\"Search By\",\r\n options=(SearchMode.Connection.value, SearchMode.JobTitle.value, SearchMode.Name.value),\r\n )\r\n\r\n search_form = st.form(key=\"search_form\", clear_on_submit=False)\r\n search_query = search_form.text_input(label=\"\", value=\"Search...\", max_chars=50)\r\n search_button = search_form.form_submit_button(label=\"Search\")\r\n\r\n if search_button:\r\n results = get_search_results(search_query, SearchMode[str(search_mode_selection).replace(\" \", \"\")])\r\n\r\n # Loop through the results returned from the database query\r\n for result in results:\r\n result_dict = result.to_dict() # Convert internally to a Python dict\r\n\r\n # dict keys here are actually database keys in Firestore. You would need to be signed in to see the proper values\r\n with st.expander(result_dict[\"name\"] + \" - \" + str(result_dict[\"age\"]) + \" years old\"):\r\n st.header(result_dict[\"name\"])\r\n st.write(result_dict[\"jobTitle\"])\r\n\r\n st.subheader(\"Personal Summary\")\r\n st.write(result_dict[\"personalSummary\"])\r\n\r\n if result_dict[\"companyName\"]:\r\n st.subheader(\"Works At\")\r\n st.write(result_dict[\"companyName\"])\r\n\r\n if result_dict[\"connections\"]:\r\n st.subheader(result_dict[\"name\"] + \"'s Connections\")\r\n st.write(\", \".join(result_dict[\"connections\"]))",
"def search():\r\n return render_template(\"/home/search.html\")",
"def search_all_view(request): # searchAll\n voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id\n text_from_search_field = request.GET.get('text_from_search_field', '')\n search_scope_list = request.GET.getlist('search_scope_list[]')\n search_scope_list = list(filter(None, search_scope_list))\n # search_scope_list options\n # PN = POLITICIAN_NAME\n\n if not positive_value_exists(text_from_search_field):\n status = 'MISSING_TEXT_FROM_SEARCH_FIELD'\n json_data = {\n 'status': status,\n 'success': False,\n 'text_from_search_field': text_from_search_field,\n 'voter_device_id': voter_device_id,\n 'search_results': [],\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n results = search_all_for_api(\n text_from_search_field=text_from_search_field,\n voter_device_id=voter_device_id,\n search_scope_list=search_scope_list)\n # results = search_all_elastic_for_api(text_from_search_field, voter_device_id) #\n status = \"UNABLE_TO_FIND_ANY_SEARCH_RESULTS \"\n search_results = []\n if results['search_results_found']:\n search_results = results['search_results']\n status = results['status']\n else:\n status += results['status']\n\n json_data = {\n 'status': status,\n 'success': True,\n 'text_from_search_field': text_from_search_field,\n 'voter_device_id': voter_device_id,\n 'search_results': search_results,\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')",
"def index(request,role=\"\"):\n\n context = {}\n if role == \"top\":\n context['champion'] = Champion.objects.filter(top = True).all()\n elif role == \"jungle\":\n context['champion'] = Champion.objects.filter(jungler = True).all()\n elif role == \"mid\":\n context['champion'] = Champion.objects.filter(mid = True).all()\n elif role == \"adc\":\n context['champion'] = Champion.objects.filter(adc = True).all()\n elif role == \"support\":\n context['champion'] = Champion.objects.filter(support = True).all()\n else:\n context['champion'] = Champion.objects.all()\n if request.GET:\n query = request.GET.get(\"search\")\n all_champ = Champion.objects.all()\n querylist = query.split(\" \")\n if not query.isalpha():\n return HttpResponse(render(request,'noxusProject/error.html'))\n for x in all_champ:\n for i in range(len(querylist)):\n # for user quality of life Ex.(user search \"A\" >> it will show all champ start with \"A\")\n if (x.capital().__contains__(querylist[i].capitalize())): \n return search(request,querylist[i].capitalize())\n if query:\n return HttpResponse(render(request,'noxusProject/error.html'))\n return HttpResponse(render(request,'noxusProject/index.html',context))",
"def search_venues():\n search_term = request.form.get('search_term', '')\n # search venue by venue name partial match\n venues_by_text = search_venue(search_term)\n # prepare data to shown in the template\n response = {\n 'count': len(venues_by_text),\n 'data': [v.short_serializer for v in venues_by_text]\n }\n return render_template('pages/search_venues.html', results=response,\n search_term=request.form.get('search_term', ''))",
"def index():\n\n # User reached route via GET (as by clicking a link or via redirect)\n if request.method == \"GET\":\n\n # FORM TABLE total SELECT name for lookup(name) of now price, price, total costmoney ,totalshares\n portf = db.execute(\"SELECT name, symbol, price, sharesTotal, costmoneyTotal FROM total WHERE userID = :userID\", userID=session[\"user_id\"])\n\n # Len of portf list, rows\n porLen = len(portf)\n\n # For loop portf index \"nowPrice\" to new dict, costmoneyTotal\n for item in range(porLen):\n e = portf[item][\"symbol\"]\n nowPrice = lookup(e).get(\"price\")\n portf[item]['nowPrice'] = nowPrice\n portf[item]['costmoneyTotal'] = usd(portf[item]['costmoneyTotal'])\n\n # List reversed\n portf = list(reversed(portf))\n\n\n # FORM TABLE users SELECT end cash\n endPrice = db.execute(\"SELECT cash FROM users WHERE id = :userID\", userID=session[\"user_id\"])\n\n endPrice = usd(endPrice[0][\"cash\"])\n return render_template(\"index.html\", portf=portf, endPrice = endPrice, porLen=porLen)",
"def get_vacancies(config):\r\n vacancies_file = deepcopy(config[\"vacancies_file\"])\r\n headers = deepcopy(config[\"headers\"])\r\n filters = deepcopy(config[\"url_params\"])\r\n filters[\"area\"] = filters[\"area\"][-1].split(\"|\")\r\n if filters[\"area\"] == [\"\"]:\r\n del filters[\"area\"]\r\n if \"date_from\" or \"date_to\" in filters:\r\n del filters[\"period\"]\r\n filters[\"page\"] = 0\r\n in_tests.test_dict_data_type(filters)\r\n print (\"\\n\\nGetting vacancies from hh...\")\r\n\r\n date_current = datetime.datetime.now().replace(microsecond=0).isoformat()\r\n while True:\r\n vacancies = load_vacancies(headers, filters)\r\n found_vacancies = vacancies[\"found\"]\r\n if found_vacancies:\r\n write_vacancies_to_database(\r\n config, create_vacancies_generator(vacancies[\"items\"]))\r\n write_to_file(vacancies_file, vacancies)\r\n filters[\"page\"] += 1\r\n if vacancies[\"pages\"] <= filters[\"page\"]:\r\n break\r\n config[\"url_params\"][\"date_from\"] = date_current\r\n import_database_columns(config)\r\n got_vacancies = min(found_vacancies, filters[\"per_page\"]*filters[\"page\"])\r\n if \"period\" in filters:\r\n print(f\"\\n\\nFound: {found_vacancies} vacancies \\\r\nfor period of {filters['period']} days.\")\r\n elif \"date_from\" in filters:\r\n print(f\"\\n\\nFound: {found_vacancies} vacancies \\\r\nfrom {format(filters['date_from'])}.\")\r\n else:\r\n print(\r\n \"\\n\\nNo `period` or `date_from` in `config.yaml > url_params`\\n\\n\")\r\n raise AttributeError\r\n if found_vacancies:\r\n print(f\"Got: {got_vacancies} vacancies \\\r\n({round(got_vacancies/found_vacancies*100, 2)}%)\")\r\n else:\r\n print(f\"Got: {got_vacancies} vacancies (0%)\")\r\n if found_vacancies > got_vacancies:\r\n print(f\"\\nYou can get more vacancies by:\\n\\\r\n 1. Scheduling parse more often.\\n\\\r\n 2. Adding more filter params to `config.yaml > url_params`.\\n\\\r\n 3. Changing region\\n\\\r\n 4. Changing `config.yaml > url_params > period` \\\r\n(legal values in [1, 31]).\\n\\\r\n Works only if no param `date_from`: \\\r\n\")\r\n return ()",
"def index():\n aaa.require(fail_redirect='/login')\n\n if \"search\" in request.params:\n search = request.params['search']\n\n # split search string and check for special keywords\n keyword = search.split(\":\")[0]\n arg = \":\".join(search.split(\":\")[1:])\n if keyword in [\"actor\",\"director\",\"year\"]:\n user_films = mongo_mgr.get_films_by_user(aaa.current_user.id)\n user_films = [film for film in user_films if film is not None]\n user_film_ids = [film[\"_id\"] for film in user_films]\n\n query = {\"_id\": {\"$in\": user_film_ids}}\n if keyword == \"actor\":\n query[\"actors\"] = {\"$in\": [re.compile(arg,re.IGNORECASE)]}\n elif keyword == \"director\":\n query[\"directed_by\"] = {\"$in\": [re.compile(arg,re.IGNORECASE)]}\n elif keyword == \"year\":\n query[\"initial_release_date\"] = int(arg)\n\n cursor = mongo_mgr.get_films_by_pattern(query)\n\n films = {\"result\": cursor}\n films = json.loads(dumps(films))\n else:\n films = mediator.get_films_by_name(search)\n\n for film in films['result']:\n film['my_movie'] = mongo_mgr.user_has_movie(film['_id']['$oid'], aaa.current_user.id)\n\n return json.dumps(films)\n else:\n if request.headers['accept'] == \"application/json\":\n films = mongo_mgr.get_films_by_user(aaa.current_user.id)\n films = [film for film in films if film is not None]\n for film in films:\n film['my_movie'] = True\n return dumps(films)\n else:\n return template(\"index.html\", user=aaa.current_user.username)",
"def advanced_search():\n\n return render_template('Advanced_Search.html')",
"def search_venues_form():\n # seach for Hop should return \"The Musical Hop\".\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n return render_template(\n 'pages/search_venues.html'\n )",
"def search(self):\n premium = self.config.get('premium', False)\n\n self.params[self.opts['keyword']['query_key']] = self.config[self.opts['keyword']['config_key']] # keyword\n # Selection params\n self.append_param('tag_mode', 'selection')\n if premium:\n self.append_param('order_premium', 'selection')\n else:\n self.append_param('order_not_premium', 'selection')\n\n self.append_param('type', 'selection')\n self.append_param('tool', 'selection')\n self.append_param('ratio', 'selection')\n self.append_param('mode', 'selection')\n\n # Number params\n self.append_param('min_width', 'number')\n self.append_param('max_width', 'number')\n self.append_param('min_height', 'number')\n self.append_param('max_height', 'number')\n if premium:\n self.append_param('min_bookmark', 'number')\n self.append_param('max_bookmark', 'number')\n else:\n self.set_bookmark_filter()\n\n # Date params\n self.append_param('start_time', 'date')\n self.append_param('end_time', 'date')\n\n # multi work filter\n self.filters['multi'] = self.config.get('download_multi', False)\n\n for i in range(self.config['start_page'], self.config['end_page'] + 1):\n self.params['p'] = i\n self.headers['Referer'] = 'https://www.pixiv.net/'\n url ='https://www.pixiv.net/search.php'\n html = self.session.get(url, headers = self.headers, params = self.params, timeout = 10, proxies = self.proxies)\n\n soup = BeautifulSoup(html.text, 'lxml')\n data_items = json.loads(soup.find('input', id = 'js-mount-point-search-result-list')['data-items'])\n\n return self.extract_work_info(data_items)",
"def index():\n response.flash=\"Welcome to TheChef\"\n \"\"\"recepies = db(db.recepies).select(orderby=~db.recepies.pub_date)\n return dict(recepies=recepies) \"\"\"\n if len(request.args): page=int(request.args[0])\n else: page=0\n items_per_page=9\n limitby=(page*items_per_page,(page+1)*items_per_page+1)\n rows=db(db.recepies).select(orderby=~db.recepies.pub_date,limitby=limitby)\n return dict(rows=rows,page=page,items_per_page=items_per_page)",
"def tvresult():\n selected_date = request.args.get('selected_date')\n selected_date2 = request.args.get('selected_date2')\n actor = request.args.get('actor')\n genre = request.args.get('genre')\n\n if not selected_date:\n selectedDateFormatted = datetime.datetime.today().strftime('%Y-%m-%d')\n else:\n selectedDate = datetime.datetime.strptime(selected_date, '%d.%m.%Y')\n selectedDateFormatted = selectedDate.strftime('%Y-%m-%d')\n selectedDate2 = datetime.datetime.strptime(selected_date2, '%d.%m.%Y')\n selectedDate2Formatted = selectedDate2.strftime('%Y-%m-%d')\n \n dates = set()\n dates.add(selectedDateFormatted)\n\n \"\"\" Collect dates for searching \"\"\"\n\n nextDateFormatted = selectedDateFormatted\n x = 1\n\n while nextDateFormatted < selectedDate2Formatted:\n nextDate = selectedDate + datetime.timedelta(days=x)\n nextDateFormatted = nextDate.strftime ('%Y-%m-%d')\n dates.add(nextDateFormatted)\n x += 1\n\n \"\"\" Loop through dates \"\"\"\n movies = []\n for x in dates:\n\n searchUrl = \"https://www.iltalehti.fi/telkku/tv-ohjelmat/\" + x + \"/peruskanavat/koko-paiva\"\n \n \"\"\" Gather data from telkku.com with BeautifulSoup. We are interested\n in movies on public television. From page content, look for 'li' tags.\n \"\"\"\n\n page = requests.get(searchUrl)\n soup = BeautifulSoup(page.content, 'html.parser')\n programs = soup.find_all('li')\n\n \"\"\" Loop through tv programs data for current date \"\"\"\n for y in programs:\n \n \"\"\" Movies have the class tag publication__imdb-link. Other data is skipped. \"\"\"\n\n imdb_link_cl = y.find(class_=\"publication__imdb-link\")\n if imdb_link_cl is None:\n continue\n\n movie_title = y.get(\"title\") \n \n if movie_title is None:\n continue \n\n print(movie_title)\n imdb_link = imdb_link_cl.get('href')\n showdatetime = y.find('time').get(\"datetime\")\n \n (sdate_tmp, stime_tmp) = showdatetime.split(\"T\")\n showdate = sdate_tmp[8:10] + \".\" + sdate_tmp[5:7] + \".\" + sdate_tmp[0:4]\n\n showdate_obj = datetime.datetime.strptime(showdate, '%d.%m.%Y')\n\n if showdate_obj > selectedDate2:\n continue\n\n showtime = stime_tmp[0:5]\n imdb_temp = imdb_link.split(\"/\")\n imdb_id = imdb_temp[len(imdb_temp) - 2]\n channel_cl = y.find(class_=\"publication__title\")\n channel_name_href = channel_cl.get(\"href\")\n channel = get_channel_name(channel_name_href)\n if channel == \"Not found\":\n continue\n \n movie_data = find_movie_from_api(imdb_id)\n \n if actor and not actor in movie_data['Actors']:\n continue\n \n if not genre is None and not \"Any\" in genre:\n genres = movie_data['Genre']\n if not genre in genres:\n continue\n\n img = movie_data['Poster']\n if len(img) < 5:\n img = find_poster_from_imdb(imdb_id)\n \n reviews = find_reviews(imdb_id)\n plot = movie_data['Plot'].replace('\"','\\\\\"')\n\n film = {\"showtime\": showtime, \"fi_name\": movie_title, \"reviews\": reviews,\n \"channel\": channel, \"showdate\": showdate, \"imdb_id\": imdb_id,\n \"img\": img, \"name\": movie_data['Title'],\n \"year\": movie_data['Year'], \"country\": movie_data['Country'],\n \"director\": movie_data['Director'], \"actors\": movie_data['Actors'],\n \"genre\": movie_data['Genre'], \"rated\": movie_data['Rated'],\n \"runtime\": movie_data['Runtime'],\"plot\": plot}\n \n if film not in movies:\n movies.append(film)\n\n return render_template(\"results.html\", movies=movies, dateFrom=selected_date, dateTo=selected_date2)",
"def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")"
]
| [
"0.6161342",
"0.5999555",
"0.59908456",
"0.58384055",
"0.58010125",
"0.5743552",
"0.57279676",
"0.57167953",
"0.5694214",
"0.56624436",
"0.5640085",
"0.56321853",
"0.5625539",
"0.5614104",
"0.5570825",
"0.5562445",
"0.5561555",
"0.555835",
"0.54716927",
"0.5470257",
"0.546811",
"0.54586506",
"0.54549986",
"0.5452898",
"0.5451952",
"0.54463536",
"0.544634",
"0.54409397",
"0.54331005",
"0.5415571"
]
| 0.791957 | 0 |
Initialize player with initial skills. | def __initSkills(self):
skills = self.teamparser.getPlayerSkills()
try:
skills = skills[(self.team, self.position)] #initial skills
except KeyError, err:
skills = []
raise TypeError, "Invalid Team/Position: " + self.team
for skill in skills:
skobj = pyBBSkill.BBSkill(skill, self.skillparser)
self.skills.append(skobj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resetSkills(self):\r\n \"\"\" Reset the default attributes \"\"\"\r\n self.player['level'] = 1\r\n self.player['xp'] = 0\r\n self.player['credits'] = int(startCredits)\r\n self.player['popup'] = int(popupStatus)\r\n self.player['name'] = self.player.name\r\n self.player['lastconnected'] = int(time.time())\r\n\r\n \r\n \"\"\" Iterate through the skills list then set each skill to 0 \"\"\"\r\n for skill in skills:\r\n self.player[skill.name] = 0\r\n\r\n \"\"\" Slay the player \"\"\"\r\n es.server.queuecmd(\"damage %s %s\" % (self.userid, es.getplayerprop(self.userid, \"CBasePlayer.m_iHealth\")))\r\n \r\n \"\"\" Notify the user \"\"\"\r\n tell(self.userid, 'info deleted')",
"def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()",
"def get_initial(self):\n initial = super(SkillsView, self).get_initial()\n ai = get_ai(\n self.request.session.get('token', False),\n self.kwargs['aiid']\n )\n initial = {\n 'skills': ai['linked_bots']\n }\n return initial",
"def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None",
"def setUp(self):\n self.player = Player()",
"def bot_init():\n client.Console.Say('Hello World')\n global player\n player = client.GetPlayer()",
"def __init__(self,\r\n )->None:\r\n Player.__init__(self)\r\n self.classifier[\"stochastic\"] = True #Make the communicator such by default, to pass to submodules.\r\n \r\n #DEBUG / ANALYSIS TOOLS\r\n self.finished_opponent = \"none yet\"\r\n self.list_base_action = []\r\n self.list_intent_sent = []\r\n self.list_intent_received = []\r\n self.list_intent_assessment = []\r\n self.list_intent_true = []\r\n self.list_reward = []\r\n self.list_decision = []",
"def init_players(self):\n self.spaceships.append(self.player)\n SoundManager.add_sfx(\n self.player.states['exploded']['sfx'],\n self.player\n )",
"def initialize_players():\n return [Player(name, Hand([]), 0) for name in PLAYER_NAMES]",
"def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)",
"def __init__(self, player1, player2, state_machine, restore = False):\r\n super().__init__()\r\n self.__players[0] = player1\r\n self.__players[1] = player2\r\n self.__player_names[player1] = 'Human'\r\n self.__player_names[player2] = 'Bot'\r\n self.__state_machine = state_machine",
"def __init__(self, skillName, maxLevel, creditStart, creditIncrement):\r\n self.name = skillName\r\n self.maxLevel = maxLevel\r\n self.info = None\r\n self.startCredit = creditStart\r\n self.creditIncrement = creditIncrement",
"def __init__(self):\r\n self.skills = {}\r\n self.orderedSkills = []",
"def set_plays(self) -> None:\n player1 = self._get_input('What is the name of player 1?')\n player2 = self._get_input('What is the name of player 2?')\n self.state = State(player1, player2)",
"def _starting_up():\n global ws, skill_reload_thread, event_scheduler\n\n ws.on('intent_failure', FallbackSkill.make_intent_failure_handler(ws))\n\n # Create skill_manager listener and invoke the first time\n ws.on('skill_manager', skills_manager)\n ws.on('mycroft.internet.connected', install_default_skills)\n ws.emit(Message('skill_manager', {}))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n\n PadatiousService(ws)\n IntentService(ws)\n event_scheduler = EventScheduler(ws)\n # Create a thread that monitors the loaded skills, looking for updates\n skill_reload_thread = WatchSkills()\n skill_reload_thread.daemon = True\n skill_reload_thread.start()\n\n # Wait until skills have been loaded once before starting to check\n # network connection\n skill_reload_thread.wait_loaded_priority()\n check_connection()",
"def __init__(self, player_id, difficulty_level):\n self.player_id = player_id\n self.difficulty_level = difficulty_level\n global our_player\n our_player = player_id",
"def __init__(self, name=\"Player\", resources=[0,0,0,0,0,0,0,0], xor_resources=None,\\\n current_hand=None, structures=None, starting_gold=3, discounted_resources=None):\n if structures != None:\n self.structures = structures # by type? Should we have a structure type? \n else:\n self.structures = []\n \n self.name = name\n self.wonders = None \n player.west_natural= False\n player.west_manufactured = False\n player.east_natural= False\n player.east_manufactured= False\n\n if current_hand == None:\n self.current_hand = None\n else:\n self.current_hand = current_hand #I dont know if we need this\n self.starting_gold = starting_gold",
"def __init__(self, player):\n\t\tself.player = player",
"def __init__(self, player):\n self.player = player",
"def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass",
"def awake_init(\n cls, player_index: int, game_roles: list[Role], original_roles: tuple[Role, ...]\n ) -> Mason:\n del game_roles\n is_user = const.IS_USER[player_index]\n mason_indices = util.find_all_player_indices(original_roles, Role.MASON)\n logger.debug(f\"[Hidden] Masons are at indices: {list(mason_indices)}\")\n if is_user:\n logger.info(\n f\"Masons are players: {list(mason_indices)} \"\n f\"(You are player {player_index})\",\n cache=True,\n )\n return cls(player_index, mason_indices)",
"def _initialize_player_stats(self):\r\n self.reset()\r\n for x in range(self.num_players):\r\n self.player_draws[f'{x}']=[]\r\n self.player_points[f'{x}']= 0",
"def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)",
"def __init__(self, players):\n\n self._players = players\n self._game = None",
"def create_player(player: Player) -> None:\n with engine.connect() as conn:\n\n conn.execute(\n player_table.insert().values(\n steamid=player.steamid,\n level=player.level,\n xp=player.xp,\n credits=player.credits,\n )\n )\n\n skills = list(player.skills)\n result = conn.execute(\n skill_table.insert().values([\n {\n 'key': skill.key,\n 'level': skill.level,\n 'steamid': player.steamid,\n }\n for skill in skills\n ])\n )\n\n for id, skill in zip(result.inserted_primary_key, skills):\n skill._db_id = id",
"def setup(self):\n # inicializamos el juego\n\n # Sprite lists\n self.player_list = arcade.SpriteList() # sera lista de personajes\n self.coin_list = arcade.SpriteList() # sera lista de monedas\n self.bullet_list = arcade.SpriteList() # lista de disparos\n\n # Set up the player\n self.score = 0\n\n # Image from kenney.nl\n # cargamos el sprite del jugador\n self.player_sprite = arcade.Sprite(\"character.png\", SPRITE_SCALING_PLAYER)\n # establecemos el inicio de posicion de nuestro jugador\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 70\n # lo agregamos a la lista de nuestros jugadores\n self.player_list.append(self.player_sprite)\n\n # Create the coins\n for i in range(COIN_COUNT):\n\n # Create the coin instance\n # Coin image from kenney.nl\n # cargamos las monedas\n coin = arcade.Sprite(\"coin_01.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(120, SCREEN_HEIGHT)\n\n # Add the coin to the lists\n # lo agregamos a la lista\n self.coin_list.append(coin)\n\n # Set the background color\n # esto aun nose para que sirve\n arcade.set_background_color(arcade.color.AMAZON)",
"def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()",
"def init2(self):\n self.skill_points = self.count_skill_points()\n self.count_saves()\n self.lives = self.count_lives()\n self.base_attack = fetch_data.get_base_attack(self.BASE_ATTACK_LVLS, self.lvl)",
"def __init__(self):\n\n self.score = 0\n self.game_over = False\n\n # Create sprite lists\n self.all_sprites_list = pygame.sprite.Group()\n\n # Create the player\n self.player = Player(5, 5)\n self.all_sprites_list.add(self.player)",
"def init_player(self, exploration_id, expected_title, expected_response):\n exp_services.delete_demo(exploration_id)\n exp_services.load_demo(exploration_id)\n\n self.EXP_ID = exploration_id\n\n reader_dict = self.get_json(\n '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID))\n\n self.last_state_name = reader_dict['exploration']['init_state_name']\n init_state_data = (\n reader_dict['exploration']['states'][self.last_state_name])\n init_content = init_state_data['content'][0]['value']\n\n self.assertRegexpMatches(init_content, expected_response)\n self.assertEqual(reader_dict['exploration']['title'], expected_title)"
]
| [
"0.66441405",
"0.62616307",
"0.6103922",
"0.60704607",
"0.5984646",
"0.5974035",
"0.5972956",
"0.5920967",
"0.59191483",
"0.5893052",
"0.58330375",
"0.58031964",
"0.57984424",
"0.57871443",
"0.5775477",
"0.5772137",
"0.5760124",
"0.57090324",
"0.569261",
"0.56771094",
"0.5668261",
"0.56554383",
"0.565391",
"0.5621087",
"0.5610375",
"0.5606049",
"0.55980456",
"0.5591874",
"0.5591357",
"0.55623287"
]
| 0.72202 | 0 |
Initialize player with his picks. | def __initPicks(self):
picks = self.teamparser.getPlayerPicks()
try:
self.picks = picks[(self.team, self.position)] #players picks
except KeyError, err:
self.picks = []
raise TypeError, "Invalid Team/Position: " + self.team | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)",
"def __init__(self, player):\n\t\tself.player = player",
"def __init__(self, player):\n self.player = player",
"def __init__(self, players):\n\n self._players = players\n self._current_player = players.get()",
"def _initialize_player_stats(self):\r\n self.reset()\r\n for x in range(self.num_players):\r\n self.player_draws[f'{x}']=[]\r\n self.player_points[f'{x}']= 0",
"def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None",
"def __init__(self, player_id = 0):\n all_players = ['adiumy', 'amanda', 'beastie', 'emule', 'gavroche', 'hexley', 'kiki', 'konqi', 'nolok', 'pidgin', 'puffy', 'sara_the_racer', 'sara_the_wizard', 'suzanne', 'tux', 'wilber', 'xue']\n self.kart = all_players[np.random.choice(len(all_players))]",
"def __init__(self, min_player_count):\n self.min_player_count = min_player_count",
"def __init__(self, player1, player2, state_machine, restore = False):\r\n super().__init__()\r\n self.__players[0] = player1\r\n self.__players[1] = player2\r\n self.__player_names[player1] = 'Human'\r\n self.__player_names[player2] = 'Bot'\r\n self.__state_machine = state_machine",
"def initialize_players():\n return [Player(name, Hand([]), 0) for name in PLAYER_NAMES]",
"def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))",
"def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)",
"def setUp(self):\n self.player = Player()",
"def __init__(self, players):\n\n self._players = players\n self._game = None",
"def set_plays(self) -> None:\n player1 = self._get_input('What is the name of player 1?')\n player2 = self._get_input('What is the name of player 2?')\n self.state = State(player1, player2)",
"def __init__(self):\n self.played_pos = []\n self.grid = [['-', '-', '-'],\n ['-', '-', '-'],\n ['-', '-', '-']]\n self.player_played_pos = {'p1': set(), 'p2': set()}",
"def init_players(self):\n self.spaceships.append(self.player)\n SoundManager.add_sfx(\n self.player.states['exploded']['sfx'],\n self.player\n )",
"def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()",
"def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()",
"def __init__(self):\n \n self.player = None\n self.brick_list = None\n self.coin_list = None\n self.physics_engine = None\n self.num_coins = None",
"def __init__(self):\n\n self.name = 'KuhnPoker'\n self.num_players = 2",
"def __init__(self, player1AI = False, player2AI = False):\n\t\tself.tick = 0\n\t\tself.player1AI = player1AI\n\t\tself.player2AI = player2AI\n\t\tself.selectionIndex = [0, 0]\n\t\tself.colours = [\"#ff6363\", \"#ffc163\", \"#88de68\", \"#63c6ff\", \"#ffffff\", \"#000000\"]\n\t\tself.playerReady = [False, False]\n\t\tself.playerKeys = {0: [\"W\", \"S\", \"SPACE\"], 1: [\"⭡\", \"⭣\", \"ENTER\"]}\n\t\tself.timeSinceReady = 0\n\t\tself.headings = [\n\t\t\t\"Player 1:\" if not self.player1AI else \"Computer:\",\n\t\t\t\"Player 2:\" if not self.player2AI else \"Computer:\"\n\t\t]\n\t\tself.itemSpacing = 0.15",
"def __init__(self, player_id, difficulty_level):\n self.player_id = player_id\n self.difficulty_level = difficulty_level\n global our_player\n our_player = player_id",
"def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9",
"def initGame(self):\n self.map = {}\n self.blocks = Group()\n self.Coins =Group()\n self.players = Group()\n self.player1 = Player(1525,75,2)\n self.players.add(self.player1)\n if self.playernum == 2:\n self.player2 = Player(75,825,1)\n self.players.add(self.player2)\n else:\n self.player2 = False",
"def __init__(self):\n\n self.score = 0\n self.game_over = False\n\n # Create sprite lists\n self.all_sprites_list = pygame.sprite.Group()\n\n # Create the player\n self.player = Player(5, 5)\n self.all_sprites_list.add(self.player)",
"def __init__(self, players, piles=None):\n self.players = players\n self.piles = piles if (piles != None) else [5, 5, 5, 5]\n self.nplayer = 1 # player 1 starts.",
"def __init__(self, player_control, players=None):\r\n self.player_control = player_control\r\n self.players = {} # copy for restoration\r\n if players is not None:\r\n for player in players.values():\r\n self.add_player(player)",
"def __init__(self):\n self.__grid = create_grid(\n Settings.SIZE_X, Settings.SIZE_Y, MarkerType.NONE)\n\n self.__turn = 0\n self.__state = GameState.PLAYING\n self.__winner = MarkerType.NONE\n self.__loser = MarkerType.NONE\n\n # Separate counter for turns, because __turn depends on starting player\n self.__turns_played = 0",
"def spawn_players(self) -> None:\n # Initialise the players\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2, 50,\n self.player_lives, self.fps, self.player1_bullet, Direction.DOWN, self.debug)\n self.player2 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2,\n self.screen_height - 50, self.player_lives, self.fps, self.player2_bullet, Direction.UP,\n self.debug)\n\n # Rotate the image of the player at the top\n self.player1.rotate(180)"
]
| [
"0.6884577",
"0.6513735",
"0.6483426",
"0.64026976",
"0.63838255",
"0.63824165",
"0.63602155",
"0.6354291",
"0.6353228",
"0.6350858",
"0.6297863",
"0.62961894",
"0.62944204",
"0.62919915",
"0.624991",
"0.62254775",
"0.6221873",
"0.6197839",
"0.6139676",
"0.6126346",
"0.61250985",
"0.6124518",
"0.609222",
"0.6069332",
"0.60563034",
"0.60401434",
"0.60241616",
"0.60093534",
"0.5995952",
"0.5990125"
]
| 0.74288225 | 0 |
Set the players movement. | def setMovement(self, movement):
self.ma = movement | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def player_movement(self):",
"def _move(self, dx, dy):\n # horizontal velocity is dx, vertical velocity is dy\n self._player.set_velocity((dx, dy))",
"def move(self, p):\r\n self.position.setvalue(p)",
"def movement(self):",
"def move(self,x,y):\n self.pos.x = x\n self.pos.y = y",
"def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)",
"def set_move(self, x, y):\n self.pieces[x + (y * self.width)].set_move()",
"def setMovement(self, movement, isSpecial = False, canControl = True):\n\n vel = self.ode_body.getLinearVel()\n for i in range(len(self.direction)):\n vel[i] = self.direction[i] * movement\n\n self.ode_body.setLinearVel(vel)\n\n self.moveVal = self.direction\n self.moveSpecial = isSpecial\n self.isMove = [False, False]\n self.direction = [self.moveVal[0], self.moveVal[1]]\n\n if not canControl:\n self.knockback()\n self.moveLock(None, 9999)\n self.isKnockback = True\n \n # Play Sound\n if movement > 10:\n self.sfx['lunge'].play()",
"def move(self):\n \n self.position = self.wander()",
"def movePlayer(self,direction):\n if direction == Direction.UP:\n self.y -= 1\n elif direction == Direction.DOWN:\n self.y += 1\n elif direction == Direction.LEFT:\n self.x -= 1\n elif direction == Direction.RIGHT:\n self.x += 1",
"def set_movement(self, x, y):\r\n if x < 0 or x > 2:\r\n return False\r\n if y < 0 or y > 2:\r\n return False\r\n rotation_one = self.rotate_matrix(self.matrix)\r\n rotation_two = self.rotate_matrix(rotation_one)\r\n matrix = self.rotate_matrix(rotation_two)\r\n if matrix[x][y] == \" \":\r\n matrix[x][y] = self.player\r\n self.rotate_matrix = self.rotate_matrix(matrix)\r\n self.round_game = self.round_game + 1\r\n if self.player == self.players[0]:\r\n self.player = self.players[1]\r\n else:\r\n self.player = self.players[0]\r\n return True\r\n return False",
"def move(self):\n self.position += self.speed",
"def move_tower(self, x, y):\n self.x = x\n self.y = y\n self.menu.x = x\n self.menu.y = y\n self.menu.update()",
"def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y",
"def move(self, dt):\n dt = dt",
"def moving(self, moving):\n\n self._moving = moving",
"def update_movement(self):\n if self.way_idx < len(self.waypoints) and not self.moving_object.is_moving:\n self.moving_object.start_moving(self.waypoints[self.way_idx])\n self.way_idx += 1",
"def move(self):\n self.tick()\n self.pressed = pygame.key.get_pressed()\n\n self.player.update(self)",
"def move(self):\n \n # checks for bots nearby\n next_move = self.follow()\n \n # finds a random move if no bot\n if next_move is self.position:\n self.position = self.wander()\n else:\n self.position = next_move",
"def move(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_w]:\n self.y -= self.vel\n if keys[pygame.K_a]:\n self.x -= self.vel\n if keys[pygame.K_s]:\n self.y += self.vel\n if keys[pygame.K_d]:\n self.x += self.vel",
"def _drive_player_position(self) -> None:\n player = self._player\n if player:\n assert self.node\n assert player.node\n self.node.connectattr('torso_position', player.node, 'position')",
"def reset_movement(self):\n self.direction = [0, 0]",
"def set_player_position(self, position):\n raise NotImplementedError",
"def move(self):\n \n self.position = self.explore()",
"def move_turtle(self):\n self.forward(self.move_speed)",
"def move(self):\n pass",
"def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)",
"def setPlayerPositions(self, updates):\r\n for upd in updates:\r\n playerNode = self.playersNode.find(upd[0])\r\n playerNode.setPos(upd[1])\r\n playerNode.setHpr(upd[2])",
"def set_position(self, posicion):\n\n if self.duracion < posicion:\n return\n\n if self.duracion == 0 or posicion == 0:\n return\n\n posicion = self.duracion * posicion / 100\n\n # http://pygstdocs.berlios.de/pygst-reference/gst-constants.html\n #self.player.set_state(gst.STATE_PAUSED)\n # http://nullege.com/codes/show/src@d@b@dbr-HEAD@trunk@[email protected]/72/gst.SEEK_TYPE_SET\n #self.player.seek(\n # 1.0,\n # gst.FORMAT_TIME,\n # gst.SEEK_FLAG_FLUSH,\n # gst.SEEK_TYPE_SET,\n # posicion,\n # gst.SEEK_TYPE_SET,\n # self.duracion)\n\n # http://nullege.com/codes/show/src@c@o@congabonga-HEAD@congaplayer@congalib@[email protected]/104/gst.SEEK_FLAG_ACCURATE\n event = gst.event_new_seek(\n 1.0, gst.FORMAT_TIME,\n gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,\n gst.SEEK_TYPE_SET, posicion * 1000000000,\n gst.SEEK_TYPE_NONE, self.duracion * 1000000000)\n\n self.send_event(event)\n #self.player.set_state(gst.STATE_PLAYING)",
"def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY"
]
| [
"0.7168324",
"0.713118",
"0.69156325",
"0.68838024",
"0.6857421",
"0.6846156",
"0.67566127",
"0.6728673",
"0.66751224",
"0.6668629",
"0.6647784",
"0.66257435",
"0.6579948",
"0.6578657",
"0.65438896",
"0.65333104",
"0.6495413",
"0.64810866",
"0.646134",
"0.6450361",
"0.6438607",
"0.64373654",
"0.64351535",
"0.6431769",
"0.6386222",
"0.6382486",
"0.6375902",
"0.6364584",
"0.6356983",
"0.6350679"
]
| 0.7729088 | 0 |
Set the players strength. | def setStrength(self, strength):
self.st = strength | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strength(self, value: int):\n self._strength = value",
"def strength(self, strength):\n self._characterStrength = intToStrength[strength]\n if self._characterStrength == 'Health': self.characterHealth = 100\n elif self._characterStrength == 'Power': self.characterPower = 15\n elif self._characterStrength == 'Smarts': self.characterSmarts = 20",
"def addStrength(self):\n\t\tself.strength += 1\n\t\tif self.strength > 10:\n\t\t\tself.strength = 10",
"def update_strength(self, strength):\n\n self.strength = strength\n self.right_leg.strength = strength\n self.finite_leg.strength = strength\n self.left_leg.strength = strength",
"def setMyStrength(self):\n ispRatio = float(self.currentISP/self.myShipHull.maxISP)\n myComponents = 0\n designComponents = 0\n for position, myQuad in self.quads.iteritems():\n myComponents += len(myQuad.components)\n for position, myQuad in self.myDesign.quads.iteritems():\n designComponents += len(myQuad.components)\n\n self.strength = (ispRatio * float(myComponents)/float(designComponents))*100.0",
"def strength(self) -> float:\n ...",
"def update_strength(self, strength):\n\n self.strength = strength\n self.right_leg.strength = strength\n self.front_leg.strength = strength\n self.left_leg.strength = strength\n self.back_leg.strength = strength",
"def setAssaultStrength(self, ratio):\n assaultStrength = int(float(self.maxAssault) * ratio)\n if self.isAssault == 1:\n self.assaultStrength = assaultStrength\n else:\n self.assaultStrength = assaultStrength + self.getPersonStrength()",
"def SetStrengthThresh(self, strength):\n return _hypre.HypreBoomerAMG_SetStrengthThresh(self, strength)",
"def _update_strength(self, event=None):\n password = self.password.GetValue()\n nd = calc_password_strength(password, WEB_SPEED)\n self.strength.UpdateStrength(nd)",
"def strength(self) -> int:\n return self._strength",
"def calculate_my_win_strength(self):\n self.winStrength = self.strategy(deepcopy(self.currentBoardState))",
"def setwealth(self, w):\n self.wealth = w",
"def _update_strength(self, event):\n if self.showText.IsChecked():\n password = self.password_txt.Value\n else:\n password = self.password.Value\n \n nd = calc_password_strength(password)\n self.strength.UpdateStrength(nd)",
"def show_strength(self):\n print(self.name, \"has a strength of\", str(self.strength))",
"def add_strength(add, value):\n global STRENGTHVAL\n\n if add == True:\n STRENGTHVAL = STRENGTHVAL + value\n if STRENGTHVAL > 150:\n STRENGTHVAL = 150\n else:\n STRENGTHVAL = STRENGTHVAL - value",
"def LevelUpPlayer(self):\n self.lvl += 1\n self.skillPts += 1\n percent = 0.5\n if self.lvl > 8:\n percent = 0.45 # reduce how much xp is added once higher level\n elif self.lvl > 16:\n percent = 0.4\n elif self.lvl > 25:\n percent = 0.3\n self.xpNeeded = floor(self.xpNeeded + self.xpNeeded * percent)",
"def setDifficulty(self,n):\r\n self.difficulty = n",
"def strength(self):\n return self._characterStrength",
"def getStrength(self):\n return self.st",
"def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)",
"def setOutputStrength(self, strength):\n DPxSetDinDataOutStrength(strength)",
"def set_powers(self, power_1, power_2):\n pass",
"def set_power(self, power):\n print('Setting santec power to %.4f mW' % power)\n self.santec1.write(\"LP %.2f\" % power)\n self.santec2.write(\"LP %.2f\" % power)\n self.santec3.write(\"LP %.2f\" % power)\n self.santec4.write(\"LP %.2f\" % power)",
"def attackSpeedModifier(self):\n return 0",
"def gainLevelUp(self, statsOnly=True):\n # Gain stats\n roundUpStrength = sumRollsOver(self._baseStrength, self.levelupStrength)\n self._baseStrength += self.levelupStrength\n displayStrengthGain = int(math.floor(self.levelupStrength))\n if roundUpStrength:\n displayStrengthGain += 1\n\n roundUpDexterity = sumRollsOver(self._baseDexterity, self.levelupDexterity)\n self._baseDexterity += self.levelupDexterity\n displayDexterityGain = int(math.floor(self.levelupDexterity))\n if roundUpDexterity:\n displayDexterityGain += 1\n\n roundUpCunning = sumRollsOver(self._baseCunning, self.levelupCunning)\n self._baseCunning += self.levelupCunning\n displayCunningGain = int(math.floor(self.levelupCunning))\n if roundUpCunning:\n displayCunningGain += 1\n\n roundUpSorcery = sumRollsOver(self._baseSorcery, self.levelupSorcery)\n self._baseSorcery += self.levelupSorcery\n displaySorceryGain = int(math.floor(self.levelupSorcery))\n if roundUpSorcery:\n displaySorceryGain += 1\n\n roundUpPiety = sumRollsOver(self._basePiety, self.levelupPiety)\n self._basePiety += self.levelupPiety\n displayPietyGain = int(math.floor(self.levelupPiety))\n if roundUpPiety:\n displayPietyGain += 1\n\n roundUpConstitution = sumRollsOver(self._baseConstitution, self.levelupConstitution)\n self._baseConstitution += self.levelupConstitution\n displayConstitutionGain = int(math.floor(self.levelupConstitution))\n if roundUpConstitution:\n displayConstitutionGain += 1\n\n self._baseHP += self.levelupHP\n self._baseMP += self.levelupMP\n\n self.HP = self.totalHP\n self.MP = self.totalMP",
"def subStrength(self):\n\t\tself.strength -= 1\n\t\tif self.strength < -10:\n\t\t\tself.strength = -10",
"def set_gain(self, *args):\n return _uhd_swig.usrp_source_set_gain(self, *args)",
"async def password_strength(self, ctx, password: str):\n conv = PasswordStats(password)\n converter = conv.strength()\n if converter < 0.250:\n emoji = RED_CIRCLE\n text = \"This is a **weak** password.\"\n elif converter > 0.250 and converter < 0.500:\n emoji = ORANGE_CIRCLE\n text = \"This is an **okay** password.\"\n elif converter > 0.500 and converter < 0.750:\n emoji = YELLOW_CIRCLE\n text = \"This is a **good** password!\"\n else:\n emoji = GREEN_CIRCLE\n text = \"This is an **excellent** password!\"\n await ctx.maybe_send_embed(\n f\"**Strength rating: {round(converter * 100)}%** {emoji}\\n{cf.quote(text)}\"\n )",
"def setWeight(self, w):\n self._W = w"
]
| [
"0.7709192",
"0.7151182",
"0.7149471",
"0.69595134",
"0.6955204",
"0.69542503",
"0.68287134",
"0.67099065",
"0.65833145",
"0.6429088",
"0.6344868",
"0.62404734",
"0.6180489",
"0.61771923",
"0.60525155",
"0.602884",
"0.599069",
"0.59772205",
"0.5974587",
"0.59201777",
"0.58630383",
"0.584284",
"0.5787959",
"0.5740592",
"0.57398677",
"0.5725697",
"0.5674429",
"0.5650096",
"0.5649052",
"0.56415415"
]
| 0.7839418 | 0 |
Set the players agility. | def setAgility(self, agility):
self.ag = agility | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assign_points(players):\n pass",
"def addAgility(self):\t\n\t\tself.agility += 1\n\t\tif self.agility > 10:\n\t\t\tself.agility = 10",
"def __add_players_spawns(self):\n # Werewolves\n self.__grid[self.__werewolves_start[0]][self.__werewolves_start[1]][\"werewolves\"] \\\n = self.__number_of_beasts\n # Vampires\n self.__grid[self.__vampires_start[0]][self.__vampires_start[1]][\"vampires\"] \\\n = self.__number_of_beasts",
"def LevelUpPlayer(self):\n self.lvl += 1\n self.skillPts += 1\n percent = 0.5\n if self.lvl > 8:\n percent = 0.45 # reduce how much xp is added once higher level\n elif self.lvl > 16:\n percent = 0.4\n elif self.lvl > 25:\n percent = 0.3\n self.xpNeeded = floor(self.xpNeeded + self.xpNeeded * percent)",
"def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games",
"def set_experience(self):\n if self.__experience < 50:\n self.__experience += 1",
"def setMyShipCounts(self):\n self.retargetGoal = globals.rankMods[self.myCaptain.rank]['retarget']\n self.retargetCount = globals.rankMods[self.myCaptain.rank]['retarget']",
"def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()",
"def min_players(self):\n return 2",
"def set_game_params(self, board):\n self.board = board.copy()\n self.n_rows = len(self.board[0]) # cols number\n self.n_cols = len(self.board) # rows number\n self.fruits_ttl = min(self.n_rows,self.n_cols)+1\n player_pos = np.where(board == 1)\n rival_pos = np.where(board == 2)\n self.locations[PLAYER] = tuple(ax[0] for ax in player_pos)\n self.locations[RIVAL] = tuple(ax[0] for ax in rival_pos)\n self.turns = 0\n self.max_turns = reachables(self.board,self.locations[PLAYER])\n self.player_turns = self.max_turns // 2",
"def set_points(self):\n self.white_player.play_against(self.black_player)\n self.black_player.play_against(self.white_player)\n\n if self.ended:\n if self.winner is not None:\n self.winner.points += 1\n else:\n self.white_player.points += 0.5\n self.black_player.points += 0.5",
"def assign(self,player,hcp):\n\n # Higher hcp = higher bonus potention (max 100)\n assert hcp <= 100, 'Skill handicap cannot be >100 hcp : {0}'.format(\n hcp)\n\n if self.level is not None:\n base,bonus = RandomRoll(player,self,hcp)\n\n if base and bonus:\n self.level += random.randint(3)+1\n elif base:\n self.level += random.randint(2)",
"def setPlayerStates(self, updates):\r\n for upd in updates:\r\n print \"UPD player %s\" % upd['player']\r\n player = self.players[upd['player']]\r\n player.setStatus(upd['status'], upd['jump'], upd['charge'])\r\n\r\n player.health = upd['health']\r\n if player == self.myPlayer:\r\n #self.healthbar.setValue(self.myPlayer.health)\r\n pass",
"def point_assigner_win(self, group, player_sprites):\n if group != {}:\n for player in player_sprites:\n player.points += 1",
"def set_ability_scores(self, scores: List[int]):\n for s in range(6):\n self.dna[s] = scores[s]",
"def set_game_params(self, board):\n self.board = board\n # setting fruit's timer to the shortest edge * 2.\n numrows = len(board)\n numcols = len(board[0])\n self.fruits_timer = min(numrows, numcols) * 2\n # setting the locations of the players\n self.update_players_locations()",
"def setExperience(self, damage, enemyShip):\n experience = damage/25.0\n if experience > 0:\n self.myCaptain.addExperience(experience)\n enemyShip.myCaptain.addExperience(experience)",
"def assignRanks(self):\r\n\t\trank = 0\r\n\t\tscores = list(self._playerScores)\r\n\t\tscores.reverse()\r\n\t\tfor playerScore in scores:\r\n\t\t\tif not playerScore.has(NOT_MET) or not playerScore.value(NOT_MET):\r\n\t\t\t\trank += 1\r\n\t\t\t\tplayerScore.set(RANK, smallText(BugUtil.colorText(u\"%d\" % rank, ScoreOpt.getRankColor())))\r\n\t\tif rank > 0:\r\n\t\t\tself._anyHas[RANK] = True",
"def addExperience(self, xp):\n self.xp += xp\n if self.xp >= self.xpNeeded:\n self.LevelUpPlayer()",
"def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]",
"def min_players(self, min_players):\n\n self._min_players = min_players",
"def update_players_locations(self):\n self.loc = self.find_value(1)\n self.opponent_loc = self.find_value(2)",
"def test_assign_managing_team(self):\n pass",
"def player_update(self,p,player):\n node = self._validate(p)\n node._player = player",
"def playerdefeated(self):\n globalvalues.gameover_combat()",
"def update(self):\n for pl, result in zip(self._players, self.golf_round.doc.results):\n for score in result.scores:\n n = score.num-1\n # update net \n pl.dct_net['holes'][n] = score.gross - pl._bumps[n]\n pl.update_totals(pl.dct_net)",
"def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()",
"def reset_stats(self):\r\n self.pepes_left = self.ai_settings.pepe_limit\r\n self.score = 0\r\n self.level = 1",
"def spawn_players(self) -> None:\n #Create the player\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//(3/2), self.screen_height-50, self.player_lives, self.fps, self.player1_bullet, Direction.UP, self.debug)\n\n #Create the AI\n self.player2 = AIPlayer(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//3, self.screen_height-50, self.player_lives, self.fps, self.player2_bullet, Direction.UP, 1, True, self.debug)",
"def live(self):\n\t\t#random assignment of fittnes for now\n\t\tfor chrom in self.chromosomes:\n\t\t\tchrom.strength = random.random()\n\t\tself.chromosomes.sort(key=lambda chromosomes: chromosomes.strength, reverse = True)\n\n\t\tself.bestChromosomes = self.chromosomes[0:2]"
]
| [
"0.64619476",
"0.5974813",
"0.58572036",
"0.5838505",
"0.5767299",
"0.5742054",
"0.5728973",
"0.56840265",
"0.56714505",
"0.5633035",
"0.56120574",
"0.56076914",
"0.5600924",
"0.5550766",
"0.5505914",
"0.5502566",
"0.5472727",
"0.54698473",
"0.5447161",
"0.5443643",
"0.54399514",
"0.5431327",
"0.5427122",
"0.5399758",
"0.5385126",
"0.5368749",
"0.53593475",
"0.5335053",
"0.53322923",
"0.5317124"
]
| 0.62469506 | 1 |
Set the players armor value. | def setArmor(self, armor):
self.av = armor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_player_state(self, player):\n\n health_str = \"HP {0}/{1}\".format(int(player.health), int(player.max_health))\n self.health_label.element.text = health_str\n\n if player.armor is not None:\n armor_hp = int(player.armor.health)\n max_armor_hp = int(player.armor.max_health)\n armor_str = \"ARMOR {0}/{1}\".format(armor_hp, max_armor_hp)\n self.armor_label.element.text = armor_str",
"def __setitem__(self, item, value):\r\n debug.write(\"[SourceRPG] Assigning attribute %s with the value of %s to player %s\" % (item, value, self.name), 3)\r\n if item in self.currentAttributes:\r\n debug.write(\"Value is in current attributes, assign to the currentAttributes dict\", 4)\r\n self.currentAttributes[item] = value\r\n elif item in self.currentSkills or item in skills:\r\n debug.write(\"Value is in skills, assign to the currentSkills dict\", 4)\r\n self.currentSkills[item] = value\r\n else:\r\n debug.write(\"Value is not in any dictionary, assign to the custom playerAttributes dict\", 4)\r\n self.playerAttributes[item] = value\r\n debug.write(\"[SourceRPG] Value updated\", 3)",
"def set_hp(self, val) -> None:\n self._player_data.hp = np.clip(val, 0, self.character_data.max_hp)",
"async def set_volume(self, value: int):\n if value < self._pytheos.api.player.VOLUME_MIN:\n value = self._pytheos.api.player.VOLUME_MIN\n elif value > self._pytheos.api.player.VOLUME_MAX:\n value = self._pytheos.api.player.VOLUME_MAX\n\n await self._pytheos.api.player.set_volume(self.id, value)",
"def set_player(self, player):\n\n self._player = player",
"def set_motor(name, value):\n assert type(name) is str, \"Type Mismatch: Must pass in a string to name.\"\n assert type(value) is int or type(name) is float, \"Type Mismatch: Must pass in an integer or float to value.\"\n assert value <= 100 and value >= -100, \"Motor value must be a decimal between -100 and 100 inclusive.\"\n name_to_value = mc.get('motor_values')\n try:\n name_to_value[name] = value\n mc.set('motor_values', name_to_value)\n except KeyError:\n raise KeyError(\"No motor with that name\")",
"def assignValue(self,value):\n self.itemset(value)",
"def assignValue(self,value):\n self.itemset(value)",
"def set_volume(self, value):\n utils.set_volume(self.config[\"alsa\"][\"card\"], value) # Sets the actual volume level\n\n if value == 0:\n mode = \"muted\"\n elif value <= 25:\n mode = \"low\"\n elif value <= 75:\n mode = \"medium\"\n else:\n mode = \"high\"\n \n icon = utils.get_volume_icon(mode)\n self.settings_window.volume_label.setPixmap(icon)",
"def reward(self, value):\n self._custom_setter('reward', value)",
"def _set_volume(self, value):\n if self.player:\n vol = 100 if abs(value) >= 1.0 else 100 * abs(value)\n self.player.audio_set_volume(int(vol))",
"def adjust_volume(self, value):\n logger.debug('adjusting volume')\n volume_command = str('amixer set PCM -- ' + str(value) + '%')\n os.system(volume_command)",
"def reduce_armor(self, amount):\n super().reduce_armor(amount)\n if self.armor <=0 and self.is_true_king:\n terminators_win()",
"def hk_armor(self):\n self.name = \"Holy Knight's Armor\"\n self.rarity = \"Common\"\n self.pdef_value = 40\n self.mdef_value = 10\n self.increase_crit = 0\n self.desc = \"Armor of the Holy Guard, you feel the light flowing.\"",
"def set_player(self, new_player):\n self.player = new_player",
"def equip_armor(self, armor):\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == armor:\n\t\t\t\tself.equippedArmor = key\n\t\t\t\treturn True\n\t\treturn False",
"def set_value(self,x):\n self._value = x",
"def set_value(self,x):\n self._value = x",
"def setExperience(self, damage, enemyShip):\n experience = damage/25.0\n if experience > 0:\n self.myCaptain.addExperience(experience)\n enemyShip.myCaptain.addExperience(experience)",
"def set_player(self, player_params):\n\n self.saved_player = player_params",
"def assign(self,player,hcp):\n\n # Higher hcp = higher bonus potention (max 100)\n assert hcp <= 100, 'Skill handicap cannot be >100 hcp : {0}'.format(\n hcp)\n\n if self.level is not None:\n base,bonus = RandomRoll(player,self,hcp)\n\n if base and bonus:\n self.level += random.randint(3)+1\n elif base:\n self.level += random.randint(2)",
"async def set_player(self, player: Optional[andesite.Player]) -> None:\n ...",
"def ammo(player, arg):\n global database\n table = database.Tables.players\n upd = table.update(None).where(table.c.id == player['id']).values(ammo=table.c.ammo+(int(arg)))\n database.conn.execute(upd)",
"def level(self, value):\n self._level = mdraid.RAID_levels.raidLevel(value) # pylint: disable=attribute-defined-outside-init",
"def set_value (self):\n raise NotImplementedError",
"def strength(self, value: int):\n self._strength = value",
"def change_volume(value):\n\t\n\tprint('received val:', value)\n\t\n\tcommand = ['amixer', '--card', '1', 'set', 'Speaker', value]\t\n\tsubprocess.Popen(command)",
"def set_ai(self, value):\n if value not in (\"easy\", \"normal\", \"advanced\"):\n raise SettingsError(\"Invalid choice\")\n self._parser.set(\"settings\", \"difficulty\", value)\n self._save()",
"def changeValue(situation , valueToPlay, player):\r\n situation[valueToPlay[0]][valueToPlay[1]] = Player.get_spec(player)\r\n return situation",
"def combat_player_score(self, combat_player_score):\n\n self._combat_player_score = combat_player_score"
]
| [
"0.64061564",
"0.59360194",
"0.59290016",
"0.56689304",
"0.5441629",
"0.54194176",
"0.5381563",
"0.5381563",
"0.5374806",
"0.53710526",
"0.53614014",
"0.53501886",
"0.5346353",
"0.5327724",
"0.53240293",
"0.5308827",
"0.53075",
"0.53075",
"0.5305588",
"0.5303419",
"0.5276931",
"0.5263106",
"0.5262067",
"0.5261887",
"0.52451074",
"0.52323323",
"0.5210263",
"0.5192416",
"0.5188895",
"0.51815164"
]
| 0.78425336 | 0 |
> int Returns the player's costs. | def getCosts(self):
return self.costs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cost(self) -> float:",
"def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost",
"def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost",
"def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs",
"def revenue(tips, n_players, price_per_player):\n\n total_revenue = tips + (n_players * price_per_player)\n return total_revenue",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp",
"def cost(self):\n return self._cost",
"def cost(self):\n return self._cost",
"def _cost_petrol(self):\n return self.distance * self.petrol_usage * self.petrol_cost",
"def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score",
"def get_expected_cost(self):",
"def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0",
"def pay_costs(self):\r\n cost = self.cost\r\n if cost:\r\n self.owner.player.char_ob.pay_money(cost)\r\n self.owner.player.msg(\"You pay %s coins for the event.\" % cost)",
"def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total",
"def getCost(self):\n\n return self.cost",
"def getProduction(self, playerID):\n prod=0\n for p in self.__camps:\n if( p.getOwner() == playerID ):\n prod = prod + p.getGrowthrate()\n return prod",
"def getCost(self):\n return self._cost",
"def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats",
"def cost(self):\n\n return self._cost",
"def determineAmountToCall(self, player):\n\t\treturn sum(self.currentBet) - sum(player.betAmount)",
"def cost(self):\n\t\treturn self.g + self.h",
"def CalcCostForTurn(self):\r\n costsThisTurn = 0\r\n \r\n inventoryStorageCost = self.currentStock * STORAGE_COST_PER_UNIT\r\n backorderPenaltyCost = self.currentOrders * BACKORDER_PENALTY_COST_PER_UNIT\r\n \r\n costsThisTurn = inventoryStorageCost + backorderPenaltyCost\r\n \r\n return costsThisTurn",
"def calculate_total_cost(state):\r\n return state.cost()",
"def cost(self):\n cost = 0\n for battery in self.grid.batteries:\n for house in battery.connections:\n cost += house.distances[house.connection] * 9\n cost += battery.cost\n return cost",
"def GOAL_TOTAL() -> int:\n return 21",
"def get_cost(self) -> float:\n return math.e / self.fitness",
"def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores",
"def inventory_value(self):\n cost = 0\n for bike in self.inventory:\n cost = cost + bike.total_cost()\n return cost",
"def test_get_player_sum(self):\n self.assertIsInstance(network.get_player_sum(), int)"
]
| [
"0.715212",
"0.6709893",
"0.6695885",
"0.669135",
"0.6688659",
"0.6661801",
"0.6611084",
"0.6575354",
"0.6575354",
"0.6538438",
"0.6503868",
"0.64907724",
"0.64563376",
"0.6455219",
"0.64488846",
"0.6423245",
"0.6419398",
"0.64004976",
"0.6389483",
"0.63875544",
"0.637959",
"0.62862635",
"0.6273402",
"0.62688905",
"0.6260982",
"0.6254459",
"0.6211051",
"0.61918646",
"0.6153873",
"0.6150319"
]
| 0.6873076 | 1 |
> list Returns a list of all player skills. See pyBBSkill.BBSkill. | def getSkills(self):
return self.skills | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_skill_list(self):\n return [\n i.strip() for i in\n self.ansi_escape.sub('', check_output([BIN, 'list'])).split('\\n')\n ]",
"def data_skill_list(self):\n data_skill_list = []\n for skill in self.data_skill:\n if 'name' in skill.keys():\n data_skill_list.append(skill['name'])\n return data_skill_list",
"def all_skill_list(self):\n data_skill_list = self.data_skill_list()\n self.skill_list = []\n for i in range(len(self.data_profile)):\n if 'skills' in self.data_profile[i].keys():\n for j in range(len(self.data_profile[i]['skills'])):\n for skills in self.data_profile[i]['skills'][j]['skills']:\n if skills['title'] in data_skill_list:\n self.skill_list.append(skills['title'])\n return",
"def get_skills(self):\n return self.skills[:]",
"def skills():\n with app.app_context():\n results = Skill.query.all()\n return SkillsResponse(skills=results).json(), 200",
"def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]",
"def getSkills(self,number):\n skills = ['java','javascript','nodejs','css','scss','angular',\n 'express','sql','mongodb','spark','python','opencv',\n 'native-script','reactjs','backbone-js','docker','unix']\n returnSkills = []\n\n if number< skills.__len__():\n for item in range(0,number):\n tempSkill = skills[randint(0,skills.__len__()-1)]\n if tempSkill not in returnSkills:\n returnSkills.append(tempSkill)\n else:\n continue\n return returnSkills\n else:\n return skills",
"def skills(self):\n if \"skills\" in self._prop_dict:\n return self._prop_dict[\"skills\"]\n else:\n return None",
"async def all_skills_data(self) -> AllSkillsData:\n return AllSkillsData(**await self.get(\"/skill/all\"))",
"def fetch_skills(self):\r\n\r\n noun_chunks = self.doc.noun_chunks\r\n nlp_text = self.doc\r\n\r\n # removing stop words and implementing word tokenization\r\n tokens = [token.text for token in nlp_text if not token.is_stop]\r\n\r\n data = pd.read_csv(\"skills.csv\") # reading the csv file\r\n skills = list(data.columns.values) # extract values into a lis\r\n skillset = [] # store final skills here\r\n\r\n # check for one-grams (example: python)\r\n for token in tokens:\r\n if token.lower() in skills:\r\n skillset.append(token)\r\n\r\n # check for bi-grams and tri-grams (example: machine learning)\r\n for token in noun_chunks:\r\n token = token.text.lower().strip()\r\n if token in skills:\r\n skillset.append(token)\r\n\r\n return [i.capitalize() for i in set([i.lower() for i in skillset])]",
"def __initSkills(self):\n skills = self.teamparser.getPlayerSkills()\n try:\n skills = skills[(self.team, self.position)] #initial skills\n except KeyError, err:\n skills = []\n raise TypeError, \"Invalid Team/Position: \" + self.team\n for skill in skills:\n skobj = pyBBSkill.BBSkill(skill, self.skillparser)\n self.skills.append(skobj)",
"def top_skill_list(self):\n data_skill_list = self.data_skill_list()\n self.skill_list = []\n for i in range(len(self.data_profile)):\n if 'skills' in self.data_profile[i].keys():\n if self.data_profile[i]['skills'][0]['title'] == 'Top Skills':\n for skills in self.data_profile[i]['skills'][0]['skills']:\n if skills['title'] in data_skill_list:\n self.skill_list.append(skills['title'])\n return",
"def _read_skills(self, root):\n\n output_list = []\n for _, value in enumerate(root[0][6]):\n output_list.append(Skill(value))\n\n # check if sinnenschaerfe is part of skills\n if output_list[-1].name == \"Sinnenschärfe\":\n output_list = self._add_sinnenschaerfe(output_list)\n\n return output_list",
"def endpoint_skills():\n q = \"\"\"\n SELECT ?localName\n WHERE { ?entity rdfs:subClassOf* cogrobtut:Skill .\n\t bind( strafter(str(?entity), \"#\") as ?localName) .\n }\n \"\"\"\n res = utils.kb.query(q, initNs=utils.namespaces)\n res_rows = [x for x in res]\n individuals=[]\n for row in res_rows:\n for elem in row:\n individuals.append(elem)\n return jsonify({\"result\" : individuals})",
"def load_skill_list(skills_to_load):\n if exists(SKILLS_DIR):\n # checking skills dir and getting all priority skills there\n skill_list = [folder for folder in filter(\n lambda x: os.path.isdir(os.path.join(SKILLS_DIR, x)),\n os.listdir(SKILLS_DIR)) if folder in skills_to_load]\n for skill_folder in skill_list:\n skill = {\"id\": hash(os.path.join(SKILLS_DIR, skill_folder))}\n skill[\"path\"] = os.path.join(SKILLS_DIR, skill_folder)\n # checking if is a skill\n if not MainModule + \".py\" in os.listdir(skill[\"path\"]):\n continue\n # getting the newest modified date of skill\n last_mod = _get_last_modified_date(skill[\"path\"])\n skill[\"last_modified\"] = last_mod\n # loading skill\n skill[\"loaded\"] = True\n skill[\"instance\"] = load_skill(\n create_skill_descriptor(skill[\"path\"]),\n ws, skill[\"id\"])\n loaded_skills[skill_folder] = skill",
"def applicants_skills(driver):\n try:\n raw_skills = driver.find_elements_by_css_selector(\"span.pill\")\n skills = [skill.text for skill in raw_skills] \n return skills\n except Exception as e:\n print(\"error acquiring applicant skills\")\n print(e)\n return []",
"def _read_special_skills(root):\n output_list = []\n for _, value in enumerate(root[0][4]):\n output_list.append(SpecialSkill(value))\n return output_list",
"async def get_all_skill_scores(idol_skill_type, card_rarity):\n skill_types = {\"rap\": 0, \"dance\": 1, \"vocal\": 2}\n\n all_skills = [0, 0, 0]\n all_skills[skill_types.get(idol_skill_type)] = await Gacha.random_skill_score(card_rarity)\n return all_skills",
"def getSkill(userId, skill=-1) -> list:\n # fetch user\n try:\n user = fetchUser(userId=userId)\n except:\n user = []\n\n skill_temp = -1\n # get skills if user is found\n if (len(user) != 0):\n for u in user:\n if (skill != -1):\n for entry in u[\"skills\"]:\n if (skill == entry[\"id\"]):\n skill_temp = entry\n if (skill_temp == -1):\n return \"No such skill exist for the given user\"\n else:\n return skill_temp\n else:\n skill_temp = u[\"skills\"]\n for i in skill_temp:\n name = getSkillName(i['id'])\n i['name'] = name\n return skill_temp",
"def display_skill(self):\n return ', '.join([skill.name for skill in self.skill.all()[:3]])",
"def skill(self):\n return self._get(\"skill\")",
"def getPlayerList(self):\n return(self.playerList)",
"def candidate_skills(self, source_object: Dict) -> CandidateSkillYielder:\n pass",
"def loadallskills(self):\r\n for skill in os.listdir( os.path.join( es.getAddonPath( info.basename ), \"skills\" )):\r\n es.load(\"%s/skills/%s\" % (info.basename, skill))",
"def add_skill(skill_list, skill): #inputs the skill dictionary and skill\r\n\tif skill==\"Gun Combat\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in guns:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(guns)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(guns)\r\n\telif skill in [\"Blade Combat\", \"Blade Cbt\"]:\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in melee:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(melee)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(melee)\r\n\telif skill==\"Vehicle\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in vehicles:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\telse:\r\n\t\t\t\tskill=stellagama.random_choice(vehicles)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(vehicles)\r\n\tif skill in skill_list:\r\n\t\tskill_list[skill] += 1\r\n\telif skill not in skill_list:\r\n\t\tskill_list[skill] = 1\r\n\treturn skill_list #outputs the skill dictionary\r",
"def get_skill_levels(self):\n return self.model_class.objects.filter(enforced=self.enforced).order_by('-gte')",
"def __init__(self):\r\n self.skills = {}\r\n self.orderedSkills = []",
"def get_skills(username):\n root_categories = Skill.query.filter_by(root=True).all()\n skill_models = []\n for root in root_categories:\n skill_model = database_controller.build_subcategories(username, root.path)\n if skill_model:\n skill_models.append(skill_model)\n return skill_models",
"def make_raw_skill_sets(skills):\n\n possible_sizes_in_list_of_things = range(4)\n basic_skills = [list(combinations_of_a_certain_size)\n for each_possible_size_of_combination in possible_sizes_in_list_of_things\n for combinations_of_a_certain_size in itertools.combinations(skills,\n each_possible_size_of_combination)]\n\n return basic_skills",
"def insert_skills(cursor):\n # Get the class of every skill\n skills_classes = dict()\n with open(CLASSES_PATH, encoding='UTF-8') as classes_file:\n classes_dict = ujson.load(classes_file)\n for class_id, _class in classes_dict.items():\n class_skills = _class.get(\"skills\", list())\n for class_skill in class_skills:\n skills_classes[class_skill.lower()] = class_id\n\n with open(SKILLS_PATH, encoding='UTF-8') as skills_file:\n skills_dict = ujson.load(skills_file)\n skills = list()\n # Get list of sorted skills\n sorted_skills_ids = list()\n for skill_id, skill in skills_dict.items():\n if skill_id:\n sorted_skills_ids.append((skill_id, int(skill.get(\"id\", 0))))\n else:\n sorted_skills_ids.append((skill_id, 0))\n sorted_skills_ids.sort(key=lambda tup: tup[1])\n # Start processing them\n for skill_id, _ in sorted_skills_ids:\n skill = skills_dict[skill_id]\n skill_info = list()\n # Get Skill Id\n skill_info.append(int(get_value(skill, \"Skill\", \"id\", str)))\n # Get Skill Name\n skill_info.append(get_value(skill, \"Skill\", \"name\", str))\n # Get Skill Identifier\n identifier = get_value(skill, \"Skill\", \"ident\", str).lower()\n skill_info.append(identifier)\n # Get Skill Icon\n skill_info.append(format_icon(get_value(skill, \"Skill\", \"icon\", str)))\n # Get Skill Circle\n skill_info.append(int(get_value(skill, \"Skill\", \"circle\", str)))\n # Get Skill Rank Level\n skill_info.append(int(get_value(skill, \"Skill\", \"rankLevel\", str)))\n # Get Skill Max Level\n skill_info.append(int(get_value(skill, \"Skill\", \"maxLevel\", str)))\n # Get Skill Video\n skill_info.append(get_value(skill, \"Skill\", \"video\", str))\n # Get Skill Desc\n skill_info.append(get_value(skill, \"Skill\", \"desc\", str))\n # Get Skill Details\n skill_info.append(get_value(skill, \"Skill\", \"desc2\", str))\n # Get Skill Type 1\n skill_info.append(get_value(skill, \"Skill\", \"type1\", str))\n # Get Skill Type 2\n skill_info.append(get_value(skill, \"Skill\", \"type2\", str))\n # Get Skill Cooldown\n skill_info.append(get_value(skill, \"Skill\", \"cooldown\", int))\n # Get Skill Element\n skill_info.append(get_value(skill, \"Skill\", \"element\", str))\n # Get Skill Required Stance\n skill_info.append(get_value(skill, \"Skill\", \"reqStance\", str))\n # Get Skill Level List\n skill_info.append(ujson.dumps(get_value(skill, \"Skill\", \"levelList\", dict)))\n # Get Skill Use Overheat\n skill_info.append(get_value(skill, \"Skill\", \"useOverHeat\", int))\n # Get Skill Class\n skill_info.append(get_skill_class(cursor, skills_classes.get(identifier, '')))\n\n\n skills.append(tuple(skill_info))\n\n skills = tuple(skills)\n\n cursor.executemany(\"INSERT INTO skills (id, name, identifier, icon, circle, rank_level, max_level, video, \"\n \"desc, details, type1, type2, cooldown, element, req_stance, level_list, use_overheat, \"\n \"class) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", skills)"
]
| [
"0.77789354",
"0.72201234",
"0.7200818",
"0.7170593",
"0.7167288",
"0.7144829",
"0.7065746",
"0.6862136",
"0.6775424",
"0.66222405",
"0.65763843",
"0.6495369",
"0.6487568",
"0.645083",
"0.64182407",
"0.6328377",
"0.61917514",
"0.61423326",
"0.60695565",
"0.6047104",
"0.5932783",
"0.5866777",
"0.5826694",
"0.58261",
"0.58142227",
"0.5781824",
"0.57536834",
"0.5751738",
"0.55772245",
"0.55765533"
]
| 0.7546291 | 1 |
Add a number of touchdowns. | def addTouchdowns(self, number):
self.touchdowns += number
self.ssp += (number * __SSP_TOUCHDOWN__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def multi_touch(self, points):\n if len(points) < 2:\n raise EnvironmentError(\"Need at least 2 points\")\n\n self.android_device_driver.send_motion_event(points[0],\n MotionEvent.ACTION_DOWN)\n\n medium_points = points[1:-1]\n for point in medium_points:\n self.android_device_driver.send_motion_event(\n point, MotionEvent.ACTION_POINTER_DOWN)\n\n self.android_device_driver.send_motion_event(points[-1],\n MotionEvent.ACTION_POINTER_UP)",
"def getTouchdowns(self):\n return self.touchdowns",
"def fingersUp(self):\n if self.results.multi_hand_landmarks:\n myHandType = self.handType()\n fingers = []\n # Thumb\n if myHandType == \"Right\":\n if self.lmList[self.tipIds[0]][0] > self.lmList[self.tipIds[0] - 1][0]:\n fingers.append(1)\n else:\n fingers.append(0)\n else:\n if self.lmList[self.tipIds[0]][0] < self.lmList[self.tipIds[0] - 1][0]:\n fingers.append(1)\n else:\n fingers.append(0)\n\n # 4 Fingers\n for id in range(1, 5):\n if self.lmList[self.tipIds[id]][1] < self.lmList[self.tipIds[id] - 2][1]:\n fingers.append(1)\n else:\n fingers.append(0)\n return fingers",
"def rush_touchdowns(self):\n return self._rush_touchdowns",
"def get_fingers_up(self, num_list):\n finger_list = []\n for num in num_list:\n finger_list.append(self._finger[num])\n return finger_list",
"def multiTouch(*args, gestures: bool=True, trackpad: Union[int, bool]=0, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def pass_touchdowns(self):\n return self._pass_touchdowns",
"def up(self, i):\n pass",
"def touches(a, b, **kwargs):\n return lib.touches(a, b, **kwargs)",
"def touch_began(self, touch):\n\t\tpass",
"def maxTouchPoints(self):\n return 1",
"def touches((u,v)):\r\n return ((u,v), (u,v-1), (u-1,v-1), (u-1,v))",
"def touch(v, times=1, **kwargs):\n if isinstance(v, Template):\n pos = loop_find(v, timeout=ST.FIND_TIMEOUT)\n else:\n try_log_screen()\n pos = v\n for _ in range(times):\n G.DEVICE.touch(pos, **kwargs)\n time.sleep(0.05)\n delay_after_operation()\n return pos",
"def button_touch_down(self, button, touch):\n if button.collide_point(*touch.pos):\n self.select_with_touch(button, touch)",
"def swipe_down(self):\n self.swipe_sub(SWIPE_MATRIX[1])",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def button_touch_up(self, button, touch):\n if not button.collide_point(*touch.pos) or self.touch_multiselect:\n self.deselect_node(button)",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def controlDown(*args):",
"def swipe_up(self):\n self.swipe_sub(SWIPE_MATRIX[0])",
"def on_touch_down(self, touch):\n if not self.destroyed:\n if self.collide_point(touch.x, touch.y):\n Scatter.on_touch_down(self, touch)\n\n if touch.is_double_tap and self.support != \"tablette\":\n if self.zoom_mode:\n self.size = (120,120)\n self.zoom_mode = False\n else:\n self.size = (400, 400)\n self.zoom_mode = True\n self.remove_widget(self.label_question)\n self.remove_widget(self.label_text)\n self.label_text_pos = [5, 5 + self.size[1] / 5]\n self.label_text_size = [self.size[0] - 10, self.size[1] - 30 - 2 * self.size[1] / 5]\n self.label_question_pos = [5, self.size[1] - self.size[1] / 5 - 25]\n self.label_question_size = [self.size[0] - 10, 20]\n\n self.label_question = Label(text=self.text_type, text_size=self.label_question_size,\n pos=self.label_question_pos, halign=\"left\", valign='top',\n size=self.label_question_size)\n self.label_text = Label(text=self.text, text_size=self.label_text_size, pos=self.label_text_pos,\n valign='top', halign=\"left\", size_hint_y=None, multiline=True,\n size=self.label_text_size)\n self.add_widget(self.label_question)\n self.add_widget(self.label_text)"
]
| [
"0.6410019",
"0.60803634",
"0.5922031",
"0.5838873",
"0.57827616",
"0.5774716",
"0.5716002",
"0.5539994",
"0.5525225",
"0.53205305",
"0.524736",
"0.5124107",
"0.5112547",
"0.5092738",
"0.50853735",
"0.505566",
"0.49927524",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49888572",
"0.49513686",
"0.4949058"
]
| 0.8269817 | 0 |
Add a number of completions. | def addCompletions(self, number):
self.completions += number
self.ssp += (number * __SSP_COMPLETION__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def increment_gpt_completions():\n _increment_counter(\"gpt_completions\")",
"def add_many(self, count, *args, **kwargs):\n for idx in range(count):\n kw = {k: v[idx] for k, v in kwargs.items()}\n arg = () if not len(args) else args[idx]\n self.add(*arg, **kw)",
"def get_gpt_completions() -> int:\n return _get_counter(\"gpt_completions\")",
"def add_count(self):\n self.count += 1",
"def index(self, suggestions: Sequence[str]) -> None:\n for s in suggestions:\n self.data.append(s)",
"def nextNumberOfResults(self, N=10):\n self.start += self.N\n self.N = N",
"def suggestions(prefix : str = typer.Argument(...), n : int = typer.Option(5, help=\"Number of suggestions to display\")): \n response_url = url + \"/suggestions/\" + prefix + \"?suggestion_nums=\" + str(n) \n response = requests.get(response_url) \n for i in range(len(response.json())):\n typer.echo(response.json()[i])",
"def setNumberOfCompletions(self, value):\n return self._set(numberOfCompletions=value)",
"def count_tokens(self, request: Request, completions: List[Sequence]) -> int:\n return sum(len(sequence.tokens) for sequence in completions)",
"def count_tokens(self, request: Request, completions: List[Sequence]) -> int:\n return sum(len(sequence.tokens) for sequence in completions)",
"def completion() -> None:",
"def comply(self, counts):\n pass",
"def increase_count(self, number=1):\n self.count += number",
"def combine_many(*fudge):\n result = sum(fudge)\n print(result)",
"def add_gates(self, num_gates_to_add):\n pass",
"def increment_number_served(self, numbers):\n\t\tself.number_served += numbers",
"def all_completions(self, text):\n \n completions = []\n try:\n for i in xrange(sys.maxint):\n res = self.complete(text, i)\n\n if not res: break\n\n completions.append(res)\n #XXX workaround for ``notDefined.<tab>``\n except NameError:\n pass\n return completions",
"def add(self, number: int) -> None:\n self.cnt[number] = self.cnt.get(number, 0) + 1",
"def add_gt_positives(self, num_positives: int):\n self.num_gt_positives += num_positives",
"def increment(self, inc):\n self.done += inc",
"def appendsize(self, numents):\n self._numents += numents",
"def addCasualties(self, number):\n self.casualties += number\n self.ssp += (number * __SSP_CASUALTY__)",
"def add_cups(self, target_idx, cups_to_insert):\n part_a = self.cups[0 : target_idx + 1]\n part_b = self.cups[target_idx + 1 :]\n print(f\"cups: {self.cups} part_a[{part_a}], part_b[{part_b}]\")\n self.cups = part_a + cups_to_insert + part_b",
"def get_completions(self, info):\r\n pass",
"def addLikeCount(self,count):\n self.interactionCount += count\n return None",
"async def register_completions(ls: RobotFrameworkLanguageServer, *args):\n params = RegistrationParams([Registration(str(uuid.uuid4()), COMPLETION, {\"triggerCharacters\": \"[':']\"})])\n response = await ls.register_capability_async(params)\n if response is None:\n ls.show_message(\"Successfully registered completions method\")\n else:\n ls.show_message(\"Error happened during completions registration.\", MessageType.Error)",
"def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))",
"def addCounters(self,args):\n # 025 Previous implementation was: def addCounters(self,*args): # Changed in order to get the list to settings.\n for counterName in args:\n if not self.hasCounter(counterName):\n self.addCounter(counterName)",
"def index(self, suggestions: Sequence[str]) -> None:\n for s in suggestions:\n self.data.append(s)\n\n self.data.sort()",
"def add_competitors():\n while len(competitors) < MAX_COMPETITORS:\n next_competitor = raw_input(\"Enter the next competitor and confirm \" +\n \"with <ENTER>. If there are no more \" +\n \"competitors, confirm with <ENTER>\\n\")\n if len(next_competitor) == 0:\n break\n else:\n add_competitor(next_competitor)"
]
| [
"0.7036388",
"0.59186363",
"0.5836048",
"0.5596797",
"0.5527813",
"0.54738206",
"0.54519755",
"0.54369843",
"0.541016",
"0.541016",
"0.53719944",
"0.5357698",
"0.5339925",
"0.53249377",
"0.5321615",
"0.53152066",
"0.5314819",
"0.5307126",
"0.52951205",
"0.5288174",
"0.5243879",
"0.5230441",
"0.5223123",
"0.518735",
"0.5170656",
"0.51067746",
"0.5089092",
"0.5082578",
"0.5081186",
"0.5073104"
]
| 0.76030844 | 0 |
Add a number of interceptions. | def addInterceptions(self, number):
self.interceptions += number
self.ssp += (number * __SSP_INTERCEPTION__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_many(self, count, *args, **kwargs):\n for idx in range(count):\n kw = {k: v[idx] for k, v in kwargs.items()}\n arg = () if not len(args) else args[idx]\n self.add(*arg, **kw)",
"def increment_number_served(self, numbers):\n\t\tself.number_served += numbers",
"def extend(self, i):\n for x in i:\n self.add(x)",
"def add_inputs(self, inputs):\n self.inputs += inputs",
"def addLikeCount(self,count):\n self.interactionCount += count\n return None",
"def add_count(self):\n self.count += 1",
"def add_experiences(\n self,\n curr_all_info: AllBrainInfo,\n next_all_info: AllBrainInfo,\n take_action_outputs: ActionInfoOutputs,\n ) -> None:\n raise UnityTrainerException(\n \"The process_experiences method was not implemented.\"\n )",
"def incInstCount(self):\n self.instCount += 1",
"def interceptions(self):\n return self._interceptions",
"def addAll(self, *args):\n pass",
"def addAll(self,*args, **kwargs):\n pass",
"def increment_etherscan_calls():\n _increment_counter(\"etherscan_calls\")",
"def learn(self,n):\n for i in range(n):\n self.class_counts,self.feature_counts = self.em_step(self.class_counts,\n self.feature_counts)",
"def extend(self, observations, actions, rewards, next_observations, dones):\n for o, a, r, n, d in zip(observations, actions, rewards, next_observations, dones):\n self.append(o, a, r, n, d)",
"def addCounters(self,args):\n # 025 Previous implementation was: def addCounters(self,*args): # Changed in order to get the list to settings.\n for counterName in args:\n if not self.hasCounter(counterName):\n self.addCounter(counterName)",
"def __iadd__(self, n):\n return _elas.SwigPyIterator___iadd__(self, n)",
"def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories",
"def add_runs(self,runs):\n for r in runs:\n self.add(r)",
"def n_steps(self, actions):\n return [self.step(action) for action in actions]",
"def n_ins(self):\n pass",
"def increase_count(self, number=1):\n self.count += number",
"def __add__(self, n):\n return _elas.SwigPyIterator___add__(self, n)",
"def getInterceptions(self):\n return self.interceptions",
"def computeIntercepts():\n pass",
"def add_kills(self, num_kills):\n\n self.kills += num_kills",
"def add_gates(self, num_gates_to_add):\n pass",
"def extend(self, inputs: Iterable[I]):\n for inp in inputs:\n self.append(inp)",
"def addActions (self, actions) :\r\n self.action_buffer.extend(actions)",
"def add_n():\n pass",
"def add(self, iterable):\n raise NotImplementedError()"
]
| [
"0.63440615",
"0.61527723",
"0.58244026",
"0.56522334",
"0.5619547",
"0.55821234",
"0.55803514",
"0.54850674",
"0.5402",
"0.53676546",
"0.53664225",
"0.5341623",
"0.53301764",
"0.5306467",
"0.5300925",
"0.52936804",
"0.5289543",
"0.5283552",
"0.52623886",
"0.5232382",
"0.52252865",
"0.52213264",
"0.5219798",
"0.5205675",
"0.51941174",
"0.5188311",
"0.5176519",
"0.5160011",
"0.51565844",
"0.51510006"
]
| 0.8029194 | 0 |
> int Returns the number of casualties caused. | def getCasualties(self):
return self.casualties | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def occurs(self) -> int:\n return self._occurs",
"def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count",
"def addCasualties(self, number):\n self.casualties += number\n self.ssp += (number * __SSP_CASUALTY__)",
"def num_cochains(self) -> int:\n if self.__num_cochains__ is not None:\n return self.__num_cochains__\n return self.ptr.numel() + 1",
"def carn_count(self):\n return len(self.carnivores)",
"def num_conll(self):\n pass",
"def coherence(self):\r\n return np.abs(self.coherency) ** 2",
"def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence",
"def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives",
"def count(self):\n # TODO not implemented yet\n return 0",
"def num_cones(self):\n return self._shape_count(_sff.cone)",
"def get_number_of_unrelaxed_candidates(self):\n return len(self.__get_ids_of_all_unrelaxed_candidates__())",
"def constituent_count(self):\n return self._constituent_count",
"def n_time_comeback(self, chosen_class):\n return self.customer_classes[chosen_class].n_times_comeback()",
"def N(self):\n return len(self.cavity_grid.cavities) + 1",
"def hives_count(self) -> int:\n return self.hives.count()",
"def num_adversaries(self) -> int:\n pass",
"def number_bites_resolved(self) -> int:\r\n resolved_bites = {\r\n row['bite']\r\n for row in self.rows\r\n if row['completed'] == 'True'\r\n }\r\n\r\n return len(resolved_bites)",
"def _get_existence_frequency(self):\n with np.errstate(invalid='ignore'):\n return np.nan_to_num(self.existence_array / self.coverage_array * 100)",
"def count_side_effects(self) -> int:\n return self._count_model(SideEffect)",
"def nClumps(self):\n \n return len(self)",
"def get_number_of_cows(self):\n bulls = self.get_number_of_bulls()\n list_of_cows = set(self.puzzle) & set(self.guess)\n cows = (len(list_of_cows) - bulls)\n return cows",
"def get_n_chains(self): \n res_id_cnt = 0\n tot_n_res = len(self.res_ids)\n n_chns = 0\n for res_id in self.res_ids:\n res_chn_i = res_id[2]\n if res_id_cnt > 1:\n if res_chn_i == self.res_ids[res_id_cnt-1][2]:\n pass\n else:\n n_chns+=1\n res_id_cnt+=1\n return n_chns",
"def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n",
"def ticktock(self): # controller\n for tractor in self.tractors:\n try:\n next(tractor) # state changer\n except StopIteration:\n pass # harmless stuck tractor signal\n\n self.framenumber += 1\n return self.framenumber",
"def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number",
"def culggroup_donecount(group, dones):\n return sum(dones[l] for l in group)",
"def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret",
"def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)",
"def flaky_count(self) -> int:\n return pulumi.get(self, \"flaky_count\")"
]
| [
"0.61615974",
"0.61548567",
"0.6102373",
"0.60826445",
"0.6037719",
"0.5997485",
"0.58958644",
"0.58879924",
"0.5883311",
"0.5854218",
"0.58147615",
"0.579766",
"0.57895315",
"0.5769656",
"0.5769637",
"0.57637495",
"0.57197696",
"0.571867",
"0.57067",
"0.56936866",
"0.5681366",
"0.56637484",
"0.5655714",
"0.5652708",
"0.565063",
"0.5650209",
"0.5626118",
"0.5618012",
"0.5606656",
"0.5597452"
]
| 0.65333235 | 0 |
Add a number of casualties. | def addCasualties(self, number):
self.casualties += number
self.ssp += (number * __SSP_CASUALTY__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getCasualties(self):\n return self.casualties",
"def comply(self, counts):\n pass",
"def threes(dice):\n return sum([x for x in dice if x == 3])",
"def addCowWeight(list, cows):\r\n sum = 0.0\r\n for key in list:\r\n sum += cows[key]\r\n return sum",
"def add_candidates() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n create_candidate(faculty, \"President\")\r\n for _ in range(3):\r\n create_candidate(faculty, \"GSU Officer\")\r\n for _ in range(16):\r\n create_candidate(faculty, \"Faculty Officer\")",
"def combine_many(*fudge):\n result = sum(fudge)\n print(result)",
"def adding_total_calories(total_calories: int) -> int:\n for item in _calories:\n total_calories = total_calories + _calories[item]\n return total_calories",
"def test_extra_chocolates_multiple(self):\n _inpc = ChocolateFeast(6,2,2)\n self.assertEquals(5,_inpc.get_total_chocolates())",
"def give_candies(number_students, students_rate):\r\n # at least 1 candy should be given to a student\r\n candies_given = [1] * number_students\r\n # for each student, except for the 1st student.\r\n for i in range(1, number_students): # check the candies of the student to the left.\r\n # The student must have more candies than the 1 to his left if his rate is bigger.\r\n if students_rate[i] > students_rate[i - 1]:\r\n candies_given[i] = candies_given[i - 1] + 1\r\n # for each student, except for the last student.\r\n for i2 in range(number_students - 2, -1, -1): # check the candies of the student to the right.\r\n # The student must have more candies than the 1 to his right if his rate is bigger.\r\n # and if the candies given to the student after him is more than his.\r\n if students_rate[i2] > students_rate[i2 + 1] and candies_given[i2] < candies_given[i2 + 1] + 1:\r\n candies_given[i2] = candies_given[i2 + 1] + 1\r\n\r\n\r\n total_candies = sum(candies_given)\r\n return total_candies",
"def collect(self, key, ages):\n for age in ages:\n self.collection[key][age - 1] += 1 # age == index + 1",
"def lucas(n):\n \n A = 2\n B = 1\n Counter = 1\n C = 0 \n \n while Counter <= n:\n C = A + B\n A = B\n B = C\n Counter = Counter + 1\n if (Counter + 1) == n:\n return C",
"def number_of_connectives(formula):\n pass\n # ======== YOUR CODE HERE ========",
"def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p",
"def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p",
"def count_codon_all(self):\n return Counter(list(self))",
"def test_add_chance(self):\n chance_fixtures = [[1, 2, 3, 4, 5],\n [1, 1, 1, 1, 1],\n [6, 6, 6, 6, 6],\n [1, 1, 1, 1, 2],\n [1, 1, 1, 3, 3],\n [1, 2, 3, 4, 6],\n ]\n\n for fixture in chance_fixtures:\n score = self.roll.add_chance(fixture)\n\n self.assertEqual(score, sum(fixture))\n self.assertNotEqual(score, 0)\n self.assertEqual(len(fixture), 5)",
"def audit_cuisines(cuisines, cuisine_list):\n for cuisine_type in cuisine_list.split(';'):\n cuisines[cuisine_type] += 1",
"def how_many(cls):\n print(\"We have {:d} companies.\".format(cls.population))",
"def sum_crimes(cs:CrimeStatistics)-> int:\n # return 0 # stub\n #template from atomic\n crimes_total = (cs.violent_crimes+cs.property_crimes+cs.arson)\n return crimes_total",
"def cash_coupon(certificate, percentage):\n return sum(stake for name, stake in certificate['underlyings'].items()) * percentage",
"def update_biodiversity_score(self, hexagons, turn):\n floodplain_count = 0\n non_eco_count = 0\n for feature in hexagons.features:\n if feature.properties[\"ghost_hexagon\"]:\n continue\n if (feature.properties[\"floodplain_north\"] or\n feature.properties[\"floodplain_south\"]):\n floodplain_count += 1\n if (feature.properties[\"landuse\"] == 0 or\n feature.properties[\"landuse\"] == 1):\n non_eco_count += 1\n eco_score = (((floodplain_count - non_eco_count) /\n floodplain_count) * 100)\n if turn < len(self.turn):\n self.biodiversity[turn] = eco_score\n else:\n self.biodiversity.append(eco_score)\n return",
"def add_up(num):\n aList = list(range(1, num + 1))\n sum = 0\n\n for item in aList:\n sum = add_together(sum, item)\n# print(\"NOW SUM IS: \" + str(sum))\n\n return sum",
"def count_liberties(self, x, y):\n return len(self.get_liberties(x, y))",
"def monkey_count(n):\n return [i for i in range(1, n + 1)]",
"def get_number_of_cows(self):\n bulls = self.get_number_of_bulls()\n list_of_cows = set(self.puzzle) & set(self.guess)\n cows = (len(list_of_cows) - bulls)\n return cows",
"def __add__(self, y):\n addend = Counter()\n for key in self:\n if key in y:\n addend[key] = self[key] + y[key]\n else:\n addend[key] = self[key]\n for key in y:\n if key in self:\n continue\n addend[key] = y[key]\n return addend",
"def __add__(self, y):\n addend = Counter()\n for key in self:\n if key in y:\n addend[key] = self[key] + y[key]\n else:\n addend[key] = self[key]\n for key in y:\n if key in self:\n continue\n addend[key] = y[key]\n return addend",
"def testNumberIndividualsAddsUp(self):\n number = sum([x[1] for x in self.tree.get_species_abundances(reference=3)])\n number2 = sum([x[1] for x in self.tree.get_species_abundances(reference=3)])\n self.assertEqual(number, 3734)\n self.assertEqual(number2, 3734)",
"def add(self, number: int) -> None:\n self.cnt[number] = self.cnt.get(number, 0) + 1",
"def addAgility(self):\t\n\t\tself.agility += 1\n\t\tif self.agility > 10:\n\t\t\tself.agility = 10"
]
| [
"0.6208296",
"0.5390049",
"0.5363903",
"0.5341021",
"0.5328923",
"0.52360344",
"0.51885754",
"0.5157553",
"0.5156806",
"0.5020415",
"0.50171345",
"0.49978325",
"0.49969417",
"0.49969417",
"0.49925345",
"0.49781254",
"0.49718857",
"0.49669224",
"0.49357152",
"0.49299726",
"0.49122244",
"0.4911917",
"0.48857227",
"0.48753116",
"0.48477682",
"0.48467624",
"0.48467624",
"0.4845956",
"0.48365077",
"0.47888157"
]
| 0.81291544 | 0 |
> int Returns the number of earned MVP awards. | def getMVPAwards(self):
return self.mvpawards | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_awarded(self, floor=None):\n if self.award_to in (\"individual_overall\", \"floor_overall\", \"dorm\"):\n # For overall prizes, it is only possible to award one.\n return 1\n \n elif self.award_to in (\"floor_dorm\", \"individual_dorm\"):\n # For dorm prizes, this is just the number of dorms.\n return Dorm.objects.count()\n \n elif self.award_to == \"individual_floor\":\n # This is awarded to each floor.\n return Floor.objects.count()\n \n raise Exception(\"Unknown award_to value '%s'\" % self.award_to)",
"def total_rewards(self) -> float:\n return self.__total_rewards",
"def awards(self) -> str:\n return self._awards",
"def att_pos_reward(state, election_results, electoral_votes, attack_list, partisan = False):\n if state in attack_list:\n if partisan:\n return partisan_att_reward(state, election_results, electoral_votes)\n else:\n return att_reward(state, election_results, electoral_votes)\n else:\n return 0",
"def _get_reward(self):\n if self.status():\n return self.current_step/self.ep_length # the reward is proportional to the duration \n else:\n return 0",
"def reward(self, player, winning_state):\n if winning_state == \"Tie\":\n return 1\n elif winning_state == \"Resume\":\n return -1\n else:\n if player == \"agent\":\n return 10\n else:\n return -10",
"def hives_count(self) -> int:\n return self.hives.count()",
"def addMVPAwards(self, number):\n self.mvpawards += number\n self.ssp += (number * __SSP_MVP__)",
"def _get_reward(self, five_cards):\n \n return 1-self.evaluator.get_five_card_rank_percentage(self.evaluator._five(five_cards))",
"def att_reward(state, election_results, electoral_votes):\n evotes = int(electoral_votes[electoral_votes['state'] == state].evotes)\n dem_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'democrat')].votes)\n rep_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'republican')].votes)\n total_votes = dem_votes + rep_votes\n margin = (max(dem_votes, rep_votes) -\n min(dem_votes, rep_votes))/total_votes\n return evotes * margin",
"def episode_total_score(self):\n return self.game.get_total_reward()",
"def partisan_att_reward(state, election_results, electoral_votes):\n evotes = int(electoral_votes[electoral_votes['state'] == state].evotes)\n dem_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'democrat')].votes)\n rep_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'republican')].votes)\n total_votes = dem_votes + rep_votes\n margin = (max(dem_votes, rep_votes) -\n min(dem_votes, rep_votes))/total_votes\n return evotes/(1+margin)",
"def att_neg_reward(state, election_results, electoral_votes, attack_list):\n return -538/51",
"def get_amount(self): \n return len(self.get_cards())",
"def getTotalReward(self):\n return self.lastFitness",
"def get_ave_reward(self):\n return sum(self._gameRewards) / len(self._gameRewards)",
"def get_ave_reward(self):\n return sum(self._gameRewards) / len(self._gameRewards)",
"def get_ave_reward(self):\n return sum(self._gameRewards) / len(self._gameRewards)",
"def get_marble_count(self):",
"def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])",
"def get_num_actions():\n return 6",
"def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward",
"def goalReached(self, rewards):\n return len(rewards) >= 100 and np.mean(rewards[-100:]) >= 18",
"def getTotalReward(self):\n return self.cumreward",
"def getTotalReward(self):\n return self.cumreward",
"def reward_from_events(self, events: List[str]) -> int:\n game_rewards = {\n e.COIN_COLLECTED: 20,\n e.KILLED_OPPONENT: 25,\n PLACEHOLDER_EVENT: -.1, # idea: the custom event is bad\n e.MOVED_LEFT: -.1,\n e.MOVED_RIGHT: -.1,\n e.MOVED_UP: -.1,\n e.MOVED_DOWN: -.1,\n e.WAITED: -.1,\n e.KILLED_SELF: -100,\n e.INVALID_ACTION:-5\n }\n reward_sum = 0\n for event in events:\n if event in game_rewards:\n reward_sum += game_rewards[event]\n self.logger.info(f\"Awarded {reward_sum} for events {', '.join(events)}\")\n return reward_sum",
"def calculate_reward(self):\n r = 0.\n for o in self.obstacles:\n if not isinstance(o, Apple):\n continue # only consider apples\n xy_diff = o.get_position()[:2] - self.agent.get_position()[:2]\n dist = np.linalg.norm(xy_diff)\n if o.is_visible and dist < self.detection_distance:\n o.update_visuals(make_visible=False)\n r += self.apple_reward\n if not self.agent.alive:\n r = self.dead_agent_reward\n return r",
"def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 0.0",
"def rewards(self, s_p, action):\n r0 = GenerateSuccessor13(s_p, action, self.id).agents[self.id].completed_seqs\n r1 = GetReward(s_p)\n if self.r0 < r0:\n self.r0 = r0\n # print(\"0\",self.r0)\n return self.r0\n elif self.r1 > (r1*(-1)):\n self.r1 = r1*(-1)\n # print(\"1\",self.r1)\n return self.r1\n else:\n return 0",
"def hit(self):\r\n self.life -= 1\r\n total_score = 1\r\n if self.life == 0:\r\n self.alive = False\r\n total_score += self.bonus\r\n return total_score"
]
| [
"0.6416264",
"0.6337798",
"0.6078525",
"0.6070558",
"0.6050751",
"0.6013443",
"0.6000904",
"0.59628725",
"0.59128845",
"0.590868",
"0.5906904",
"0.58636963",
"0.58580637",
"0.58382326",
"0.5780159",
"0.57746166",
"0.57746166",
"0.57746166",
"0.5734031",
"0.57163376",
"0.5701522",
"0.5681576",
"0.5659612",
"0.5639241",
"0.5639241",
"0.5606712",
"0.55895054",
"0.5584031",
"0.5575512",
"0.55625314"
]
| 0.6356103 | 1 |
Add a number of MVP awards. | def addMVPAwards(self, number):
self.mvpawards += number
self.ssp += (number * __SSP_MVP__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMVPAwards(self):\n return self.mvpawards",
"def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)",
"def create_awards(request):\n auction = request.validated['auction']\n auction.status = 'active.qualification'\n now = get_now()\n auction.awardPeriod = type(auction).awardPeriod({'startDate': now})\n awarding_type = request.content_configurator.awarding_type\n valid_bids = [bid for bid in auction.bids if bid['value'] is not None]\n bids = chef(valid_bids, auction.features or [], [], True)\n bids_to_qualify = get_bids_to_qualify(bids)\n for i in xrange(0, bids_to_qualify):\n status = 'pending.waiting'\n if i == 0:\n status = 'pending.verification'\n bid = bids[i].serialize()\n award = make_award(request, auction, bid, status, now, parent=True)\n if bid['status'] == 'invalid':\n set_award_status_unsuccessful(award, now)\n if award.status == 'pending.verification':\n award.signingPeriod = award.paymentPeriod = award.verificationPeriod = {'startDate': now}\n add_award_route_url(request, auction, award, awarding_type)\n auction.awards.append(award)",
"def add_reward(self, choice, count=1):\n self.redis.hincrby(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, \"%s:rewards\" % choice, count)\n self._choices = None",
"def extend(self, observations, actions, rewards, next_observations, dones):\n for o, a, r, n, d in zip(observations, actions, rewards, next_observations, dones):\n self.append(o, a, r, n, d)",
"def add_experiences(\n self,\n curr_all_info: AllBrainInfo,\n next_all_info: AllBrainInfo,\n take_action_outputs: ActionInfoOutputs,\n ) -> None:\n raise UnityTrainerException(\n \"The process_experiences method was not implemented.\"\n )",
"def add_experience(self, state, action, reward, next_state, done):\n experience = (state, action, reward, next_state, done)\n for i, k in enumerate(self.data_keys):\n getattr(self, k).append(experience[i])\n self.size += 1",
"def awardExperience(self, pkmnToAwardExperienceFrom):\r\n messages = []\r\n \r\n for oppPkmn in pkmnToAwardExperienceFrom:\r\n experienceToAward = oppPkmn.experienceToAward\r\n for pkmn in self.pkmnInPlay:\r\n pkmn.gainExperience(experienceToAward)\r\n messages += [\"{0} gained {1} experience points.\".format(pkmn.name, experienceToAward)]\r\n while pkmn.canLevelUp():\r\n messages += pkmn.levelUp()\r\n \r\n return messages",
"def awards(self, awards: str):\n\n self._awards = awards",
"def _accumulate_rewards(self) -> None:\n for agent, reward in self.rewards.items():\n self._cumulative_rewards[agent] += reward",
"def add_experiences(\n self,\n curr_all_info: AllBrainInfo,\n next_all_info: AllBrainInfo,\n take_action_outputs: ActionInfoOutputs,\n ) -> None:\n self.trainer_metrics.start_experience_collection_timer()\n if take_action_outputs:\n self.stats[\"Policy/Entropy\"].append(take_action_outputs[\"entropy\"].mean())\n self.stats[\"Policy/Learning Rate\"].append(\n take_action_outputs[\"learning_rate\"]\n )\n for name, signal in self.policy.reward_signals.items():\n self.stats[signal.value_name].append(\n np.mean(take_action_outputs[\"value\"][name])\n )\n\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[\n agent_id\n ].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n tmp_rewards_dict = {}\n for name, signal in self.policy.reward_signals.items():\n tmp_rewards_dict[name] = signal.evaluate(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[\n agent_id\n ].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id][\"visual_obs%d\" % i].append(\n stored_info.visual_observations[i][idx]\n )\n self.training_buffer[agent_id][\"next_visual_obs%d\" % i].append(\n next_info.visual_observations[i][next_idx]\n )\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id][\"vector_obs\"].append(\n stored_info.vector_observations[idx]\n )\n self.training_buffer[agent_id][\"next_vector_in\"].append(\n next_info.vector_observations[next_idx]\n )\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros(\n (len(stored_info.agents), self.policy.m_size)\n )\n self.training_buffer[agent_id][\"memory\"].append(\n stored_info.memories[idx]\n )\n actions = stored_take_action_outputs[\"action\"]\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs[\"pre_action\"]\n self.training_buffer[agent_id][\"actions_pre\"].append(\n actions_pre[idx]\n )\n epsilons = stored_take_action_outputs[\"random_normal_epsilon\"]\n self.training_buffer[agent_id][\"random_normal_epsilon\"].append(\n epsilons[idx]\n )\n else:\n self.training_buffer[agent_id][\"action_mask\"].append(\n stored_info.action_masks[idx], padding_value=1\n )\n a_dist = stored_take_action_outputs[\"log_probs\"]\n # value is a dictionary from name of reward to value estimate of the value head\n value = stored_take_action_outputs[\"value\"]\n self.training_buffer[agent_id][\"actions\"].append(actions[idx])\n self.training_buffer[agent_id][\"prev_action\"].append(\n stored_info.previous_vector_actions[idx]\n )\n self.training_buffer[agent_id][\"masks\"].append(1.0)\n self.training_buffer[agent_id][\"done\"].append(\n next_info.local_done[next_idx]\n )\n\n for name, reward_result in tmp_rewards_dict.items():\n # 0 because we use the scaled reward to train the agent\n self.training_buffer[agent_id][\n \"{}_rewards\".format(name)\n ].append(reward_result.scaled_reward[next_idx])\n self.training_buffer[agent_id][\n \"{}_value_estimates\".format(name)\n 
].append(value[name][idx][0])\n\n self.training_buffer[agent_id][\"action_probs\"].append(a_dist[idx])\n\n for name, rewards in self.collected_rewards.items():\n if agent_id not in rewards:\n rewards[agent_id] = 0\n if name == \"environment\":\n # Report the reward from the environment\n rewards[agent_id] += np.array(next_info.rewards)[next_idx]\n else:\n # Report the reward signals\n rewards[agent_id] += tmp_rewards_dict[name].scaled_reward[\n next_idx\n ]\n\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1\n self.trainer_metrics.end_experience_collection_timer()",
"def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)",
"def rewards_list_manager(self, reward):\n self.rewards_list.append(reward)\n if len(self.rewards_list) > self.list_size:\n self.rewards_list.pop(0)",
"def add_cards(self, cards):\n self.get_cards().extend(cards)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n \n if done and self.episode % 50 == 0 and self.episode > 50:\n self.episode += 1\n self.remove_bad_experience()\n elif done:\n self.episode += 1",
"def agent_reward(self, func):\n if len(self.agents_reward) is 2:\n raise Exception(\"You cannot register 3 or more agents_reward.\")\n\n self.agents_reward.append(func)\n return func",
"def add_new_experience(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done, self.max_priority)\n self.memory.append(e)",
"def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):\n e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)\n self.memory.append(e)",
"def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id]['visual_obs%d' % i].append(\n stored_info.visual_observations[i][idx])\n self.training_buffer[agent_id]['next_visual_obs%d' % i].append(\n next_info.visual_observations[i][next_idx])\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])\n self.training_buffer[agent_id]['next_vector_in'].append(\n next_info.vector_observations[next_idx])\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))\n self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])\n actions = stored_take_action_outputs['action']\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs['pre_action']\n self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])\n else:\n self.training_buffer[agent_id]['action_mask'].append(\n stored_info.action_masks[idx])\n a_dist = stored_take_action_outputs['log_probs']\n value = stored_take_action_outputs['value']\n self.training_buffer[agent_id]['actions'].append(actions[idx])\n self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])\n self.training_buffer[agent_id]['masks'].append(1.0)\n if self.use_curiosity:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +\n intrinsic_rewards[next_idx])\n else:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])\n self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])\n self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])\n if agent_id not in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]\n if self.use_curiosity:\n if agent_id not in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority",
"def add_reward(self, r):\n self.reward += r",
"def rewards(self, choices: Sequence[Tuple[Key,Choice]] ) -> Sequence[Reward]:\n ...",
"def addQueryArp(self, num=1):\n self.query_arp += num",
"def add(self, state, action, reward, next_state, done):\n experience = Experience(state, action, reward, next_state, done)\n self.memory.append(experience)",
"def get_awards(self):\n self.awards = imdb.get_title_awards(self.ID)['awards']\n self._awards_parser()",
"def test_add_advices_to_user(self):\n # create user\n user_created = self.create_user_start_program_advices_list_empty()\n\n # count the number of challenges\n # before a call to the method\n user = self.user.objects.get(id=user_created.id)\n number_advice_to_user_before = user.advices_to_user.count()\n\n # call method\n self.new_controller.add_advices_to_user(user_created.id)\n\n # count the number of challenges\n # after a call to the method\n number_advice_to_user_after = user.advices_to_user.count()\n advice_to_user = user.advices_to_user.values_list(\"id\")\n self.assertEqual(number_advice_to_user_before, 0)\n self.assertEqual(number_advice_to_user_after, 5)\n for id_advice in advice_to_user:\n self.assertEqual([(27,), (28,), (29,), (25,), (26,)].count(id_advice), 1)",
"def create_reward_history(number_of_turns, policies):\n rewards_history = dict()\n for policy in policies:\n rewards_history[policy]=[False]*number_of_turns\n return rewards_history",
"def add(self, state, action, reward, next_state, done):\r\n e = self.experience(state, action, reward, next_state, done)\r\n self.memory.append(e)",
"def turn(self):\n\n # Let \n # I = loan interest rate\n # E = awarded money as a result of certain accounts randomly quadrupling\n # A = original assets under management\n #\n # Then profit = A * I - E\n self.profits.append(self.assets_under_management * LOAN_INTEREST - \\\n self._award_accounts())\n self.assets_under_management = np.sum(self.accounts)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)"
]
| [
"0.61265165",
"0.57455695",
"0.55548084",
"0.54876035",
"0.5444259",
"0.5348443",
"0.5315942",
"0.52942413",
"0.5251396",
"0.5241589",
"0.5185412",
"0.51815754",
"0.51787543",
"0.5119472",
"0.5110886",
"0.5063625",
"0.50448275",
"0.50270516",
"0.5008033",
"0.49999782",
"0.49882138",
"0.49411243",
"0.49274203",
"0.49259692",
"0.4917447",
"0.49145803",
"0.49139717",
"0.49133",
"0.4884034",
"0.48713228"
]
| 0.75747424 | 0 |
Gain a number of new niggling injuries. | def addNigglingInjury(self, number):
self.injury += number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def J (self, n):",
"def ngens(self):\n return 1",
"def test_twenty_rounds_joss_for_noncyclers(self):\n seed = 4\n match = axl.Match(\n (axl.FirstByJoss(), axl.AntiCycler()), turns=20, seed=seed\n )\n match.play()\n self.versus_test(\n axl.AntiCycler(), expected_actions=match.result, seed=seed\n )",
"def n_ins(self):\n pass",
"def I (self, n):",
"def nits(self):",
"def inc_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain += 1\r\n cell.yank()",
"def update_inhibition(self) -> None:\n if self.spec.inhibition_type == \"fffb\":\n self.calc_fffb_inhibition()\n else:\n self.calc_kwta_inhibition()\n\n self.units.update_inhibition(torch.Tensor(self.size).fill_(self.gc_i))",
"def addAgility(self):\t\n\t\tself.agility += 1\n\t\tif self.agility > 10:\n\t\t\tself.agility = 10",
"def penalty(self):\n return 0",
"def num_wires(self):",
"def grow(self):\n self.starve = 500 # useful to avoid looping AI snakes (they die younger -> bad fitness)\n self.body.append(self.old_tail) # that's why I keep old_tail",
"def addInitiallyInfected(self, number_initially_infected):\n\n x_infected = 0\n y_infected = 0\n\n # place as many infected as required in this loop\n while self.infected < number_initially_infected:\n \n # randomly generate the position\n x_infected = np.random.randint(0,self.space.shape[0])\n y_infected = np.random.randint(0,self.space.shape[1])\n \n # check that the position isn't already occupied by an infected\n if (self.space[x_infected][y_infected] == 0):\n self.space[x_infected][y_infected] = 1\n self.infected += 1\n\n # add the initial values of healthy/infected to the arrays keeping track\n self.SIR[0, 0] = self.getSusceptible()\n self.SIR[1, 0] = self.getInfected()\n\n # add the first snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())",
"def nb_elephants(self, x):\n print('Warning ! Changing the number of Elephant is not possible!')",
"def ncore(self):",
"def n(self):\n pass",
"def increase_learning(self, factor):\n pass",
"def low_fitness_animals():\n jungle_cell = topo.Jungle()\n herbivore = animals.Herbivores()\n carnivore = animals.Carnivores()\n carnivore.weight, carnivore.age = 1, 1000\n herbivore.weight, herbivore.age = 1, 1000\n herbivore.parameters[\"omega\"] = 1\n carnivore.parameters[\"omega\"] = 1\n jungle_cell.add_animal(herbivore)\n jungle_cell.add_animal(carnivore)\n return jungle_cell",
"def monkey_count(n):\n return [i for i in range(1, n + 1)]",
"def test_incept():\n\n # help.ogler.resetLevel(level=logging.DEBUG)\n\n for name in [\"wan\", \"wil\", \"wes\", \"multisig1\", \"multisig2\", \"multisig3\"]:\n if os.path.exists('/usr/local/var/keri/db/{}'.format(name)):\n shutil.rmtree('/usr/local/var/keri/db/{}'.format(name))\n if os.path.exists('/usr/local/var/keri/ks/{}'.format(name)):\n shutil.rmtree('/usr/local/var/keri/ks/{}'.format(name))\n\n\n with habbing.openHab(name=\"wan\", salt=b'wann-the-witness', transferable=False, temp=False) as wanHab, \\\n habbing.openHab(name=\"wil\", salt=b'will-the-witness', transferable=False, temp=False) as wilHab, \\\n habbing.openHab(name=\"wes\", salt=b'wess-the-witness', transferable=False, temp=False) as wesHab:\n\n wanDoers = indirecting.setupWitness(name=\"wan\", hab=wanHab, temp=False, tcpPort=5632, httpPort=5642)\n wilDoers = indirecting.setupWitness(name=\"wil\", hab=wilHab, temp=False, tcpPort=5633, httpPort=5643)\n wesDoers = indirecting.setupWitness(name=\"wes\", hab=wesHab, temp=False, tcpPort=5634, httpPort=5644)\n\n inceptor = InceptingDoer()\n doers = wanDoers + wilDoers + wesDoers + [inceptor]\n\n limit = 2.0\n tock = 0.03125\n doist = doing.Doist(tock=tock, limit=limit, doers=doers)\n doist.enter()\n\n tymer = tyming.Tymer(tymth=doist.tymen(), duration=doist.limit)\n\n while not tymer.expired:\n doist.recur()\n time.sleep(doist.tock)\n # doist.do(doers=doers)\n\n assert doist.limit == limit\n\n doist.exit()\n\n preb = b'Ep4WexrfQvQjblYg9ti12cr7NpKWaXLNP5CXmq_4Zhng' # multisig aid\n digb = b'E5_qNBzfkBc2kWAzGn5UUWhWUVrX4Yk1QBpVXHucygfk' # multisig inception digest\n dgkey = dbing.dgKey(preb, digb)\n\n wigs = wanHab.db.getWigs(dgkey)\n # assert len(wigs) == 3\n wigs = wilHab.db.getWigs(dgkey)\n # assert len(wigs) == 3\n wigs = wesHab.db.getWigs(dgkey)\n # assert len(wigs) == 3",
"def setMine(self):\n self.count = 13\n self.mine = True",
"def __IERS(self):\n\n assert self.__lmax <= 30\n '''\n load love number, from 1 to 30\n '''\n Kn = np.array([\n 0.000000000000000, -0.30750000000000,\n -0.19500000000000, -0.13200000000000,\n -0.10320000000000, -0.89166666666670e-1,\n -0.81710392640550e-1, -0.75500000000000e-1,\n -0.71685683412260e-1, -0.68200000000000e-1,\n -0.65980069344540e-1, -0.63812455645590e-1,\n -0.61732085548940e-1, -0.59754188127910e-1,\n -0.57883368816860e-1, -0.56118520212550e-1,\n -0.54455544917280e-1, -0.52888888888890e-1,\n -0.51529657180340e-1, -0.50236923831480e-1,\n -0.49007643741670e-1, -0.47838465083770e-1,\n -0.46725942423010e-1, -0.45666666666670e-1,\n -0.44657342166760e-1, -0.43694830109180e-1,\n -0.42776170404080e-1, -0.41898589949110e-1,\n -0.41059502372580e-1, -0.40256502584650e-1\n ])\n\n return np.append(np.zeros(1), Kn[0:self.__lmax])",
"def test_hky_nielsen(self):\n distribution = {'A':.2,'C':.3,'G':.3,'T':.2}\n kappa = 2\n rate_matrix_object = RateMatrix.get_unscaled_hky85_rate_matrix(distribution, kappa)\n rate_matrix_object.normalize()\n rate_matrix = rate_matrix_object.get_dictionary_rate_matrix()\n path_length = 2\n initial_state = 'A'\n terminal_state = 'C'\n states = 'ACGT'\n iterations = 200\n rejection_changes = []\n i = 0\n while i < iterations:\n rejection_events = get_rejection_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if rejection_events is not None:\n rejection_changes.append(len(rejection_events))\n i += 1\n nielsen_changes = []\n i = 0\n while i < iterations:\n nielsen_events = get_nielsen_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if nielsen_events is not None:\n nielsen_changes.append(len(nielsen_events))\n i += 1\n t, p = scipy.stats.mannwhitneyu(rejection_changes, nielsen_changes)\n self.failIf(p < .001)",
"def __step(self, G):\n new_infected_node_set = self.infected_node_set.copy()\n #look for new infections\n for node in self.infected_node_set:\n #try to infect neighbors\n for neighbor in G.neighbors(node):\n if random() < self.p:\n new_infected_node_set.add(neighbor)\n\n #look for recuperations\n for node in self.infected_node_set:\n #try to recuperate\n if random() < self.q:\n new_infected_node_set.remove(node)\n #set new infected nodes\n self.infected_node_set = new_infected_node_set",
"def rabbitGrowth():\n # you need this line for modifying global variables\n global CURRENTRABBITPOP\n\n for rabbit in range(CURRENTRABBITPOP):\n rabbit_reproduction_prob = 1.0 - (float(CURRENTRABBITPOP) / MAXRABBITPOP)\n if random.random() < rabbit_reproduction_prob and CURRENTRABBITPOP < 1000:\n CURRENTRABBITPOP += 1",
"def silnia_it(n):\n wynik = 1\n \n for i in range(1, n + 1):\n wynik = wynik * i\n return wynik",
"def fn(i, x):\n if i == goal: return x == n \n ans = 0 \n if x < n: ans += (n-x) * fn(i+1, x+1) # a new song\n if k < x: ans += (x-k) * fn(i+1, x) # an old song\n return ans % 1_000_000_007",
"def find_N(config, data, imatch=I_DEAD, itarget=10):\n\n np.random.seed(1)\n # Parse parameters\n # get just 1 base case sample corresponding to average\n\n # results = np.zeros((len(m_prior), 13, len(fwd_args['time'])))\n\n i_mod = o_calmodes[imatch]\n i_obs = i_calmodes[imatch]\n obsdead = data[:, i_obs]\n time_delay = config['time_delay']\n obsdead_index = np.where(obsdead > itarget)[0][0] + time_delay\n found = False\n icount = 0\n ncountmax = 50\n nnew = 1000\n\n ndata = np.size(data[:, 0])\n m_prior, fwd_args = parse_config(config, ndata, mode='mean')\n m_prior = reshape_prior(m_prior)\n param = m_prior[0]\n\n while not found and icount < ncountmax:\n fwd_args['locked']['n'] = nnew\n res = base_seir_model(param, fwd_args)\n moddead = res[i_mod, :]\n moddead_index = np.where(moddead > itarget)\n\n print('moddead index, obsdead index ', moddead_index[0][0], obsdead_index)\n found = moddead_index[0][0] >= obsdead_index\n if not found:\n icount += 1\n nnew = fwd_args['locked']['n'] * 2\n fwd_args['locked']['n'] = nnew\n\n return nnew",
"def ComputeRegenerativeBraking(self):\r\n pass",
"def warmUpExercise():\n\n return np.identity(5)"
]
| [
"0.6187792",
"0.600103",
"0.57108754",
"0.5682342",
"0.566876",
"0.56315416",
"0.5602613",
"0.5481354",
"0.5478897",
"0.54706854",
"0.54320294",
"0.5412855",
"0.5390383",
"0.5351051",
"0.534429",
"0.53177613",
"0.5313821",
"0.5296265",
"0.5289871",
"0.52859455",
"0.5210033",
"0.520412",
"0.52040076",
"0.51935786",
"0.51882905",
"0.5188081",
"0.5185664",
"0.51791525",
"0.51461136",
"0.5132404"
]
| 0.65779567 | 0 |
Add a new skill to the player's list of skills. | def addSkill(self, newskill):
self.skills.append( newskill ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addSkill(self, skillName, maxLevel, creditStart, creditIncrement):\r\n self.skills[skillName] = SkillObject(skillName, maxLevel, creditStart, creditIncrement)\r\n self.orderedSkills.append(skillName)",
"def addSkill(skill, db, **kwargs):\n skill_data = db.execute(\n 'SELECT * FROM mystatus WHERE skill = ?', (str(skill), )).fetchone()\n if skill_data:\n return colored(\"ERROR: Skill {S} is already in the skill set!\".format(S=str(skill)), \"red\", \"on_white\")\n db.execute(\n 'INSERT INTO mystatus (skill, power, points)'\n 'VALUES (?, ?, ?)', (str(skill), str(kwargs['power']), \"0\"))\n db.commit()\n return colored(\"Add new skill: \" + str(skill), 'cyan')",
"def add_skill(skill_list, skill): #inputs the skill dictionary and skill\r\n\tif skill==\"Gun Combat\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in guns:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(guns)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(guns)\r\n\telif skill in [\"Blade Combat\", \"Blade Cbt\"]:\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in melee:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(melee)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(melee)\r\n\telif skill==\"Vehicle\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in vehicles:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\telse:\r\n\t\t\t\tskill=stellagama.random_choice(vehicles)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(vehicles)\r\n\tif skill in skill_list:\r\n\t\tskill_list[skill] += 1\r\n\telif skill not in skill_list:\r\n\t\tskill_list[skill] = 1\r\n\treturn skill_list #outputs the skill dictionary\r",
"def add_skills_to_profile():\n # get specific objects\n profile = storage.get(\"Profile\", profile_id)\n skills = storage.get(\"Skills\", skills_id)\n if profile is not None and skills is not None:\n # check every skill in profile\n for profile_skill in profile.skills:\n # if the given skill is already linked to profile, return\n if profile_skill.id == skills.id:\n return jsonify(skills.to_dict()), 200\n # if skill is not in profile, append skill and save\n profile.skills.append(skills)\n profile.save()\n return jsonify(skills.to_dict()), 201\n\n # if id not in database, abort\n abort(404)",
"def new_skill_interaction(self, skill):\n self.skill_interact[skill] = True",
"def addSkillIntoPlayerDatabase(self, userid, name, level = 0):\r\n if not isinstance(userid, int):\r\n userid = self.getUserIdFromSteamId(userid)\r\n self.execute(\"INSERT OR IGNORE INTO Skill (UserID, name, level) VALUES (?,?,?)\", userid, name, level)\r\n return self.cursor.lastrowid",
"def _set_skill(caller, _, **kwargs):\n pool = _skill_pool(caller, kwargs.get(\"skill\"))\n caller.db.d1_skills[kwargs.get(\"skill\")][\"rank\"] += 1\n caller.ndb.pregen[\"skills\"] = pool\n\n return \"node_skills\"",
"def __initSkills(self):\n skills = self.teamparser.getPlayerSkills()\n try:\n skills = skills[(self.team, self.position)] #initial skills\n except KeyError, err:\n skills = []\n raise TypeError, \"Invalid Team/Position: \" + self.team\n for skill in skills:\n skobj = pyBBSkill.BBSkill(skill, self.skillparser)\n self.skills.append(skobj)",
"def updateSkillForPlayer(self, userid, name, level):\r\n if not isinstance(userid, int):\r\n userid = self.getUserIdFromSteamId(userid)\r\n self.execute(\"UPDATE Skill SET level=? WHERE UserID=? AND name=?\", level, userid, name)",
"def insert_skills(cursor):\n # Get the class of every skill\n skills_classes = dict()\n with open(CLASSES_PATH, encoding='UTF-8') as classes_file:\n classes_dict = ujson.load(classes_file)\n for class_id, _class in classes_dict.items():\n class_skills = _class.get(\"skills\", list())\n for class_skill in class_skills:\n skills_classes[class_skill.lower()] = class_id\n\n with open(SKILLS_PATH, encoding='UTF-8') as skills_file:\n skills_dict = ujson.load(skills_file)\n skills = list()\n # Get list of sorted skills\n sorted_skills_ids = list()\n for skill_id, skill in skills_dict.items():\n if skill_id:\n sorted_skills_ids.append((skill_id, int(skill.get(\"id\", 0))))\n else:\n sorted_skills_ids.append((skill_id, 0))\n sorted_skills_ids.sort(key=lambda tup: tup[1])\n # Start processing them\n for skill_id, _ in sorted_skills_ids:\n skill = skills_dict[skill_id]\n skill_info = list()\n # Get Skill Id\n skill_info.append(int(get_value(skill, \"Skill\", \"id\", str)))\n # Get Skill Name\n skill_info.append(get_value(skill, \"Skill\", \"name\", str))\n # Get Skill Identifier\n identifier = get_value(skill, \"Skill\", \"ident\", str).lower()\n skill_info.append(identifier)\n # Get Skill Icon\n skill_info.append(format_icon(get_value(skill, \"Skill\", \"icon\", str)))\n # Get Skill Circle\n skill_info.append(int(get_value(skill, \"Skill\", \"circle\", str)))\n # Get Skill Rank Level\n skill_info.append(int(get_value(skill, \"Skill\", \"rankLevel\", str)))\n # Get Skill Max Level\n skill_info.append(int(get_value(skill, \"Skill\", \"maxLevel\", str)))\n # Get Skill Video\n skill_info.append(get_value(skill, \"Skill\", \"video\", str))\n # Get Skill Desc\n skill_info.append(get_value(skill, \"Skill\", \"desc\", str))\n # Get Skill Details\n skill_info.append(get_value(skill, \"Skill\", \"desc2\", str))\n # Get Skill Type 1\n skill_info.append(get_value(skill, \"Skill\", \"type1\", str))\n # Get Skill Type 2\n skill_info.append(get_value(skill, \"Skill\", \"type2\", str))\n # Get Skill Cooldown\n skill_info.append(get_value(skill, \"Skill\", \"cooldown\", int))\n # Get Skill Element\n skill_info.append(get_value(skill, \"Skill\", \"element\", str))\n # Get Skill Required Stance\n skill_info.append(get_value(skill, \"Skill\", \"reqStance\", str))\n # Get Skill Level List\n skill_info.append(ujson.dumps(get_value(skill, \"Skill\", \"levelList\", dict)))\n # Get Skill Use Overheat\n skill_info.append(get_value(skill, \"Skill\", \"useOverHeat\", int))\n # Get Skill Class\n skill_info.append(get_skill_class(cursor, skills_classes.get(identifier, '')))\n\n\n skills.append(tuple(skill_info))\n\n skills = tuple(skills)\n\n cursor.executemany(\"INSERT INTO skills (id, name, identifier, icon, circle, rank_level, max_level, video, \"\n \"desc, details, type1, type2, cooldown, element, req_stance, level_list, use_overheat, \"\n \"class) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", skills)",
"async def skill(self, ctx, *, skill: str):\n\n try:\n skill = self.get_entry('Skill', skill.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = skill['Name']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://skill.png')\n embed.add_field(name='Learned', value=skill['Class/Rank'], inline=False)\n embed.add_field(name='Effect', value=skill['Effect'])\n\n await ctx.send(file=discord.File(f'xenox/skills/{name}.png', 'skill.png'), embed=embed)",
"def required_skills(self, required_skills):\n\n self._required_skills = required_skills",
"def addExperience(self, xp):\n self.xp += xp\n if self.xp >= self.xpNeeded:\n self.LevelUpPlayer()",
"def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()",
"def load_skill_list(skills_to_load):\n if exists(SKILLS_DIR):\n # checking skills dir and getting all priority skills there\n skill_list = [folder for folder in filter(\n lambda x: os.path.isdir(os.path.join(SKILLS_DIR, x)),\n os.listdir(SKILLS_DIR)) if folder in skills_to_load]\n for skill_folder in skill_list:\n skill = {\"id\": hash(os.path.join(SKILLS_DIR, skill_folder))}\n skill[\"path\"] = os.path.join(SKILLS_DIR, skill_folder)\n # checking if is a skill\n if not MainModule + \".py\" in os.listdir(skill[\"path\"]):\n continue\n # getting the newest modified date of skill\n last_mod = _get_last_modified_date(skill[\"path\"])\n skill[\"last_modified\"] = last_mod\n # loading skill\n skill[\"loaded\"] = True\n skill[\"instance\"] = load_skill(\n create_skill_descriptor(skill[\"path\"]),\n ws, skill[\"id\"])\n loaded_skills[skill_folder] = skill",
"def insert_skill(self, skill_info, skill_rate):\n\n if self.check_input_type(skill_rate, \"float\"):\n skill_rate = float(skill_rate)\n if skill_rate >= 0.0:\n query = \"insert into skill(skill_descrpt, skill_rate) \" \\\n \"value ('{}', '{}')\".format(skill_info, skill_rate)\n\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Skill rate must be non-negative!\")\n else:\n ErrorMessageWindow(\"Skill rate must be a number!\")",
"def add_player(self, player):\n\t\tself.players.append(player)",
"def create_skill_for_vendor_v1(self, create_skill_request, **kwargs):\n # type: (CreateSkillRequest_92e74e84, **Any) -> Union[ApiResponse, object, CreateSkillResponse_2bad1094, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"create_skill_for_vendor_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'create_skill_request' is set\n if ('create_skill_request' not in params) or (params['create_skill_request'] is None):\n raise ValueError(\n \"Missing the required parameter `create_skill_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'create_skill_request' in params:\n body_params = params['create_skill_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.create_skill_response.CreateSkillResponse\", status_code=202, message=\"Accepted; Returns a URL to track the status in 'Location' header.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn't have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=\"ask_smapi_model.v1.skill.create_skill_response.CreateSkillResponse\")\n\n if full_response:\n return api_response\n return api_response.body",
"def setSkillInfo(self, name, information):\r\n skills[name].info = information",
"def create(self):\n # type: () -> AbstractSkill\n raise NotImplementedError",
"def test_skills_updated(self):\n assert self.skill_config.skills == {self.new_skill_id}",
"def upgrade_skill(self, skill_string):\r\n skill = self.__skills[skill_string]\r\n skill.skill_level += 1\r\n\r\n # Downgrading enabled the first time a skill is upgraded.\r\n if skill.skill_level == 1:\r\n self.skill_down_enable(skill_string)\r\n\r\n # Updates the UI and skill point value\r\n self.update_skill_level_info(skill_string)\r\n self.deduct_skill_points(skill.points_to_up)\r\n self.update_skill_info_box(skill_string)\r\n\r\n # Checks other requirements.\r\n for skill_string2 in self.__skills:\r\n self.check_skill_requirements(skill_string2)",
"def upload_skills_data(self, data):\n if not isinstance(data, dict):\n raise ValueError('data must be of type dict')\n\n _data = deepcopy(data) # Make sure the input data isn't modified\n # Strip the skills.json down to the bare essentials\n to_send = {}\n if 'blacklist' in _data:\n to_send['blacklist'] = _data['blacklist']\n else:\n LOG.warning('skills manifest lacks blacklist entry')\n to_send['blacklist'] = []\n\n # Make sure skills doesn't contain duplicates (keep only last)\n if 'skills' in _data:\n skills = {s['name']: s for s in _data['skills']}\n to_send['skills'] = [skills[key] for key in skills]\n else:\n LOG.warning('skills manifest lacks skills entry')\n to_send['skills'] = []\n\n for s in to_send['skills']:\n # Remove optional fields backend objects to\n if 'update' in s:\n s.pop('update')\n\n # Finalize skill_gid with uuid if needed\n s['skill_gid'] = s.get('skill_gid', '').replace(\n '@|', '@{}|'.format(self.identity.uuid))\n\n self.request({\n \"method\": \"PUT\",\n \"path\": \"/\" + UUID + \"/skillJson\",\n \"json\": to_send\n })",
"def add_new_player(self) -> None:\n\n # 1\n for elem in self.data:\n key = ''\n value = ''\n for k, v in elem.items():\n if k == 'name':\n key = v\n else:\n value = v.get()\n self.attributs.update({key: value})\n\n # 2\n order = ct.Controls.verify_players_creation(self.attributs)\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(self.master.master.list_instances_menus_tournament)\n # 3\n if order['order'] == 'repeat_step':\n self.display()\n else:\n self.destroy_window()\n self.master.master.launch()",
"def insert_skill_abilities(cursor):\n # Get the skill of every ability\n abilities_skills = dict()\n with open(SKILL_ABILITIES_PATH, encoding='UTF-8') as skills_file:\n skills_dict = ujson.load(skills_file)\n for skill_id, skill_abilities in skills_dict.items():\n for skill_ability in skill_abilities:\n abilities_skills[skill_ability.lower()] = skill_id.lower()\n\n # Get info from HTML\n abilities_html_dict = dict()\n with open(ABILITIES_HTML_PATH, encoding='UTF-8') as abilities_html_file:\n soup = BeautifulSoup(abilities_html_file, 'html.parser')\n for ability in soup.findAll('div'):\n # Remove clutter from attribute ID\n ability_id = ability.attrs['id'][18:-8]\n ability_name = ability.b.text\n ability_type = ''\n ability_max_level = 0\n ability_req_skill_level = 0\n ability_desc = ability.contents[-1].strip()\n # Parse all except the name and desc that we already got\n for i in range(2, len(ability.contents)-2):\n if isinstance(ability.contents[i], Tag):\n if ability.contents[i].text == \"Type:\":\n ability_type = ability.contents[i+1].strip()\n elif ability.contents[i].text == \"Max Level:\":\n ability_max_level = int(ability.contents[i+1].strip())\n elif ability.contents[i].text == \"Required Skill Level:\":\n ability_req_skill_level = int(ability.contents[i+1].strip())\n elif ability.contents[i].text == \"Circle:\":\n pass\n else:\n if ability.contents[i].name != 'br':\n l.warning(\"There is a non handled tag {} in ability: {}\".format(ability.contents[i].text,\n ability))\n abilities_html_dict[ability_id.lower()] = {\n 'name': ability_name,\n 'type': ability_type,\n 'max_level': ability_max_level,\n 'req_skill_level': ability_req_skill_level,\n 'desc': ability_desc\n }\n\n with open(ABILITIES_JSON_PATH, encoding='UTF-8') as abilities_file:\n abilities_dict = ujson.load(abilities_file)\n abilities = list()\n # Get list of sorted abilities\n sorted_abilities_ids = list()\n for ability_id, ability in abilities_dict.items():\n if ability_id:\n sorted_abilities_ids.append((ability_id, int(ability.get(\"ClassID\", 0))))\n else:\n sorted_abilities_ids.append((ability_id, 0))\n sorted_abilities_ids.sort(key=lambda tup: tup[1])\n # Start processing them\n for ability_id, _ in sorted_abilities_ids:\n ability = abilities_dict[ability_id]\n html_ability = abilities_html_dict.get(ability.get(\"ClassName\", \"\").lower(), dict())\n ability_info = list()\n # Get Ability Id\n ability_info.append(int(get_value(ability, \"Ability\", \"ClassID\", str)))\n # Get Ability Name\n ability_info.append(get_value(html_ability, \"Ability\", \"name\", str))\n # Get Ability Type\n ability_info.append(get_value(html_ability, \"Ability\", \"type\", str))\n # Get Ability Required Circle\n ability_info.append(int(get_value(ability, \"Ability\", \"ReqCircle\", int)))\n # Get Ability Max Level\n ability_info.append(get_value(html_ability, \"Ability\", \"max_level\", int))\n # Get Ability Desc\n ability_info.append(get_value(html_ability, \"Ability\", \"desc\", str))\n # Get Ability Icon\n ability_info.append(format_icon(get_value(ability, \"Ability\", \"Icon\", str)))\n # Get Skill Class\n ability_info.append(get_ability_skill(cursor, abilities_skills.get(ability_id.lower(), '')))\n # Get Ability Required Skill Level\n ability_info.append(get_value(html_ability, \"Ability\", \"req_skill_level\", int))\n\n abilities.append(tuple(ability_info))\n\n abilities = tuple(abilities)\n\n cursor.executemany(\"INSERT INTO skill_abilities (id, name, type, circle, max_level, desc, icon, skill_id, \"\n \"req_skill_level) VALUES (?, 
?, ?, ?, ?, ?, ?, ?, ?)\", abilities)",
"def create_player(player: Player) -> None:\n with engine.connect() as conn:\n\n conn.execute(\n player_table.insert().values(\n steamid=player.steamid,\n level=player.level,\n xp=player.xp,\n credits=player.credits,\n )\n )\n\n skills = list(player.skills)\n result = conn.execute(\n skill_table.insert().values([\n {\n 'key': skill.key,\n 'level': skill.level,\n 'steamid': player.steamid,\n }\n for skill in skills\n ])\n )\n\n for id, skill in zip(result.inserted_primary_key, skills):\n skill._db_id = id",
"def updateSkillPoints(skill, db, delta):\n skill_data = db.execute(\n 'SELECT * FROM mystatus WHERE skill = ?', (str(skill), )).fetchone()\n if not skill_data:\n return colored(\"ERROR: Skill {S} is not in your skill set!\".format(S=str(skill)), \"red\", \"on_white\")\n new_points = max(0, skill_data['points'] + int(delta))\n db.execute(\n 'UPDATE mystatus SET points = ? WHERE skill = ?', (str(new_points), str(skill)))\n db.commit()\n return colored(\"{S}\\' power is updated from {OLD} -> {NEW}\".format(\n S=str(skill), OLD=str(skill_data['points']), NEW=str(new_points)), 'cyan')",
"def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills",
"def set_skills(username, skillpaths):\n cdate = Date()\n user = database_controller.get_user(username)\n db.session.add(cdate)\n db.session.commit()\n for skillpath, level in skillpaths.items():\n new_skill = database_controller.get_skill(skillpath)\n if not new_skill:\n raise NameError('The Skill {0} does not exist in the database!'.format(skillpath))\n database_controller.add_milestone(username, skillpath, cdate.date, \"Level {0}\".format(level), level)\n assoc = Association(level=level)\n assoc.skill_assoc = new_skill\n assoc.date_assoc = cdate\n assoc.users_assoc = user\n db.session.commit()",
"def test_skills_updated(self):\n assert self.agent_config.skills == {self.new_skill_id}"
]
| [
"0.74936",
"0.7302804",
"0.7260293",
"0.6880153",
"0.65211946",
"0.6501737",
"0.6126843",
"0.6107915",
"0.5938357",
"0.5916689",
"0.5907391",
"0.5876193",
"0.5863165",
"0.578554",
"0.5712432",
"0.5672942",
"0.56194305",
"0.56185836",
"0.56102777",
"0.5591866",
"0.5590392",
"0.5583567",
"0.5567213",
"0.5560467",
"0.5521706",
"0.55127823",
"0.55118406",
"0.5506611",
"0.5483308",
"0.5471395"
]
| 0.8658494 | 0 |
Checks if a given skill is valid for the player; returns a bool. | def isValidSkill(self, skill):
try:
skills = self.skillparser.getSkills()
skilldetails = skills[skill]
if skilldetails[1] in self.picks:
return True
else:
return False
except KeyError:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkPlayerSkillExists(self, userid, skillName):\r\n if not isinstance(userid, int):\r\n userid = self.getUserIdFromSteamId(userid)\r\n self.execute(\"SELECT level FROM Skill WHERE UserID=? AND name=?\", userid, skillName)\r\n return bool( self.fetchone())",
"def requires_matching_skills(self):\n return self._requires_matching_skills",
"def check_skill_requirements(self, skill_string):\r\n requirements_met = True\r\n # Shortened for easier usage, same practice in other functions\r\n # below.\r\n skill = self.__skills[skill_string]\r\n\r\n # Checks if skill is at it's maximum level or at\r\n # the maximum level it can be at current character level\r\n if self.__char_lvl.get() == \"\" or skill.skill_level == skill.skill_max_level or skill.lvl_req[skill.skill_level] > int(self.__char_lvl.get()):\r\n requirements_met = False\r\n\r\n\r\n\r\n # If there is a prerequired skill, checks if it's level is high\r\n # enough.\r\n if skill.prereq_skill_name != \"-\":\r\n if self.__skills[skill.prereq_skill_name].\\\r\n skill_level < skill.prereq_skill_lvl:\r\n self.reset(skill_string)\r\n requirements_met = False\r\n\r\n if not requirements_met:\r\n self.skill_up_disable(skill_string)\r\n else:\r\n self.skill_up_enable(skill_string)\r\n\r\n # If very little skill points are left after upgrading,\r\n # requirements of all skills are tested to see if there's enough\r\n # left to upgrade them.\r\n if self.__skill_points < MOST_SKILL_POINTS_POSSIBLY_REQUIRED:\r\n self.check_if_enough_skill_points()",
"def check_if_enough_skill_points(self):\r\n for skill_string in self.__skills:\r\n if (self.__skills[skill_string].points_to_up >\r\n self.__skill_points):\r\n self.skill_up_disable(skill_string)",
"def match_skills(item):\n\n text = item.text\n if any([skill in text for skill in skill_names]):\n return True\n return False",
"def isValid(self, game):\n return True",
"def playerCanPlay(game, situation, player):\r\n return True",
"def canWield(self, item):\n if (item.weapon or item.spell) and 'no_weapons' in self.status_bundle:\n return False\n if Weapons.TRIANGLE.isMagic(item) and 'no_magic_weapons' in self.status_bundle:\n return False\n # if the item is a weapon\n if item.weapon:\n itemLvl = item.weapon.LVL\n elif item.spell:\n itemLvl = item.spell.LVL\n else:\n return True # does not have a level so it can be used\n\n idx = Weapons.TRIANGLE.name_to_index[item.TYPE]\n unitwexp = self.wexp[idx]\n if itemLvl in Weapons.EXP.wexp_dict and unitwexp >= Weapons.EXP.wexp_dict[itemLvl]:\n return True\n elif itemLvl == self.name: # If this weapon is for me!\n return True\n else:\n return False",
"def is_valid(self):\n return self.eyes[0] and self.eyes[1]",
"def is_valid_player(user_input):\n \n i = user_input.upper()\n if i in Board.player_decoder:\n return True\n elif i == 'Q':\n exit(\"\\nExiting program. Thanks for using Clue Detective!\\n\")\n else:\n return False",
"def check_skill_prerequisites(self, skill, header):\n try: \n skill_type = ContentType.objects.get_for_model(Skill)\n skill_prerequisites = Prerequisite.objects.filter(\n content_type__pk=skill_type.id,\n object_id=skill.id\n )\n return self.check_prerequisites(skill_prerequisites)\n except Prerequisite.DoesNotExist:\n return True\n return True",
"def enough_players():\n return True",
"def qualifies(self, weapon):\n return True",
"def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()",
"def is_talent_active(talent_data: dict) -> bool:\n\n return talent_data[\"spell_tooltip\"][\"cast_time\"].lower() != \"passive\"",
"def is_valid_play(play, curr_trick, hand):\n\tif len(curr_trick) == 0: # Player is lead\n\t\treturn True\n\telse:\n\t\tlead_suit = curr_trick[0][1]\n\t\tplay_suit = play[1]\n\n\t\tif play_suit == lead_suit:\n\t\t\treturn True\n\t\telse:\n\t\t\t# Check remaining cards in hand\n\t\t\tfor card in hand:\n\t\t\t\tsuit = card[1]\n\t\t\t\tif suit == lead_suit:\n\t\t\t\t\treturn False\n\t\t\t# no card in hand matches lead_suit\n\t\t\treturn True",
"def still_valid(self) -> bool:\n return self._data.player_alive(self._data.player_turn)",
"async def skill(self, ctx, *, skill: str):\n\n try:\n skill = self.get_entry('Skill', skill.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = skill['Name']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://skill.png')\n embed.add_field(name='Learned', value=skill['Class/Rank'], inline=False)\n embed.add_field(name='Effect', value=skill['Effect'])\n\n await ctx.send(file=discord.File(f'xenox/skills/{name}.png', 'skill.png'), embed=embed)",
"def is_shot_valid(self, shot):\n a = self.check_position(shot.opponent)\n b = self.check_shot_direction(shot)\n c = self.check_shot_on_target(shot)\n return a and b and c",
"def valid(self):\n try:\n if self.getPret() > 0 and self.getAn() > 0 and self.validProgram(self.getProgram()):\n return True\n except:\n return False\n return False",
"def check_hand(self, player):\n\n total = player.score()\n if total > 21:\n status = 'bust'\n elif total == 21:\n status = 'win'\n else:\n status = 'okay'\n\n if self.verbose:\n print(total, 'points')\n \n return status",
"def _validability(self, ability):\n return (isinstance(ability, AttributeAbility) or\n isinstance(ability, WeaponAbility))",
"def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True",
"def succeeded(self):\n return self.current_reward == 300",
"async def validate(self, ctx: Context, argument: str) -> bool:\n return True",
"def is_won(self):\n return self.position == self.proposition.outcome and self.proposition.is_paid",
"def is_valid(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate(attr)\n except (TypeError, ValueError):\n return False\n return True",
"def has_won(board, player):\n return False",
"def has_won(board, player):\r\n return False",
"def apply_skill_effects(self, behavior):\n b_type = type(behavior)\n if issubclass(b_type, ESAttackUp):\n if b_type == ESAttackUPRemainingEnemies \\\n and behavior.enemy_count is not None \\\n and self.enemies > behavior.enemy_count:\n return False\n if self.enraged is None:\n if b_type == ESAttackUPCooldown and behavior.turn_cooldown is not None:\n self.enraged = -behavior.turn_cooldown + 1\n return False\n else:\n self.enraged = behavior.turns\n return True\n else:\n if self.enraged == 0:\n self.enraged = behavior.turns\n return True\n else:\n return False\n elif b_type == ESDamageShield:\n if self.damage_shield == 0:\n self.damage_shield = behavior.turns\n return True\n else:\n return False\n elif b_type == ESStatusShield:\n if self.status_shield == 0:\n self.status_shield = behavior.turns\n return True\n else:\n return False\n return True"
]
| [
"0.7394698",
"0.6206403",
"0.61741734",
"0.6111237",
"0.5894406",
"0.5616227",
"0.56150854",
"0.5613188",
"0.5574354",
"0.55501664",
"0.5539028",
"0.54900765",
"0.5446474",
"0.5440643",
"0.5423603",
"0.5418668",
"0.5418457",
"0.54121983",
"0.53990597",
"0.5391631",
"0.5368901",
"0.5368185",
"0.5347749",
"0.53459567",
"0.5328529",
"0.53184986",
"0.53123295",
"0.5305574",
"0.5294016",
"0.5293555"
]
| 0.8047101 | 0 |
Create a fake Options object for testing. Note that the returned object only provides access to the provided options values. There is no registration mechanism on this object. Code under test shouldn't care about resolving cmdline flags vs. config vs. env vars etc. etc. | def create_options(options, passthru_args=None, fingerprintable_options=None):
fingerprintable = fingerprintable_options or defaultdict(dict)
class FakeOptions(object):
def for_scope(self, scope):
# TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a
# dict of scope -> (dict of option name -> value). Clean up these usages and kill this
# accommodation.
options_for_this_scope = options.get(scope) or {}
if isinstance(options_for_this_scope, _FakeOptionValues):
options_for_this_scope = options_for_this_scope.option_values
scoped_options = {}
if scope:
scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)
scoped_options.update(options_for_this_scope)
return _FakeOptionValues(scoped_options)
def for_global_scope(self):
return self.for_scope('')
def passthru_args_for_scope(self, scope):
return passthru_args or []
def items(self):
return options.items()
@property
def scope_to_flags(self):
return {}
def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):
"""Returns a list of fingerprintable (option type, option value) pairs for
the given scope.
Note that this method only collects values for a single scope, NOT from
all enclosing scopes as in the Options class!
:param str bottom_scope: The scope to gather fingerprintable options for.
:param bool include_passthru: Whether to include passthru args captured by `bottom_scope` in the
fingerprintable options.
"""
pairs = []
if include_passthru:
pu_args = self.passthru_args_for_scope(bottom_scope)
pairs.extend((str, arg) for arg in pu_args)
option_values = self.for_scope(bottom_scope)
for option_name, option_type in fingerprintable[bottom_scope].items():
pairs.append((option_type, option_values[option_name]))
return pairs
def __getitem__(self, scope):
return self.for_scope(scope)
return FakeOptions() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testParseOptions(self):\n options = cli_test_lib.TestOptions()\n\n output_module = MockOutputModule()\n server_config.ServerArgumentsHelper.ParseOptions(options, output_module)\n\n with self.assertRaises(errors.BadConfigObject):\n server_config.ServerArgumentsHelper.ParseOptions(options, None)",
"def cmdLineOptions(self):\n\t\tself.opts = Options()\n\t\tself.opts.process()\n\t\treturn self",
"def create_options():\n optparser = optparse.OptionParser()\n optparser.add_option(\"-f\", \"--filename\", type=\"string\",\n help=\"execute a single unit test file\")\n optparser.add_option(\"-s\", \"--subprocess\", action=\"store_true\",\n default=False,\n help=\"run everything in an own subprocess \"\n \"(default: use a single process)\")\n optparser.add_option(\"-t\", \"--timeout\", type=\"int\", default=70,\n help=\"Timout for subprocesses before being killed \"\n \"(default: 70s per file)\")\n optparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"be verbose and print anything instantly\")\n optparser.add_option(\"-r\", \"--random\", action=\"store_true\", default=False,\n help=\"randomize the order of tests\")\n optparser.add_option(\"-S\", \"--seed\", type=\"int\",\n help=\"seed the randomizer(useful to \"\n \"recreate earlier randomized test cases)\")\n optparser.add_option(\"-i\", \"--interactive\", action=\"callback\",\n callback=include_tag,\n callback_args=(\"interactive\",),\n help=\"also execute interactive tests\")\n optparser.add_option(\"-e\", \"--exclude\", action=\"callback\",\n callback=exclude_tag, type=\"string\",\n help=\"exclude test containing the tag\")\n optparser.add_option(\"-l\", \"--listtags\", action=\"callback\",\n callback=list_tags,\n help=\"lists all available tags and exits\")\n optparser.add_option(\"--logfile\", type=\"string\",\n help=\"save output to log file\")\n optkeys = [\"filename\",\n \"subprocess\",\n \"timeout\",\n \"random\",\n \"seed\",\n \"verbose\"\n ]\n return optparser, optkeys",
"def test_options_flags(self):\n opts = []\n\n # Handle \"--failed\" as a special case: we want to re-run only\n # the tests that failed within our Django apps\n # This sets the --last-failed flag for the pytest command, so this\n # functionality is the same as described in the pytest documentation\n if self.failed_only:\n opts.append(\"--last-failed\")\n\n # This makes it so we use pytest's fail-fast feature in two cases.\n # Case 1: --fail-fast is passed as an arg in the paver command\n # Case 2: The environment variable TESTS_FAIL_FAST is set as True\n env_fail_fast_set = (\n 'TESTS_FAIL_FAST' in os.environ and os.environ['TEST_FAIL_FAST']\n )\n\n if self.fail_fast or env_fail_fast_set:\n opts.append(\"--exitfirst\")\n\n if self.with_wtw:\n opts.extend([\n '--wtw',\n f'{COVERAGE_CACHE_BASEPATH}/{WHO_TESTS_WHAT_DIFF}',\n '--wtwdb',\n f'{COVERAGE_CACHE_BASEPATH}/{COVERAGE_CACHE_BASELINE}'\n ])\n\n return opts",
"def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }",
"def create_options(test_args) -> testutils.Optional[Options]:\n options = Options()\n options.p4_file = Path(testutils.check_if_file(test_args.p4_file))\n testfile = test_args.testfile\n if not testfile:\n testutils.log.info(\"No test file provided. Checking for file in folder.\")\n testfile = options.p4_file.with_suffix(\".py\")\n result = testutils.check_if_file(testfile)\n if not result:\n return None\n options.testfile = Path(result)\n testdir = test_args.testdir\n if not testdir:\n testutils.log.info(\"No test directory provided. Generating temporary folder.\")\n testdir = tempfile.mkdtemp(dir=Path(\".\").absolute())\n # Generous permissions because the program is usually edited by sudo.\n os.chmod(testdir, 0o755)\n options.testdir = Path(testdir)\n options.rootdir = Path(test_args.rootdir)\n options.num_ifaces = args.num_ifaces\n\n try:\n import nnpy # pylint: disable=W0611,C0415\n\n assert nnpy\n options.use_nn = args.use_nn\n except ImportError:\n testutils.log.error(\"nnpy is not available on this system. Falling back to veth testing.\")\n options.use_nn = False\n\n # Configure logging.\n logging.basicConfig(\n filename=options.testdir.joinpath(\"test.log\"),\n format=\"%(levelname)s: %(message)s\",\n level=getattr(logging, test_args.log_level),\n filemode=\"w\",\n )\n stderr_log = logging.StreamHandler()\n stderr_log.setFormatter(logging.Formatter(\"%(levelname)s: %(message)s\"))\n logging.getLogger().addHandler(stderr_log)\n return options",
"def _options_commandline_overrides(options):\n cmdline_values = {\n 'run_storage_base': options.run_storage_base,\n 'watch': options.watch,\n 'verbose': options.verbose,\n 'uploader_config': options.uploader_config,\n 'logging_config': options.logging_config,\n }\n\n # Commandline options override any value in the config file.\n for k, v in cmdline_values.items():\n if v is not None:\n options[k] = v\n\n return options",
"def get_args():\n args = mock.Mock()\n args.debug = None\n args.generateconfig = None\n args.config = 'doesntmatter'\n return args",
"def get_args():\n args = mock.Mock()\n args.debug = None\n args.generateconfig = None\n args.config = 'doesntmatter'\n return args",
"def test_cli_utils(mock_empty_os_environ, mock_settings_file, mode, option_name, use_method):\n settings_map = settings_parser.Settings(prefix='TEST_STUFF')\n assert settings_map._data == {}\n\n if use_method:\n opt = cli_utils.click_settings_file_option(settings_map, option_name=option_name)\n else:\n opt = settings_map.click_settings_file_option(option_name=option_name)\n\n @click.command()\n @opt\n def tmp_cli():\n pass\n\n runner = CliRunner()\n if mode == 'config':\n args = ['--' + option_name, mock_settings_file[0]]\n result = runner.invoke(tmp_cli, args)\n assert dict(settings_map) == mock_settings_file[1]\n assert result.exit_code == 0\n elif mode == 'noconfig':\n args = []\n result = runner.invoke(tmp_cli, args)\n assert dict(settings_map) == {}\n assert result.exit_code == 0\n elif 'wrongfile':\n args = ['--' + option_name, 'badlfkjasfkj']\n result = runner.invoke(tmp_cli, args)\n assert result.exit_code == 2\n expected_output = (\n 'Usage: tmp-cli [OPTIONS]\\n'\n 'Try \"tmp-cli --help\" for help.\\n\\n'\n 'Error: Invalid value for \"--{}\" / \"-{}\": '\n 'File \"badlfkjasfkj\" does not exist.'\n '\\n'\n ).format(option_name, option_name[0])\n assert result.output == expected_output",
"def _create_options(self):\n self._OPTIONS = {}",
"def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options",
"def get_options(parser=None):\n if parser is None:\n parser = get_parser()\n inline_options = vars(parser.parse_args())\n inline_options[consts.DEVICE] =\\\n torch.device(consts.CUDA if torch.cuda.is_available() and not inline_options[consts.NO_GPU] else consts.CPU)\n\n # Load inline_options\n # Check if continue option is given. If so, replace the command-line arguments with those loaded from a file.\n if inline_options[consts.CONTINUE]:\n try:\n options_path = os.path.join(inline_options[consts.EXPORT_DIR], 'options.pickle')\n with open(options_path, 'rb') as file:\n unpickled_options = pickle.load(file)\n for option, value in unpickled_options.items():\n inline_options[option] = value\n inline_options[consts.CONTINUE] = True\n except FileNotFoundError as _:\n print('Invalid load directory.', file=sys.stderr)\n exit(66)\n\n # Load the options given through a configuration file.\n options_array = [inline_options.copy()]\n for config_file in inline_options[consts.CONFIG_FILE]:\n try:\n with open(config_file, 'r') as file:\n options_array.append({**inline_options, **vars(parser.parse_args(file.read()))})\n except IOError as _:\n print('Bad argument: --config-file {}.'.format(config_file), file=sys.stderr)\n exit(64)\n\n # Check options for errors\n for options in options_array:\n if options[consts.CHECKPOINT_STEP] < 0:\n print('Bad argument: --checkpoint-step must be non-negative.', file=sys.stderr)\n exit(64)\n if options[consts.EXPORT_DIR] is None:\n print('Missing argument: --export-dir', file=sys.stderr)\n exit(66)\n if options[consts.NO_GPU] is None:\n options[consts.NO_GPU] = False\n for batch_size in options[consts.BATCH_SIZE]:\n if batch_size <= 0:\n print('Bad argument: --batch-size must be non-negative.', file=sys.stderr)\n exit(64)\n if len(options[consts.DATASET]) == 0:\n print('Missing argument: --dataset', file=sys.stderr)\n exit(66)\n for dataset in options[consts.DATASET]:\n if dataset == consts.SIGMORPHON2020:\n if options[consts.SIGMORPHON2020_ROOT] is None:\n print('Missing argument: --sigmorphon2020-root', file=sys.stderr)\n exit(66)\n else:\n print('Bad argument: --dataset {} is not recognized.'.format(options[consts.DATASET]),\n file=sys.stderr)\n exit(64)\n if options[consts.NUM_EPOCHS] <= 0:\n print('Bad argument: --num-epochs must be positive.', file=sys.stderr)\n exit(64)\n\n return inline_options",
"def MakeOpts():\n opt_parser = OptionParser()\n opt_parser.add_option(\"-s\", \"--thermodynamics_source\",\n dest=\"thermodynamics_source\",\n type=\"choice\",\n choices=['observed_only',\n 'hatzi_only',\n 'milo_only',\n 'milo_merged'],\n default=\"milo_merged\",\n help=\"The thermodynamic data to use\")\n opt_parser.add_option(\"-k\", \"--kegg_database_location\", \n dest=\"kegg_db_filename\",\n default=\"../data/public_data.sqlite\",\n help=\"The KEGG database location\")\n opt_parser.add_option(\"-d\", \"--database_location\", \n dest=\"db_filename\",\n default=\"../res/gibbs.sqlite\",\n help=\"The Thermodynamic database location\")\n opt_parser.add_option(\"-t\", \"--thermodynamics_filename\",\n dest=\"thermodynamics_filename\",\n default='../data/thermodynamics/dG0.csv',\n help=\"The name of the thermodynamics file to load.\")\n opt_parser.add_option(\"-i\", \"--input_filename\",\n dest=\"input_filename\",\n default=\"../data/thermodynamics/pathways.txt\",\n help=\"The file to read for pathways to analyze.\")\n opt_parser.add_option(\"-o\", \"--output_filename\",\n dest=\"output_filename\",\n default='../res/thermo_comparison/report.html',\n help=\"Where to write output to.\")\n return opt_parser",
"def test_cli_utils(\n mock_empty_os_environ, mock_settings_file, mode, option_name, use_method\n):\n climate = core.Climate(prefix=\"TEST_STUFF\")\n # test equality here as _data is not only NoneType but also a proxy so \"is\" comparison would alwas evaluate to false.\n assert isinstance(climate._data, type(None))\n\n if use_method:\n opt = cli_utils.click_settings_file_option(climate, option_name=option_name)\n else:\n opt = climate.click_settings_file_option(option_name=option_name)\n\n @click.command()\n @opt\n def tmp_cli():\n pass\n\n runner = CliRunner()\n if mode == \"config\":\n args = [\"--\" + option_name, mock_settings_file[0]]\n result = runner.invoke(tmp_cli, args)\n assert dict(climate.settings) == mock_settings_file[1]\n assert result.exit_code == 0\n elif mode == \"noconfig\":\n args = []\n result = runner.invoke(tmp_cli, args)\n assert dict(climate.settings) == {}\n assert result.exit_code == 0\n elif mode == \"wrongfile\":\n args = [\"--\" + option_name, \"badlfkjasfkj\"]\n result = runner.invoke(tmp_cli, args)\n assert result.exit_code == 2\n expected_output = (\n \"Usage: tmp-cli [OPTIONS]\\n\"\n \"Try 'tmp-cli --help' for help.\\n\\n\"\n \"Error: Invalid value for '--{}' / '-{}': \"\n \"File 'badlfkjasfkj' does not exist.\"\n \"\\n\"\n ).format(option_name, option_name[0])\n assert result.output == expected_output\n else: # pragma: nocover\n assert False, \"Incorrect mode\"",
"def create_options(**kwargs):\n kwargs.setdefault(\"select\", [])\n kwargs.setdefault(\"extended_default_select\", [])\n kwargs.setdefault(\"extended_default_ignore\", [])\n kwargs.setdefault(\"extend_select\", [])\n kwargs.setdefault(\"ignore\", [])\n kwargs.setdefault(\"extend_ignore\", [])\n kwargs.setdefault(\"disable_noqa\", False)\n kwargs.setdefault(\"enable_extensions\", [])\n kwargs.setdefault(\"per_file_ignores\", [])\n return argparse.Namespace(**kwargs)",
"def set_options(args):\n\n (options, args) = parser.parse_args(args)\n return options",
"def add_options(self, parser):\n parser.add_argument(\n '--pytest',\n action='store_true',\n default=False,\n dest='pytest',\n help='Use pytest runner instead of nose.')\n parser.add_argument(\n '--app',\n metavar='APP_LABEL',\n action='append',\n dest='app_names',\n help='A Django app label to add to the list of installed apps. '\n 'This is only required for tests that use apps not '\n 'enabled by extensions.')\n parser.add_argument(\n '-e',\n '--extension',\n metavar='EXTENSION_CLASS',\n dest='extension_class',\n help='The full module and class path to the extension to test.')\n parser.add_argument(\n '-m',\n '--module',\n action='append',\n metavar='MODULE_NAME',\n dest='module_names',\n help='The name(s) of the extension module(s) to test. For '\n 'example, if your tests are in \"myextension.tests\", you '\n 'might want to use \"myextension\". This may require '\n 'specifying multiple modules in the extension, and any '\n 'dependencies. You may want to use --extension instead.')\n parser.add_argument(\n '--pdb',\n action='append_const',\n dest='test_options',\n const='--pdb',\n help='Drop into a debugger on any failures or errors.')\n parser.add_argument(\n '--tree-root',\n metavar='PATH',\n default=os.getcwd(),\n help='The path to the root of the source tree.')\n parser.add_argument(\n '--with-coverage',\n action='append_const',\n dest='test_options',\n const='--with-coverage',\n help='Display a report on code covered or missed by tests.')\n parser.add_argument(\n '-x',\n '--stop',\n action='append_const',\n dest='test_options',\n const='-x',\n help='Stop running tests after the first failure.')\n parser.add_argument(\n 'tests',\n metavar='TEST',\n nargs='*',\n help='Specific tests to run. This can be in the form of '\n 'mypackage.mymodule, mypackage.mymodule:TestsClass, or '\n 'mypackage.mymodule:TestsClass.test_method.')",
"def test_build_option_parser(self):\n usage = \"Something\"\n epilog = \"Something\"\n argparse.ArgumentParser = mock.Mock()\n parser = cmd_utils.build_option_parser(usage=usage, epilog=epilog,)\n argparse.ArgumentParser.assert_called_with(\n usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog,\n )\n assert parser.add_argument.call_count == 5\n\n args = [call[1] for call in parser.add_argument.mock_calls]\n expected = [\n (\"--version\",),\n (\"-v\", \"--verbose\"),\n (\"--server\",),\n (\"--cluster_name\",),\n (\"-s\", \"--save\"),\n ]\n assert args == expected\n\n defaults = [call[2].get(\"default\") for call in parser.add_argument.mock_calls]\n assert defaults == [None, None, None, None, None]",
"def testParseOptions(self):\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options, test_tool)\n\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(\n test_tool._task_storage_format, options.task_storage_format)\n\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options, None)\n\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(\n options, test_tool)",
"def test_options(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '']\n main(None)\n self.assertEqual(len(wf._items), 2)\n self.assertEqual(wf._items[0].title, OPTIONS[0]['title'])\n self.assertEqual(wf._items[1].title, OPTIONS[1]['title'])\n wf._items = []",
"def getCmdOptions():\n #print( \"getCmdOptions() entered...\\n )\"\n my_opts = {}\n err = None\n required_opts = { 'number': True, 'host': True,'port': True, 'help': True, 'debug': True, 'stdout': True, 'logfile': True }\n rc = 1\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hdsn:H:p:l:\", [\"help\", \"debug\", \"stdout\", \"number=\", \"host=\", \"port=\", \"logfile=\"]) #@UnusedVariable\n except(getopt.GetoptError, err):\n # print help information and exit:\n print(str(err)) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif o in (\"-n\", \"--number\"):\n my_opts['number'] = a\n elif o in (\"-H\", \"--host\"):\n my_opts['host'] = a\n elif o in (\"-p\", \"--port\"):\n my_opts['port'] = a\n required_opts['port'] = True\n elif o in (\"-l\", \"--logfile\"):\n my_opts['logfile'] = a\n elif o in (\"-s\", \"--stdout\"):\n my_opts['stdout'] = True\n elif o in (\"-d\", \"--debug\"):\n my_opts['debug'] = True\n else:\n rc = 0\n assert False, \"unhandled option\"\n #Endif\n #Endfor\n\n if(rc == 0):\n usage()\n\n #for k, v in required_opts.iteritem():\n for k, v in required_opts.items(): #@UnusedVariable\n if(required_opts[k] == False):\n msg = sys.argv[0] + \" Must provide: \" + \"--\" + str(k)\n print(msg)\n rc = 0\n #Endif\n #Endfor\n\n if(rc == 0):\n usage()\n sys.exit(2)\n #Endif\n\n resetInit(my_opts)",
"def get_options(cmd_args=None):\n cmd_parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n cmd_parser.add_argument(\n '-i',\n '--input_file',\n help=\"\"\"a log file to be cleaned up\"\"\",\n type=str,\n default='')\n cmd_parser.add_argument(\n '-s',\n '--salt',\n help=\"\"\"the salt for anonymizing IPs [optional, defaults to hardcoded one]\"\"\",\n type=str,\n default=salt)\n\n args = cmd_parser.parse_args(cmd_args)\n\n options = {}\n options['input_file'] = args.input_file\n options['salt'] = args.salt\n\n return options",
"def test_creation(self):\n \n from pystarlab.starlab import Option\n opt = Option(parameter=\"n\",\n long_name=\"specify number of particles\",\n is_required=True,\n default_value=None)\n \n self.assertIsInstance(opt, Option)\n self.assertEquals(opt.parameter, \"n\")\n self.assertTrue(opt.is_required)\n self.assertEquals(opt.long_name, \"specify number of particles\")\n self.assertIsNone(opt.default_value)\n self.assertIsNone(opt.value)",
"def browser_options():\n opts = Options()\n opts.add_argument('--disable-dev-shm-usage')\n if settings.SELENIUM_DEBUG: opts.add_argument('--auto-open-devtools-for-tabs')\n if settings.USER_AGENT != 'default': opts.add_argument(f'user-agent={settings.USER_AGENT}')\n # Fallback, falls Chrome Installation in Program Files installiert ist\n if settings.CHROME_PATH: opts.binary_location = settings.CHROME_PATH\n if os.environ.get('DOCKER_ENV'):\n opts.add_argument('--no-sandbox')\n return opts",
"def options(self, **kwds):\n opts = dict(self.opts)\n for k in kwds:\n try:\n # Ensure that the key exists because we want to change\n # existing options, not add new ones.\n _ = opts[k]\n except KeyError:\n raise ValueError(\"invalid option {!r}\".format(k))\n opts[k] = kwds[k]\n return type(self)(self.cls, opts, self.kwargs)",
"def options(**kwargs: Any) -> Options:\n if _options_stack.top is None:\n return Options(**kwargs)\n else:\n defaults: Options = _options_stack.top\n return replace(defaults, **kwargs)",
"def _get_options(self):\n return self.options",
"async def test_options_flow(hass):\n # Create a new MockConfigEntry and add to HASS (we're bypassing config\n # flow entirely)\n entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, entry_id=\"test\")\n entry.add_to_hass(hass)\n\n # Initialize an options flow\n result = await hass.config_entries.options.async_init(entry.entry_id)\n\n # Verify that the first options step is a user form\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n # Enter some fake data into the form\n result = await hass.config_entries.options.async_configure(\n result[\"flow_id\"],\n user_input={platform: platform != SENSOR for platform in PLATFORMS},\n )\n\n # Verify that the flow finishes\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == \"test_username\"\n\n # Verify that the options were updated\n assert entry.options == {BINARY_SENSOR: True, SENSOR: False, SWITCH: True}",
"def options(self, parser, env):\n pass"
]
| [
"0.6592457",
"0.6500611",
"0.6408008",
"0.6261256",
"0.62606627",
"0.6241025",
"0.61252075",
"0.60680246",
"0.60680246",
"0.6058753",
"0.6010663",
"0.59718144",
"0.5945146",
"0.59211415",
"0.590275",
"0.58848876",
"0.587975",
"0.5825693",
"0.579893",
"0.57809275",
"0.57769144",
"0.5776459",
"0.57760906",
"0.57704586",
"0.57694924",
"0.5762252",
"0.5755186",
"0.57406425",
"0.57278186",
"0.57246304"
]
| 0.6683773 | 0 |
Returns a list of fingerprintable (option type, option value) pairs for the given scope. Note that this method only collects values for a single scope, NOT from all enclosing scopes as in the Options class! | def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):
pairs = []
if include_passthru:
pu_args = self.passthru_args_for_scope(bottom_scope)
pairs.extend((str, arg) for arg in pu_args)
option_values = self.for_scope(bottom_scope)
for option_name, option_type in fingerprintable[bottom_scope].items():
pairs.append((option_type, option_values[option_name]))
return pairs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_options(options, passthru_args=None, fingerprintable_options=None):\n fingerprintable = fingerprintable_options or defaultdict(dict)\n\n class FakeOptions(object):\n def for_scope(self, scope):\n # TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a\n # dict of scope -> (dict of option name -> value). Clean up these usages and kill this\n # accommodation.\n options_for_this_scope = options.get(scope) or {}\n if isinstance(options_for_this_scope, _FakeOptionValues):\n options_for_this_scope = options_for_this_scope.option_values\n\n scoped_options = {}\n if scope:\n scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)\n scoped_options.update(options_for_this_scope)\n return _FakeOptionValues(scoped_options)\n\n def for_global_scope(self):\n return self.for_scope('')\n\n def passthru_args_for_scope(self, scope):\n return passthru_args or []\n\n def items(self):\n return options.items()\n\n @property\n def scope_to_flags(self):\n return {}\n\n def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):\n \"\"\"Returns a list of fingerprintable (option type, option value) pairs for\n the given scope.\n\n Note that this method only collects values for a single scope, NOT from\n all enclosing scopes as in the Options class!\n\n :param str bottom_scope: The scope to gather fingerprintable options for.\n :param bool include_passthru: Whether to include passthru args captured by `bottom_scope` in the\n fingerprintable options.\n \"\"\"\n pairs = []\n if include_passthru:\n pu_args = self.passthru_args_for_scope(bottom_scope)\n pairs.extend((str, arg) for arg in pu_args)\n\n option_values = self.for_scope(bottom_scope)\n for option_name, option_type in fingerprintable[bottom_scope].items():\n pairs.append((option_type, option_values[option_name]))\n return pairs\n\n def __getitem__(self, scope):\n return self.for_scope(scope)\n\n return FakeOptions()",
"def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out",
"def get_options(self):\n return self._scoped_options",
"def create_options_for_optionables(optionables,\n options=None,\n options_fingerprintable=None,\n passthru_args=None):\n all_options = defaultdict(dict)\n fingerprintable_options = defaultdict(dict)\n bootstrap_option_values = None\n\n if options_fingerprintable:\n for scope, opts in options_fingerprintable.items():\n fingerprintable_options[scope].update(opts)\n\n def register_func(on_scope):\n scoped_options = all_options[on_scope]\n scoped_fingerprintables = fingerprintable_options[on_scope]\n register = _options_registration_function(scoped_options, scoped_fingerprintables)\n register.bootstrap = bootstrap_option_values\n register.scope = on_scope\n return register\n\n # TODO: This sequence is a bit repetitive of the real registration sequence.\n\n # Register bootstrap options and grab their default values for use in subsequent registration.\n GlobalOptionsRegistrar.register_bootstrap_options(register_func(GLOBAL_SCOPE))\n bootstrap_option_values = _FakeOptionValues(all_options[GLOBAL_SCOPE].copy())\n\n # Now register the full global scope options.\n GlobalOptionsRegistrar.register_options(register_func(GLOBAL_SCOPE))\n\n for optionable in optionables:\n optionable.register_options(register_func(optionable.options_scope))\n\n if options:\n for scope, opts in options.items():\n all_options[scope].update(opts)\n\n return create_options(all_options,\n passthru_args=passthru_args,\n fingerprintable_options=fingerprintable_options)",
"def get_explicitly_set_fields_by_scope(self, scope=Scope.content):\r\n result = {}\r\n for field in self.fields.values():\r\n if (field.scope == scope and field.is_set_on(self)):\r\n result[field.name] = field.read_json(self)\r\n return result",
"def all_options():\n return _OptionRegistry.values()",
"def _function_scope_options(self, fn_scope):\n # Top-level function receive the options that were directly requested.\n # All others receive the options corresponding to a recursive conversion.\n # Note: this mainly controls the user_requested flag, which is important\n # primarily because the FunctionScope context also creates a\n # ControlStatusCtx(autograph=ENABLED) when user_requested is True. See\n # function_wrappers.py.\n if fn_scope.level == 2:\n return self.ctx.user.options\n return self.ctx.user.options.call_options()",
"def vars(self, scope: str = '') -> VarCollection:\n return self.__wrapped__.vars(scope=scope)",
"def encode_options(options: Dict[str, Union[str, float, int]]) -> List[str]:\n d = list()\n rev_dict = {v: k for k, v in type_mappings.items()}\n for k, v in options.items():\n t = type(v)\n if t not in rev_dict:\n raise OptionParsingError(f\"Unknown option type {t}.\")\n arg = f'{k}={v}={rev_dict[t]}'\n d.append(arg)\n return d",
"def get_options(self):\r\n return self._option_values",
"def to_list(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append((k, v))\n opts_list = sorted(opts_list)\n return opts_list",
"def valuerefs(self):\n return [ref(value) for value in self.itervalues()]",
"def get_all_options(self): \n return self._options.items()",
"def get_specific_options(cls):\n for option in cls._specific_options.items():\n yield option",
"def register_options_on_scope(cls, options, scope):\n cls.register_options(options.registration_function_for_scope(cls.qualify_scope(scope)))",
"def _all_opt_infos(self):\n for info in self._opts.values():\n yield info, None\n for group in self._groups.values():\n for info in group._opts.values():\n yield info, group",
"def scopes(self):\n return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]",
"def list_cache_opts():\n return [(g, copy.deepcopy(o)) for g, o in _cache_opts]",
"def list_scrubber_opts():\n return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts]",
"def _fields_to_cache(self):\r\n scope_map = defaultdict(set)\r\n for descriptor in self.descriptors:\r\n for field in descriptor.fields.values():\r\n scope_map[field.scope].add(field)\r\n return scope_map",
"def options(self):\n result = []\n for typ in type(self).mro():\n result.extend(k for k, v in typ.__dict__.items()\n if isinstance(v, Option))\n return dict((o, getattr(self, o)) for o in result)",
"def getPermissions(self, scope):\n\n return [permissions.api_enum_for_permission(p)\n for p in permissions.get_permissions(scope)]",
"def values(self):\n return list(item.value for item in self.mechanisms)",
"def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option",
"def option_registrations_iter(self):\n\n def normalize_kwargs(orig_args, orig_kwargs):\n nkwargs = copy.copy(orig_kwargs)\n dest = self.parse_dest(*orig_args, **nkwargs)\n nkwargs[\"dest\"] = dest\n if not (\"default\" in nkwargs and isinstance(nkwargs[\"default\"], RankedValue)):\n type_arg = nkwargs.get(\"type\", str)\n member_type = nkwargs.get(\"member_type\", str)\n default_val = self.to_value_type(nkwargs.get(\"default\"), type_arg, member_type)\n if isinstance(default_val, (ListValueComponent, DictValueComponent)):\n default_val = default_val.val\n nkwargs[\"default\"] = RankedValue(Rank.HARDCODED, default_val)\n return nkwargs\n\n # Yield our directly-registered options.\n for args, kwargs in self._option_registrations:\n normalized_kwargs = normalize_kwargs(args, kwargs)\n yield args, normalized_kwargs",
"def vars(self, scope: str = '') -> VarCollection:\n if scope:\n return VarCollection((scope + k, v) for k, v in self.vc.items())\n return VarCollection(self.vc)",
"def get_options(cls):\n for option in cls._general_options.items():\n yield option\n for option in cls._specific_options.items():\n yield option",
"def options() -> List:\n return list(c.value for c in Plugin)",
"def availableValues(self):\n return [x.name for x in self._field.enum_type.values]",
"def values(self):\n return {n: getattr(self, n) for n in self._hparam_types.keys()}"
]
| [
"0.6032837",
"0.57206696",
"0.54889756",
"0.5399676",
"0.53649104",
"0.5362223",
"0.53584826",
"0.5325002",
"0.5310102",
"0.5294651",
"0.5274094",
"0.5268435",
"0.5239358",
"0.52050084",
"0.5195042",
"0.51844776",
"0.5182056",
"0.5179435",
"0.51662195",
"0.5162545",
"0.5096324",
"0.5075009",
"0.50421596",
"0.5037394",
"0.5024371",
"0.50208473",
"0.50189185",
"0.5014139",
"0.500657",
"0.4997697"
]
| 0.7801775 | 0 |
Gets the largest possible amplitude representable by a given sample width The formula is 2^(n-1) - 1 where n is the number of bits the first -1 is because the result is signed the second -1 is because the value is 0-based e.g. if n=3 then 2^(3-1)-1 => 3 if n=4 then 2^(4-1)-1 => 7 | def calculateMaxAmplitude(sampleWidth: int) -> int:
return 2 ** (sampleWidth * NUM_BITS_IN_A_BYTE - 1) - 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)",
"def getNormalisedWidth( self, width ):\n\t\treturn int( self.waveread.getnframes() * float(width) )",
"def get_power(frames, num_fft):\n #a = get_magnitude(frames, num_fft)\n #b = np.square(a)\n #print('max : ', np.max(a))\n #print('min : ', np.min(a))\n #print('sq max : ', np.max(b))\n #print('sq min : ', np.min(b))\n #print(a.shape)\n #print(b.shape)\n #return b/num_fft\n return np.square(get_magnitude(frames, num_fft) / np.sqrt(num_fft))",
"def peak_height(self):\n return np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])",
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur",
"def lmax(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return max(self.wavelength[cond])",
"def calculate_wavelength_metric(wavelength_min, wavelength_max):\n length_max = np.log(550) * 2\n wavelength = np.abs(wavelength_max + wavelength_min) / 2\n log_wl = np.log(wavelength)\n default_met = np.array(log_wl / length_max)\n scaled_met = 1.75 * (default_met - 0.5) + 0.5\n if wavelength == 0:\n return 0\n else:\n return scaled_met.clip(min=10e-11, max=1)",
"def max_power_spectrum(sig, FS):\n\n if np.std(sig) == 0:\n return float(max(signal.welch(sig, int(FS), nperseg=len(sig))[1]))\n else:\n return float(max(signal.welch(sig/np.std(sig), int(FS), nperseg=len(sig))[1]))",
"def perfectrefl(wavelength):\n return 1.0",
"def pickNarrow(length):\n return(int(np.ceil(np.log10(length))))",
"def get_max_width(binary_mask):\n start_px = 0\n end_px = 0\n\n for i, row in enumerate(binary_mask):\n max = np.argmax(row)\n if max > 0:\n start_px = i\n break\n\n for i, row in enumerate(binary_mask[::-1]):\n max = np.argmax(row)\n if max > 0:\n end_px = i\n break\n\n return binary_mask.shape[0] - start_px - end_px",
"def width(self):\n return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)",
"def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def calBitLen(n, p):\n m = int(-(n*math.log(p))/BloomFilter.ln2p2)\n # round up to 32 bits\n if m%32: m += (32-m%32)\n return m",
"def max_level(data: np.ndarray) -> int:\n shape = data.shape[1:] # exclude channel dimension\n return min(shape).bit_length() - 1",
"def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample",
"def wavelength_to_wavenumber(wavelength):\n return 1. / wavelength",
"def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number",
"def twos_power_max(number):\n\n bits = bits_list(number)\n return len(bits) - 1",
"def est_maxlevel(dims,bandwidth):\n lev = math.floor((math.log(min(dims))/math.log(2)-2)/bandwidth)\n lev=int(lev)\n return lev",
"def waveform_width(waveform, cutoff=0.75):\n waveform = np.squeeze(waveform)\n if np.ndim(waveform) != 1:\n raise ValueError('Expected 1-dimensional waveform.')\n if len(waveform) < 2:\n raise ValueError('Too short waveform.')\n if not (0 <= cutoff < 1):\n raise ValueError('Cuttoff must be in range [0, 1).')\n\n min_border = max(1, int(len(waveform) * cutoff))\n idx_min = np.argmin(waveform[:min_border])\n idx_max = np.argmax(waveform[idx_min:]) + idx_min\n width = idx_max - idx_min\n\n return width",
"def findMaximal(freqSet):",
"def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]",
"def perceptual_amplitude_dbb(frequency: float) -> float:\n # See http://www.sengpielaudio.com/BerechnungDerBewertungsfilter.pdf\n\n num = 12200.0 ** 2. * frequency ** 3\n den = (frequency ** 2. + 20.6) * (frequency ** 2. + 12200. ** 2.) * np.sqrt(frequency ** 2. + 158.5 ** 2.)\n return num / den",
"def get_width_constant(width, width_constant_array):\n return min ( map (lambda y: [abs(y-width),y], width_constant_array))[1]",
"def determine_window_size(rectified_signal):\n logging.debug('running determine_window_size function')\n first_peak = first_peak_detect(rectified_signal, 1)\n second_peak = first_peak_detect(rectified_signal, first_peak + 1)\n return (second_peak - first_peak) * WINDOW_MULTIPLIER",
"def maxwavelen(self):\n return self._maxwavelen",
"def scott_rule_of_thumb(n_samples, dimension):\n return n_samples ** (-1.0 / (dimension + 4))",
"def probing_frequency(dur: int) -> float:\n freq = min(dur / 8.0, 1.0)\n freq = max(dur / 64.0, freq)\n return max(freq, 0.1)"
]
| [
"0.665759",
"0.61814004",
"0.6125938",
"0.61236525",
"0.59300905",
"0.5914083",
"0.5900648",
"0.58958656",
"0.5878462",
"0.5856483",
"0.57987416",
"0.57597476",
"0.57282406",
"0.5662221",
"0.56454945",
"0.5642162",
"0.5641904",
"0.55994546",
"0.5583797",
"0.55745655",
"0.5537634",
"0.5503305",
"0.54981434",
"0.54577035",
"0.54542774",
"0.54060644",
"0.5402065",
"0.53962153",
"0.53918314",
"0.5346527"
]
| 0.8331578 | 0 |
Output frames using the same parameters as this Wav | def outputFrames(self, frames: bytes, outputFN: str) -> None:
outWave = wave.open(outputFN, "w")
outWave.setparams(
[
self.nchannels,
self.sampleWidth,
self.frameRate,
len(frames),
self.comptype,
self.compname,
]
)
outWave.writeframes(frames) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stream_frames(video_capture):",
"def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. / max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()",
"def write(frame):\n video_writer.write(frame)",
"def write_frames(self, images):\n for img in images:\n self.write_frame(img)",
"def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()",
"def outputSingleFrame(self, frame=None):\n if frame is None:\n frame = 1\n\n self.loadFringe(frame=frame)\n\n outputName = self.inputFilenames['ofd'][:-4] + '_single_f' + str(frame) + '.ofd'\n\n with open(outputName, 'wb') as f:\n self.rawBScan.astype('uint16').tofile(f)",
"def frame_dump(filename, frametime, output_filename='out.png', \n meth='ffmpeg fast', subseek_cushion=20., verbose=False, dry_run=False,\n very_verbose=False):\n \n if meth == 'mplayer':\n raise ValueError(\"mplayer not supported\")\n elif meth == 'ffmpeg best':\n # Break the seek into a coarse and a fine\n coarse = np.max([0, frametime - subseek_cushion])\n fine = frametime - coarse\n syscall = 'ffmpeg -y -ss %r -i %s -ss %r -vframes 1 %s' % (\n coarse, filename, fine, output_filename)\n elif meth == 'ffmpeg accurate':\n syscall = 'ffmpeg -y -i %s -ss %r -vframes 1 %s' % (\n filename, frametime, output_filename)\n elif meth == 'ffmpeg fast':\n syscall = 'ffmpeg -y -ss %r -i %s -vframes 1 %s' % (\n frametime, filename, output_filename)\n \n if verbose:\n print(syscall)\n if not dry_run:\n #os.system(syscall)\n syscall_l = syscall.split(' ')\n syscall_result = subprocess.check_output(syscall_l, \n stderr=subprocess.STDOUT)\n if very_verbose:\n print(syscall_result)",
"def save_video(frames, output_file, output_fps=24):\n if not osp.exists(osp.dirname(output_file)):\n os.makedirs(osp.dirname(output_file))\n\n num_frames, height, width, channels = frames.shape\n frame_size = (width, height)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(output_file, fourcc, output_fps, frame_size)\n\n for i in range(num_frames):\n video.write(frames[i])\n cv2.destroyAllWindows()\n video.release()",
"def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result",
"def write_to_video(frames_list, file_name, fps):\n frame_width = frames_list[0].shape[0]\n frame_height = frames_list[0].shape[1]\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n out = cv2.VideoWriter(file_name,fourcc,\n fps, (frame_height,frame_width))\n\n for frame in frames_list:\n \n out.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n out.release()",
"def record(self):\n\n while True:\n frames = []\n self.stream.start_stream()\n for i in range(self.num_frames):\n data = self.stream.read(FRAMES_PER_BUFFER)\n frames.append(data)\n self.output.seek(0)\n w = wave.open(self.output, 'wb')\n w.setnchannels(CHANNELS)\n w.setsampwidth(self.audio.get_sample_size(FORMAT))\n w.setframerate(RATE)\n w.writeframes(b''.join(frames))\n w.close()\n yield",
"def write_frame(self, img):\n if img.shape[0] % 2 != 0:\n print(\"Warning: height is not divisible by 2! Dropping last row\")\n img = img[:-1]\n if img.shape[1] % 2 != 0:\n print(\"Warning: width is not divisible by 2! Dropping last column\")\n img = img[:, :-1]\n if self.post_processor:\n img = self.post_processor.process(img)\n if self.width is None:\n self.width = img.shape[0]\n self.height = img.shape[1]\n assert os.path.exists(self.directory)\n fn = FRAME_FN_TEMPLATE % self.frame_counter\n self.frame_fns.append(fn)\n imwrite(img, os.path.join(self.frame_directory, fn))\n self.frame_counter += 1\n if self.frame_counter % self.next_video_checkpoint == 0:\n if self.automatic_build:\n self.make_video()\n self.next_video_checkpoint *= 2",
"def seqIo_frImgs(fName, header=[], aviName=[], Is=[], sDir=[], name='I', ndig=5, f0=0, f1=1e6):\n \n if aviName!=[]: #avi movie exists\n vc = cv2.VideoCapture(aviName)\n if vc.isOpened(): rval = True\n else:\n rval = False\n print('video not readable')\n return\n fps = vc.get(cv2.cv.CV_CAP_PROP_FPS)\n NUM_FRAMES = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n print(NUM_FRAMES)\n IM_TOP_H = vc.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)\n IM_TOP_W = vc.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)\n header['width']=IM_TOP_W\n header['height']=IM_TOP_H\n header['fps']=fps\n\n sw = seqIo_writer(fName,header)\n print('creating seq from AVI')\n # initialize timer\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=NUM_FRAMES)\n for f in range(NUM_FRAMES):\n rval, im = vc.read()\n if rval:\n im= im.astype(np.uint8)\n sw.addFrame(im)\n timer.update(f)\n sw.close()\n timer.finish()\n elif Is==[]:\n assert(os.path.isdir(sDir))\n sw = seqIo_writer(fName,header)\n frmstr = '%s/%s%%0%ii.%s' % (sDir,name,ndig,header.ext)\n for frame in range(f0,f1):\n f = frmstr % frame\n if not os.path.isfile(f):break\n fid = open(f, 'r')\n if fid<0: sw.close(); assert(False)\n I = fid.read()\n fid.close()\n b = bytearray(I)\n assert (b[0] == 255 and b[1] == 216 and b[-2] == 255 and b[-1] == 217); # JPG\n I = np.array(list(b)).astype(np.uint8)\n sw.addFrame(I,0,0)\n sw.close()\n if frame==f0: print('No images found')\n else:\n nd = len(Is.shape)\n if nd==2: nd=3\n assert(nd<=4)\n nFrm = Is.shape[nd-1]\n header['height']=Is.shape[0]\n header['width']=Is.shape[1]\n sw =seqIo_writer(fName,header)\n if nd==3:\n for f in range(nFrm): sw.addFrame(Is[:,:,f])\n if nd==4:\n for f in range(nFrm): sw.addFrame(Is[:,:,:,f])\n sw.close()",
"def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def display_frames_as_gif(frames, video_name):\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n #plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi = 72)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)\n# display(display_animation(anim, default_mode='loop'))\n anim.save(result_floder + '/' + video_name, writer=writer)",
"def _generate_video(\n out_file,\n n_frames=5,\n width=100,\n height=50,\n seed=0,\n fps=24,\n broken=False,\n):\n is_mpeg = out_file.endswith(\".mpeg\")\n video_format = \"libx264rgb\"\n pixel_format = \"rgb24\"\n\n if is_mpeg:\n video_format = \"mpeg1video\"\n pixel_format = \"yuv420p\"\n\n if broken:\n n_frames = 0\n\n np.random.seed(seed)\n container = av.open(out_file, mode=\"w\")\n stream = container.add_stream(video_format, rate=fps)\n stream.width = width\n stream.height = height\n stream.pix_fmt = pixel_format\n\n if is_mpeg:\n frames = [av.VideoFrame(width, height, pixel_format) for i in range(n_frames)]\n else:\n # save lossless video\n stream.options[\"crf\"] = \"0\"\n images = (np.random.randn(n_frames, height, width, 3) * 255).astype(np.uint8)\n frames = [\n av.VideoFrame.from_ndarray(image, format=pixel_format) for image in images\n ]\n\n for frame in frames:\n for packet in stream.encode(frame):\n container.mux(packet)\n\n if not broken:\n # flush the stream\n # video cannot be loaded if this is omitted\n packet = stream.encode(None)\n container.mux(packet)\n\n container.close()\n\n pil_images = [frame.to_image() for frame in frames]\n return pil_images",
"def saveFramesToVideo(frames, videoPath): \n fourcc = cv2.VideoWriter_fourcc('a','v','c','1')\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n [height,width]=frames[0].shape[0:2]\n writer = cv2.VideoWriter(videoPath, fourcc, 30, (width, height), 1)\n for i in range(frames.shape[0]):\n frameBGR = yiq2bgrUint(frames[i])\n writer.write(frameBGR)\n writer.release()",
"def save_frames(frames, out_dir, as_row=True, as_gif=False):\n os.makedirs(out_dir, exist_ok=True)\n if frames.dtype == torch.uint8: # save_image needs float value in [0, 1]\n frames = frames.float()\n frames = frames / 255.\n if as_gif:\n gif_dir = 'gif_images'\n os.makedirs(os.path.join(out_dir, gif_dir), exist_ok=True)\n for i, frames_i in enumerate(frames):\n if as_row:\n out_file = os.path.join(out_dir, f'img_{i:04d}.png')\n save_image(frames_i.clone(), out_file, nrow=frames_i.shape[0])\n if as_gif:\n for j, frame in enumerate(frames_i):\n out_file = os.path.join(out_dir, gif_dir, f'img_{i:04d}_{j:04d}.png')\n save_image(frame.unsqueeze(0), out_file)\n \n out_file = os.path.join(out_dir, f'img_{i:04d}.gif')\n make_gif(os.path.join(out_dir, gif_dir), out_file, pattern=f'img_{i:04d}_*', fps=10)\n \n print(f'Saved images to {out_dir}')",
"def create_png(input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, f_max, f_min, wavefile, palette, channel):\n \n print \"processing file %s:\\n\\t\" % input_file,\n \n audio_file = audiolab.sndfile(input_filename, 'read') #opens the wavfile; audio_file is an object now\n \n samples_per_pixel = audio_file.get_nframes() / float(image_width)\n nyquist_freq = (audio_file.get_samplerate() / 2) + 0.0\n \"\"\"\n Initializes AudioProcessor class, which does FFT analysis and spits \n out amplitudes and frequencies to the SpectrogramImage and WaveformImage \n classes below later. For a stereo wav file, this selects a single channel \n to analyze. We might want to analyze both channels to give more input to\n the visualizer,though.\n \"\"\"\n processor = AudioProcessor(audio_file, fft_size, channel, numpy.hanning)\n \n if wavefile==1:\n waveform = WaveformImage(image_width, image_height, palette)\n spectrogram = SpectrogramImage(image_width, image_height, fft_size, f_max, f_min, nyquist_freq, palette)\n \n for x in range(image_width):\n #shows progress\n if x % (image_width/10) == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n \n seek_point = int(x * samples_per_pixel)\n next_seek_point = int((x + 1) * samples_per_pixel)\n \n (spectral_centroid, db_spectrum) = processor.spectral_centroid(seek_point)\n \n #let's have a look at the spectral centroid and the db_spectrum\n #print \"Spectral Centroid:\" + str(spectral_centroid)\n #print \"DB Spectrum:\" + str(db_spectrum)\n \n if wavefile==1:\n #aha! The peaks and spectral centroid make up the waveform.\n #Since the spectral centroid indicates timbre (often referred to as color),\n #it's probably what colors the waveform.\n peaks = processor.peaks(seek_point, next_seek_point)\n #let's have a look at these peaks\n #print \"Peaks:\" + str(peaks)\n waveform.draw_peaks(x, peaks, spectral_centroid)\n \n spectrogram.draw_spectrum(x, db_spectrum)\n \n if wavefile==1:\n waveform.save(output_filename_w)\n spectrogram.save(output_filename_s)\n \n print \" done\"",
"def anim_save(z, filename, display=True, vext='.mp4',\n T_movie=T_movie, verbose=False, **kwargs):\n\n import tempfile\n# from scipy.misc.pilutil import toimage\n import imageio\n if z.ndim == 4: # colored movie\n N_X, N_Y, three, N_frame = z.shape\n else: # grayscale\n N_X, N_Y, N_frame = z.shape\n fps = int(N_frame / T_movie)\n def make_frames(z):\n files = []\n tmpdir = tempfile.mkdtemp()\n\n if verbose:\n print('Saving sequence ' + filename + ' as a ' + vext + ' format')\n for frame in range(N_frame):\n fname = 'frame%06d.png' % frame\n full_fname = os.path.join(tmpdir, fname)\n image = np.rot90(z[..., frame])\n imageio.imsave(full_fname, (image*255).astype(np.uint8), compression=0, quantize=256)\n files.append(fname)\n return tmpdir, files\n\n def test_ffmpeg():\n ret = os.system('ffmpeg -version')\n if not ret==0:\n raise Exception('Do you have ffmpeg installed in your PATH?')\n\n def remove_frames(tmpdir, files):\n \"\"\"\n Remove frames from the temp folder\n\n \"\"\"\n for fname in files: os.remove(os.path.join(tmpdir, fname))\n if not(tmpdir == None): os.rmdir(tmpdir)\n\n if verbose:\n verb_ = ''\n else:\n verb_ = ' 2>/dev/null'\n if vext == '.mpg':\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -f image2 -r ' + str(fps) + ' -y '\n os.system('ffmpeg -i ' + tmpdir + '/frame%06d.png ' + options + filename + vext + verb_)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.mp4': # specially tuned for iPhone/iPod http://www.dudek.org/blog/82\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -f mp4 -pix_fmt yuv420p -c:v libx264 -g ' + str(fps) + ' -r ' + str(fps) + ' -y '\n cmd = 'ffmpeg -i ' + tmpdir + '/frame%06d.png ' + options + filename + vext + verb_\n os.system(cmd)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.webm':\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -f webm -pix_fmt yuv420p -vcodec libvpx -qmax 12 -g ' + str(fps) + ' -r ' + str(fps) + ' -y '\n cmd = 'ffmpeg -i ' + tmpdir + '/frame%06d.png ' + options + filename + vext + verb_\n os.system(cmd)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.mkv': # specially tuned for iPhone/iPod http://www.dudek.org/blog/82\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -y -f image2pipe -c:v png -i - -c:v libx264 -preset ultrafast -qp 0 -movflags +faststart -pix_fmt yuv420p -g ' + str(fps) + ' -r ' + str(fps) + + ' -y '\n cmd = 'cat ' + tmpdir + '/*.png | ffmpeg ' + options + filename + vext + verb_\n os.system(cmd)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.gif': # http://www.uoregon.edu/~noeckel/MakeMovie.html\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n ret = os.system('convert -version')\n if not ret==0:\n raise Exception('Do you have convert installed in your PATH?')\n options = ' -set delay 8 -colorspace GRAY -colors 256 -dispose 1 -loop 0 '\n os.system('convert ' + tmpdir + '/frame*.png ' + options + filename + vext + verb_)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.png':\n tmpdir, files = make_frames(z)\n import shutil\n shutil.copytree(tmpdir, filename)\n remove_frames(tmpdir, files)\n\n elif vext == '.zip':\n do_bmp = 
False # I was asked at some point to generate bmp files - it is highly unlikely to happen again...\n tmpdir, files = make_frames(z)\n import zipfile\n with zipfile.ZipFile(filename + vext, \"w\") as zf:\n if do_bmp:\n # convert to BMP for optical imaging\n files_bmp = []\n for fname in files:\n fname_bmp = os.path.splitext(fname)[0] + '.bmp'\n # generates 8-bit bmp (old format)\n os.system('convert ' + fname + ' ppm:- | convert -size 256x256+0 -colors 256 -colorspace Gray - BMP2:' + fname_bmp)\n files_bmp.append(fname_bmp)\n zf.write(fname_bmp)\n remove_frames(tmpdir=None, files=files_bmp)\n else:\n for fname in files:\n full_fname = os.path.join(tmpdir, fname)\n zf.write(full_fname, arcname=fname)\n remove_frames(tmpdir, files)\n\n elif vext == '.mat':\n from scipy.io import savemat\n savemat(filename + vext, {'z':z})\n\n elif vext == '.npy':\n np.save(filename + vext, z)\n\n elif vext == '.h5':\n from tables import open_file, Float32Atom\n with open_file(filename + vext, 'w') as hf:\n o = hf.create_carray(hf.root, 'stimulus', Float32Atom(), z.shape)\n o = z\n else:\n print(' WARNING: extension ', vext , 'not existing! ')",
"def video(self, file, draw_frames):\n # Just loop through the generator as we're only interested\n # in the output at the end.\n for _ in self.video_generator(file, draw_frames):\n continue\n\n self._save_tracks(file)",
"def output(self, frame):\n raise NotImplementedError(\"output() needs to be implemented \"\n \"by subclass of Driver\")",
"def enframe(samples, winlen, winshift):\n # The window length is sampling_rate*window_length_in_ms\n length = len(samples)\n start_indices = np.arange(0, length, winshift)\n end_indices = np.arange(winlen, length, winlen - winshift)\n pairs = zip(start_indices, end_indices)\n\n output = [samples[i[0]: i[1]] for i in pairs]\n\n # myplot(output, 'Framing')\n\n return output",
"def save_video_frames(video_in, image_dir, image_prefix, frame_offset=1):\n print('Analyzing file: ' + video_in)\n print('Storing in directory: ' + image_dir)\n print('Frame offset: ' + str(frame_offset))\n vidcap = cv2.VideoCapture(video_in)\n success, image = vidcap.read()\n filename_count = 0\n frame_count = 0\n while success:\n success,image = vidcap.read()\n frame_count += 1\n if (frame_count % frame_offset == 0):\n filename = os.path.join(image_dir, '%s_frame%d.jpg' % (image_prefix, filename_count))\n cv2.imwrite(filename, image) # save frame as JPEG file\n filename_count += 1\n print(str(filename_count) + ' frames saved')"
]
| [
"0.6546506",
"0.6398653",
"0.63754684",
"0.63245595",
"0.61990476",
"0.61445135",
"0.61210597",
"0.6068414",
"0.6063512",
"0.5967238",
"0.5961839",
"0.5925323",
"0.5919997",
"0.58821714",
"0.5870627",
"0.5870627",
"0.5870627",
"0.5870627",
"0.5870627",
"0.5870627",
"0.5841754",
"0.5836374",
"0.58346355",
"0.58225256",
"0.58181286",
"0.57629395",
"0.57515574",
"0.57492346",
"0.57447606",
"0.5731952"
]
| 0.7252709 | 0 |
Gets the index in the frame list for the given time | def _getIndexAtTime(self, startTime: float) -> int:
return round(startTime * self.frameRate * self.sampleWidth) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_frameidx(self, fps):\n return int(self.hours * MIN_PER_H * S_PER_MIN * fps \\\n + self.minutes * S_PER_MIN * fps \\\n + self.seconds * fps \\\n + self.milliseconds // (100 / fps))",
"def timestep_idx(self, timestep):\n timestep = pd.to_datetime(timestep)\n idx = np.where(self.time_index == timestep)[0][0]\n\n return idx",
"def time(self):\r\n return self._idx",
"def get_index_before_time(messages, time):\n # Getting the timestamp that specifies the cutoff.\n timestamp = messages[0].timestamp + time\n\n for index, message in enumerate(messages):\n if message.timestamp > timestamp:\n return index - 1",
"def frame_index(self):\n return self._findex",
"def frame_idx(self) -> int:\n pass",
"def _get_frame_index(self, frame):\n if isinstance(frame, cf.CoordinateFrame):\n frame = frame.name\n #frame_names = [getattr(item[0], \"name\", item[0]) for item in self._pipeline]\n frame_names = [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline]\n return frame_names.index(frame)",
"def get_meta_idx(frames_metadata,\n time_idx,\n channel_idx,\n slice_idx,\n pos_idx,\n row_start,\n col_start):\n frame_idx = frames_metadata.index[\n (frames_metadata['channel_idx'] == int(channel_idx)) &\n (frames_metadata['time_idx'] == int(time_idx)) &\n (frames_metadata[\"slice_idx\"] == int(slice_idx)) &\n (frames_metadata[\"pos_idx\"] == int(pos_idx)) &\n (frames_metadata[\"row_start\"] == int(row_start)) &\n (frames_metadata[\"col_start\"] == int(col_start))\n ].tolist()\n return frame_idx[0]",
"def getKeyIndexFromTime(self, *args):\n return _osgAnimation.QuatInterpolator_getKeyIndexFromTime(self, *args)",
"def get_frame_index(self, global_idx):\n vid_idx_idx = np.searchsorted(self.num_frames_array, global_idx, side='right')-1\n frame_idx = global_idx - self.num_frames_array[vid_idx_idx]\n vid_idx = self.task_ids[int(vid_idx_idx)]\n return vid_idx, frame_idx",
"def get_scan_by_time(self, time):\n scan_ids = tuple(self.index)\n lo = 0\n hi = len(scan_ids)\n while hi != lo:\n mid = (hi + lo) // 2\n sid = scan_ids[mid]\n sid = sid.decode('utf-8')\n scan = self.get_scan_by_id(sid)\n if not self._validate(scan):\n sid = scan_ids[mid - 1]\n scan = self.get_scan_by_id(sid)\n if not self._validate(scan):\n sid = scan_ids[mid - 2]\n scan = self.get_scan_by_id(sid)\n\n scan_time = scan.scan_time\n if scan_time == time:\n return scan\n elif (hi - lo) == 1:\n return scan\n elif scan_time > time:\n hi = mid\n else:\n lo = mid\n if hi == 0 and not self._use_index:\n raise TypeError(\"This method requires the index. Please pass `use_index=True` during initialization\")",
"def solar_time_index(self):\n if self._solar_time_index is None:\n with Resource(self.solar_fpath) as res:\n self._solar_time_index = res.time_index\n return self._solar_time_index",
"def index(self):\n return self.frame.index",
"def solar_time_index(self):\n return self.data.solar_time_index",
"def time_index(self):\n if self._time_index is None and self.cf_file is not None:\n with Outputs(self.cf_file) as cfh:\n if 'time_index' in cfh.datasets:\n self._time_index = cfh.time_index\n\n return self._time_index",
"def frames_to_indices(self, start_frame: Union[int, None] = None, end_frame: Union[int, None] = None):\n # must be implemented in subclass\n if start_frame is None:\n init = 0\n else:\n init = np.searchsorted(self._spikestimes, start_frame, side=\"left\")\n if end_frame is None:\n endi = self._spikestimes.shape[0]\n else:\n endi = np.searchsorted(self._spikestimes, end_frame, side=\"left\")\n return slice(init, endi, 1)",
"def find_index(weather_data: dict, date: datetime) -> int:\n weather_list = weather_data['list']\n for index, weather in enumerate(weather_list):\n if weather['dt_txt'] == date.strftime('%Y-%m-%d %H:%M:%S'):\n return index\n return 0",
"def find_time_idx(nc, needle):\n tm = nc.variables[\"time\"]\n tstr = tm.units.replace(\"days since \", \"\")\n t0 = datetime.datetime.strptime(tstr.split()[0], \"%Y-%m-%d\")\n cal360 = True if tm.calendar == \"360_day\" else False\n cal365 = True if tm.calendar == \"365_day\" else False\n times = tm[:]\n for i, time in enumerate(times):\n if cal360:\n time = time - 15\n years = time / 360\n months = (time % 360) / 30\n ts = datetime.datetime(t0.year + years, 1 + months, 1)\n elif cal365:\n years = int(time / 365)\n months = int((time % 365) / 30)\n ts = datetime.datetime(t0.year + years, 1 + months, 1)\n else:\n ts = t0 + datetime.timedelta(days=time)\n if ts.year == needle.year and ts.month == needle.month:\n print \"Returning: %s/%s for needle: %s\" % (i, len(times), needle)\n return i\n return None",
"def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1",
"def __call__(self,time):\n \n fname = []\n tind =[]\n for t in time:\n flag=1\n for f in self.timelookup.keys():\n\n if t >= self.timelookup[f][0] and t<=self.timelookup[f][-1]:\n# print 'Found tstep %s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n tind.append(othertime.findNearest(t,self.timelookup[f][:]))\n fname.append(f)\n flag=0\n\n# if flag:\n# print 'Warning - could not find matching file for time:%s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n# tind.append(-1)\n# fname.append(-1)\n \n return tind, fname",
"def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])",
"def window_index_time(t,windowsize,overlap):\r\n \r\n try:\r\n t=t.tolist()\r\n except:\r\n t=t\r\n \r\n t1=t[0]\r\n t2=t1 + timedelta(seconds=windowsize)\r\n pt1=[0]\r\n pt2=[othertime.findNearest(t2,t)]\r\n while t2 < t[-1]:\r\n t1 = t2 - timedelta(seconds=overlap)\r\n t2 = t1 + timedelta(seconds=windowsize)\r\n\r\n pt1.append(othertime.findNearest(t1,t))\r\n pt2.append(othertime.findNearest(t2,t))\r\n \r\n return pt1, pt2",
"def seperate_time(sp):\n\n\tind = np.where(sp > 0)[0]\n\treturn (ind)",
"def dayHourIndex(stime, day, hour, speriod):\n\tsecIntoWeek = stime % secInWeek\n\tsecIntoWeekReqd = day * secInday + hour * secInHour\n\t\n\tif secIntoWeek <= secIntoWeekReqd:\n\t\ttsIndex = int((secIntoWeekReqd - secIntoWeek) / speriod)\n\telse:\n\t\ttsIndex = int((secIntoWeekReqd - secIntoWeek + secInWeek) / speriod)\n\treturn tsIndex",
"def time_to_position(tracks, point):\n\n index1 = [index for index, track_point in enumerate(tracks[0]) if track_point == point][0]\n index2 = [index for index, track_point in enumerate(tracks[1]) if track_point == point][0]\n\n # We add one to the length of each track as 0,0 to first point is missing from the track data\n return index1 + 1 + index2 + 1",
"def get_resource_index(self):\n result = -1\n max_sleep_time = self.time_window\n with self._lock:\n while result == -1:\n for i in range(0, self.num_keys):\n curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0)\n\n max_sleep_time = min(max_sleep_time, curr_sleep_time)\n\n if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time():\n self.timers[i][0] = 0\n self.timers[i][1] = 0\n\n if self.timers[i][1] < self.window_limit:\n result = i\n break\n\n if result == -1: # case when all streams are rate limited\n # logging.warning('sleeping for %d seconds.' % max_sleep_time)\n # time.sleep(max_sleep_time)\n return -1 * max_sleep_time\n\n if self.timers[result][0] == 0:\n self.timers[result][0] = time.time()\n\n self.timers[result][1] += 1\n\n return result",
"def get_index(df, index='date_time'): \n for i, full in enumerate(df.axes):\n if full.name == index:\n return (i, full)",
"def _get_signal_index(self, signal):\n # Process signal :\n signal = signal.replace(', :', '').replace(':, ', '')[1:-1]\n # Find index :\n idx = tuple(int(k) for k in signal.split(', '))\n return self._navidx.index(idx)",
"def get_list_index(self):\r\n _debug('simq03b_api.get_list_index')\r\n \r\n s = self.query('LIST:IND?')\r\n return int(s)",
"def index_in_epoch(self):\n return self._index_in_epoch"
]
| [
"0.7054845",
"0.6783032",
"0.657959",
"0.649517",
"0.6385905",
"0.6303458",
"0.626827",
"0.61946434",
"0.6028419",
"0.600086",
"0.5976669",
"0.58895767",
"0.58638656",
"0.5843955",
"0.5803436",
"0.577362",
"0.57635385",
"0.5718886",
"0.5686386",
"0.5670949",
"0.5649976",
"0.56488675",
"0.56411153",
"0.5626642",
"0.5602324",
"0.5592344",
"0.55581266",
"0.55483735",
"0.55347264",
"0.55195886"
]
| 0.72265846 | 0 |
Build an AudioGenerator with parameters derived from a Wav or QueryWav | def fromWav(cls, wav: AbstractWav) -> "AudioGenerator":
return AudioGenerator(wav.sampleWidth, wav.frameRate) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_audio_builder(_encode_queue, _app_config, _lock, _only_wav, _dump_sequencer_log):\n global sequence_builder\n WordNetCache._lock = _lock\n sequence_builder = SequenceBuilder(app_config=_app_config,\n encode_queue=_encode_queue,\n only_wav=_only_wav,\n dump_sequencer_log=_dump_sequencer_log)",
"def _create(self, sound: (list, tuple), data: dict):\n # data substitution\n # TODO: use other params\n inversion: int = data['inversion']\n single_tone: bool = data['single_tone']\n with_bass: bool = data['with_bass']\n bass_note: int = data['bass_note']\n transpose: int = data['transpose']\n octave: int = data['octave']\n pitch: float = data['pitch']\n sec: float = data['sec']\n sampling_rate: int = data['sampling_rate']\n volume_adjustment: (str, float) = data['volume_adjustment']\n title: str = data['title']\n at: str = data['at']\n\n # -*- data sanitization -*-\n # transpose range\n if transpose < -11 or 11 < transpose:\n raise ValueError('\\'transpose\\' should be between -11 and 11.')\n\n # pitch range\n if pitch < 410 or 494 < pitch:\n raise ValueError('\\'pitch\\' should be between 410 and 494.')\n\n # file title regulation\n if not re.fullmatch(r'.+?\\.wav$', title):\n title += '.wav'\n\n # wave initialization\n wave = SoundGenerator.oscillator(0, sec, sampling_rate)\n # -*- end of the data sanitization -*-\n\n # elements' frequencies\n fn = -1 # fn is a num the one before\n\n # wave synthesize\n for i in sound:\n if fn >= i:\n # 15 = 12(octave) + 3(C base-> A base convert)\n f = pitch * 2 ** ((15 + i) / 12)\n else:\n f = pitch * 2 ** ((3 + i) / 12)\n\n wave += SoundGenerator.oscillator(f, sec, sampling_rate)\n\n # memory a number the one before\n fn = i\n\n # volume controlling\n if volume_adjustment == 'auto':\n wave *= 0.1\n elif isinstance(volume_adjustment, (int, float)):\n wave *= volume_adjustment\n else:\n ValueError('\\'volume_adjustment\\' should be \\'auto\\' or float.')\n\n # wave convert\n wave = (wave * float(2 ** 15 - 1)).astype(np.int16)\n\n # path management\n if at is None:\n pth = os.path.join(os.getcwd(), title)\n else:\n os.chdir(at)\n pth = os.path.join(os.getcwd(), title)\n\n # make wave_file\n wavfile.write(pth, sampling_rate, wave)",
"def audio_data_generator(input_audio_paths, config):\n if not input_audio_paths:\n raise ValueError(\"Empty paths\")\n for file_path in input_audio_paths:\n try:\n data = load_audio_data(file_path, config)\n yield speech.StreamingRecognizeRequest(audio_content=data)\n except pydub.exceptions.CouldntDecodeError:\n logging.warn(\"Failed to read audio data from file %s\", file_path)",
"def for_audio_params(\n cls,\n nbits=50,\n freq=3000,\n chk_size_frm=43008,\n sr=44100,\n header_size_words=1,\n header_pattern=None,\n ):\n # repetition: how many times to repeat each bit to make a word\n repetition = int(floor(sr / (2 * freq)))\n # word_size_frm: the size of a word, in frames\n word_size_frm = int(nbits * repetition)\n # redundancy: how many times to repeat a word to make (along with header) a phrase\n redundancy = int(floor((chk_size_frm / 2) / word_size_frm) - header_size_words)\n\n self = cls(\n nbits=nbits,\n redundancy=redundancy,\n repetition=repetition,\n header_size_words=header_size_words,\n header_pattern=header_pattern,\n )\n self.freq = freq\n self.sr = sr\n self.chk_size_frm = chk_size_frm\n self.redundancy = redundancy\n self.repetition = repetition\n return self",
"def _build_link_audio_synthdef(channel_count):\n name = 'system_link_audio_{}'.format(channel_count)\n builder = synthdeftools.SynthDefBuilder(\n name=name,\n out=0,\n in_=16,\n gate=1,\n fade_time=0.02,\n done_action=2,\n )\n with builder:\n start_value = builder['fade_time'] <= 0\n envelope = synthdeftools.Envelope(\n amplitudes=[start_value, 1.0, 0.0],\n durations=[1.0, 1.0],\n curves=synthdeftools.EnvelopeShape.SINE,\n release_node=1.0,\n )\n envelope = ugentools.EnvGen.kr(\n done_action=builder['done_action'],\n envelope=envelope,\n gate=builder['gate'],\n time_scale=builder['fade_time'],\n )\n input_ = ugentools.InFeedback.ar(\n bus=builder['in_'],\n channel_count=channel_count,\n )\n ugentools.Out.ar(\n bus=builder['out'],\n source=input_ * envelope,\n )\n globals()[name] = builder.build()\n __all__.append(name)",
"def __init__(self, origin_dir, dest_dir, val_percentage=0.2, test_percentage=0.3):\r\n self.origin_dir = origin_dir\r\n self.dest_dir = dest_dir\r\n self.val_percentage = val_percentage\r\n self.test_percentage = test_percentage\r\n\r\n self.all_wavs = [] # all wav info list\r\n self.data_index = {\"train\": [], \"valid\": [], \"test\": []}\r\n\r\n # Detail information for an audio\r\n # utt_id: audio hash id, noise_volume: , age: the age of speaker,\r\n # keyword_id: keyword int id, 你好小顺(0), 小顺小顺(1)\r\n # noise_type: 电视剧/动漫/游戏/音乐/直播/说话声/无噪声\r\n # speaker_id: speaker id\r\n # record_speed: fast,normal, slow\r\n # record_equipment: record equipment\r\n # gender: gender of speaker\r\n self.wav_desc = {\r\n \"utt_id\": \"\",\r\n \"noise_volume\": \"00db\",\r\n \"age\": \"00\",\r\n \"keyword_id\": 0,\r\n \"noise_type\": \"TV\",\r\n \"speaker_id\": \"\",\r\n \"distance\": \"\",\r\n \"record_speed\": \"\",\r\n \"record_equipment\": \"\",\r\n \"gender\": \"\"}\r\n\r\n self.keywords_dict = {\"你好小顺\": 0, \"小顺小顺\": 1}\r\n\r\n if not os.path.exists(self.dest_dir):\r\n os.mkdir(os.path.join(self.dest_dir))\r\n os.mkdir(os.path.join(self.dest_dir, \"resources\"))\r\n os.mkdir(os.path.join(self.dest_dir, \"audios\"))",
"def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res",
"def call(self, features, training=True):\n conditioning = self.encode(features, training=training)\n conditioning = self.quantizer(conditioning)\n audio_gen = self.decode(conditioning, training=training)\n if training:\n for loss_obj in self.loss_objs:\n if isinstance(loss_obj, QuantizationLoss):\n loss = loss_obj(conditioning['z_for_loss'], conditioning['z_preq'])\n else:\n loss = loss_obj(features['audio'], audio_gen)\n self._losses_dict[loss_obj.name] = loss\n return audio_gen",
"def __init__(self, source_sig_path=None, audio_key=None,\n signal_length=8192, random_speech_samples=True, **kwargs):\n self.speech_set = None\n if audio_key is None:\n msg = \"To generate the audio data, a path to a source signal\" \\\n \" database is needed\"\n assert source_sig_path is not None, msg\n speech_db_part = kwargs.get('speech_db_part', None)\n msg = \"To generate the audio data, a name of a dataset with \" \\\n \"source signals is needed.\"\n assert speech_db_part is not None, msg\n self.speech_set = \\\n JsonDatabase(source_sig_path).get_dataset(speech_db_part)\n self.random_speech_samples = random_speech_samples\n if source_sig_path is None:\n msg = \"The key under which the audio recording can be found in \" \\\n \"the JSON of the database is required.\"\n assert audio_key is not None, msg\n self.signal_length = signal_length\n self.mic_pair = kwargs.get('mic_pair')\n self.audio_key = audio_key",
"def initialize(self, formGenerator, AWGChannels, modulationMode=None, MWSource=None, mixer=None, formGeneratorType='AWG'):\n instrumentManager = InstrumentManager()\n # optional microwave source attached to this pulse generator\n self._MWSource = instrumentManager.getInstrument(\n MWSource) if MWSource is not None else None\n # optional mixer attached to this pulse generator\n self._mixer = instrumentManager.getInstrument(\n mixer) if mixer is not None else None\n # hardware generator attached to this pulse generator\n self._AWG = instrumentManager.getInstrument(formGenerator)\n # type of generators in AWG, AFG, NOne, ...\n self._formGeneratorType = formGeneratorType\n # dictionary of parameters\n self._params = dict()\n self._params[\"MWSource\"] = MWSource\n # confusion here: should be change for formGenerator\n self._params[\"formGenerator\"] = formGenerator\n self._params[\"modulationMode\"] = modulationMode\n self._params[\"AWGChannels\"] = AWGChannels\n self._params[\"mixer\"] = mixer\n # Obsolete. Replaced by pulseList. Mainted for compatinbbility reasons\n self.pulses = dict()\n self._params[\"pulses\"] = self.pulses\n # Obsolete. Replaced by markersList. Mainted for compatinbbility\n # reasons\n self.markersDict = dict()\n self._params[\"markersDict\"] = self.markersDict\n # Obsolete.\n self.totalPulse = numpy.zeros(\n self.numberOfPoints(), dtype=numpy.complex128)\n self.index = 0 # Obsolete.\n self.indexMarker = 0 # Obsolete.\n # List of pulses (object of class pulse)\n self.pulseList = []\n # List of markers (object of class marker)\n self.markersList1 = ()\n # An array of zeros into which the markers will be concatented\n self.markerArray1 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n # total number of markers attached to this pulse generator\n # #self._AWG.markersPerChannel()*\n self.markersChannels = 2 if self._params[\n \"modulationMode\"] == 'IQMixer' else 1\n if self.markersChannels == 2:\n # List of markers (object of class marker)\n self.markersList2 = ()\n # An array of zeros into which the markers will be concatented\n self.markerArray2 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n self.preparePulseSequence()\n self.sendPulseSequence()\n return",
"def make_audio_track(language_pair, items, part_number):\n global sequence_builder\n try:\n sequence_builder.make_audio_track(language_pair, items, part_number)\n except Exception as e:\n print(str(e))\n print_exc()",
"def generate(self, num_steps):\n music = []\n with tf.variable_scope('batch_size'):\n batch_size = tf.shape(self._inputs)[0]\n\n intro_states = []\n # Generators' forward pass\n for i in range(self.num_tracks):\n with tf.variable_scope(f'inputs/{self.tracks[i]}'):\n inputs = tf.concat([self._x_encoded[i], self._x_feedback], axis=-1)\n\n with tf.variable_scope(f'intro_state/{self.tracks[i]}'):\n state = self.generators[i].steps(inputs)\n intro_states.append(state)\n\n #\n with tf.variable_scope('feedback_sampler'):\n samples_h, _, _ = tf.scan(\n self._feedback_recurrence,\n tf.zeros((num_steps, 1)),\n initializer=(\n tf.zeros((batch_size, self._num_dims_generator, self.num_tracks)),\n intro_states,\n self._feedback_final_state)\n )\n\n with tf.variable_scope('samples/encoded/'):\n samples_h = tf.unstack(tf.transpose(samples_h, [1, 0, 2, 3]), axis=-1)\n\n for i in range(self.num_tracks):\n # Decoding inputs into the original format\n with tf.variable_scope(f'samples/{self.tracks[i]}/'):\n _, samples = self.encoders[i].decode(samples_h[i])\n\n music.append(samples)\n\n with tf.variable_scope('samples/'):\n return tf.stack(music, axis=3, name='music')",
"def __init__(self, passing_wavelengths=None, reflectivity=None):\n self.passing_wavelengths=passing_wavelengths\n self.reflectivity=reflectivity",
"def prepare_audio(a_name, target=False):\n samprate = 16000 # Sampling Rate\n length = 16 # Amount of blocks for 1 walkthrough\n overlap = 8 # Step between samples in amount of blocks\n fft = 1024 # Length of block (64ms)\n\n # Upload and preparing data sets\n # audio_path = \"raw_data_wav/\"\n # full_a_name = audio_path + a_name\n print('loading %s' % a_name)\n audio, _ = lr.load(a_name, sr=samprate)\n audio = filter_audio(audio) # Removing silence and spaces between words\n data = lr.stft(audio, n_fft=fft).swapaxes(0, 1) # Export spectrogram\n samples = []\n\n for i in range(0, len(data) - length, overlap):\n samples.append(np.abs(data[i:i + length])) # Create training sample\n\n results_shape = (len(samples), 1)\n results = np.ones(results_shape) if target else np.zeros(results_shape)\n\n return np.array(samples), results",
"def __init__(self):\n super().__init__(interface.Audio, DEFAULT_PRIORITIES)",
"def generateAudio(audiotype: str, audiometadata: dict):\n try:\n audiotype = audiotype.lower()\n\n if audiotype == \"song\":\n file = Song(audiometadata)\n elif audiotype == \"podcast\":\n file = Podcast(audiometadata)\n elif audiotype == \"audiobook\":\n file = Audiobook(audiometadata)\n else:\n return None\n\n return file\n\n except MetadataValueError as error:\n raise MetadataValueError(error)\n\n except MetadataGenerationError as error:\n raise MetadataGenerationError(error)",
"def generate_waveform(self, mel, normalize=True, batched=True,\n target=8000, overlap=800, do_save_wav=True):\n wav = self.vocoder_manager.infer_waveform(mel,\n normalize=normalize,\n batched=batched,\n target=target,\n overlap=overlap,\n do_save_wav=do_save_wav\n )\n return wav",
"def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()",
"def __init__(self, dic=None, lm=None):\n print(\"Starting\")\n self.pipeline = gst.parse_launch('autoaudiosrc ! audioconvert ! audioresample ! pocketsphinx name=asr ! fakesink')\n bus = self.pipeline.get_bus()\n bus.add_signal_watch()\n bus.connect('message::element', self.element_message)\n\n asr=self.pipeline.get_by_name('asr')\n asr.set_property('lm', lm)\n asr.set_property('dict', dic)\n\n self.pipeline.set_state(gst.State.PLAYING)",
"def sounding_generator(\n option_dict, desired_num_examples=None, desired_full_id_strings=None,\n desired_times_unix_sec=None):\n\n full_storm_id_strings, storm_times_unix_sec, storm_to_file_indices = (\n _find_examples_to_read(\n option_dict=option_dict, desired_num_examples=desired_num_examples,\n desired_full_id_strings=desired_full_id_strings,\n desired_times_unix_sec=desired_times_unix_sec)\n )\n print('\\n')\n\n example_file_names = option_dict[trainval_io.EXAMPLE_FILES_KEY]\n num_examples_per_batch = option_dict[trainval_io.NUM_EXAMPLES_PER_BATCH_KEY]\n\n sounding_field_names = option_dict[trainval_io.SOUNDING_FIELDS_KEY]\n sounding_heights_m_agl = option_dict[trainval_io.SOUNDING_HEIGHTS_KEY]\n\n target_name = option_dict[trainval_io.TARGET_NAME_KEY]\n binarize_target = option_dict[trainval_io.BINARIZE_TARGET_KEY]\n normalization_type_string = option_dict[trainval_io.NORMALIZATION_TYPE_KEY]\n\n if normalization_type_string is not None:\n normalization_param_file_name = option_dict[\n trainval_io.NORMALIZATION_FILE_KEY\n ]\n min_normalized_value = option_dict[trainval_io.MIN_NORMALIZED_VALUE_KEY]\n max_normalized_value = option_dict[trainval_io.MAX_NORMALIZED_VALUE_KEY]\n\n num_classes = target_val_utils.target_name_to_num_classes(\n target_name=target_name, include_dead_storms=False)\n\n if soundings.PRESSURE_NAME in sounding_field_names:\n sounding_field_names_to_read = sounding_field_names\n else:\n sounding_field_names_to_read = (\n sounding_field_names + [soundings.PRESSURE_NAME]\n )\n\n this_example_dict = input_examples.read_example_file(\n netcdf_file_name=example_file_names[0], read_all_target_vars=False,\n target_name=target_name, metadata_only=True)\n\n dummy_radar_field_names = [\n this_example_dict[input_examples.RADAR_FIELDS_KEY][0]\n ]\n dummy_radar_heights_m_agl = numpy.array(\n [this_example_dict[input_examples.RADAR_HEIGHTS_KEY][0]], dtype=int\n )\n\n sounding_matrix = None\n target_values = None\n sounding_pressure_matrix_pascals = None\n\n next_example_index = 0\n\n while True:\n batch_indices = _find_next_batch(\n example_to_file_indices=storm_to_file_indices,\n num_examples_per_batch=num_examples_per_batch,\n next_example_index=next_example_index)\n\n if batch_indices is None:\n raise StopIteration\n\n next_example_index = numpy.max(batch_indices) + 1\n this_file_index = storm_to_file_indices[batch_indices[0]]\n\n these_full_id_strings = [\n full_storm_id_strings[k] for k in batch_indices\n ]\n these_times_unix_sec = storm_times_unix_sec[batch_indices]\n\n print('Reading data from: \"{0:s}\"...'.format(\n example_file_names[this_file_index]\n ))\n\n this_example_dict = input_examples.read_specific_examples(\n netcdf_file_name=example_file_names[this_file_index],\n read_all_target_vars=False, target_name=target_name,\n full_storm_id_strings=these_full_id_strings,\n storm_times_unix_sec=these_times_unix_sec,\n include_soundings=True,\n radar_field_names_to_keep=dummy_radar_field_names,\n radar_heights_to_keep_m_agl=dummy_radar_heights_m_agl,\n sounding_field_names_to_keep=sounding_field_names_to_read,\n sounding_heights_to_keep_m_agl=sounding_heights_m_agl)\n\n pressure_index = this_example_dict[\n input_examples.SOUNDING_FIELDS_KEY\n ].index(soundings.PRESSURE_NAME)\n\n this_pressure_matrix_pascals = this_example_dict[\n input_examples.SOUNDING_MATRIX_KEY][..., pressure_index]\n\n this_sounding_matrix = this_example_dict[\n input_examples.SOUNDING_MATRIX_KEY]\n\n if soundings.PRESSURE_NAME not in sounding_field_names:\n this_sounding_matrix = numpy.delete(\n 
this_sounding_matrix, pressure_index, axis=-1)\n\n if sounding_matrix is None:\n sounding_matrix = this_sounding_matrix + 0.\n sounding_pressure_matrix_pascals = this_pressure_matrix_pascals + 0.\n target_values = (\n this_example_dict[input_examples.TARGET_VALUES_KEY] + 0\n )\n else:\n sounding_matrix = numpy.concatenate(\n (sounding_matrix, this_sounding_matrix), axis=0\n )\n\n sounding_pressure_matrix_pascals = numpy.concatenate(\n (sounding_pressure_matrix_pascals,\n this_pressure_matrix_pascals), axis=0\n )\n\n target_values = numpy.concatenate((\n target_values,\n this_example_dict[input_examples.TARGET_VALUES_KEY]\n ))\n\n if normalization_type_string is not None:\n sounding_matrix = dl_utils.normalize_soundings(\n sounding_matrix=sounding_matrix,\n field_names=sounding_field_names,\n normalization_type_string=normalization_type_string,\n normalization_param_file_name=normalization_param_file_name,\n min_normalized_value=min_normalized_value,\n max_normalized_value=max_normalized_value\n ).astype(numpy.float32)\n\n target_array = _finalize_targets(\n target_values=target_values, binarize_target=binarize_target,\n num_classes=num_classes)\n\n storm_object_dict = {\n SOUNDING_MATRIX_KEY: sounding_matrix,\n TARGET_ARRAY_KEY: target_array,\n FULL_IDS_KEY: this_example_dict[input_examples.FULL_IDS_KEY],\n STORM_TIMES_KEY: this_example_dict[input_examples.STORM_TIMES_KEY],\n SOUNDING_PRESSURES_KEY: sounding_pressure_matrix_pascals + 0.\n }\n\n sounding_matrix = None\n target_values = None\n sounding_pressure_matrix_pascals = None\n\n yield storm_object_dict",
"def ar(\n cls,\n channel_count=1,\n duration=1,\n envbufnum=-1,\n frequency=440,\n max_grains=512,\n pan=0,\n trigger=0,\n ):\n import supriya.synthdefs\n calculation_rate = supriya.CalculationRate.AUDIO\n ugen = cls._new_expanded(\n calculation_rate=calculation_rate,\n channel_count=channel_count,\n duration=duration,\n envbufnum=envbufnum,\n frequency=frequency,\n max_grains=max_grains,\n pan=pan,\n trigger=trigger,\n )\n return ugen",
"def from_audio(\n cls,\n audio,\n window_type=\"hann\",\n window_samples=None,\n window_length_sec=None,\n overlap_samples=None,\n overlap_fraction=None,\n fft_size=None,\n decibel_limits=(-100, -20),\n dB_scale=True,\n scaling=\"spectrum\",\n ):\n if not isinstance(audio, Audio):\n raise TypeError(\"Class method expects Audio class as input\")\n\n # determine window_samples\n if window_samples is not None and window_length_sec is not None:\n raise ValueError(\n \"You may not specify both `window_samples` and `window_length_sec`\"\n )\n elif window_samples is None and window_length_sec is None:\n window_samples = 512 # defaults to 512 samples\n elif window_length_sec is not None:\n window_samples = int(audio.sample_rate * window_length_sec)\n # else: use user-provided window_samples argument\n\n # determine overlap_samples\n if overlap_samples is not None and overlap_fraction is not None:\n raise ValueError(\n \"You may not specify both `overlap_samples` and `overlap_fraction`\"\n )\n elif overlap_samples is None and overlap_fraction is None:\n # default is 50% overlap\n overlap_samples = window_samples // 2\n elif overlap_fraction is not None:\n assert (\n overlap_fraction >= 0 and overlap_fraction < 1\n ), \"overlap_fraction must be >=0 and <1\"\n overlap_samples = int(window_samples * overlap_fraction)\n # else: use the provided overlap_samples argument\n\n frequencies, times, spectrogram = scipy.signal.spectrogram(\n x=audio.samples,\n fs=audio.sample_rate,\n window=window_type,\n nperseg=int(window_samples),\n noverlap=int(overlap_samples),\n nfft=fft_size,\n scaling=scaling,\n )\n\n # convert to decibels\n # -> avoid RuntimeWarning by setting negative values to -np.inf (mapped to min_db later)\n if dB_scale:\n spectrogram = 10 * np.log10(\n spectrogram,\n where=spectrogram > 0,\n out=np.full(spectrogram.shape, -np.inf),\n )\n\n # limit the decibel range (-100 to -20 dB by default)\n min_db, max_db = decibel_limits\n spectrogram[spectrogram > max_db] = max_db\n spectrogram[spectrogram < min_db] = min_db\n\n new_obj = cls(\n spectrogram,\n frequencies=frequencies,\n times=times,\n decibel_limits=decibel_limits,\n window_samples=window_samples,\n overlap_samples=overlap_samples,\n window_type=window_type,\n audio_sample_rate=audio.sample_rate,\n scaling=scaling,\n )\n return new_obj",
"def audio_batch(request):\n sample_rate_orig = 16000\n sample_rate_new = 8000\n test_input = np.zeros((2, request.param, sample_rate_orig), dtype=np.int16)\n test_output = np.zeros((2, request.param, sample_rate_new), dtype=np.int16)\n return test_input, test_output, sample_rate_orig, sample_rate_new",
"def build_sampler(self, params):\n\n def sampler(logits_1, logits_2, key):\n q_kwargs = dict(transpose=True) if self.use_transpose else {}\n x = self.model.bind(params).sample(logits_1, key)\n y = self.model.bind(params).sample(logits_2, key, **q_kwargs)\n return jnp.zeros([10, 10]).at[x, y].set(1.)\n\n return sampler",
"def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')",
"def from_audio(\n cls,\n audio,\n window_type=\"hann\",\n window_samples=None,\n window_length_sec=None,\n overlap_samples=None,\n overlap_fraction=None,\n fft_size=None,\n decibel_limits=(-100, -20),\n dB_scale=True,\n scaling=\"spectrum\",\n n_mels=64,\n norm=\"slaney\",\n htk=False,\n ):\n\n if not isinstance(audio, Audio):\n raise TypeError(\"Class method expects Audio class as input\")\n\n # Generate a linear-frequency spectrogram\n # with raw stft values rather than decibels\n linear_spec = Spectrogram.from_audio(\n audio,\n window_type=window_type,\n window_samples=window_samples,\n window_length_sec=window_length_sec,\n overlap_samples=overlap_samples,\n overlap_fraction=overlap_fraction,\n fft_size=fft_size,\n decibel_limits=decibel_limits,\n dB_scale=dB_scale,\n scaling=scaling,\n )\n\n # choose n_fft to ensure filterbank.size[1]==spectrogram.size[0]\n n_fft = int(linear_spec.spectrogram.shape[0] - 1) * 2\n # Construct mel filter bank\n filter_bank = librosa.filters.mel(\n sr=audio.sample_rate, n_fft=n_fft, n_mels=n_mels, norm=norm, htk=htk\n )\n # normalize filter bank: rows should sum to 1 #TODO: is this correct?\n fb_constant = np.sum(filter_bank, 1).mean()\n filter_bank = filter_bank / fb_constant\n\n # Apply filter bank to spectrogram with matrix multiplication\n melspectrogram = np.dot(filter_bank, linear_spec.spectrogram)\n\n if dB_scale: # convert to decibels\n melspectrogram = 10 * np.log10(\n melspectrogram,\n where=melspectrogram > 0,\n out=np.full(melspectrogram.shape, -np.inf),\n )\n\n # limit the decibel range (-100 to -20 dB by default)\n # values below lower limit set to lower limit,\n # values above upper limit set to uper limit\n min_db, max_db = decibel_limits\n melspectrogram[melspectrogram > max_db] = max_db\n melspectrogram[melspectrogram < min_db] = min_db\n\n # Calculate mel frequency bins\n frequencies = librosa.filters.mel_frequencies(\n n_mels=n_mels, fmin=0, fmax=audio.sample_rate / 2, htk=htk\n )\n\n return cls(\n melspectrogram,\n frequencies=frequencies,\n times=linear_spec.times,\n decibel_limits=decibel_limits,\n window_samples=window_samples,\n overlap_samples=overlap_samples,\n window_type=window_type,\n audio_sample_rate=audio.sample_rate,\n scaling=scaling,\n )",
"def prepare_recording(self, example):\n audio = load_audio(example[self.audio_key], dtype=np.float32)\n assert audio.shape[0] >= 2\n if audio.shape[0] > 2:\n if 'mic_pair' in example.keys():\n mic_pair = example['mic_pair']\n audio = audio[np.asarray(mic_pair)]\n else:\n audio = audio[np.asarray(self.mic_pair)]\n recording_len = audio.shape[-1]\n start_offset = 0\n end = recording_len\n if \"offset\" in example.keys() and \"onset\" in example.keys():\n end = example['offset']\n start_offset = example['onset']\n if end <= self.signal_length + start_offset:\n # current recording has not the sufficient length for the specified\n # desired signal length\n # example is skipped later during prefetch or catch\n raise FilterException\n if not self.random_speech_samples:\n slice_start = str_to_random_generator(\n example[\"example_id\"]).integers(\n start_offset, end - self.signal_length)\n else:\n slice_start = np.random.randint(\n start_offset, recording_len - self.signal_length)\n prepared_audio = audio[:, slice_start:slice_start + self.signal_length]\n return prepared_audio",
"def forward(self, audio):\n feature_extractor = self.feature_extractor\n wave_gan = self.wave_gan\n pqmf = self.pqmf\n use_noise_input = self.use_noise_input\n config = self.config\n pad_fn = self.pad_fn\n\n # Added for processing single audio file as in deepspeech armory [Sonal 29Oct20]\n if audio.ndim == 1:\n num_samples = audio.shape[0]\n mel_spectrogram = feature_extractor.transform(audio)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=mel_spectrogram.device,\n )\n inputs += (noise,)\n\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n return reconstructed_audio\n\n else:\n reconstructions = []\n num_samples = audio.shape[1]\n for idx in range(audio.shape[0]):\n recording = audio[idx, :]\n mel_spectrogram = feature_extractor.transform(recording)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=recording.device,\n )\n inputs += (noise,)\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:, :num_samples]\n reconstructions.append(reconstructed_audio)\n return torch.stack(reconstructions)",
"def prepare(params, samples):\r\n return",
"def __init__(\n self,\n models,\n tgt_dict,\n tgt_dict_mt,\n beam_size=1,\n beam_size_mt=1,\n max_len_a=0,\n max_len_b=200,\n max_len_a_mt=0,\n max_len_b_mt=200,\n max_len=0,\n min_len=1,\n normalize_scores=True,\n len_penalty=1.0,\n len_penalty_mt=1.0,\n unk_penalty=0.0,\n temperature=1.0,\n match_source_len=False,\n no_repeat_ngram_size=0,\n eos=None,\n eos_mt=None,\n symbols_to_strip_from_output=None,\n lm_model=None,\n lm_weight=1.0,\n ):\n super().__init__()\n\n from examples.speech_to_speech.unity.sequence_generator import SequenceGenerator\n\n self.generator = SequenceGenerator(\n models,\n tgt_dict,\n beam_size=beam_size,\n max_len_a=max_len_a,\n max_len_b=max_len_b,\n max_len=max_len,\n min_len=min_len,\n normalize_scores=normalize_scores,\n len_penalty=len_penalty,\n unk_penalty=unk_penalty,\n temperature=temperature,\n match_source_len=match_source_len,\n no_repeat_ngram_size=no_repeat_ngram_size,\n search_strategy=search.BeamSearch(tgt_dict),\n eos=eos,\n symbols_to_strip_from_output=symbols_to_strip_from_output,\n lm_model=lm_model,\n lm_weight=lm_weight,\n )\n self.eos = self.generator.eos\n\n self.generator_mt = SequenceGenerator(\n models,\n tgt_dict_mt,\n beam_size=beam_size_mt,\n max_len_a=max_len_a_mt,\n max_len_b=max_len_b_mt,\n max_len=max_len,\n min_len=min_len,\n normalize_scores=normalize_scores,\n len_penalty=len_penalty_mt,\n unk_penalty=unk_penalty,\n temperature=temperature,\n match_source_len=match_source_len,\n no_repeat_ngram_size=no_repeat_ngram_size,\n search_strategy=search.BeamSearch(tgt_dict_mt),\n eos=eos_mt,\n symbols_to_strip_from_output=symbols_to_strip_from_output,\n )"
]
| [
"0.61170983",
"0.6047016",
"0.5824852",
"0.57329017",
"0.56791836",
"0.56745744",
"0.5672484",
"0.5632816",
"0.5611577",
"0.55961704",
"0.55796725",
"0.5570685",
"0.5484561",
"0.54663813",
"0.5406149",
"0.5401842",
"0.5399455",
"0.5349309",
"0.53475773",
"0.5336562",
"0.53358096",
"0.5297552",
"0.52931905",
"0.52854484",
"0.528217",
"0.5271693",
"0.5231938",
"0.5226621",
"0.522058",
"0.5142898"
]
| 0.7031771 | 0 |
Finds the nearest zero crossing, searching in one direction. Can do a 'reverse' search by setting reverse to True. In that case, the sample list is searched from back to front. targetTime is the startTime if reverse=False and the endTime if reverse=True | def _findNextZeroCrossing(
startTime: float,
samples: Tuple[int, ...],
frameRate: float,
reverse: bool,
) -> Optional[float]:
zeroI = _getNearestZero(samples, reverse)
if zeroI is None:
zeroI = _getZeroThresholdCrossing(samples, reverse)
if zeroI is None:
return None
return startTime + zeroI / float(frameRate) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findNearestZeroCrossing(\n self, targetTime: float, timeStep: float = ZERO_CROSSING_TIMESTEP\n ) -> float:\n\n leftStartTime = rightStartTime = targetTime\n\n samplesPerStep = timeStep * self.frameRate\n if samplesPerStep < 2:\n raise errors.ArgumentError(\n f\"'timeStep' ({timeStep}) must be large enough to contain \"\n f\"multiple samples for audio framerate ({self.frameRate})\"\n )\n\n # Find zero crossings\n smallestLeft = None\n smallestRight = None\n oneSampleDuration = 1 / self.frameRate\n while True:\n # Increasing our timeStep by one sample enables\n # us to find zero-crossings that sit at the boundary\n # of two samples (two different iterations of this loop)\n smallestLeft = self._iterZeroCrossings(\n leftStartTime, lambda x: x > 0, timeStep + oneSampleDuration, True\n )\n smallestRight = self._iterZeroCrossings(\n rightStartTime,\n lambda x: x + timeStep < self.duration,\n timeStep + oneSampleDuration,\n False,\n )\n\n if smallestLeft is not None or smallestRight is not None:\n break\n # TODO: I think this case shouldn't be possible\n elif leftStartTime < 0 and rightStartTime > self.duration:\n raise (errors.FindZeroCrossingError(0, self.duration))\n else:\n # oneSampleDuration is not added here\n leftStartTime -= timeStep\n rightStartTime += timeStep\n\n # Under ordinary circumstances, this should not occur\n if smallestLeft is None and smallestRight is None:\n raise errors.FindZeroCrossingError(0, self.duration)\n\n return utils.chooseClosestTime(targetTime, smallestLeft, smallestRight)",
"def binary_search_time(values: list[any], low: int, high: int, target: int) -> any:\n if low <= high:\n middle_index = (low+high) // 2\n middle_value = values[middle_index]['time']\n if middle_value < target:\n return binary_search_time(values, middle_index+1, high, target)\n elif middle_value > target:\n return binary_search_time(values, low, middle_index-1, target)\n else:\n return values[middle_index]\n return values[high]",
"def compute_time_offset(source, target, search_range=200):\r\n assert source.shape[1] == target.shape[1]\r\n best_offset = 688\r\n time_offset = 0\r\n if best_offset >= 0:\r\n time_offset = target[best_offset, 0] - source[0, 0]\r\n elif best_offset < 0:\r\n time_offset = target[0, 0] - source[best_offset, 0]\r\n print('Best offset: {}, time_offset: {}'.format(best_offset, time_offset / nano_to_sec))\r\n return time_offset",
"def SearchMaxElongation(body, startTime):\n if body == Body.Mercury:\n s1 = 50.0\n s2 = 85.0\n elif body == Body.Venus:\n s1 = 40.0\n s2 = 50.0\n else:\n raise InvalidBodyError()\n syn = _SynodicPeriod(body)\n iter = 1\n while iter <= 2:\n plon = EclipticLongitude(body, startTime)\n elon = EclipticLongitude(Body.Earth, startTime)\n rlon = _LongitudeOffset(plon - elon) # clamp to (-180, +180]\n\n # The slope function is not well-behaved when rlon is near 0 degrees or 180 degrees\n # because there is a cusp there that causes a discontinuity in the derivative.\n # So we need to guard against searching near such times.\n if rlon >= -s1 and rlon < +s1:\n # Seek to the window [+s1, +s2].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = +s1.\n rlon_lo = +s1\n # Search forward for the time t2 when rel lon = +s2.\n rlon_hi = +s2\n elif rlon > +s2 or rlon < -s2:\n # Seek to the next search window at [-s2, -s1].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = -s2.\n rlon_lo = -s2\n # Search forward for the time t2 when rel lon = -s1.\n rlon_hi = -s1\n elif rlon >= 0.0:\n # rlon must be in the middle of the window [+s1, +s2].\n # Search BACKWARD for the time t1 when rel lon = +s1.\n adjust_days = -syn / 4.0\n rlon_lo = +s1\n rlon_hi = +s2\n # Search forward from t1 to find t2 such that rel lon = +s2.\n else:\n # rlon must be in the middle of the window [-s2, -s1].\n # Search BACKWARD for the time t1 when rel lon = -s2.\n adjust_days = -syn / 4.0\n rlon_lo = -s2\n # Search forward from t1 to find t2 such that rel lon = -s1.\n rlon_hi = -s1\n\n t_start = startTime.AddDays(adjust_days)\n t1 = SearchRelativeLongitude(body, rlon_lo, t_start)\n if t1 is None:\n return None\n\n t2 = SearchRelativeLongitude(body, rlon_hi, t1)\n if t2 is None:\n return None\n\n # Now we have a time range [t1,t2] that brackets a maximum elongation event.\n # Confirm the bracketing.\n m1 = _neg_elong_slope(body, t1)\n if m1 >= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n m2 = _neg_elong_slope(body, t2)\n if m2 <= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n # Use the generic search algorithm to home in on where the slope crosses from negative to positive.\n tx = Search(_neg_elong_slope, body, t1, t2, 10.0)\n if tx is None:\n return None\n\n if tx.tt >= startTime.tt:\n return Elongation(body, tx)\n\n # This event is in the past (earlier than startTime).\n # We need to search forward from t2 to find the next possible window.\n # We never need to search more than twice.\n startTime = t2.AddDays(1.0)\n iter += 1",
"def nearest_test_pulse(self):",
"def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]",
"def closest_in_time(images, target):\n\n tgt_mjd = fits.getheader(target, ext=1)['mjd-obs']\n mjds = np.array([fits.getheader(i, ext=1)['mjd-obs'] for i in images])\n\n return images[abs(mjds - tgt_mjd).argsort()[0]]",
"def find_relative_time_reference(self, search_direction, tags, timex, timesIndex):\n \n if search_direction == 'Backward':\n ts = [r[2] for r in self.timexImpactZones if r[0]<=timex.getStartPos() and r[1]>=timex.getEndPos()]\n if ts:\n return ts[-1]\n else:\n return None\n \n parts = tags[timesIndex+1:]\n \n anchorTagSet = set(['Vaccine', 'Drug', 'Vaccination', 'Injection', 'Hospitalization', 'Administration']) \n doseNum = 0\n doseTag = [tg for tg in parts if tg[1]=='DoseIndicator']\n counts = []\n doseTagRange = 5\n if doseTag:\n counts = [(i, tg[0]) for i, tg in enumerate(parts) if tg[1]=='Count'] \n for i, tag in enumerate(parts):\n if tag[1]=='Drug' or tag[1]=='Vaccine':\n if counts:\n dist = 10000\n doseword = None\n for k, w in counts:\n if abs(k-i) < dist:\n dist = abs(k-i)\n doseword = w\n if doseword and dist<doseTagRange:\n doseNum = util.text2num.convertOrdinal(doseword) \n \n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum(), doseNum)\n if t:\n return t\n \n if tag[1] in ['Hospitalization', 'Administration']:\n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum())\n if t:\n return t\n \n if tag[1] in ['Vaccination', 'Injection']:\n if i+2<len(parts) and parts[i+1][0] in ['with', 'of', 'to'] and parts[i+2][1] in ['Drug', 'Vaccine']:\n continue\n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum())\n if t:\n return t\n \n ##: No reference tag is found, search backward for a valid time range\n ##: In ideal case, this should \"return None\" directly. However, considering that the current lexicon is not \n ##: complete enough, it's very likely some Vaccines or drugs are not tagged, we return the previous time\n ##: in the current development stage.\n ts = [r[2] for r in self.timexImpactZones if r[0]<=timex.getStartPos() and r[1]>=timex.getEndPos()]\n if ts:\n return ts[-1]\n \n return None",
"def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]",
"def RisetimeFinder(X, Y,startIndex,peakIndex,baseline):\n # Channel1Data is from first TOF\n # Channel2Data is from second TOF\n hitAmplitude = Y[peakIndex]\n UpperThreshold = baseline - (.7 * (baseline - hitAmplitude))\n LowerThreshold = baseline - (.3 * (baseline - hitAmplitude))\n riseTimestart = 0\n riseTimeend = 0\n riseIndex = 0\n fallIndex = 0\n diffs = Y[startIndex:peakIndex]-UpperThreshold\n value = np.min(abs(diffs))\n noiserms = np.std(Y[:50])*5\n YStart = Y[startIndex]\n YSign =np.sign(Y[startIndex])\n #print(value,diffs)\n #print(np.where(value == abs(diffs))[0][0])\n riseIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex\n diffs = Y[startIndex:peakIndex]-LowerThreshold\n value = np.min(abs(diffs))\n fallIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex\n riseTimestart = Interpolator(X, Y, riseIndex-1,riseIndex+1,UpperThreshold)\n riseTimeend = Interpolator(X, Y, fallIndex-1,fallIndex+1,LowerThreshold)\n #print(UpperThreshold,LowerThreshold)\n result = dict()\n result['risetime'] = riseTimestart-riseTimeend\n result['starttime'] = riseTimeend\n if riseTimestart < X[startIndex] or riseTimestart > X[EndIndex] or riseTimeend < X[startIndex] or riseTimeend > X[EndIndex]:\n result['risetime']= False\n if riseTimestart - riseTimeend > (X[EndIndex] - X[startIndex]):\n result['risetime']= False\n if riseTimestart - riseTimeend <= 0:\n result['risetime']= False\n if riseIndex == 0 or fallIndex ==0:\n result['risetime']= False\n if YSign > 0:\n if(YStart > baseline + noiserms):\n result['risetime']= False\n if YSign < 0:\n if(YStart < baseline - noiserms):\n result['risetime']= False\n if len(np.unique(np.sign(np.diff(Y[fallIndex:startIndex])))) > 1:\n result['risetime']= False\n\n return result",
"def findPathSegmentsToPoint(self, startTime, startPoint, startSpeed, startUnitVelocity, targetPoint, velocityOfTarget, legalRotDirection):\n pass",
"def Search(func, context, t1, t2, dt_tolerance_seconds):\n dt_days = abs(dt_tolerance_seconds / _SECONDS_PER_DAY)\n f1 = func(context, t1)\n f2 = func(context, t2)\n iter = 0\n iter_limit = 20\n calc_fmid = True\n while True:\n iter += 1\n if iter > iter_limit:\n raise Error('Excessive iteration in Search')\n\n dt = (t2.tt - t1.tt) / 2.0\n tmid = t1.AddDays(dt)\n if abs(dt) < dt_days:\n # We are close enough to the event to stop the search.\n return tmid\n\n if calc_fmid:\n fmid = func(context, tmid)\n else:\n # We already have the correct value of fmid from the previous loop.\n calc_fmid = True\n\n # Quadratic interpolation:\n # Try to find a parabola that passes through the 3 points we have sampled:\n # (t1,f1), (tmid,fmid), (t2,f2).\n q = _QuadInterp(tmid.ut, t2.ut - tmid.ut, f1, fmid, f2)\n if q:\n (q_x, q_ut, q_df_dt) = q\n tq = Time(q_ut)\n fq = func(context, tq)\n if q_df_dt != 0.0:\n dt_guess = abs(fq / q_df_dt)\n if dt_guess < dt_days:\n # The estimated time error is small enough that we can quit now.\n return tq\n\n # Try guessing a tighter boundary with the interpolated root at the center.\n dt_guess *= 1.2\n if dt_guess < dt / 10.0:\n tleft = tq.AddDays(-dt_guess)\n tright = tq.AddDays(+dt_guess)\n if (tleft.ut - t1.ut)*(tleft.ut - t2.ut) < 0.0:\n if (tright.ut - t1.ut)*(tright.ut - t2.ut) < 0.0:\n fleft = func(context, tleft)\n fright = func(context, tright)\n if fleft < 0.0 and fright >= 0.0:\n f1 = fleft\n f2 = fright\n t1 = tleft\n t2 = tright\n fmid = fq\n calc_fmid = False\n continue\n\n # Quadratic interpolation attempt did not work out.\n # Just divide the region in two parts and pick whichever one appears to contain a root.\n if f1 < 0.0 and fmid >= 0.0:\n t2 = tmid\n f2 = fmid\n continue\n\n if fmid < 0.0 and f2 >= 0.0:\n t1 = tmid\n f1 = fmid\n continue\n\n # Either there is no ascending zero-crossing in this range\n # or the search window is too wide (more than one zero-crossing).\n return None",
"def closest_to(self, a, b):\n diff_a = abs(a.ts - self.ts)\n diff_b = abs(b.ts - self.ts)\n if diff_a < diff_b and diff_a < TIME_THRESHOLD:\n return a\n elif diff_b < TIME_THRESHOLD:\n return b\n return None",
"def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)",
"def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record",
"def _closest_front_opponent(self, raw_obs, o, target):\n delta = target - o\n min_d = None\n closest = None\n for p in raw_obs['right_team']:\n delta_opp = p - o\n if np.dot(delta, delta_opp) <= 0:\n continue\n d = self._object_distance(o, p)\n if min_d is None or d < min_d:\n min_d = d\n closest = p\n\n # May return None!\n return closest",
"def get_closest_bus_stop(due_time, stop_src, stop_ids, route_id):\n\n # Get index of where the SRC stop is in the tupple to serve as the high-bound, and store that position in original. Also, store the original due time, as it will be needed\n high_bound = 0\n original = 0\n original_due_time = due_time\n for i in range(0, len(stop_ids)):\n if str(stop_ids[i]) == stop_src:\n high_bound = i\n original = i\n break\n\n # Innitialize pointer to be halfway between the lowbound (set to 0 index) and the highbound (the SRC stop).\n pointer = original//4\n low_bound = 0\n\n # Optimally we want to find the stop where our bus is just 1 minute away, for better accuracy. But sometimes that is not possible, so we will\n # need to look for a bus further away. This variable, arrival_within_minutes, starts with 1 minutes, and will be increased as necessary.\n arrival_within_minutes = 1\n\n # Search until we find where the bus is\n while True:\n last_due_time = 0\n # Search while our due time is not 'Due' or within the specified minutes\n while due_time != 'Due' or int(due_time) > arrival_within_minutes:\n # Once more, get the buses for the stop we are currently looking at\n first_3_buses = get_due_time(str(stop_ids[pointer]), route_id)\n\n # Get just the first bus, since we already have the 3 buses from our SRC stop (this one is just looking for where one of those 3 buses is)\n possible_stop = filter_buses(first_3_buses)\n # Store the new due time, from the bus stop our binary algorithm selected\n new_due_time_due = possible_stop['duetime']\n\n # If the new due_time is the same as the last_due_time it means the algorithm got stuck without finding a better value, and we need to break, and change our\n # arrival_within_minutes for a longer time\n if new_due_time_due == last_due_time:\n break\n\n # If we found a 'Due' or within the arrival_within_minutes, return that index. 
That is the index of the stop where our bus is at/close to.\n if possible_stop['duetime'] == 'Due' or int(possible_stop['duetime']) <= arrival_within_minutes:\n # ('Found the bus with', new_due_time_due, 'minutes due time.')\n # This for loop is to check if the previous bus stop(s) have the same due time, and find a closer more accurae stop\n # print('Original pointer:', pointer)\n for i in range(pointer - 1, 0, -1):\n if new_due_time_due == (filter_buses(get_due_time(str(stop_ids[i]), route_id))['duetime']):\n pointer = i\n # print('New pointer:', pointer)\n else:\n break\n # Return the pointer, the index of the stop\n return pointer\n else:\n # If the due time at the possible stop is less than the one at SRC, we're on the right path, and need to look for a stop farther from the SRC\n if int(possible_stop['duetime']) < int(due_time):\n # Store the new, better due time\n due_time = possible_stop['duetime']\n # Change the highbound to the pointer and reduce our pointer again to halfway between lowbound and highbound\n high_bound = pointer\n pointer -= ((high_bound - low_bound)//4)\n else:\n # If the due time at the possible stop is bigger than the one at SRC, we've gone too far, and need to look for a stop closer to the SRC\n # The lowbound becomes the pointer and we move the pointer, again, to halfway between the lowbound and the highbound\n low_bound = pointer\n pointer += ((high_bound - low_bound)//4)\n # If we found a better (shortter) due time, we store this one for the next iteration and keep looking for an even better one\n last_due_time = new_due_time_due\n\n # If the algorithm comes to this part, it means we didn't find a stop where our bus was due wihin 1 (or more) minutes. So we need to increase the\n # arrival_within minutes to keep searching.\n arrival_within_minutes += 1\n\n # Reset our lowbound, highbound and pointer to restart the search\n low_bound = 0\n high_bound = original\n pointer = original // 4\n\n # If we start looking for a stop, previous to the SRC, were our bus has MORE duetime, we've gonne too far. Possibly, there are two buses running very close to one another,\n # and they may be due to our SRC stop at the same time (seen before too many times with the 17). In this case, we need to increase the original bound to take the stop where\n # we found the previous bus.\n if arrival_within_minutes > int(original_due_time):\n high_bound += 1\n return high_bound\n\n # Just a token return\n return 0",
"def SearchMoonPhase(targetLon, startTime, limitDays):\n # To avoid discontinuities in the _moon_offset function causing problems,\n # we need to approximate when that function will next return 0.\n # We probe it with the start time and take advantage of the fact\n # that every lunar phase repeats roughly every 29.5 days.\n # There is a surprising uncertainty in the quarter timing,\n # due to the eccentricity of the moon's orbit.\n # I have seen more than 0.9 days away from the simple prediction.\n # To be safe, we take the predicted time of the event and search\n # +/-1.5 days around it (a 3-day wide window).\n # But we must return None if the final result goes beyond limitDays after startTime.\n uncertainty = 1.5\n ya = _moon_offset(targetLon, startTime)\n if ya > 0.0:\n ya -= 360.0 # force searching forward in time, not backward\n est_dt = -(_MEAN_SYNODIC_MONTH * ya) / 360.0\n dt1 = est_dt - uncertainty\n if dt1 > limitDays:\n return None # not possible for moon phase to occur within the specified window\n dt2 = min(limitDays, est_dt + uncertainty)\n t1 = startTime.AddDays(dt1)\n t2 = startTime.AddDays(dt2)\n return Search(_moon_offset, targetLon, t1, t2, 1.0)",
"def __findFarestPoint__( self, outPoint ):\n end = outPoint;\n endInside = self.inside( end );\n if endInside: return outPoint;\n start = self.center;\n startInside = self.inside( start );\n \n while( True ):\n if ( utility.euclideanDistSqr( start, end ) <= 4 ):\n return start;\n mid = utility.devide( utility.add( start, end ), 2);\n if self.inside( mid ):\n start = mid;\n else:\n end = mid;",
"def target_position(self, time):\n \"\"\"\n start_pos = self.points[self.cur_start]\n seg_time = time - self.last_checkpoint_time\n\n #The arguement of target-velocity dosent matter\n cur_pos = self.target_velocity(time)*seg_time + start_pos\n\n \n # or time > (self.total_time / 4)*(self.cur_start + 1)\n cur_pos_norm = length(cur_pos - start_pos)\n\n next_corner = self.points[(self.cur_start + 1)%4]\n \n seg_norm = length(next_corner - start_pos)\n print(\"cur_pos : \", cur_pos, \"segment: \", self.cur_start, seg_norm - cur_pos_norm)\n\n if cur_pos_norm >= seg_norm:\n self.cur_start = (self.cur_start + 1) % 4\n self.last_checkpoint_time = time\n return cur_pos\n \"\"\"\n\n #Possibly use rospy.sleep()\n total_time = self.total_time\n\n\n if time < total_time/4:\n return self.path1.target_position(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time < total_time/2:\n return self.path2.target_position(time - (total_time/4 + 0.5))\n # return self.path2.target_position(time - (total_time/4 ))\n\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= total_time/4*3:\n return self.path3.target_position(time - (total_time/2 + 1))\n # return self.path3.target_position(time - (total_time/2))\n\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n else:\n return self.path4.target_position(time - (total_time/4*3 + 1.5))\n # return self.path4.target_position(time - (total_time/4*3))",
"def target(self, time, points, dt, num_way):\n start_index = min(int(time / dt), num_way - 1)\n end_index = min(start_index + 1, num_way - 1)\n start_point = points[start_index]\n end_point = points[end_index]\n fraction = float(time % dt) / dt\n return linear_interpolation_two_points(start_point, end_point, fraction).reshape(3)",
"def distance(st_one, st_two, start, end, nsamples):\n t = np.linspace(start+(end-start)/nsamples, end, nsamples)\n st_one = np.insert(st_one, 0, start)\n st_one = np.append(st_one, end)\n st_two = np.insert(st_two, 0, start)\n st_two = np.append(st_two, end)\n\n # We compute the corner spikes for all the time instants we consider\n # corner_spikes is a 4 column matrix [t, tp1, tf1, tp2, tf2]\n corner_spikes = np.zeros((nsamples,5))\n\n ibegin_one = 0\n ibegin_two = 0\n corner_spikes[:,0] = t\n for itc, tc in enumerate(t):\n corner_spikes[itc,1:3], ibegin_t1 = _find_corner_spikes(tc, st_one,\n ibegin_one,\n start, end)\n corner_spikes[itc,3:5], ibegin_t2 = _find_corner_spikes(tc, st_two,\n ibegin_two,\n start, end)\n\n #print corner_spikes\n xisi = np.zeros((nsamples,2))\n xisi[:,0] = corner_spikes[:,2] - corner_spikes[:,1]\n xisi[:,1] = corner_spikes[:,4] - corner_spikes[:,3]\n norm_xisi = np.sum(xisi,axis=1)**2.0\n\n # We now compute the smallest distance between the spikes in st_two\n # and the corner spikes of st_one\n # with np.tile(st_two,(N,1)) we build a matrix :\n # np.tile(st_two,(N,1)) = [st_two st_two st_two]' -\n # np.tile(reshape(corner_spikes,(N,1)), st_two.size) =\n # [corner corner corner]'\n\n dp1 = np.min(np.fabs(np.tile(st_two,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,1],(nsamples,1)),\n st_two.size)),\n axis=1)\n df1 = np.min(np.fabs(np.tile(st_two,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,2],(nsamples,1)),\n st_two.size)),\n axis=1)\n # And the smallest distance between the spikes in st_one and the corner spikes of st_two\n dp2 = np.min(np.fabs(np.tile(st_one,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,3],\n (nsamples,1)),st_one.size)),\n axis=1)\n df2 = np.min(np.fabs(np.tile(st_one,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,4],(nsamples,1)),\n st_one.size)),\n axis=1)\n\n xp1 = t - corner_spikes[:,1]\n xf1 = corner_spikes[:,2] - t\n xp2 = t - corner_spikes[:,3]\n xf2 = corner_spikes[:,4] - t\n\n S1 = (dp1 * xf1 + df1 * xp1)/xisi[:,0]\n S2 = (dp2 * xf2 + df2 * xp2)/xisi[:,1]\n\n inst_dist = (S1 * xisi[:,1] + S2 * xisi[:,0]) / (norm_xisi/2.0)\n\n return t, inst_dist",
"def __call__(self,time):\n \n fname = []\n tind =[]\n for t in time:\n flag=1\n for f in self.timelookup.keys():\n\n if t >= self.timelookup[f][0] and t<=self.timelookup[f][-1]:\n# print 'Found tstep %s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n tind.append(othertime.findNearest(t,self.timelookup[f][:]))\n fname.append(f)\n flag=0\n\n# if flag:\n# print 'Warning - could not find matching file for time:%s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n# tind.append(-1)\n# fname.append(-1)\n \n return tind, fname",
"def get_hour_offsets(self):\n starttime = self.parameters['startlocaltime']\n stoptime = self.parameters['stoplocaltime']\n timediff = (stoptime - starttime)\n logger.debug(\"Start time: {} | Stop time: {}\".format(starttime, stoptime))\n if timediff > config.TIME_THRESHOLD:\n logger.debug(\"Time delta is {}. This is significantly larger than anticipated\".format(timediff))\n else:\n logger.debug(\"Time delta is {}. Using start time as the global time\".format(timediff))\n\n \"\"\"\n timediff = (stoptime - starttime).total_seconds()\n logger.debug(\"Start time: {} | Stop time: {}\".format(starttime, stoptime))\n #TODO: How do we want to handle large images with huge time differences?\n if timediff > config.TIME_THRESHOLD:\n logger.debug(\"Time delta is {}. This is significantly larger than anticipated\".format(timediff))\n starttime = starttime\n else:\n logger.debug(\"Time delta is {}. Using start time as the global time\".format(timediff))\n \"\"\"\n #Given the image start time, find the nearest index and set to the middle,\n # then find the adjacent two nodes in both directions to get allow a\n # cubic interpolation.\n #image_time = starttime.hour + starttime.minute / 60.0\n # This grabs the hour that is nearest, but hour is circular\n image_time = starttime\n if abs(image_time - 24) < abs(image_time - 23.5):\n image_time -= 24\n mididx, midhour = utils.getnearest(self.times, image_time)\n logger.debug(\"Time is {}. The nearest lookup node is {}\".format(image_time, mididx))\n minidx = mididx - 2\n maxidx = mididx + 2\n\n hourslice = np.arange(minidx, maxidx + 1, dtype=np.int8)\n\n hourslice[hourslice < 0] += 18\n\n if hourslice[-1] >= len(self.times):\n #The hour slice needs to be shifted over the time break\n hourslice[hourslice >= len(self.times)] -= len(self.times)\n logger.debug(\"Using indices {} and start time of {}.\".format(hourslice, image_time))\n return hourslice, image_time",
"def binary_search_tweets_by_date(tweets, targetDate, start, end):\n # no exact match in tweets\n if (start >= end):\n # TODO will this cover edge cases?? (end and beginning of list?)\n print(\"closest? %d\" % (start - 1))\n return start - 1\n\n middle = int((start + end) / 2)\n value = tweets[middle].created_at\n\n if value > targetDate:\n return binary_search_tweets_by_date(tweets, targetDate, middle+1, end)\n if value < targetDate:\n return binary_search_tweets_by_date(tweets, targetDate, start, middle-1)\n # found exact match\n return middle",
"def SearchPeakMagnitude(body, startTime):\n # s1 and s2 are relative longitudes within which peak magnitude of Venus can occur.\n s1 = 10.0\n s2 = 30.0\n if body != Body.Venus:\n raise InvalidBodyError()\n\n iter = 1\n while iter <= 2:\n # Find current heliocentric relative longitude between the\n # inferior planet and the Earth.\n plon = EclipticLongitude(body, startTime)\n elon = EclipticLongitude(Body.Earth, startTime)\n rlon = _LongitudeOffset(plon - elon)\n # The slope function is not well-behaved when rlon is near 0 degrees or 180 degrees\n # because there is a cusp there that causes a discontinuity in the derivative.\n # So we need to guard against searching near such times.\n if -s1 <= rlon < +s1:\n # Seek to the window [+s1, +s2].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = +s1.\n rlon_lo = +s1\n # Search forward for the time t2 when rel lon = +s2.\n rlon_hi = +s2\n elif rlon >= +s2 or rlon < -s2:\n # Seek to the next search window at [-s2, -s1].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = -s2.\n rlon_lo = -s2\n # Search forward for the time t2 when rel lon = -s1.\n rlon_hi = -s1\n elif rlon >= 0:\n # rlon must be in the middle of the window [+s1, +s2].\n # Search BACKWARD for the time t1 when rel lon = +s1.\n syn = _SynodicPeriod(body)\n adjust_days = -syn / 4\n rlon_lo = +s1\n # Search forward from t1 to find t2 such that rel lon = +s2.\n rlon_hi = +s2\n else:\n # rlon must be in the middle of the window [-s2, -s1].\n # Search BACKWARD for the time t1 when rel lon = -s2.\n syn = _SynodicPeriod(body)\n adjust_days = -syn / 4\n rlon_lo = -s2\n # Search forward from t1 to find t2 such that rel lon = -s1.\n rlon_hi = -s1\n\n t_start = startTime.AddDays(adjust_days)\n t1 = SearchRelativeLongitude(body, rlon_lo, t_start)\n t2 = SearchRelativeLongitude(body, rlon_hi, t1)\n\n # Now we have a time range [t1,t2] that brackets a maximum magnitude event.\n # Confirm the bracketing.\n m1 = _mag_slope(body, t1)\n if m1 >= 0.0:\n raise InternalError()\n\n m2 = _mag_slope(body, t2)\n if m2 <= 0.0:\n raise InternalError()\n\n # Use the generic search algorithm to home in on where the slope crosses from negative to positive.\n tx = Search(_mag_slope, body, t1, t2, 10.0)\n if tx is None:\n # The search should have found the ascending root in the interval [t1, t2].\n raise InternalError()\n\n if tx.tt >= startTime.tt:\n return Illumination(body, tx)\n\n # This event is in the past (earlier than startTime).\n # We need to search forward from t2 to find the next possible window.\n # We never need to search more than twice.\n startTime = t2.AddDays(1.0)\n iter += 1\n\n # We should have found the peak magnitude in at most 2 iterations.\n raise InternalError()",
"def SearchRelativeLongitude(body, targetRelLon, startTime):\n if body == Body.Earth:\n raise EarthNotAllowedError()\n if body == Body.Moon or body == Body.Sun:\n raise InvalidBodyError()\n syn = _SynodicPeriod(body)\n direction = +1 if _IsSuperiorPlanet(body) else -1\n # Iterate until we converge on the desired event.\n # Calculate the error angle, which will be a negative number of degrees,\n # meaning we are \"behind\" the target relative longitude.\n error_angle = _rlon_offset(body, startTime, direction, targetRelLon)\n if error_angle > 0.0:\n error_angle -= 360.0 # force searching forward in time\n time = startTime\n iter = 0\n while iter < 100:\n # Estimate how many days in the future (positive) or past (negative)\n # we have to go to get closer to the target relative longitude.\n day_adjust = (-error_angle/360.0) * syn\n time = time.AddDays(day_adjust)\n if abs(day_adjust) * _SECONDS_PER_DAY < 1.0:\n return time\n prev_angle = error_angle\n error_angle = _rlon_offset(body, time, direction, targetRelLon)\n if abs(prev_angle) < 30.0 and prev_angle != error_angle:\n # Improve convergence for Mercury/Mars (eccentric orbits)\n # by adjusting the synodic period to more closely match the\n # variable speed of both planets in this part of their respective orbits.\n ratio = prev_angle / (prev_angle - error_angle)\n if 0.5 < ratio < 2.0:\n syn *= ratio\n iter += 1\n raise NoConvergeError()",
"def backtrack(\n candidates: list, path: list, answer: list, target: int, previous_index: int\n) -> None:\n if target == 0:\n answer.append(path.copy())\n else:\n for index in range(previous_index, len(candidates)):\n if target >= candidates[index]:\n path.append(candidates[index])\n backtrack(candidates, path, answer, target - candidates[index], index)\n path.pop(len(path) - 1)",
"def find_closest(A, target):\n idx = A.searchsorted(target)\n idx = np.clip(idx, 1, len(A)-1)\n left = A[idx-1]\n right = A[idx]\n idx -= target - left < right - target\n return idx",
"def find_start_end_rests(audio_data, sr, hop_length=HOP_LENGTH, n_fft=N_FFT):\r\n \r\n # Compute the 3rd percentile of the envelope and \r\n # deem anything below this value as silence \r\n envelope = frame(audio_data, hop_length=hop_length, frame_length=n_fft).max(axis=0)\r\n lower_bound = np.percentile(envelope, 5.0)\r\n \r\n # Implement the search as loop, this should be faster than vectorisation\r\n k = 0\r\n while envelope[k] <= lower_bound:\r\n k += 1\r\n \r\n # Return 0 if there is no start rest\r\n if k == 0:\r\n time_start = 0.0\r\n else:\r\n # The first value of the output of the frame function correspond to the time of\r\n # n_fft, then the times are spaced according to hop_length \r\n time_start = ((k-1)*hop_length + n_fft)/float(sr)\r\n \r\n j = len(envelope)-1\r\n while envelope[j] <= lower_bound:\r\n j -= 1\r\n \r\n # Return the length of the track if the is no end rest\r\n if j == len(envelope)-1:\r\n time_end = len(audio_data)/float(sr)\r\n else:\r\n time_end = ((j-1)*hop_length + n_fft)/float(sr)\r\n \r\n times_start_end_rests = [time_start, time_end]\r\n \r\n return(times_start_end_rests)"
]
| [
"0.6999155",
"0.59568244",
"0.59183306",
"0.5838276",
"0.577363",
"0.569039",
"0.56570524",
"0.5603086",
"0.55886364",
"0.54183924",
"0.5383854",
"0.5383606",
"0.53572553",
"0.53330415",
"0.52899903",
"0.52376866",
"0.5219681",
"0.51855856",
"0.51751965",
"0.51744354",
"0.5164647",
"0.5156001",
"0.5150172",
"0.51484436",
"0.5137115",
"0.5123184",
"0.50957555",
"0.50898343",
"0.5050174",
"0.50489414"
]
| 0.716974 | 0 |
Returns a list of intervals, each one labeled 'keep' or 'delete' | def _computeKeepDeleteIntervals(
start: float,
stop: float,
keepIntervals: List[Tuple[float, float]] = None,
deleteIntervals: List[Tuple[float, float]] = None,
) -> List[Tuple[float, float, str]]:
if keepIntervals and deleteIntervals:
raise errors.ArgumentError(
"You cannot specify both 'keepIntervals' or 'deleteIntervals'."
)
elif not keepIntervals and not deleteIntervals:
computedKeepIntervals = [(start, stop)]
computedDeleteIntervals = []
elif deleteIntervals:
deleteTimestamps = [(interval[0], interval[1]) for interval in deleteIntervals]
computedKeepIntervals = utils.invertIntervalList(deleteTimestamps, start, stop)
computedDeleteIntervals = deleteTimestamps
elif keepIntervals:
keepTimestamps = [(interval[0], interval[1]) for interval in keepIntervals]
computedKeepIntervals = keepTimestamps
computedDeleteIntervals = utils.invertIntervalList(keepTimestamps, start, stop)
annotatedKeepIntervals = [
(start, end, _KEEP) for start, end in computedKeepIntervals
]
annotatedDeleteIntervals = [
(start, end, _DELETE) for start, end in computedDeleteIntervals
]
intervals = sorted(annotatedKeepIntervals + annotatedDeleteIntervals)
return intervals | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keep_intervals(self, intervals, simplify=True, record_provenance=True):\n tables = self.dump_tables()\n tables.keep_intervals(intervals, simplify, record_provenance)\n return tables.tree_sequence()",
"def delete_intervals(self, intervals, simplify=True, record_provenance=True):\n tables = self.dump_tables()\n tables.delete_intervals(intervals, simplify, record_provenance)\n return tables.tree_sequence()",
"def exclude_intervals(self):\n if not hasattr(self, \"_exclude_intervals\"):\n self._exclude_intervals = Table(\n names=[\"start\", \"stop\", \"states\", \"comment\", \"source\"],\n dtype=[str, str, str, str, str],\n )\n return self._exclude_intervals",
"def intervals(start, end, delta, intervals_right_closed=False):\r\n result = []\r\n interval_start, interval_end = None, None\r\n while True:\r\n interval_start = start if interval_end is None else interval_end\r\n interval_end = min(end, interval_start + delta)\r\n result.append(thalesians.tsa.intervals.Interval(\r\n interval_start, interval_end,\r\n not intervals_right_closed, intervals_right_closed))\r\n if interval_end == end: break\r\n return result",
"def make_intervals(self):\n if not self:\n return []\n intervals = []\n self.sort()\n index_of_the_first = 0\n for i in range(len(self) - 1): # i: indexes from zero to len(self)\n if self[i] + 1 == self[i+1] or self[i] == self[i+1]:\n continue\n # elif self[i] == self[i+1]:\n # not_uniq.append( (self.count(self[i]), self[i]) )\n else:\n intervals.append((self[index_of_the_first], self[i]))\n index_of_the_first = i + 1\n # And now the last element:\n last_index = len(self) - 1\n intervals.append((self[index_of_the_first], self[last_index]))\n return intervals",
"def prune(self, interval, keep_end=False):\n self._times, self._values = pad_lists(interval, self._times, self._values, keep_end=keep_end)",
"def getDisjointIntervals(X: np.ndarray) -> list:\n if len(X) == 0:\n return []\n else:\n cc = 0\n currentToken = -1\n intervals = []\n reading = False\n while cc < len(X):\n\n if (X[cc] > 0) and (not reading):\n idxLeft = cc\n currentToken = X[cc]\n reading = True\n\n elif (X[cc] != currentToken) and reading:\n idxRight = (cc - 1)\n record = (int(idxLeft), int(idxRight), int(currentToken))\n intervals.append(record)\n\n if X[cc] > 0:\n idxLeft = cc\n currentToken = X[cc]\n reading = True\n else:\n reading = False\n\n cc += 1\n\n # termination case\n if reading:\n assert cc == len(X)\n idxRight = cc - 1\n record = (int(idxLeft), int(idxRight), int(currentToken))\n intervals.append(record)\n\n return intervals",
"def intervals(start, end, delta):\n intervals = []\n current = copy.deepcopy(start)\n while current < end:\n intervals.append((unix_to_iso(current.strftime('%s')),\n unix_to_iso((current + delta).strftime('%s'))))\n current += delta\n return intervals",
"def get_interval_list_predefined_gap(traces_list, gap_interval):\n\n intv = 0\n interval_list = []\n pre_traces = []\n\n for timst in traces_list:\n timst = timst.replace(microsecond=0)\n pre_traces.append(timst)\n\n for i in range(0, len(pre_traces)-1):\n iat = (pre_traces[i+1]-pre_traces[i]).total_seconds()\n if iat <= gap_interval:\n current_trace = pre_traces[i]\n while current_trace < pre_traces[i+1]:\n interval_list.append(current_trace)\n current_trace = current_trace + datetime.timedelta(0,1)\n else:\n interval_list.append(pre_traces[i])\n\n if i == len(pre_traces)-2:\n interval_list.append(pre_traces[i+1])\n\n return interval_list",
"def throw_random_gap_list(lengths, mask, save_interval_func, allow_overlap=False):\n # Use mask to find the gaps; gaps is a list of (length,start,end)\n lengths = [length for length in lengths if length > 0]\n min_length = min(lengths)\n gaps = []\n start = end = 0\n while True:\n start = mask.next_clear(end)\n if start == mask.size:\n break\n end = mask.next_set(start)\n if end - start >= min_length:\n gaps.append((end - start, start, None))\n # Sort (long regions first)\n gaps.sort()\n gaps.reverse()\n # Throw\n throw_random_private(lengths, gaps, save_interval_func, allow_overlap, three_args=False)",
"def findIntervals(intervals, length, weights, maximum, graph):\r\n intervals_used = []\r\n vertex = maximum\r\n while vertex > 1:\r\n for k in range(length):\r\n if vertex == intervals[k][1] and weights[k] != 0 \\\r\n and graph[vertex] == graph[intervals[k][0]] + weights[k]:\r\n intervals_used.insert(0, intervals[k]) # insertion at the beginning\r\n vertex = intervals[k][0] + 1 # +1 because of adjacent intervals\r\n vertex -= 1\r\n \r\n return intervals_used",
"def add_exclude_intervals(self):\n exclude_intervals = get_command_sheet_exclude_intervals()\n\n for row in exclude_intervals:\n states = row[\"states\"].split()\n if row[\"states\"] == \"\" or self.state_name in states:\n self.add_exclude_interval(\n start=row[\"start\"],\n stop=row[\"stop\"],\n comment=row[\"comment\"],\n source=\"Command Events sheet\",\n )",
"def check_intervals(what):\n\n intervals = []\n result = []\n\n for interval in what:\n for seen_interval in intervals:\n if ((interval[1] > seen_interval[0]) and (interval[0] <= seen_interval[0])) or \\\n ((interval[0] < seen_interval[1]) and (interval[0] >= seen_interval[0])):\n result.append(interval)\n break\n\n intervals.append(interval)\n\n return result",
"def inj_seg(self, exclude_coinc_flags=None):\n\n if exclude_coinc_flags is None:\n exclude_coinc_flags = []\n\n tmp_list = segments.segmentlist([])\n for key in self.exc_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.exc_dict[key])\n for key in self.seg_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.seg_dict[key])\n for key in self.bitmask_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.bitmask_dict[key])\n if self.schedule_time:\n seg = segments.segment(self.schedule_time, self.schedule_time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n for time in self.gracedb_time:\n seg = segments.segment(time, time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n return tmp_list",
"def remove_interval(s, start, stop):\n #s[:start] will get the string from start of string to 'start'->value stored in start\n #s[stop:] will get the string from 'stop'->value stored in the stop to end of the string\n temp_list = s[:start] + s[stop+1:]\n return temp_list",
"def calculate_intervals(tick_times: List[float]) -> List[float]:\n return [tick_times[i] - tick_times[i - 1] for i in range(1, len(tick_times))]",
"def intervals(b, min_length=1, forgivingJump=True, removeSmallRel=True, removeSmallFact=0.1, mergeCloseRel=False, mergeCloseFact=0.2):\r\n b = np.asarray(b)\r\n total = np.sum(b)\r\n\r\n min_length=max(min_length,1)\r\n if forgivingJump:\r\n min_jump=min_length\r\n else:\r\n min_jump=1\r\n\r\n if total==0:\r\n IStart = np.array([])\r\n IEnd = np.array([])\r\n Lengths= np.array([])\r\n return IStart, IEnd, Lengths\r\n elif total==1:\r\n i = np.where(b)[0][0]\r\n IStart = np.array([i])\r\n IEnd = np.array([i])\r\n Lengths= np.array([1])\r\n else:\r\n n = len(b)\r\n Idx = np.arange(n)[b]\r\n delta_Idx=np.diff(Idx)\r\n jumps =np.where(delta_Idx>min_jump)[0]\r\n if len(jumps)==0:\r\n IStart = np.array([Idx[0]])\r\n IEnd = np.array([Idx[-1]])\r\n else:\r\n istart=Idx[0]\r\n jumps=np.concatenate(([-1],jumps,[len(Idx)-1]))\r\n IStart = Idx[jumps[:-1]+1] # intervals start right after a jump\r\n IEnd = Idx[jumps[1:]] # intervals stops at jump\r\n Lengths = IEnd-IStart+1\r\n\r\n # Removing intervals smaller than min_length\r\n bKeep = Lengths>=min_length\r\n IStart = IStart[bKeep]\r\n IEnd = IEnd[bKeep]\r\n Lengths = Lengths[bKeep]\r\n # Removing intervals smaller than less than a fraction of the max interval\r\n if removeSmallRel:\r\n bKeep = Lengths>=removeSmallFact*np.max(Lengths)\r\n IStart = IStart[bKeep]\r\n IEnd = IEnd[bKeep]\r\n Lengths = Lengths[bKeep]\r\n\r\n # Distances between intervals\r\n if mergeCloseRel:\r\n if len(IStart)<=2:\r\n pass\r\n else:\r\n D = IStart[1:]-IEnd[0:-1]\r\n #print('D',D,np.max(D),int(np.max(D) * mergeCloseFact))\r\n min_length = max(int(np.max(D) * mergeCloseFact), min_length)\r\n if min_length<=1:\r\n pass \r\n else:\r\n #print('Readjusting min_length to {} to accomodate for max interval spacing of {:.0f}'.format(min_length, np.mean(D)))\r\n return intervals(b, min_length=min_length, forgivingJump=True, removeSmallRel=removeSmallRel, removeSmallFact=removeSmallFact, mergeCloseRel=False)\r\n return IStart, IEnd, Lengths",
"def _intervals(parts, duration) -> list:\n part_duration = duration / parts\n return [(floor(i * part_duration), floor((i + 1) * part_duration)) for i in range(parts)]",
"def filter_subspans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n filtered = []\n\n for span in spans:\n accept = True\n for compared in spans:\n if span[0] >= compared[0] and span[1] <= compared[1]:\n if span[0] != compared[0] or span[1] != compared[1]:\n accept = False\n if accept:\n filtered.append(span)\n\n filtered = list(dict.fromkeys(filtered)) # remove duplicates if present\n\n return filtered",
"def apply_list_intervals(list_, intervals):\n return [list_[interval] for interval in intervals]",
"def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]",
"def illegal_vertical_intervals(a_list, b_list):\n allowed_intervals = ['1', 'b3', '3', '4', '5', 'b6', '6']\n pairs = vertical_intervals(a_list, b_list)\n return [(i, t) for i, t in pairs if i[0] not in allowed_intervals]",
"def _build_intervals(self) -> List[Tuple[datetime.datetime, datetime.datetime]]:\n if self.granularity == 'HOUR':\n days = max(min((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['HOUR'][1]),\n self.GRANULARITIES['HOUR'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(hours=1)\n elif self.granularity == 'MONTH':\n # no need to split requests for monthly data\n days = max((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['MONTH'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(days=1)\n else:\n days = max(min((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['DAY'][1]),\n self.GRANULARITIES['DAY'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(days=1)\n\n time_pointer = self.bounds[1]\n intervals = []\n while time_pointer > self.bounds[0]:\n upper = time_pointer\n time_pointer -= interval_length\n intervals.append((time_pointer, upper))\n time_pointer -= offset\n return intervals",
"def get_segments(weights, threshold):\n marker_list = [True if i >= threshold else False for i in weights]\n i = 0\n final_pairs = []\n while i < len(weights):\n if marker_list[i]:\n start = i\n while i < len(weights) and marker_list[i]:\n i = i + 1\n end = i - 1\n if end-start > 1:\n final_pairs.append(start)\n final_pairs.append(end)\n i = i + 1\n return np.array(final_pairs)",
"def DeGap(consensus,cutoff = 0.95):\n newCon = []\n for i in consensus:\n if i[0][0] == '-' and i[1] > cutoff:\n print i\n continue\n newCon.append(i)\n return newCon",
"def throw_random_intervals(lengths, regions, save_interval_func=None, allow_overlap=False):\n # Copy regions\n regions = sorted((x[1] - x[0], x[0], x) for x in regions)\n # Sort (long regions first)\n regions.reverse()\n # Throw\n if save_interval_func is not None:\n throw_random_private(lengths, regions, save_interval_func, allow_overlap)\n return\n else:\n intervals = []\n\n def save_interval_func(s, e, rgn):\n return intervals.append(overwrite_start_end(s, e, rgn))\n\n throw_random_private(lengths, regions, save_interval_func, allow_overlap)\n return intervals",
"def keep_spans(cls, labels: Set[str], spans: Iterator[Span]) -> List[Span]:\n spans = filter(lambda sp: sp.label in labels, spans)\n spans = cls.dedupe_sort_spans(spans)\n return spans",
"def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))",
"def new_interval(self, epsilons):\n original = list()\n shrinked = list()\n\n for (i, j) in self.contingent_constraints:\n orig = (-self.stnu[j][i]['weight'], self.stnu[i][j]['weight'])\n original.append(orig)\n\n low = epsilons[(j, '-')].varValue\n high = epsilons[(j, '+')].varValue\n\n self.stnu.shrink_contingent_constraint(i, j, low, high)\n new = (-self.stnu[j][i]['weight'], self.stnu[i][j]['weight'])\n shrinked.append(new)\n\n return original, shrinked",
"def get_interval_seqs(interval_alignment: AlignIO.MultipleSeqAlignment):\n gapless_seqs = [str(record.seq.ungap(\"-\")) for record in interval_alignment]\n\n callback_seqs, expanded_seqs = [], []\n expanded_set = set()\n for seq in remove_duplicates(gapless_seqs):\n if len(expanded_set) == 0:\n callback_seqs.append(seq)\n if not set(seq).issubset(allowed_bases):\n continue\n alternatives = [iupac[base] for base in seq]\n for tuple_product in itertools.product(*alternatives):\n expanded_str = \"\".join(tuple_product)\n if expanded_str not in expanded_set:\n expanded_set.add(expanded_str)\n expanded_seqs.append(expanded_str)\n\n if len(expanded_set) == 0:\n logging.warning(\n \"WARNING: Every sequence must have contained an N in this slice - redo sequence curation because this is nonsense\"\n )\n logging.warning(f'Sequences were: {\" \".join(callback_seqs)}')\n logging.warning(\n \"Using these sequences anyway, and should be ignored downstream\"\n )\n return callback_seqs\n return expanded_seqs"
]
| [
"0.6381332",
"0.6008828",
"0.58163404",
"0.5717993",
"0.5708518",
"0.5695179",
"0.56371427",
"0.5585018",
"0.55599874",
"0.54790545",
"0.54719436",
"0.5403952",
"0.5388549",
"0.53085476",
"0.5296689",
"0.52775645",
"0.5258872",
"0.51756686",
"0.5153444",
"0.513539",
"0.5132201",
"0.5101664",
"0.5093562",
"0.5087071",
"0.5060514",
"0.50487363",
"0.50475645",
"0.50279826",
"0.5024502",
"0.49678963"
]
| 0.6803382 | 0 |
Generate a flask server. | def make_server() -> Flask:
app: Flask = Flask(__name__)
return app | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flask_server():\n return 'http://localhost:5000'",
"def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])",
"def init():\n server = Flask(__name__)\n \n return server",
"def flask_server(real_model):\n def run_app(port):\n if not real_model:\n app.config['FAKE_MODEL'] = True\n app.run(port=port, use_reloader=False)\n\n server_process = Process(target=run_app, args=(PORT, ))\n server_process.start()\n\n # Give 2 secs for the Flask server to start up\n time.sleep(2)\n\n yield f'http://localhost:{PORT}'\n\n server_process.terminate()",
"def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)",
"def runServer(self, services=[]):\n self.add_routes()\n #debug = self.general_section.get(\"debug\")\n host = self.general_section.get(\"host\")\n use_reloader = ast.literal_eval(self.general_section.get(\"use_reloader\"))\n app_port = int(self.general_section.get(\"port\"))\n cFCGI = ast.literal_eval(self.fcgi_section.get(\"enabled\"))\n fcgi_port = int(self.fcgi_section.get(\"port\"))\n must_have_client_cert = ast.literal_eval(self.certificates_flask_section.get(\"force_client_certificate\"))\n if cFCGI:\n logger.info(\"registering fcgi server at %s:%i\", host, fcgi_port)\n from flup.server.fcgi import WSGIServer\n WSGIServer(self._app, bindAddress=(host, fcgi_port)).run()\n else:\n logger.info(\"registering app server at %s:%i\", host, app_port)\n # this workaround makes sure that the client cert can be acquired later (even when running the development server)\n # copied all this stuff from the actual flask implementation, so we can intervene and adjust the ssl context\n # self._app.run(host=host, port=app_port, ssl_context='adhoc', debug=debug, request_handler=ClientCertHTTPRequestHandler)\n\n # the code from flask's `run...`\n # see https://github.com/mitsuhiko/flask/blob/master/flask/app.py\n #options = {}\n try:\n # now the code from werkzeug's `run_simple(host, app_port, self._app, **options)`\n # see https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/serving.py\n #from werkzeug.debug import DebuggedApplication\n import socket\n #application = DebuggedApplication(self._app, True)\n \n # Set up an SSL context\n context = SSL.Context(SSL.SSLv23_METHOD)\n certs_path = os.path.normpath(os.path.join(os.path.dirname(__file__), \"../../..\", \"cert\"))\n context_crt = os.path.join(certs_path, \"server.crt\")\n context_key = os.path.join(certs_path, \"server.key\")\n try:\n context.use_certificate_file(context_crt)\n context.use_privatekey_file(context_key)\n except Exception as e:\n logger.critical(\"error starting flask server. Cert or key is missing under %s\", certs_path)\n sys.exit(e)\n \n def inner():\n #server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, 'adhoc')\n server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, ssl_context=context)\n #server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, ssl_context=(context_crt, context_key)) \n # The following line is the reason why I copied all that code!\n if must_have_client_cert:\n # FIXME: what works with web app does not work with cli. Check this out\n server.ssl_context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, lambda a,b,c,d,e: True)\n # before enter in the loop, start the supplementary services\n for s in services:\n s.start()\n # That's it\n server.serve_forever()\n address_family = serving.select_ip_version(host, app_port)\n test_socket = socket.socket(address_family, socket.SOCK_STREAM)\n test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n test_socket.bind((host, app_port))\n test_socket.close()\n # Disable reloader only by explicit config setting\n if use_reloader == False:\n serving.run_simple(host, app_port, self._app, use_reloader=False)\n else:\n serving.run_with_reloader(inner, None, 1)\n finally:\n self._app._got_first_request = False",
"def generate(self) -> Flask:\n app = Flask(self.name, *self.args, **self.kwargs)\n app = self.setup_app_config(app)\n app = self.add_app_headers(app)\n app = self.add_xsrf_error_handler(app)\n\n return app",
"def main() -> None:\r\n # Configures and starts logging.\r\n app.config[\"logger\"] = start_logging()\r\n # Starts and runs Flask server on localhost:5000 if True.\r\n if app.config[\"localhost\"]:\r\n app.run()\r\n # Else starts and runs Flask server that listens on all IPs. Meaning, Flask\r\n # server can be accessed via executing machine's IP address, e.g.,\r\n # 100.68.241.2:5000 - so Flask server can be reached from machines in same\r\n # network or accessed via the public IP (remotely), e.g., 31.220.200.5:5000\r\n else:\r\n app.run(host=\"0.0.0.0\")",
"def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )",
"def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)",
"def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())",
"def server_it():\n\n app = flask.Flask(__name__, static_url_path='/static')\n\n app.route('/')(serve_index)\n\n\n @app.route('/brain/data/<path:path>')\n def serve_brain_data(path):\n data_dir = 'generated/data'\n return flask.send_from_directory(data_dir, path)\n\n @app.route('/brain/<path:path>')\n def serve_roygbiv_html(path):\n try:\n return flask.send_from_directory('brain', path)\n except Exception as e:\n import roygbiv\n viz_dir = os.path.join(os.path.dirname(roygbiv.__file__), 'web')\n return flask.send_from_directory(viz_dir, path)\n\n # GWAS app\n @app.route('/gwas/data/<path:path>')\n def serve_gwas_data(path):\n data_dir = 'generated/data'\n return flask.send_from_directory(data_dir, path)\n\n @app.route('/gwas/')\n @app.route('/gwas/index.html')\n def serve_default():\n import ping.viz\n viz_dir = os.path.dirname(ping.viz.__file__)\n man_dir = os.path.join(viz_dir, 'manhattan')\n return flask.send_from_directory(man_dir, 'manhattan.html')\n\n @app.route('/gwas/<path:path>')\n def serve_gwas_html(path):\n import ping.viz\n viz_dir = os.path.dirname(ping.viz.__file__)\n man_dir = os.path.join(viz_dir, 'manhattan')\n return flask.send_from_directory(man_dir, path)\n\n # Scatter app\n @app.route('/plots/<path:path>')\n def serve_plot(path):\n return flask.send_from_directory('generated/plots', path)\n @app.route('/2015/<path:path>')\n def serve_old(path):\n return flask.send_from_directory('2015', path)\n app.debug = True\n app.run()",
"def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app",
"def create_flask_app():\n app = Flask(__name__)\n CORS(app, supports_credentials=True)\n random.seed('example_tag_server')\n app.secret_key = random_string(100)\n return app",
"def set_flask():\r\n app.run(host='0.0.0.0',\r\n port=5010,\r\n debug=False)",
"def run_server(app: Flask, ip: str, port: int) -> Flask:\n app.run(ip, port)\n\n return app",
"def run_server(kit, local, port, debug):\n host = '127.0.0.1' if local else '0.0.0.0'\n apps = len(kit.flasks)\n if not apps:\n print 'No Flask app found!'\n return\n elif apps == 1:\n app = kit.flasks[0]\n else:\n app_number = getenv('KIT_FLASK_APP', None)\n if not app_number:\n s = '%s Flask applications found:\\n\\n # Name\\n' % (apps, )\n s += '\\n'.join(\n '%04s %s' % (index, flask_app.name)\n for index, flask_app in enumerate(kit.flasks)\n )\n s += '\\n\\nWhich # would you like to run? '\n app_number = raw_input(s)\n environ['KIT_FLASK_APP'] = app_number\n app = kit.flasks[int(app_number)]\n app.run(host=host, port=port, debug=debug, extra_files=[kit.path])",
"def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))",
"def serve(port):\n app.run(host='0.0.0.0', port=port, debug=True)",
"def make_app() -> Flask:\n logger.info('creating flask application')\n app = Flask(\n 'pasta',\n static_url_path='/static',\n static_folder='./static',\n template_folder='./views')\n config.flask.SECRET_KEY = os.urandom(32)\n config.flask.SERVER_NAME = None\n app.config.from_mapping(config.flask)\n return app",
"def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)",
"def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass",
"def main():\n # Debug is enabled by default, can be disabled by environment variable\n debug = not os.environ.get(\"NO_DEBUG\", False)\n if debug:\n # Workaround for the werkzeug reloader removing the current directory\n # from the path. It's nasty, but it works! Inspired by:\n # https://github.com/mitsuhiko/flask/issues/1246\n os.environ[\"PYTHONPATH\"] = os.getcwd()\n # Enable PTVSD in werkzeug watched processes only\n # if \"WERKZEUG_RUN_MAIN\" in os.environ:\n # ptvsd.enable_attach()\n # Run the Flask app\n APP.run(host=\"0.0.0.0\", port=8000)",
"def create_app():\n\n # --------------------- #\n # Initial configuration #\n # --------------------- #\n\n instance_path = config.INSTANCE_PATH\n\n # Creates the instance path if it doesn't exist\n if not os.path.exists(instance_path):\n os.makedirs(instance_path)\n\n app = Flask(__name__, instance_path=instance_path)\n\n # Logging utility setup\n if app.config['ENV'] == 'development' or app.config['DEBUG'] is True:\n log_level = logging.DEBUG\n else:\n if hasattr(logging, config.DEBUG_LOG_LEVEL):\n log_level = getattr(logging, config.DEBUG_LOG_LEVEL)\n else:\n print(\n \"WARNING: log level value from config file is not a valid attribute: {}\".format(config.DEBUG_LOG_LEVEL))\n print(f\"Defaulting to '{logging.WARNING}\")\n log_level = logging.WARNING\n\n logging.basicConfig(\n format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n level=log_level,\n filename=config.LOG_PATH\n )\n\n # Intercepts generic server errors and logs them\n @app.errorhandler(werkzeug.exceptions.HTTPException)\n def handle_errors(e):\n logging.error(str(e))\n return str(e), 500\n\n # Handles correct favicon\n @app.route('/favicon.ico')\n def favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n # --------- #\n # Web pages #\n # --------- #\n\n # Root page\n @app.route('/')\n def index():\n \"\"\"Simple root page.\n\n The \"@app.route('/')\" decorator assigns this function\n to the '/' address, so that when you visit '/', a\n request is sent to the server, which will call this function.\n\n Once this function is called it returns an html page\n produced from the 'index.html' file.\n\n Returns\n -------\n html page\n \"\"\"\n return render_template('index.html')\n\n @app.route('/test_drawing')\n def test_drawing_page():\n return render_template('test_drawing.html')\n\n @app.route('/log', methods=['GET'])\n def view_log():\n \"\"\"Display the log\"\"\"\n if request.values.get(\"clear\") == \"True\":\n with open(config.LOG_PATH, \"w\") as log_file:\n log_file.write(\"\")\n logging.info(\"Log file cleared from browser.\")\n print(request.values.get(\"clear\"))\n with open(config.LOG_PATH) as log_file:\n log_text = log_file.read()\n return render_template('log.html', log_text=log_text)\n\n @app.route('/study_legacy')\n def study_legacy():\n \"\"\"Renders the study page.\n\n Returns\n -------\n html page\n\n \"\"\"\n return render_template('study-legacy.html')\n\n @app.route('/study')\n def study():\n \"\"\"Renders the study page.\n\n Returns\n -------\n html page\n\n \"\"\"\n return render_template('study.html')\n\n # --------------------- #\n # API-related functions #\n # --------------------- #\n\n @app.route('/api_initialise_gp_and_sample', methods=['GET', 'POST'])\n def api_initialise_gp_and_sample():\n \"\"\"Initialises a GP based on the given parameters.\n\n The parameters are retrieved from the settings file. After initialising\n the GP it samples a function from it to be the true function. 
Finally\n it chooses a query point uniformly at random.\n\n All the data is sent to the frontend as a JSON object to be used by the frontend.\n\n Returns\n -------\n JSON data\n\n \"\"\"\n\n # Retrieves the data from the request object\n interface_settings = utils.get_response_and_log(request)\n logging.debug(\"Interface settings: {}\".format(str(interface_settings)))\n\n # Loads the settings file\n settings_file_name = interface_settings[\n 'settings_name'] # if 'settings_name' in interface_settings else 'default'\n settings = io.load_settings(settings_file_name)\n logging.debug(\"File settings: {}\".format(str(settings)))\n\n # Integrate the settings with those provided by the interface, if any\n for key in interface_settings.keys():\n if key not in settings:\n settings[key] = interface_settings[key]\n\n # Fail early and provide some error message when crucial data is missing.\n try:\n utils.assert_required_data(settings, ['x_limits', 'n_points', 'noise'])\n except AssertionError as e:\n logging.error(str(e))\n logging.error(\"Provided keys: {}\".format(settings.keys()))\n return str(e), 400 # BAD_REQUEST\n\n # Generate user and session IDs if not provided\n user_id: int = settings['user_id'] if 'user_id' in settings else io.get_new_user_id(\n study_name=settings_file_name)\n settings['user_id'] = str(user_id)\n\n # Ensure save dir exists\n if not (\"save\" in settings and settings[\"save\"] == False):\n io.ensure_savedir_exists(study_name=settings_file_name, sub_path=str(user_id))\n\n session_id: int = settings['session_id'] if 'session_id' in settings else io.get_new_session_id(user_id,\n study_name=settings_file_name)\n settings['user_id'] = str(user_id)\n settings['session_id'] = str(session_id)\n\n # Call GP data_gp_initialisation function\n x, y_true, query_index, mean_vector, confidence_up, confidence_down = user_study_gp.data_gp_initialisation(\n settings['x_limits'][0],\n settings['x_limits'][1],\n settings['n_points'],\n settings['kernel'],\n settings['kernel_args'],\n settings['noise']\n )\n\n # Convert the data to JSON\n data = {\n \"settings\": settings,\n \"iteration\": 0,\n \"new_point_index\": query_index, # index of new point to be queried\n \"new_point_x\": x[query_index], # new point to be queried\n 'x_data': [], # queried data points (initially empty)\n 'y_data': [], # values given by the user for the queried points (initially empty)\n 'y_data_actual': [], # actual value of f(queried point)\n 'x_limits': settings['x_limits'],\n 'n_points': settings['n_points'],\n \"x\": x, # x points in the interval (linspace)\n \"y\": y_true, # f(x) true values in the x points\n \"mean\": mean_vector,\n \"std\": confidence_up + confidence_down, # list concatenation\n }\n\n # Update session_id to match session, when running a full user study\n if \"max_sessions\" in settings:\n if \"update_session\" in interface_settings and interface_settings[\"update_session\"] == True:\n data[\"session\"] = interface_settings[\"session\"] + 1\n else:\n data[\"session\"] = 0\n session_id = data[\"session\"]\n\n if \"save\" in settings and settings[\"save\"] == False:\n logging.debug(\"Not saving data because of settings[\\\"save\\\"] = False\")\n else:\n io.save_data(data,\n study_name=settings_file_name,\n user_id=user_id,\n session_id=session_id,\n incremental=settings['save_split'])\n\n return utils.remove_nan(json.dumps(data))\n\n @app.route('/api_update_gp', methods=['GET', 'POST'])\n def api_update_gp():\n \"\"\"Updates a GP based on the given parameters.\n\n The parameters are retrieved from 
the request object. It updates the GP with the new points. Finally it chooses\n a new query point.\n\n All the data is sent to the frontend as a JSON object.\n\n Returns\n -------\n JSON data\n\n \"\"\"\n logging.info(\"Called: api_update_gp\")\n data = utils.get_response_and_log(request)\n try:\n utils.assert_required_data(data,\n [\n 'settings', # settings of the user study\n 'x_data', # queried data points\n 'y_data', # values by the user for the queried points\n \"x_limits\", # beginning and end of the interval\n \"x\", # x points\n \"iteration\" # current iteration\n ])\n except AssertionError as e:\n logging.error(str(e))\n logging.error(\"Provided keys: {}\".format(data.keys()))\n return str(e), 400 # BAD_REQUEST\n\n if (\"x_data\" in data and \"y_data\" in data) and (len(data[\"x_data\"]) >= 1 and len(data[\"y_data\"]) >= 1):\n logging.info(\"Received new data point: ({}, {}), updating..\".format(\n data[\"x_data\"][-1],\n data[\"y_data\"][-1])\n )\n\n settings = data['settings']\n\n # Update vanilla GP\n query_index, mean_vector, upper_confidence, lower_confidence = user_study_gp.update(data[\"x\"],\n settings[\"kernel\"],\n settings[\"kernel_args\"],\n data[\"x_data\"],\n data[\"y_data\"],\n settings[\"noise\"])\n\n # Update data\n data[\"new_point_index\"] = query_index\n data[\"new_point_x\"] = data[\"x\"][query_index]\n data[\"mean\"] = mean_vector\n data[\"std\"] = upper_confidence + lower_confidence\n data[\"iteration\"] += 1\n\n data_json = utils.remove_nan(json.dumps(data))\n if \"save\" in settings and settings[\"save\"] == False:\n logging.debug(\"Not saving data because of settings[\\\"save\\\"] = False\")\n else:\n logging.debug(f'Study name: {settings[\"settings_name\"]}')\n io.save_data(data,\n study_name=settings[\"settings_name\"],\n user_id=settings['user_id'],\n session_id=settings['session_id'],\n incremental=settings['save_split'])\n return data_json\n\n return app",
"def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()",
"def create_app() -> Flask:\n\n flask_app = Flask('extraction_api', template_folder='./template')\n flask_app.secret_key = \"super secret key\"\n # import blueprints\n flask_app.register_blueprint(extraction_app)\n\n return flask_app",
"def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)",
"def run():\n app.run(debug=True, port=5001)",
"def main():\n return run_server(**parse_server_args())",
"def serve(port=3000, httpdir=\"/srv/http\", channels={}, livestream={}, secret_key=\"ChangeMe\"):\n\n app = Flask(__name__)\n\n app.config['http_dir'] = httpdir\n app.config['channels'] = channels\n app.config['livestream'] = livestream\n app.config['SECRET_KEY'] = secret_key\n\n login_manager.init_app(app)\n\n app.register_blueprint(blueprint)\n\n # blueprints cannot handle 404 or 405 errors, so stick this on the\n # app directly.\n @app.errorhandler(404)\n def page_not_found(error):\n return send_file(in_http_dir(\"404.html\"))\n\n return app.run(port=port)"
]
| [
"0.7725426",
"0.74924284",
"0.7400841",
"0.7311307",
"0.7267347",
"0.71777993",
"0.7129913",
"0.7076276",
"0.70508975",
"0.7045996",
"0.7012823",
"0.6981721",
"0.68616796",
"0.68501663",
"0.68371254",
"0.68179923",
"0.6806177",
"0.680467",
"0.67978334",
"0.6772853",
"0.67343664",
"0.670458",
"0.6695043",
"0.662046",
"0.6618466",
"0.66107047",
"0.65989983",
"0.658514",
"0.6552597",
"0.6538994"
]
| 0.81040865 | 0 |
Function for setting up hoist on an app. | def add_hoist(self, app: Flask, handle_errors: bool = True, auth: list = [""], premade_pages: bool = True) -> Flask:
if hasattr(app, 'HOIST_INTERNALSERVER'):
raise HoistExistsError('hoist is already set up on app')
app.HOIST_INTERNALSERVER = Server(app, handle_errors)
@app.route('/hoist/send', methods=['POST'])
def hoist_send() -> str:
return self.get_response(app, auth, app.HOIST_INTERNALSERVER._received, 'msg')
if premade_pages:
@app.route('/hoist', methods=['POST', 'GET'])
def hoist_home() -> str:
if request.method == 'POST':
return jsonify({'RESPONSE': f'Version {__version__}'})
            # Done with raw HTML instead of flask.render_template so we don't have to touch the app's template_folder property
html = HTML.replace('{{ version }}', __version__).replace('{{ serverUrl }}', request.base_url)
return html
return app | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_app(app, hive_setting):\n global about\n about = About(app, hive_setting)\n app.register_blueprint(blueprint)",
"def init_app(app, hive_setting):\n # global scripting\n # scripting = Scripting(app=app, hive_setting=hive_setting)\n app.register_blueprint(blueprint)",
"def init_app(app, hive_setting):\n # global scripting\n # scripting = Scripting(app=app, hive_setting=hive_setting)\n app.register_blueprint(blueprint)",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)",
"def _setup(app_obj):",
"def init_app(app):\n app.load_extension(__name__)",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n # meta.metadata.drop_all(bind=meta.engine, checkfirst=True)\n meta.metadata.create_all(bind=meta.engine)",
"def setup_application(self):\n pass",
"def setup(self, app_args):\n raise NotImplementedError",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n model._Base.metadata.create_all(bind=meta.engine)",
"def init_app(self, app):\n app.config.setdefault('YAWT_INDEXER_IFC', 'yawtext.whoosh')\n app.config.setdefault('YAWT_INDEXER_WHOOSH_INFO_FIELDS', {})\n app.config.setdefault('YAWT_INDEXER_WHOOSH_FIELDS',\n {'content': TEXT()})",
"def setup(app):\n wheel = ensure_wheel()\n subprocess.check_call([\n \"jupyter\", \"lite\", \"build\", f\"--LiteBuildConfig.federated_extensions={wheel}\",\n ], cwd=DEMO)",
"def init_app(state):\n app = state.app\n\n app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)\n app.config.setdefault('SPLIT_DB_FAILOVER', False)\n app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])\n app.config.setdefault('SPLIT_ROBOT_REGEX', r\"\"\"\n (?i)\\b(\n Baidu|\n Gigabot|\n Googlebot|\n libwww-perl|\n lwp-trivial|\n msnbot|\n SiteUptime|\n Slurp|\n WordPress|\n ZIBB|\n ZyBorg\n )\\b\n \"\"\")\n\n app.jinja_env.globals.update({\n 'ab_test': ab_test,\n 'finished': finished\n })\n\n @app.template_filter()\n def percentage(number):\n number *= 100\n if abs(number) < 10:\n return \"%.1f%%\" % round(number, 1)\n else:\n return \"%d%%\" % round(number)",
"def setup(app):\n app.connect('builder-inited', generate_area_file)\n\n # Add option to only build a couple areas since all take a while--ONLY FOR DEV\n app.add_config_value('metpy_generate_all_areas', default=True, rebuild='html', types=bool)",
"def setup(self, app: VisModel):\n self.app = app",
"def setup_app(command, conf, vars):\n\n load_environment(conf.global_conf, conf.local_conf)\n\n if asbool(conf.get('reset_database', 'false')):\n reset_db(meta.engine)\n\n initialize_dictionaries(meta.engine)\n\n initialize_db_defaults(meta.engine)",
"async def startup_handler(app):\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n from practice.model.meta import Base, Session\n\n log.info(\"Creating tables\")\n\n Base.metadata.drop_all(checkfirst=True, bind=Session.bind)\n Base.metadata.create_all(bind=Session.bind)\n\n log.info(\"Successfully setup\")",
"def setup(app):\n app.add_directive('show_tasks', ShowTasksDirective)",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n model.metadata.create_all()\n\n # Initialisation here ... this sort of stuff:\n\n # some_entity = model.Session.query(model.<modelfile>.<Some_Entity>).get(1)\n # e.g. foo = model.Session.query(model.identity.User).get(1)\n # from datetime import datetime\n # some_entity.poked_on = datetime.now()\n # model.Session.add(some_entity)\n u = User(name=\"cemeyer2\", superuser=True, enabled=True)\n populate_user_from_active_directory(u)\n s = SolutionSemester(year=-1, season=u\"Fall\", isSolution=True)\n s2 = BaseSemester(year=-2, season=u'Fall', isSolution=True)\n Session.commit()",
"def setup_app(app):\n try:\n config_key = ndb.Key('WordListConfig', os.environ['CONFIG_MODEL_ID'])\n app.wordlist_config = config_key.get()\n except:\n print('Cannot load config from Datastore', file=sys.stderr)\n sys.exit(1)",
"def add_app(self):\n \n pass",
"def setup(app):\n app.connect(\"html-page-context\", html_page_context)\n app.connect(\"build-finished\", build_finished)\n app.sitemap_links = set()\n\n return PARALLEL_SAFE",
"def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' + n))\n _init_uris(app, simulation_db)",
"def setup_environ(app, global_conf, app_conf):\n from example.lib.templating import Templating\n templating = Templating(app_conf)\n\n def application(environ, start_response):\n environ['restish.templating'] = templating\n environ['couchish'] = adminish.config.make_couchish_store(app_conf, 'example.model')\n environ['adminish'] = adminish.config.make_adminish_config(environ['couchish'].config.types)\n return app(environ, start_response)\n\n return application",
"def setup(app):\n # Connect our functions to Sphinx events\n app.connect(\"config-inited\", config_autosaltsls)\n app.connect(\"builder-inited\", run_autosaltsls)\n\n # Defined the config options we have\n app.add_config_value(\"autosaltsls_build_root\", \".\", \"env\")\n app.add_config_value(\"autosaltsls_display_master_indices\", True, \"html\")\n app.add_config_value(\"autosaltsls_doc_prefix\", \"###\", \"html\")\n app.add_config_value(\"autosaltsls_comment_ignore_prefix\", \"#!\", \"html\")\n app.add_config_value(\"autosaltsls_comment_prefix\", \"#\", \"html\")\n app.add_config_value(\"autosaltsls_indented_comments\", False, \"html\")\n app.add_config_value(\"autosaltsls_index_template_path\", \"\", \"env\")\n app.add_config_value(\"autosaltsls_remove_first_space\", True, \"html\")\n app.add_config_value(\"autosaltsls_sources\", None, \"env\")\n app.add_config_value(\"autosaltsls_sources_root\", \"..\", \"env\")\n app.add_config_value(\"autosaltsls_source_url_root\", None, \"html\")\n app.add_config_value(\"autosaltsls_write_index_page\", False, \"env\")\n\n # Add an object type for the sls files\n app.add_object_type(\n \"sls\", \"sls\", objname=\"sls file\", indextemplate=\"pair: %s; sls file\"\n )",
"def make_shell_context():\n return dict(app=app)",
"def make_shell_context():\n return dict(app=app)",
"def setup(app):\n app.add_directive('alias', AliasDirective)",
"def setup(app):\n app.add_config_value('discourse_url', None, 'html')\n app.add_directive('discourse', DiscourseDirective)\n app.add_node(DiscourseNode,\n html=(DiscourseNode.visit, DiscourseNode.depart),\n latex=(DiscourseNode.visit, DiscourseNode.depart),\n text=(DiscourseNode.visit, DiscourseNode.depart))\n app.connect('html-page-context', event_html_page_context)\n return {'version': __version__}"
]
| [
"0.6373233",
"0.6173054",
"0.6173054",
"0.5939405",
"0.5933032",
"0.5818298",
"0.58132255",
"0.58080477",
"0.56636417",
"0.5651495",
"0.5648509",
"0.5567701",
"0.5566128",
"0.5564374",
"0.553212",
"0.54755706",
"0.545845",
"0.54438645",
"0.5426669",
"0.54038495",
"0.5375148",
"0.53713375",
"0.5367822",
"0.53654206",
"0.5365235",
"0.53583825",
"0.5356053",
"0.5356053",
"0.52887225",
"0.5286771"
]
| 0.6345041 | 1 |
Function for setting up a hoist proxy on an app. | def add_proxy(self, app: Flask, handle_errors: bool = True, auth: list = [""]) -> Flask:
    raise NotImplementedError('proxies are not yet supported')
if hasattr(app, 'HOIST_INTERNALPROXY'):
raise HoistExistsError('hoist is already set up on app')
app.HOIST_INTERNALPROXY = HoistProxy(app, handle_errors)
@app.route('/hoist/proxy/connect', methods=['POST'])
def hoist_proxy_connect() -> str:
return self.get_response(app, auth, app.HOIST_INTERNALPROXY._connect, 'data')
@app.route('/hoist/proxy/disconnect', methods=['POST'])
def hoist_proxy_disconnect() -> str:
return self.get_response(app, auth, app.HOIST_INTERNALPROXY._disconnect, 'data')
return app | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_hoist(self, app: Flask, handle_errors: bool = True, auth: list = [\"\"], premade_pages: bool = True) -> Flask:\n if hasattr(app, 'HOIST_INTERNALSERVER'):\n raise HoistExistsError('hoist is already set up on app')\n\n app.HOIST_INTERNALSERVER = Server(app, handle_errors)\n\n @app.route('/hoist/send', methods=['POST'])\n def hoist_send() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALSERVER._received, 'msg')\n\n if premade_pages:\n @app.route('/hoist', methods=['POST', 'GET'])\n def hoist_home() -> str:\n if request.method == 'POST':\n return jsonify({'RESPONSE': f'Version {__version__}'})\n\n # done with html instead of flask.render_template so i dont have to touch the apps template_folder property\n \n html = HTML.replace('{{ version }}', __version__).replace('{{ serverUrl }}', request.base_url)\n\n return html\n \n\n return app",
"def init_app(app, hive_setting):\n global about\n about = About(app, hive_setting)\n app.register_blueprint(blueprint)",
"def _swift_proxy_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('proxy')\n self._swift_install('proxy')\n self._set_onhold('proxy')\n self._final_install_touches('proxy')",
"def add_view(config):\n config.add_route('ogcproxy', '/ogcproxy')\n config.add_view('papyrus_ogcproxy.views:ogcproxy', route_name='ogcproxy')",
"def setup(self, app_args):\n raise NotImplementedError",
"def setup(hass, config):\n hass.http.register_view(APIAIWebhookView)\n return True",
"def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' + n))\n _init_uris(app, simulation_db)",
"def _setup(app_obj):",
"def init_app(app, hive_setting):\n # global scripting\n # scripting = Scripting(app=app, hive_setting=hive_setting)\n app.register_blueprint(blueprint)",
"def init_app(app, hive_setting):\n # global scripting\n # scripting = Scripting(app=app, hive_setting=hive_setting)\n app.register_blueprint(blueprint)",
"async def startup_handler(app):\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)",
"def init_app(self, app):\n # Avoid double initialization\n if self._tornado_app is app:\n return None\n if self._tornado_app is not None:\n raise RuntimeError(\n \"This API has already been registered on a tornado application.\"\n )\n\n self._tornado_app = app\n app.settings[\"jsonapi\"] = self\n\n # Add the handler.\n url_rule = tornado.web.url(\n self.uri + \"/.*\", Handler, dict(jsonapi=self), name=\"jsonapi\"\n )\n app.add_handlers(\".*\", [url_rule])\n return None",
"def __init__(self, urlWebApp: str, proxies: dict):\n self.__urlWebApp = urlWebApp\n self.__proxies = proxies",
"def setup(app):\n app.add_directive('alias', AliasDirective)",
"def entry_point(proxy_port_number):\n\n setup_sockets(proxy_port_number)\n print(\"*\" * 50)\n print(\"[entry_point] Implement me!\")\n print(\"*\" * 50)\n return None",
"def setup(app):\n wheel = ensure_wheel()\n subprocess.check_call([\n \"jupyter\", \"lite\", \"build\", f\"--LiteBuildConfig.federated_extensions={wheel}\",\n ], cwd=DEMO)",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)",
"def set_proxy(self):",
"def init_app(self, app):\n app.config.setdefault('YAWT_INDEXER_IFC', 'yawtext.whoosh')\n app.config.setdefault('YAWT_INDEXER_WHOOSH_INFO_FIELDS', {})\n app.config.setdefault('YAWT_INDEXER_WHOOSH_FIELDS',\n {'content': TEXT()})",
"def setup_app(app):\n try:\n config_key = ndb.Key('WordListConfig', os.environ['CONFIG_MODEL_ID'])\n app.wordlist_config = config_key.get()\n except:\n print('Cannot load config from Datastore', file=sys.stderr)\n sys.exit(1)",
"def setup(app):\n app.connect(\"html-page-context\", html_page_context)\n app.connect(\"build-finished\", build_finished)\n app.sitemap_links = set()\n\n return PARALLEL_SAFE",
"def switch_proxy(self, proxy):",
"def setup(self, app: VisModel):\n self.app = app",
"async def _create_proxy(self):\n self._proxy = await self._controller.fopen_tcp_proxy(\n Cellular._DRONE_WEB_API_PORT\n )\n\n self._drone_http_url = f\"http://{self._proxy.address}:{self._proxy.port}\"\n\n if self._autoconfigure and self._user_apc_token is None:\n self.logger.info(\"cellular auto pairing and configuration\")\n # generate a new anonymous user APC token and configure the cellular.\n self._fautoconfigure_with_new_token()",
"def proxy_settings(self):\n if config.proxy_host is None or config.proxy_host == \"\":\n return\n\n proxy = urllib2.ProxyHandler({\"http\": config.proxy_host})\n opener = urllib2.build_opener(proxy)\n urllib2.install_opener(opener)",
"def pibooth_startup(cfg, app):",
"def get_mapproxy(tileset):\n \n mapproxy_cf, seed_cf = generate_confs(tileset)\n\n # Create a MapProxy App\n app = MapProxyApp(mapproxy_cf.configured_services(), mapproxy_cf.base_config)\n\n # Wrap it in an object that allows to get requests by path as a string.\n return TestApp(app), mapproxy_cf",
"def configure_proxy(self, proxy):\n server_name = self.get_external_domain()\n tls_enabled = self.get_tls()\n ircd_enabled = self.charm_config.get(\"enable-ircd\")\n federation_enabled = self.get_federation()\n\n if tls_enabled:\n self.external_port = 443\n else:\n self.external_port = 80\n\n proxy_config = [\n {\n \"mode\": \"http\",\n \"external_port\": self.external_port,\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": 8008,\n \"subdomain\": server_name,\n },\n ]\n\n if federation_enabled:\n proxy_config.append(\n {\n \"mode\": self.get_federation_mode(),\n \"external_port\": 8448,\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": 8448,\n }\n )\n\n if ircd_enabled:\n proxy_config.append(\n {\n \"mode\": self.get_irc_mode(),\n \"external_port\": self.get_irc_port(),\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": self.irc_internal_port,\n }\n )\n\n proxy.configure(proxy_config)",
"def configure_app(self):\n self.app.route('/', callback=self.get_api)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n entity = OppleLight(name, host)\n\n add_entities([entity])\n\n _LOGGER.debug(\"Init light %s %s\", host, entity.unique_id)"
]
| [
"0.5974772",
"0.5644863",
"0.5587678",
"0.55734426",
"0.55438834",
"0.54780686",
"0.544415",
"0.5442074",
"0.5438981",
"0.5438981",
"0.54374284",
"0.5357864",
"0.534319",
"0.52833265",
"0.5277465",
"0.52770686",
"0.5252061",
"0.52485543",
"0.52338004",
"0.5226942",
"0.52260596",
"0.52177846",
"0.5182108",
"0.5157988",
"0.5111882",
"0.51095957",
"0.50929636",
"0.5073112",
"0.50712454",
"0.50621796"
]
| 0.7220567 | 0 |
Function for running a Flask app in a thread. | def thread_server(self, app: Flask, ip: str, port: int) -> Flask:
server: Thread = Thread(target = self.run_server, args = (app, ip, port))
server.start()
return app | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(debug, threaded, host, port):\n \n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\r\n\r\n HOST, PORT = host, port\r\n print(\"running on %s:%d\" % (HOST, PORT))\r\n app.run(host=HOST, port=PORT, debug=True, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\r\n\r\n HOST, PORT = host, port\r\n print(\"running on %s:%d\" % (HOST, PORT))\r\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\r\n\r\n HOST, PORT = host, port\r\n print(\"running on %s:%d\" % (HOST, PORT))\r\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print (\"running on %s:%d\" % (HOST, PORT))\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)",
"def run():\n app.run()",
"def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)",
"def main():\n # Debug is enabled by default, can be disabled by environment variable\n debug = not os.environ.get(\"NO_DEBUG\", False)\n if debug:\n # Workaround for the werkzeug reloader removing the current directory\n # from the path. It's nasty, but it works! Inspired by:\n # https://github.com/mitsuhiko/flask/issues/1246\n os.environ[\"PYTHONPATH\"] = os.getcwd()\n # Enable PTVSD in werkzeug watched processes only\n # if \"WERKZEUG_RUN_MAIN\" in os.environ:\n # ptvsd.enable_attach()\n # Run the Flask app\n APP.run(host=\"0.0.0.0\", port=8000)",
"def run_server(app: Flask, ip: str, port: int) -> Flask:\n app.run(ip, port)\n\n return app",
"def keep_alive():\r\n app = Flask(\"\")\r\n @app.route(\"/\")\r\n def home():\r\n return \"Your bot is now alive!\"\r\n\r\n def run():\r\n app.run(host=\"0.0.0.0\", port=8080)\r\n\r\n\r\n server = Thread(target=run)\r\n server.start()",
"def run_forever(self):\n self.app.run()",
"def start_flask_app(port: int) -> None:\n logging.info('Starting flask app')\n start_app(port=port)",
"def main():\r\n run_wsgi_app(app)",
"def run(self):\n self.app.run()",
"def run(self):\n self.app.run()",
"def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)",
"def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())",
"def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)",
"def flask_server(real_model):\n def run_app(port):\n if not real_model:\n app.config['FAKE_MODEL'] = True\n app.run(port=port, use_reloader=False)\n\n server_process = Process(target=run_app, args=(PORT, ))\n server_process.start()\n\n # Give 2 secs for the Flask server to start up\n time.sleep(2)\n\n yield f'http://localhost:{PORT}'\n\n server_process.terminate()",
"def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])",
"def run_apiserver(threadname='APIServer'):\n print(\"Starting \",threadname)\n \n if platform == \"linux\" or platform == \"linux2\":\n proc = subprocess.Popen([\"/home/vivek/Development/iot-learning/raspberrypi/runFlask.sh\",str(flash_port)])\n elif platform ==\"win32\":\n proc = subprocess.Popen([\"D:/Vivek/Google Drive/Projects/iot/iot-learning/raspberrypi/runFlask.cmd\",str(flash_port)]) \n\n exit_code=proc.wait()\n print(exit_code)",
"def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()",
"def start():\n app.run()"
]
| [
"0.7175091",
"0.71026933",
"0.708844",
"0.708844",
"0.708844",
"0.708844",
"0.708844",
"0.708844",
"0.708844",
"0.7080741",
"0.7080741",
"0.70706445",
"0.69488204",
"0.6924091",
"0.68434423",
"0.67426664",
"0.66715854",
"0.6625805",
"0.6581537",
"0.65777326",
"0.65772414",
"0.65772414",
"0.6526711",
"0.65262264",
"0.6523832",
"0.65229553",
"0.6496736",
"0.6488485",
"0.64609647",
"0.645075"
]
| 0.757574 | 0 |
Get the move for the given engine and color. Check validity of the move. | def get_move(board, engine, color, move_num, time, **kwargs):
legal_moves = board.get_legal_moves(color)
if not legal_moves:
return None
elif len(legal_moves) == 1:
return legal_moves[0]
else:
try:
move = engine.get_move(copy.deepcopy(board), color, move_num, time[color], time[-color])
        except Exception as e:
            print(traceback.format_exc())
raise SystemError(color)
if move not in legal_moves:
            print("legal list", [move_string(m) for m in legal_moves])
            print("illegal", move_string(move), "=", move)
raise LookupError(color)
return move | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_move(board, engine, color, move_num, time, **kwargs):\n legal_moves = board.get_legal_moves(color)\n\n if not legal_moves:\n return None\n elif len(legal_moves) == 1:\n return legal_moves[0]\n else:\n try:\n move = engine.get_move(copy.deepcopy(board), color, move_num, time[color], time[-color])\n except Exception as e:\n print(traceback.format_exc())\n raise SystemError(color)\n\n if move not in legal_moves:\n print(\"legal list\", [move_string(m) for m in legal_moves])\n print(\"illegal\", move_string(move), \"=\", move)\n raise LookupError(color)\n\n return move",
"def get_move(self, board, color_to_play):\n move = self.MCTS.get_move(board, color_to_play, self.n_simualtions_per_move, self.exploration)\n self.update(move)\n return move",
"def board_from_move(self, move, color):\n new_board = KhetBoard(self.color_pieces[TeamColor.silver] + self.color_pieces[TeamColor.red])\n new_board.apply_move(move)\n new_board.apply_laser(color)\n return new_board",
"def genmove(self, color, game) -> Move:\n # print(color)\n # print(game.play_history)\n # print(self.mc.states)\n if not len(game.play_history) == (len(self.mc.states) - 1):\n # Last play not yet in our states:\n last_player, last_move = game.play_history[-1]\n # pprint(game.play_history)\n # print(last_player, last_move)\n missing_state = self.mc.board.next_state(\n self.mc.states[-1], last_move)\n self.mc.update(missing_state)\n\n # print('Current board in our mc:')\n # _b = self.mc.states[-1][0]\n # _b = self.mc.board.from_tuple(_b)\n # print(_b)\n\n move = self.mc.get_play()\n\n # Update our saved states\n resulting_state = self.mc.board.next_state(\n self.mc.states[-1], move)\n self.mc.update(resulting_state)\n\n return move",
"def get_move(self, board):\n color = 1\n interval = [-math.inf, math.inf]\n if board.count(color) + board.count(-1 * color) < 6:\n self.step_count = 0\n self.step_count += 2 \n if self.step_count < 45:\n _, move = self._max(board, color, 0, *interval)\n else:\n _, move = self._max(board, color, -2, *interval)\n return move",
"def make_move(the_board, color):\n legal_moves = the_board.legal_moves(color)\n return random.choice(legal_moves) if len(legal_moves) > 0 else (-1, -1)",
"def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")",
"def getMove(self, board):\n pass",
"def get_available_moves(self, team_color):\n squares = self.squares_with_pieces_of_color(team_color)\n moves = []\n for square in squares:\n moves.extend(square.get_moves(self))\n return moves",
"def getOpponentMove(move, playerBoard, oppBoard, playerSeeds, oppSeeds):\r\n pass",
"def search(\n self, color, board, valid_actions, \n output_move_row, output_move_column):\n # If you want to \"simulate a move\", you can call the following function:\n # transition(board, self.player, valid_actions[0])\n\n # To prevent your agent to fail silently we should an\n # explicit trackback printout.\n try:\n # while True:\n # pass\n time.sleep(3)\n randidx = random.randint(0, len(valid_actions) - 1)\n random_action = valid_actions[randidx]\n output_move_row.value = random_action[0]\n output_move_column.value = random_action[1]\n except Exception as e:\n print(type(e).__name__, ':', e)\n print('search() Traceback (most recent call last): ')\n traceback.print_tb(e.__traceback__)",
"def get_move(board, player):\n row, col = 0, 0\n return row, col",
"def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col",
"def select_move_minimax(board, color):\n move, score = minimax_max_node(board, color, 0, 3)\n return move",
"def make_move(the_board, color):\n root = Node(the_board, None, color)\n value = max_value(root, -math.inf, math.inf, 5, time.time())\n child = root.get_child_with_value(value)\n if child is None:\n return (-1,-1)\n else:\n return child.get_move()",
"def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves",
"def getBestMove(self, playerColor):\n moves = self.getValidMoves(playerColor)\n movesAndFlips = {}\n columns = \"ABCDEFGH\"\n\n for move in moves:\n # initialize dictionary, python dictionaries don't value initialize like c++ maps\n movesAndFlips[move] = 0\n originalCol = columns.index(move[0])\n originalRow = int(move[1]) - 1\n placementDirections = self.flankingDirections(originalCol, originalRow, playerColor)\n for direction in placementDirections:\n col = originalCol\n row = originalRow\n while True:\n try:\n adjacent = direction(self, col, row)\n if adjacent.color != \"empty\" and adjacent.color != playerColor:\n movesAndFlips[move] += 1\n col = adjacent.col\n row = adjacent.row\n else:\n break\n except offBoardError:\n continue\n \n\n maxFlips = 0\n bestMove = None\n for possibleMove in moves:\n if movesAndFlips[possibleMove] > maxFlips:\n bestMove = possibleMove\n maxFlips = movesAndFlips[possibleMove]\n\n return bestMove",
"def execute_move(self, move, color):\n\n (x, y) = move\n\n # Add the piece to the empty square.\n assert self[x][y] == 0\n self[x][y] = color",
"async def move(self, board, valid_actions):\n self._move = None\n output_move_row = Value('d', -1)\n output_move_column = Value('d', 0)\n try:\n # await self.search(board, valid_actions) \n p = Process(\n target=self.search, \n args=(\n self._color, board, valid_actions, \n output_move_row, output_move_column))\n p.start()\n while p.is_alive():\n await asyncio.sleep(0.1)\n self._move = np.array([output_move_row.value,output_move_column.value],dtype=np.int32)\n except asyncio.CancelledError as e:\n print('The previous player is interrupted by a user or a timer.')\n except Exception as e:\n print(type(e).__name__)\n print('move() Traceback (most recent call last): ')\n traceback.print_tb(e.__traceback__)\n finally:\n p.kill()\n self._move = np.array(\n [output_move_row.value, output_move_column.value],\n dtype=np.int32)\n return self.best_move",
"def select_move_minimax(board, color):\n best_utility = -math.inf\n new_color = 1 if color == 2 else 2\n possible_moves = get_possible_moves(board, color)\n best_move = 0,0\n if len(possible_moves) > 0:\n best_move = possible_moves[0]\n sorted_states_list = []\n for move in possible_moves:\n new_board = play_move(board, color, move[0], move[1])\n sort_utility = compute_utility(new_board, color)\n heappush(sorted_states_list, (sort_utility, new_board, move))\n\n sorted_states = [x[1] for x in sorted_states_list]\n moves = [x[2] for x in sorted_states_list]\n index = 0\n for board_state in sorted_states:\n if board_state in caching_states:\n\n utility = caching_states[board_state]\n else:\n utility = minimax_min_node(board_state, new_color)\n caching_states[board_state] = utility\n\n if utility > best_utility:\n best_move = moves[index]\n best_utility = utility\n index += 1\n\n return best_move",
"def _get_move_result(self, unlocked_before_move : bool, err = None):\n if err:\n return Moveresult.INVALID\n elif self.current_turn.entity in self.game_state.get_completed_characters():\n return Moveresult.EXIT\n elif self.game_state.is_character_expelled(self.current_turn.entity):\n return Moveresult.EJECT\n elif self.game_state.is_current_level_unlocked() and not unlocked_before_move:\n return Moveresult.KEY\n else:\n return Moveresult.OK",
"def getMove(self, board):\r\n moves = self._getAvailableActions(board)\r\n return moves[-1]",
"def search(\n self, color, board, valid_actions, \n output_move_row, output_move_column):\n raise NotImplementedError('You will have to implement this.')",
"def select_next_move(self, stats, board, color, C):\n\n bestscore = None\n bestmove = None\n\n # my_id = MCTSRandomPlayer.to_board_id(board)\n\n children = []\n for action in SP.available_actions(board):\n # clone and play mode - can be play and rollback mode\n next_board = board[:]\n SP.play(next_board, action, color)\n children.append((action, stats[MCTSRandomPlayer.to_board_id(next_board)]))\n\n total_n = sum(x[0] for (_, x) in children)\n\n for child_move, child_stat in children:\n n, w = child_stat\n if n == 0: # 한번도 안가봤으면 가보자!\n return child_move, False\n else: # 승률이 높고 (exploitation), 가장 적게 가본 곳이 좋은 곳 (exploration)\n score = (w / n) + C * math.sqrt(2 * math.log(total_n) / n)\n # if my_id == 70645:\n # print(\"CHECK IN \", my_id, child_move, w, n, score, bestscore, next_id)\n # if next_id == 119797:\n # print(\"JUMP IN \", my_id, child_move, w, n, score, bestscore, next_id)\n if bestscore is None or score > bestscore:\n bestscore = score\n bestmove = child_move\n\n # if my_id == 70645:\n # print(\"SELECTED\", bestmove, bestscore)\n\n assert bestmove is not None\n return bestmove, True",
"def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))",
"def get_move(self, game):\n return",
"def nextMoveGNU(self, move=\"go\", board=None):\n # get move\n if self.pawning:\n while not rospy.is_shutdown():\n rows = [2,3,4,5]\n piece = ChessPiece.WHITE_PAWN\n if board.side == board.BLACK:\n rows = [7,6,5,4]\n piece = ChessPiece.BLACK_PAWN\n for row in rows:\n for col in ['a','b','c','d','e','f','g','h']:\n p1 = board.getPiece(col,row)\n if p1 != None and abs(p1.type) == piece:\n p2 = board.getPiece(col,row+1)\n if p2 == None:\n # this is a candidate\n m = col + str(row) + col + str(row+1)\n self.history.append(m)\n return m\n else:\n self.engine.sendline(move)\n if self.engine.expect(['My move is','Illegal move']) == 1:\n return None\n self.engine.expect('([a-h][1-8][a-h][1-8][RrNnBbQq(\\r\\n)])')\n m = self.engine.after.rstrip()\n self.history.append(m)\n return m",
"def getMove(self, board):\r\n raise NotImplementedError(\"must be implemented in subclass\")",
"def get_legal_moves(self, color):\n moves = set() # stores the legal moves.\n color = max(0, color)\n\n # Get all the squares with pieces of the given color.\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n newmoves = self.get_moves_for_square((x,y))\n moves.update(newmoves)\n return list(moves)",
"def best_move(self, state, curr_player):\n\t\t# determine opponent's color\n\t\tif curr_player == self.colors[0]:\n\t\t\topp_player = self.colors[1]\n\t\telse:\n\t\t\topp_player = self.colors[0]\n\n\t\treturn self.value(state, curr_player)"
]
| [
"0.7830646",
"0.73871523",
"0.65856457",
"0.64223164",
"0.6086268",
"0.602455",
"0.59717345",
"0.59675634",
"0.59641623",
"0.59008396",
"0.5896429",
"0.58737993",
"0.58548576",
"0.5846879",
"0.5806216",
"0.5797129",
"0.57966024",
"0.5773774",
"0.5763558",
"0.57583654",
"0.57547826",
"0.5682608",
"0.5681818",
"0.56815803",
"0.5673717",
"0.5624416",
"0.5589994",
"0.5579938",
"0.5571538",
"0.55406344"
]
| 0.7831866 | 0 |
Function to get a list of rsids and chromosome numbers | def get_rsids(input_file: str) -> list:
column_names: list = ["rsid", "chr"]
# catching the error if the file does not have the column names
try:
rsid_file: pd.DataFrame = pd.read_csv(input_file, usecols=column_names)
except KeyError:
print("The expected header was not found within the file")
print(
"Please ensure that the file has at least two columns: 'rsid' and 'chr'"
)
sys.exit(1)
rsid_list: list = rsid_file.rsid.values.tolist()
chr_list: list = rsid_file["chr"].values.tolist()
total_rsid_list: list = list(zip(rsid_list, chr_list))
return total_rsid_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getIDs():",
"def get_RSOPuidsByRoi(rts):\n \n RSOPuidsByRoi = []\n \n roiCntSeqs = rts.ROIContourSequence\n \n for r in range(len(roiCntSeqs)):\n cntSeqs = roiCntSeqs[r].ContourSequence\n \n # Initialise the list of ReferencedSOPInstanceUIDs for this ROI:\n RSOPuids = []\n \n for seq in cntSeqs:\n uid = seq.ContourImageSequence[0].ReferencedSOPInstanceUID\n \n RSOPuids.append(uid)\n \n RSOPuidsByRoi.append(RSOPuids)\n \n return RSOPuidsByRoi",
"def getLigandResIds(ligchemid:str, struct: Structure)->List[Residue]:\n \"\"\"*ligchemids are of type https://www.rcsb.org/ligand/IDS\"\"\"\n ligandResidues: List[Residue] = list(filter(lambda x: x.get_resname() == ligchemid, list( struct.get_residues() )))\n return ligandResidues",
"def _getSequentialRoms(self):\n return self._roms",
"def get_RSOPuids_in_RIS(seg):\n \n rsopuids = [] \n \n sequences = seg.ReferencedSeriesSequence[0]\\\n .ReferencedInstanceSequence\n \n for sequence in sequences:\n uid = sequence.ReferencedSOPInstanceUID\n \n rsopuids.append(uid)\n \n return rsopuids",
"def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)",
"def get_chromosomes(self) -> list:\n return [chromosome for chromosome in self._population]",
"def _chrom_names(fasta_file):\n from pysam import FastaFile\n with FastaFile(fasta_file) as fa:\n chroms = list(fa.references)\n return chroms",
"def get_ids(self) -> List[str]:",
"def geneIds(self):\n\t\treturn self._dataframe.index.tolist()",
"def get_chromosome_names(bam):\n\n # ref_tid = {str(name): int(bam.get_tid(name)) + 1 for name in bam.get_reference_name}\n\n ref_tid = {} # 'I' | 0, 'II' | 1, ...\n for i in range(bam.nreferences): #if bam.nreferences does not work, use range(17) #16 chromosomes and the mitochondrial chromosome\n ref_name = bam.get_reference_name(i)\n ref_tid[ref_name] = bam.get_tid(ref_name)\n\n return ref_tid",
"def prep_reference(self):\n\n # if basin\n if self.config.metric == 'basin':\n df = pd.read_csv(self.config.gcam_basin_names_file, usecols=['basin_id'])\n m = sorted(df['basin_id'].tolist())\n\n # if AEZ, use 1 through 18 - this will not change\n elif self.config.metric == 'aez':\n m = list(range(1, 19, 1))\n\n # read in region ids\n rdf = pd.read_csv(self.config.gcam_region_names_file, usecols=['gcam_region_id'])\n r = sorted(rdf['gcam_region_id'].tolist())\n\n return m, r",
"def _iter_rIds(self):\n sldMasterId_lst = self._sldMasterIdLst.sldMasterId_lst\n for sldMasterId in sldMasterId_lst:\n yield sldMasterId.rId",
"def getLigandNbrs(resids: List[Residue], struct:Structure)->List[ResidueDict]:\n\n ns = NeighborSearch(list( struct.get_atoms() ))\n nbrs = []\n\n for r in resids:\n # a ligand consists of residues\n resatoms = r.child_list[0]\n # each residue has an atom plucked at random\n for nbrresidues in ns.search(resatoms.get_coord(), 5,level='R'):\n # we grab all residues in radius around that atom and extend the list of neighbors with those\n nbrs.extend([nbrresidues])\n\n # Filter out the residues that constitute the ligand itself\n filtered = [] \n for neighbor in nbrs:\n present = 0\n for constit in resids:\n if ResidueDict(constit)==ResidueDict( neighbor ):\n present = 1\n if present == 0:\n filtered.append(ResidueDict(neighbor))\n\n return [ * map(lambda x: addBanClass(x) , set(filtered) ) ]",
"def get_hotel_chromosomes(hotel_ids):\n if not hotel_ids:\n return None\n hotel_id_in = ','.join([str(h) for h in hotel_ids])\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, chromosome_id, normalized_score\n FROM hotel_chromosome\n WHERE hotel_id in (%s)\n ORDER BY hotel_id, chromosome_id\n \"\"\" % hotel_id_in\n )\n results = cursor.fetchall()\n hotel_chromosomes = {}\n for key, group in groupby(results, lambda x: x[0]):\n chromosomes = [0.0]*CHROMOSOME_LENGTH\n for g in group:\n chromosomes[g[1]] = g[2]\n hotel_chromosomes[key] = chromosomes\n cursor.close()\n return hotel_chromosomes",
"def determine_matched_residue_positions(match_pdb_path):\n positions_block = os.path.basename(os.path.normpath(match_pdb_path)).split('_')[2]\n resnames = [a for a in re.split(\"[0-9]*\", positions_block) if a]\n resnums = [int(a) for a in re.split(\"[a-zA-Z]*\", positions_block) if a]\n\n return [(a, b) for a, b in zip(resnames, resnums)]",
"def print_chromosome_list(chromosomes):\n for chromosome in chromosomes:\n print(chromosome)",
"def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]",
"def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))",
"def __repr__(self):\n return repr(self.chromosome_list)",
"def chromosomes(self):\n chromosomes_set = set()\n chromosomes = []\n for region in self.regions:\n if region.chromosome not in chromosomes_set:\n chromosomes_set.add(region.chromosome)\n chromosomes.append(region.chromosome)\n return chromosomes",
"def list_of_gene_IDs_and_symbols_from_file(input_seq_file):\n print(\"Find entrezgene IDs for reporter genes in RNAseq run\")\n lines = []\n with open(input_seq_file, 'U') as input_seq_file:\n next(input_seq_file)\n for line in input_seq_file:\n new_line = line.strip().split('\\t')[0]\n new_line = re.sub('\"', '', new_line) # inputs contained quotes\n lines.append(new_line)\n\n query_list = pd.Series(data=lines).tolist()\n\n print(\"length of query list: \", len(query_list))\n return query_list",
"def hostRefSeq(chr,start,end,strand):\n cursor=gbdbConnect()\n selSQL=\"SELECT * from refGene WHERE chrom='%s' AND txStart<='%d' AND txEnd>='%d'\" % (chr,int(start),int(end))\n cursor.execute(selSQL)\n rows=cursor.fetchall()\n results=[]\n if cursor.rowcount==0:\n return False\n else:\n for row in rows:\n results.append(row)\n return results",
"def createChromosomes(self) -> ChromList:\n raise NotImplementedError",
"def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))",
"def getKromosom(self):\n intx = int(\"\".join(self.kromosom[:4]),2)\n inty = int(\"\".join(self.kromosom[4:]),2)\n return [intx,inty]",
"def get_contig_rpkms(identifier, path, minscore):\n \n print('Parsing file:', path)\n \n bamfile = pysam.AlignmentFile(path, \"rb\")\n \n # We can only get secondary alignment reference names, not indices. So we must\n # make an idof dict to look up the indices.\n idof = {contig: i for i, contig in enumerate(bamfile.references)}\n contiglengths = bamfile.lengths\n halfreads = [0] * len(contiglengths)\n \n nhalfreads = 0\n for segment in filter_segments(bamfile, minscore):\n nhalfreads += 1\n \n # Read w. unmapped mates count twice as they represent a whole read\n value = 2 if segment.mate_is_unmapped else 1\n \n for reference in get_all_references(segment):\n id = idof[reference]\n halfreads[id] += value\n \n bamfile.close()\n \n print('Done parsing file:', path)\n \n rpkms = list()\n \n # Compensate for having paired reads\n millionmappedreads = nhalfreads / 2e6\n \n for contiglength, nhalfreads in zip(contiglengths, halfreads):\n kilobases = contiglength / 1000\n rpkms.append(nhalfreads / (kilobases * millionmappedreads))\n \n return identifier, rpkms",
"def intervals_and_sources(self, chromosomes):\n num_intervals = sum([len(ints) for ints in chromosomes.itervalues()])\n intervals = np.empty(num_intervals, dtype=np.uint32)\n sources = np.empty(num_intervals, dtype=np.uint8)\n interval_num = 0\n for chromosome, interval_list in sorted(chromosomes.iteritems(), key=lambda x: x[0]):\n for species, end in interval_list:\n intervals[interval_num] = self.genome_index(chromosome, end)\n sources[interval_num] = species\n interval_num += 1\n return intervals, sources",
"def mel_ncRNA_list(list):\n\tncRNA = [] #initiates list\n\tfor i in list:\n\t\tif i[2] == 'ncRNA':\n\t\t\tpreidRNA = i[8].split(';')[0]\n\t\t\t#[ID=FBgn0031208];Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'\n\t\t\tncRNA.append(preidRNA)\n\treturn ncRNA\n\t#['ID=FBtr0309810', 'ID=FBtr0347585', 'ID=FBtr0345732', 'ID=FBtr0345733', 'ID=FBtr0344052', 'ID=FBtr0344053', 'ID=FBtr0344032', 'ID=FBtr0336836', 'ID=FBtr0336837', 'ID=FBtr0336984', 'ID=FBtr0336985', 'ID=FBtr0336986', 'ID=FBtr0336987', 'ID=FBtr0336988', 'ID=FBtr0347594', 'ID=FBtr0347595']",
"def get_chroms(chromfile):\n chroms = {}\n with open(chromfile) as c:\n for line in c:\n try:\n chrom, length = line.strip().split()\n chroms[chrom] = length\n except ValueError:\n chroms[line.strip()] = 1\n return chroms"
]
| [
"0.63581735",
"0.61864585",
"0.6182336",
"0.5998185",
"0.59294564",
"0.59235895",
"0.5898394",
"0.57552207",
"0.575061",
"0.57460576",
"0.5702741",
"0.56992364",
"0.5642003",
"0.56264406",
"0.5624463",
"0.56141466",
"0.5597766",
"0.5588504",
"0.55786145",
"0.5577533",
"0.55609524",
"0.5559711",
"0.5505999",
"0.5500436",
"0.54930586",
"0.5471176",
"0.54617316",
"0.5457883",
"0.5442941",
"0.5428179"
]
| 0.64786863 | 0 |
Permute labels of l2 to match l1 as much as possible | def best_map(l1, l2):
if len(l1) != len(l2):
print("L1.shape must == L2.shape")
exit(0)
label1 = np.unique(l1)
n_class1 = len(label1)
label2 = np.unique(l2)
n_class2 = len(label2)
n_class = max(n_class1, n_class2)
G = np.zeros((n_class, n_class))
for i in range(0, n_class1):
for j in range(0, n_class2):
ss = l1 == label1[i]
tt = l2 == label2[j]
G[i, j] = np.count_nonzero(ss & tt)
A = la.linear_assignment(-G)
new_l2 = np.zeros(l2.shape)
for i in range(0, n_class2):
new_l2[l2 == label2[A[i][1]]] = label1[A[i][0]]
return new_l2.astype(int) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def best_map(l1, l2):\n if len(l1) != len(l2):\n print(\"L1.shape must == L2.shape\")\n exit(0)\n\n label1 = np.unique(l1)\n n_class1 = len(label1)\n\n label2 = np.unique(l2)\n n_class2 = len(label2)\n\n n_class = max(n_class1, n_class2)\n G = np.zeros((n_class, n_class))\n\n for i in range(0, n_class1):\n for j in range(0, n_class2):\n ss = l1 == label1[i]\n tt = l2 == label2[j]\n G[i, j] = np.count_nonzero(ss & tt)\n\n A = la(-G)\n\n new_l2 = np.zeros(l2.shape)\n for i in range(0, n_class2):\n new_l2[l2 == label2[A[i][1]]] = label1[A[i][0]]\n return new_l2.astype(int)",
"def propagateLabel(self, l1, l2):\n\n if l1 != l2:\n winner = min(l1, l2)\n loser = max(l1, l2)\n loserN = 0\n superiorN = 0\n for i,l in enumerate(self.labels):\n if l == loser:\n loserN += 1\n self.labels[i] = winner\n if l > loser:\n superiorN += 1\n self.labels[i] = l - 1\n\n # print('Loser Label is ' + str(loser) + ' . With ' + str(loserN) + ' associated cells. Winner label is ' + str(winner))",
"def update_label(label1, label2, idx):\n for i in range(0, len(idx)):\n label1[i] = label2[idx[i]]\n return label1",
"def map_and_extend_leaves_unlabelled(label1, leaves1, nodes1, links1, label2, leaves2, nodes2, links2, mode, ED):\n nl1 = len(leaves1)\n nl2 = len(leaves2)\n dist_M = np.zeros((nl1, nl2))\n for i in range(0, nl1):\n for j in range(0, nl2):\n dist_M[i,j] = np.linalg.norm(nodes1[leaves1[i]][[0,2]]-nodes2[leaves2[j]][[0,2]])\n # Mapping the leaves with minimum weight matching of euclidean distance matrices\n row_ind, col_ind = linear_sum_assignment(dist_M)\n label1 = update_label(label1, label2, col_ind)\n # Since we already have some matched labeled leaves, we can turn to partial agreement case.\n nlabel1, nleaves1, nodes1, links1, nnlabel = map_and_extend_leaves(label1, leaves1, nodes1, links1, label2, leaves2, nodes2, links2, mode, ED)\n\n return nlabel1, nleaves1, nodes1, links1",
"def mapping_leaves(leaves1, leaves2, label1, label2, nodes1, links1, nodes2, links2, mode, ED, nnlabel):\n \n # Varibles for labeled leaves\n nleaves1 = []\n nleaves2 = []\n nlabel = []\n \n # Variables for unlabeled leaves\n UKleaves1 = []\n UKleaves2 = []\n UKlabel1 = []\n UKlabel2 = []\n \n nleaves = np.zeros(len(leaves1))\n label = np.zeros(len(leaves1))\n UK1idx = []\n UK2idx = []\n for i in range(0, len(leaves1)):\n if label1[i] in label2 and label1[i] < MAX_NODES:\n nleaves1.append(leaves1[i])\n nlabel.append(label1[i])\n idx = label2.index(label1[i])\n nleaves[idx] = leaves1[i]\n label[idx] = nnlabel[i]\n else:\n UKleaves1.append(leaves1[i])\n UKlabel1.append(label1[i])\n UK1idx.append(i)\n if label2[i] in label1 and label2[i] < MAX_NODES:\n nleaves2.append(leaves2[i])\n else:\n UKleaves2.append(leaves2[i])\n UKlabel2.append(label2[i])\n UK2idx.append(i)\n if len(UK1idx)>0:\n # Calculated the distance matrix from unmatched leaves to matched leaves\n dist1 = get_tree_dist_between_leaves(UKleaves1, nleaves1, nodes1, links1, mode, ED)\n dist2 = get_tree_dist_between_leaves(UKleaves2, nleaves2, nodes2, links2, mode, ED)\n # Calculate resorting rule with minimum weight matching of distance matrices\n dict1 = map_nodes_leaves(dist2, dist1)\n for i in range(0, len(dict1)):\n # Update labels using resorting rule.\n nleaves[UK2idx[i]] = leaves1[UK1idx[dict1[i]]]\n label[UK2idx[i]] = nnlabel[UK1idx[dict1[i]]]\n return nleaves, label",
"def map_and_extend_leaves(label1, leaves1, nodes1, links1, label2, leaves2, nodes2, links2, mode, ED):\n nl1 = len(leaves1)\n nl2 = len(leaves2)\n tmplabel = []\n nleaves1 = []\n nleaves2 = []\n nlabel1 = []\n nlabel2 = []\n nnodes2 = deepcopy(nodes2)\n nlinks2 = deepcopy(links2)\n # Mapping leaves whose labels can find in pivot tree.\n for i in range(0, nl2):\n if label2[i] in label1:\n tmplabel.append(label2[i])\n \n for i in range(0, len(tmplabel)):\n nleaves1.append(leaves1[list(label1).index(tmplabel[i])])\n nleaves2.append(leaves2[list(label2).index(tmplabel[i])])\n lcnt = len(tmplabel)\n nleaves1 = move_the_labelled_to_the_front(nleaves1, leaves1)\n nleaves2 = move_the_labelled_to_the_front(nleaves2, leaves2)\n nlabel1 = move_the_labelled_to_the_front(tmplabel, label1)\n nlabel2 = move_the_labelled_to_the_front(tmplabel, label2)\n\n # Recording the index of mapped leaves.\n idx1 = compute_sorted_index(nleaves1, len(nodes1), nodes1)\n idx2 = compute_sorted_index(nleaves2, len(nnodes2), nnodes2)\n\n # Resort the nodes and links according to mapped leaves.\n nodes1, links1 = rearange_nodes_links_old(idx1, nodes1, links1)\n nnodes2, nlinks2 = rearange_nodes_links_old(idx2, nnodes2, nlinks2)\n \n nleaves1 = list(range(0, nl1))\n nleaves2 = list(range(0, nl2))\n\n # Calculated the distance matrix from unmatched leaves to matched leaves\n dist_1 = get_leaves_dist(lcnt, len(leaves1), nodes1, links1, mode, ED)\n dist_2 = get_leaves_dist(lcnt, len(leaves2), nnodes2, nlinks2, mode, ED)\n nnlabel = nlabel1[:]\n\n # Add dummy leaves and links to input tree to make it has the same number of leaves with the pivot tree.\n # For unmatched leaves, we update their labeling with minimum weight matching of distance matrices. For detail, please refer to the paper.\n nodes1, links1, nleaves1, nlabel1 = add_leaves_and_links(nodes1, links1, nleaves1, nlabel1, nlabel2, dist_2, dist_1, lcnt)\n for i in range(len(nnlabel), len(nlabel1)):\n nnlabel.append(MAX_NODES)\n return nlabel1, nleaves1, nodes1, links1, nnlabel",
"def consolidate_labels(labels):\n return map(RNN_model.consolidate_label , labels)",
"def batch_features_labels2(features, labels_1, labels_2, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels_1[start:end], labels_2[start:end]",
"def combine_labels(labels):\n whole_tumor = labels[:, :3, :, :, :].sum(1) # could have 2 or 3\n tumor_core = labels[:, 1:3, :, :, :].sum(1)\n enhanced_tumor = labels[:, 2:3, :, :, :].sum(1)\n whole_tumor[whole_tumor != 0] = 1\n tumor_core[tumor_core != 0] = 1\n enhanced_tumor[enhanced_tumor != 0] = 1\n return whole_tumor, tumor_core, enhanced_tumor # (bs, ?, ?, ?)",
"def combinations( l1, l2 ):\n for i in range(len(l1)):\n yield zip( l1,l2)\n l1.insert(0,l1.pop())",
"def remap_context_labels(self):\n c_contexts = list(self.context[self.iter])\n unique_contexts = uniqify(c_contexts)\n remap_dict = dict(zip(unique_contexts,\n range(1, len(unique_contexts) + 1)))\n\n remapped = copy.deepcopy(self.context[self.iter])\n for old, new in remap_dict.iteritems():\n self.context[self.iter][remapped==old] = new",
"def _merge_datasets(dataset1, dataset2):\n\n # Number of labels in dataset 1\n _NUM_LABELS_D1 = len(np.unique(dataset1['labels']))\n\n # Number of labels in dataset 2\n _NUM_LABELS_D2 = len(np.unique(dataset2['labels']))\n\n # Call the optimization function to train on the first dataset and predict on the second dataset\n ds2_labels_using_ds1 = _optimization(dataset1, dataset2, nb_epochs=NUM_EPOCHS)\n\n # Initialize the label counting matrix\n label_counter = np.zeros(shape=(_NUM_LABELS_D2, _NUM_LABELS_D1))\n\n # Fill the label counting matrix accordingly\n for i in range(len(ds2_labels_using_ds1)):\n label_counter[int(dataset2['labels'][i]), int(ds2_labels_using_ds1[i])] += 1\n\n Matrix1 = np.matrix.copy(label_counter)\n\n # Initialize the new set of labels for dataset 2\n ds2_new_labels = np.zeros(shape=(len(ds2_labels_using_ds1), 2))\n\n # Determine the new labels for dataset 2\n for i in range(len(ds2_labels_using_ds1)):\n if dataset2['labels'][i] == np.argmax(label_counter[:, int(ds2_labels_using_ds1[i])]):\n ds2_new_labels[i, :] = np.array([ds2_labels_using_ds1[i], dataset2['labels'][i]])\n else:\n ds2_new_labels[i, :] = np.array([ds2_labels_using_ds1[i], -1])\n\n # Call the optimization function to train on the second dataset and predict on the first dataset\n ds1_labels_using_ds2 = _optimization(dataset2, dataset1, nb_epochs=NUM_EPOCHS)\n\n # Initialize the label counting matrix\n label_counter = np.zeros(shape=(_NUM_LABELS_D1, _NUM_LABELS_D2))\n\n # Fill the label counting matrix accordingly\n for i in range(len(ds1_labels_using_ds2)):\n label_counter[int(dataset1['labels'][i]), int(ds1_labels_using_ds2[i])] += 1\n\n Matrix2 = np.matrix.copy(label_counter.T)\n\n # Initialize the new set of labels for dataset 1\n ds1_new_labels = np.zeros(shape=(len(ds1_labels_using_ds2), 2))\n\n # Determine the new labels for dataset 1\n for i in range(len(ds1_labels_using_ds2)):\n if ds1_labels_using_ds2[i] == np.argmax(label_counter[int(dataset1['labels'][i]), :]):\n ds1_new_labels[i, :] = np.array([dataset1['labels'][i], ds1_labels_using_ds2[i]])\n else:\n ds1_new_labels[i, :] = np.array([dataset1['labels'][i], -1])\n\n # Concatenate all labels from both datasets\n all_labels = np.concatenate((ds1_new_labels, ds2_new_labels), axis=0)\n\n # Transform the tuple labels to scalar labels\n already_explored_rows = []\n\n label = 0\n\n vector_label = np.zeros(shape=(all_labels.shape[0], 1))\n\n for i in range(all_labels.shape[0]):\n if np.where((all_labels == all_labels[i, :]).all(axis=1))[0][0] not in already_explored_rows:\n rows = np.where((all_labels == all_labels[i, :]).all(axis=1))[0]\n vector_label[rows] = label\n label += 1\n for j in range(len(rows)):\n already_explored_rows.append(rows[j])\n\n vector_label = np.squeeze(vector_label)\n\n # One hot encoded version of the labels\n hot_labels = _one_hot_encode(vector_label, len(set(vector_label)))\n\n # Initialize the concatenated dataset\n new_dataset = {'labels': vector_label, 'hot_labels': hot_labels, 'actual_lengths': {}}\n\n # Fill the corresponding keys for the concatenated dataset\n for key in dataset1.keys():\n if (key != 'labels') and (key != 'hot_labels') and (key != 'actual_lengths'):\n new_dataset[key] = np.concatenate((dataset1[key], dataset2[key]), axis=0)\n if key == 'actual_lengths':\n for key2 in dataset1[key]:\n new_dataset[key][key2] = np.concatenate((dataset1[key][key2], dataset2[key][key2]), axis=0)\n\n # Return the merged dataset as a dictionary\n return new_dataset, Matrix1, Matrix2",
"def lcombine( v1, v2, k1, k2 ):\n return [ x*k1 + y*k2 for (x,y) in izip(v1,v2) ]",
"def mutual_info_fast(l1, l2, l1_entropy, l2_entropy):\n return l1_entropy + l2_entropy - entropy(joint_dataset(l1, l2))",
"def correspondences(labels1,labels2,return_counts=True):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo, return_counts=return_counts)\n if return_counts:\n result, counts = result\n result = array([result//q,result%q,counts])\n else:\n result = array([result//q,result%q])\n return result",
"def make_same_length(l1, l2):\n ln = max(len(l1), len(l2))\n l1.extend([None] * (ln - len(l1)))\n l2.extend([None] * (ln - len(l2)))",
"def joint_dataset(l1, l2):\n N = np.max(l1) + 1\n return l2 * N + l1",
"def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)",
"def apply_remap_values(labels: np.ndarray, label_map: Dict[int, int]) -> np.ndarray:\n for l1, l2 in label_map.items():\n labels[labels == l1] = l2",
"def transform_multilabel_as_multihot(label_list,label_size):\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result",
"def transform_multilabel_as_multihot(label_list,label_size):\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result",
"def normalize_labels(labels):\n number_of_labels = len(labels)\n number_of_species = get_number_of_species()\n labels_norm = np.zeros(shape=(number_of_labels, number_of_species))\n for i in range(number_of_labels):\n for label in labels[i]:\n labels_norm[i][label] = 1\n return labels_norm",
"def associate_clusters(self, labels_1, labels_2):\n if not torch.is_tensor(labels_1):\n labels_1 = torch.cat(labels_1).cuda()\n\n if not torch.is_tensor(labels_2):\n labels_2 = torch.cat(labels_2).cuda()\n\n assert labels_1.shape == labels_2.shape, \"Shape mismatch: {}, {}\".format(labels_1.shape, labels_2.shape)\n\n # do not associate the outlier ID with anything\n unique_labels_1 = list(set(labels_1.unique().tolist()) - {self.OUTLIER_LABEL})\n unique_labels_2 = list(set(labels_2.unique().tolist()) - {self.OUTLIER_LABEL})\n\n assert not set(unique_labels_1).intersection(set(unique_labels_2)), \\\n \"Labels overlap: {}, {}\".format(unique_labels_1, unique_labels_2)\n\n association_costs = np.zeros((len(unique_labels_1), len(unique_labels_2)), np.float32)\n recall_12 = np.zeros((len(unique_labels_1), len(unique_labels_2)), np.float32)\n\n # iterate over pairs of labels\n for i1, i2 in [(i1, i2) for i1 in range(len(unique_labels_1)) for i2 in range(len(unique_labels_2))]:\n l1, l2 = unique_labels_1[i1], unique_labels_2[i2]\n l1_active_pts = labels_1 == l1\n l2_active_pts = labels_2 == l2\n\n intersection = (l1_active_pts & l2_active_pts).float().sum()\n union = (l1_active_pts | l2_active_pts).float().sum()\n iou = intersection / union\n\n # print(\"IoU ({}, {}) = {}\".format(l1, l2, iou.item()))\n association_costs[i1, i2] = 1. - iou.item()\n recall_12[i1, i2] = intersection / l1_active_pts.sum(dtype=torch.float32)\n\n idxes_1, idxes_2 = linear_sum_assignment(association_costs)\n\n associations = []\n unassigned_labels_1 = set(unique_labels_1)\n unassigned_labels_2 = set(unique_labels_2)\n\n for i1, i2 in zip(idxes_1, idxes_2):\n l1, l2 = unique_labels_1[i1], unique_labels_2[i2]\n associations.append((l1, l2))\n unassigned_labels_1.remove(l1)\n unassigned_labels_2.remove(l2)\n\n return associations, unassigned_labels_1, unassigned_labels_2, association_costs[idxes_1, idxes_2], \\\n (recall_12, unique_labels_1, unique_labels_2)",
"def kl(self, other, xs, reversesd=False, **kwargs):\n raise NotImplementedError",
"def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()",
"def nn_set2set_match(descs1, descs2):\n idxs = nn_set2set_match_cuda(descs1.unsqueeze(0).cuda(), descs2.unsqueeze(0).cuda()).detach().cpu().long()\n return idxs[0]",
"def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]",
"def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]",
"def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))",
"def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, label in numbered_classes}\n new_labels = [new_mapping[numbered[0]] for numbered in numbered_classes]\n\n return new_labels, new_mapping"
]
| [
"0.7047975",
"0.59920067",
"0.5903628",
"0.5840224",
"0.56918395",
"0.5676302",
"0.5490699",
"0.54621464",
"0.54600734",
"0.5444879",
"0.542543",
"0.53949237",
"0.5379088",
"0.53709126",
"0.53610796",
"0.528245",
"0.5277736",
"0.5275124",
"0.52157223",
"0.5203912",
"0.5203912",
"0.51591295",
"0.51562357",
"0.51433986",
"0.51427406",
"0.5132887",
"0.5127449",
"0.5127081",
"0.5117068",
"0.51100725"
]
| 0.71196675 | 0 |
Returns the dataframe with admission type compressed so that emergency and urgent are both marked as emergency. | def compressing_admission_type(data):
data.admission_type = data.admission_type.apply(lambda x: 'EMERGENCY' if x
== 'URGENT' else x)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compressing_admit_location(data):\n\n data.admission_location = data.admission_location.apply(lambda x: 'ER_ADMIT'\n if (x == 'EMERGENCY ROOM ADMIT ')\n else x)\n\n data.admission_location = data.admission_location.apply(lambda x: 'REFERRAL'\n if (x == 'HMO REFERRAL/SICK') |\n (x == 'PHYS REFERRAL/NORMAL DELI') |\n (x == 'CLINIC REFERRAL/PREMATURE')\n else x)\n\n data.admission_location = data.admission_location.apply(lambda x: 'TRANSFER'\n if (x == 'TRANSFER FROM HOSP/EXTRAM') |\n (x == 'TRANSFER FROM SKILLED NUR') |\n (x == 'TRANSFER FROM OTHER HEALT') |\n (x == 'TRSF WITHIN THIS FACILITY')\n else x)\n\n data.admission_location = data.admission_location.apply(lambda x: 'OTHER/UNKNOWN'\n if (x == '** INFO NOT AVAILABLE **')\n else x)\n\n return data",
"def _prepare(self):\n\n if os.path.isfile(\"DATA/diabetes/admission_type_id.csv\") == False:\n download_data()\n\n id_mapping = pd.read_csv(\"DATA/diabetes/admission_type_id.csv\", index_col = 0)\n data = pd.read_csv(\"DATA/diabetes/diabetic_data.csv\")\n\n # binarize admission type\n admdf = pd.DataFrame()\n for adtype, ad_id in zip(id_mapping.description, id_mapping.index):\n admdf[adtype] = (data.admission_type_id == ad_id)\n\n # binarize categorical text columns\n catdf = pd.DataFrame()\n dtype = data.race.dtype # grab datatype\n features = [\"race\", \"gender\", \"age\", \"diabetesMed\", \"insulin\", \"change\", \"readmitted\"]\n for feature in features:\n if data[feature].dtype == dtype:\n catdf = pd.concat([catdf, binarize(data[feature])], axis = 1)\n else:\n catdf = pd.concat([catdf, data[feature]], axis = 1)\n\n # choose non-binary columns\n nonbindf = data[[\"num_medications\", \"num_procedures\", \"num_lab_procedures\", \"number_outpatient\", \n \"number_emergency\", \"number_inpatient\", \"number_diagnoses\"]]\n\n self.data = pd.concat([catdf, admdf, nonbindf], axis = 1)",
"def one_hot_encode_alarms(df):\n\n print('computing one-hot-encoded alarms')\n # take all rows with alarms (others are assumed to be None)\n df_alarms = df[df['category'].notnull()]\n\n # get a list of the unique alarm names\n alarm_names = list(df_alarms['category'].unique())\n\n # Create MultiLabelBinarizer object - include None (i.e. no alarm) as well\n one_hot = MultiLabelBinarizer(classes=[None] + alarm_names)\n\n # group the category labels that share the same pk_id and pk_timestamp (i.e. a device experienced multiple alarms simultaneously)\n labels = df_alarms.groupby(['pk_id', 'pk_timestamp'])['category'].apply(list)\n\n # One-hot encode the alarms\n labels_ohe = one_hot.fit_transform(labels)\n\n # drop the category column (no longer needed) and remove resulting duplicates\n df_alarms.drop(columns=['category'], inplace=True)\n df_alarms.drop_duplicates(inplace=True)\n\n # add \"alarm \" to the alarm columns\n alarm_colnames = ['alarm ' + str(alarm) for alarm in one_hot.classes_]\n\n labels_ohe_df = pd.DataFrame(labels_ohe, columns=alarm_colnames, index=df_alarms.index)\n\n # drop the categories column\n print('preparing dataframe for merging with one-hot-encoded alarms')\n df.drop(columns=['category'], inplace=True)\n df.drop_duplicates(inplace=True)\n\n # add the labels columns for the rest of the No Alarm device\n for colname in alarm_colnames:\n if 'None' in colname:\n df[colname] = 1\n else:\n df[colname] = 0\n print('adding one-hot-encoded alarms to dataframe')\n df.update(labels_ohe_df)\n\n return df",
"def compress(self):\n aggregation_functions = {}\n for attribute in self.__df.columns:\n aggregation_functions[attribute] = self.__aggregate\n grouped_df = self.__df.groupby(by=[self.__config.get_key_attribute()], as_index=False)\n self.__df = grouped_df.agg(aggregation_functions)\n self.__df = self.__df.astype(self.__config.get_data_types())",
"def dummies_types(self, column='type', drop_first=True):\n df_dummies = self.df[column].str.get_dummies('')\n if drop_first == True:\n df_dummies = df_dummies.drop(['E','S','F','P'], axis=1)\n self.df = self.df.join(df_dummies)\n return self.df",
"def fill_missing_admission_type(df):\n for admit_type in df[\"admission_type\"].unique():\n type_facilities = df[df[\"admission_type\"] == admit_type][\"facility\"].unique()\n\n df[\"admission_type\"] = np.where(\n (df[\"admission_type\"].isnull() & df[\"facility\"].isin(type_facilities)),\n admit_type,\n df[\"admission_type\"],\n )\n\n return df",
"def compress_icd9_codes(data):\n\n data.icd9_code = data.icd9_code.apply(lambda x: '.1' if 'V' in x else x)\n data.icd9_code = data.icd9_code.apply(lambda x: '.8' if 'M' in x else x)\n data.icd9_code = data.icd9_code.apply(lambda x: '.5' if 'E' in x else x)\n data.icd9_code = data.icd9_code.apply(lambda x: x[:3] if ('E' not in x) &\n ('M' not in x) &\n ('V' not in x)\n else x)\n\n data.icd9_code = data.icd9_code.astype(float)\n\n\n data['diagnoses'] = data.apply(icd9_descriptions, axis=1)\n data = data.drop(columns=['icd9_code'])\n\n return data",
"def select_features(dfp):\n df = dfp[['5', '7', '12']].copy() \n df.columns=['type', 'duration','location']\n for col in df.columns:\n strings = df[col].unique()\n if col != \"location\":\n mapper = dict( zip(strings, range(len(strings))) )\n df[col].replace(mapper, inplace=True)\n else:\n df[col] = df[col].str.contains(\"LONDON\").astype(int)\n return df",
"def compressing_marital_status(data):\n\n data.marital_status = data.marital_status.apply(lambda x: 'LIFE_PARTNER'\n if (x == 'MARRIED') |\n (x == 'LIFE PARTNER')\n else x)\n\n data.marital_status = data.marital_status.apply(lambda x: 'SINGLE'\n if (x == 'WIDOWED') |\n (x == 'DIVORCED') |\n (x == 'SEPARATED')\n else x)\n\n data.marital_status = data.marital_status.apply(lambda x: 'OTHER/UNKNOWN'\n if (x == 'UNKNOWN (DEFAULT)')\n else x)\n\n return data",
"def one_hot_encoder(df, cols):\r\n\r\n for col in cols:\r\n if(\"admission\" in col):\r\n dummies = pd.get_dummies(df[col], drop_first=False)\r\n else:\r\n dummies = pd.get_dummies(df[col], prefix=col, drop_first=False)\r\n df = pd.concat([df, dummies], axis=1) \r\n df.drop([col],axis=1, inplace=True)\r\n return df",
"def map_admissions(df): \r\n\r\n df['tmp'] = np.nan\r\n col = 'admission_source_id'\r\n df.loc[((df[col].between(4,6)) | (df[col] == 10) | (df[col] == 18) | (df[col] == 22) | (df[col].between(25,26))), 'tmp'] = \"Transfer_Source\"\r\n df.loc[df[col].between(1,3), 'tmp'] = \"Referral_Source\"\r\n df.loc[((df[col].between(11,14))| (df[col].between(23,24))), 'tmp'] = \"Birth_Source\"\r\n df.loc[df[col] == 7, 'tmp'] = \"Emergency_Source\"\r\n df.loc[((df[col] == 8) | (df[col]==19)), 'tmp'] = \"Other\"\r\n \r\n df['tmp'].fillna(value = \"Unknown\", inplace=True)\r\n df[col] = df['tmp']\r\n df.drop(columns=['tmp'], inplace=True)\r\n\r\n\r\n ##mapping admission type_id\r\n df['tmp'] = np.nan\r\n col = 'admission_type_id'\r\n df.loc[df[col] == 1, 'tmp'] = 'Emergency_Type'\r\n df.loc[df[col] == 2, 'tmp'] = 'Urgent_Type'\r\n df.loc[df[col] == 3, 'tmp'] = 'Elective_Type'\r\n df.loc[df[col] == 7, 'tmp'] = 'Trauma_Type'\r\n df.loc[df[col] == 4, 'tmp'] = 'Newborn_Type'\r\n\r\n df['tmp'].fillna(value = \"Unknown\", inplace=True)\r\n df[col] = df['tmp']\r\n df.drop(columns=['tmp'], inplace=True)\r\n\r\n return df",
"def _convert_to_dummies_pandas(df: pd.DataFrame) -> pd.DataFrame:\n df = df.copy()\n print(\"Raw dataset shape = {}\".format(df.shape))\n print(\"Creating one hot encoded categories...\")\n for col in df.columns:\n if col in CATEGORICAL_TRANS and col != \"isFraud\" and not col.startswith(\"card\"):\n print(\"Handling category: {}\".format(col))\n # Convert to categorical type, may not be necessary.\n df[col] = pd.Categorical(df[col])\n one_hot_encoded = pd.get_dummies(df[col], prefix=col)\n df = df.drop(columns=[col]) # remove the original categorical column.\n # Add the one-hot-encoded column.\n df = pd.concat([df, one_hot_encoded], axis=1)\n print(\"One-hot-encoded dataset shape = {}\".format(df.shape))\n return df",
"def compressing_careunit(data):\n\n data.first_careunit = data.first_careunit.apply(lambda x: 'ICU' if\n (x == 'MICU') |\n (x == 'SICU') |\n (x == 'CCU') |\n (x == 'CSRU') |\n (x == 'TSICU')\n else x)\n return data",
"def clean_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n ts = pudl.analysis.timeseries_cleaning.Timeseries(df)\n ts.flag_ruggles()\n return ts.to_dataframe(copy=False)",
"def split_inpatient(df):\n skilled_mask = (\n df[\"admit_reason\"]\n .str.lower()\n .str.contains(\"skilled|rehab|pt|ot|skil|restorative\")\n ) & (\n (df[\"admission_type\"] == \"Nursing Home\")\n | (df[\"admission_type\"] == \"Rehab Unit / Facility\")\n )\n\n respite_mask = (\n df[\"admit_reason\"].str.lower().str.contains(\"respite|resp|behavior\")\n ) & (\n (df[\"admission_type\"] == \"Nursing Home\")\n | (df[\"admission_type\"] == \"Rehab Unit / Facility\")\n )\n\n custodial_mask = (\n df[\"admit_reason\"]\n .str.lower()\n .str.contains(\n \"custodial|cust|long term|eol|end of life|hosp|permanent|functional decline|cutodial|ltc|hospic\"\n )\n ) & (\n (df[\"admission_type\"] == \"Nursing Home\")\n | (df[\"admission_type\"] == \"End of Life\")\n | (df[\"admission_type\"] == \"Rehab Unit / Facility\")\n )\n\n df[\"admit_reason\"] = np.where(skilled_mask, \"skilled\", df[\"admit_reason\"])\n df[\"admit_reason\"] = np.where(respite_mask, \"respite\", df[\"admit_reason\"])\n df[\"admit_reason\"] = np.where(custodial_mask, \"custodial\", df[\"admit_reason\"])\n\n # break up by admit type\n acute_mask = df[\"admission_type\"] == \"Acute Hospital\"\n\n psych_mask = df[\"admission_type\"] == \"Psych Unit / Facility\"\n\n nf_mask = df[\"admission_type\"].isin(\n [\"Nursing Home\", \"Rehab Unit / Facility\", \"End of Life\"]\n )\n\n df[\n (-df[\"admit_reason\"].isin([\"skilled\", \"respite\", \"custodial\"])) & (nf_mask)\n ].to_csv(f\"{output_folder}\\\\nf_missing_reason.csv\", index=False)\n\n acute = df[acute_mask].copy()\n psych = df[psych_mask].copy()\n nf = df[nf_mask].copy()\n\n assert df.shape[0] == (acute.shape[0] + psych.shape[0] + nf.shape[0])\n return acute, psych, nf",
"def make_and_append_negative_data(self):\n negative_df = self.get_negative_data()\n self.df = pd.concat((self.df, negative_df))",
"def dummify_all_categorical(df):\n\n df = pd.get_dummies(df)\n df = dummify(df, \"detailed industry recode\")\n df = dummify(df, \"detailed occupation recode\") ## add some variables that are encoded as int64 but that are in fact categorical\n return df",
"def simplifyEducation(dfIn, dfOut):\n\n edu = pd.get_dummies(dfIn.NAME_EDUCATION_TYPE, prefix = 'EDU')\n dfOut = pd.concat([dfOut, edu], axis = 1)\n return dfOut",
"def one_hot(df):\r\n # One-hot encode into \r\n cols = ['job', 'marital', 'education', 'month', 'day_of_week', 'poutcome']\r\n for each in cols:\r\n dummies = pd.get_dummies(df[each], prefix=each, drop_first=False)\r\n df = pd.concat([df, dummies], axis=1)\r\n df = df.drop(cols,axis=1)\r\n return df",
"def _long_to_wide(data, data_type, possible_data_types=[\"cases\", \"deaths\", \"recovered\"], sort_by=None):\n # If there are multiple data type columns, only keep the one specified\n cols_to_drop = [col for col in possible_data_types if col != data_type and col in data.columns]\n data = data.drop(columns=cols_to_drop)\n\n # Spread the table, a la tidyr\n id_cols = [col for col in data.columns if col != data_type]\n data = data.set_index(id_cols) # Putting these in the index keeps them from being spread\n data = data.unstack(level=0, fill_value=0)\n data.columns = data.columns.droplevel(0)\n data.columns = data.columns.map(lambda x: x.date() if isinstance(x, pd.Timestamp) else x) # We don't want the whole timestamp\n data.columns.name = None\n if sort_by is not None:\n data = data.sort_index(level=sort_by)\n data = data.reset_index() # Take the saved columns out of the index\n\n return data",
"def encode_augmentation_type(data):\n lookup = {'none': 0, 'inplace': 1, 'full': 2}\n return [lookup[datum['augmentation_type']] for datum in data]",
"def onehot_features(data):\n\n# Binary Features\n columns = ['Weekend', 'Revenue']\n for col in columns:\n data[col] = data[col].apply(lambda x: float(1) if x else float(0))\n\n columns = ['Month', 'OperatingSystems', 'Browser', 'Region', 'TrafficType',\n 'VisitorType']\n for col in columns:\n enc = OneHotEncoder()\n data_array = enc.fit_transform(data[[col]]).toarray()\n enc_data = pd.DataFrame(data_array)\n enc_data.columns = list(enc.get_feature_names([col]))\n data = data.join(enc_data)\n\n data = data.drop(columns={'Month', 'Month_May', 'OperatingSystems',\n 'OperatingSystems_2', 'Browser', 'Browser_2',\n 'Region', 'Region_1.0', 'TrafficType',\n 'TrafficType_2', 'VisitorType',\n 'VisitorType_Returning_Visitor'})\n return data",
"def create_ext_df(row, dtype, dummy_y=False, order=False):\n\n temp_df = pd.DataFrame(\n {\n 'Time': clean_ext_entry(row['packet_times'], dtype),\n 'pkt_size': clean_ext_entry(row['packet_sizes'], dtype),\n 'pkt_src': clean_ext_entry(row['packet_dirs'], str)\n }\n )\n\n if dummy_y:\n temp_df['dummy_y'] = np.zeros(len(temp_df))\n\n if order:\n temp_df['order'] = np.arange(len(temp_df))\n\n\n return temp_df",
"def one_hot_encoding(self):\n \n try: \n\n # Encode dependent variable\n le = LabelEncoder()\n le.fit(self.data[\"consumption\"])\n df_dependent_enc = pd.DataFrame(le.transform(self.data[\"consumption\"]))\n\n # Encode independent variable\n categorical_features = Config.FEATURE_DEFINITION[\"category_cols\"]\n categorical_df = self.data.loc[:, self.data.columns.isin(categorical_features)]\n oe = OrdinalEncoder()\n oe.fit(categorical_df)\n df_catindependent_enc = pd.DataFrame(oe.transform(categorical_df))\n df_catindependent_enc.columns = categorical_df.columns\n\n except KeyError: \n\n st.write(\"Cannot perform one-hot encoding for numerical variables. Please check if variables are properly defined.\")\n st.write(self.data.columns != \"consumption\")\n df_dependent_enc = []\n df_catindependent_enc = []\n\n else:\n \n return df_dependent_enc, df_catindependent_enc",
"def get_trauma_patients(con) -> pd.DataFrame:\n combined_diagnoses = get_reason_for_admission(con)\n trauma = combined_diagnoses[\n (\n (combined_diagnoses['surgical'] == 1)\n & (combined_diagnoses['diagnosis'].str.contains(re_trauma_surg, na=False, flags=re.IGNORECASE))\n ) | (\n (combined_diagnoses['surgical'] == 0)\n & (combined_diagnoses['diagnosis'].str.contains(re_trauma_med, na=False, flags=re.IGNORECASE))\n )\n ]\n\n return trauma",
"def add_category(df):\n df[\"category\"] = df.apply(lambda row: transform_cat(row), axis=1)\n df = drop_cols(df, [\"booking_bool\", \"click_bool\"])\n return df",
"def add_cols_to_cleaned_df(df):\n\n core_cols = ['time','lat','lon','depth','year','month','week','dayofyear','float_id','cycle']\n template_cols = core_cols + bgc_data_columns\n template_df = pd.DataFrame(columns=template_cols)\n df = template_df.append(df)[template_cols]\n return df",
"def consolidate_mel(mel,delivery=False):\n c_MEL={}\n WP=00\n \n mel['Part No.']=mel['WP Activity/ Part No.']\n mel['Part No.']=mel['Part No.'].astype(str)\n\n #mel['Quantity']=mel['Quantity'].str.replace('m','',regex=False) \n\n mel['Quantity']=mel['Quantity'].fillna(value=0).astype(str) \n mel['Quantity']=mel['Quantity'].str.replace('meters','',regex=True) \n mel['Quantity']=mel['Quantity'].str.replace('m','',regex=False) \n\n\n mel['Quantity']=mel['Quantity'].astype('float')\n if delivery:\n for i, row in mel.iterrows():\n c_MEL[(str(row['Part No.'])+row['Delivery'])]={'Quantity':mel['Quantity'][(mel['Part No.'].astype(str)==str(row['Part No.'])) & (mel['Delivery']==row['Delivery'])].sum(),\n 'Part No.':row['Part No.'],\n 'Delivery':row['Delivery'],\n 'Equipment Description':row['Equipment Description'],\n 'WP':row['WP']}\n else:\n for i, row in mel.iterrows():\n c_MEL[(str(row['Part No.']))]={'Quantity':mel['Quantity'][mel['Part No.'].astype(str)==str(row['Part No.'])].sum(),\n 'Part No.':row['Part No.'],\n 'Equipment Description':row['Equipment Description']}\n \n c_MEL=pd.DataFrame(c_MEL).T \n return c_MEL",
"def merge_energy_datatypes(osm_path): \n #extract line data\n df_line = powerline_limited(osm_path) #extract required data\n if 'asset' in df_line.columns:\n df_line['asset'] = list(map(lambda x: x.lower(), df_line['asset'])) #make sure that asset column is in lowercase characters\n #reclassify assets \n mapping_dict = {\n \"cable\" : \"cable\", #underground\n \"minor_cable\" : \"cable\", \n #\"generator\" : \"generator\", #device used to convert power from one form to another\n \"line\" : \"line\", #overground\n \"minor_line\" : \"minor_line\", #overground\n #\"plant\" : \"plant\", #place where power is generated\n #\"substation\" : \"substation\"\n }\n df_line['asset'] = df_line.asset.apply(lambda x : mapping_dict[x]) #reclassification \n\n if 'voltage' in df_line.columns:\n df_line = df_line.drop(['voltage'], axis=1) \n \n #extract polygon data\n df_poly = power_polygon(osm_path) #extract required data\n df_poly['geometry'] =pygeos.buffer(df_poly.geometry,0) #avoid intersection\n \n #extract point data\n df_point = power_point(osm_path) #extract required data\n \n return pandas.concat([df_line, df_poly, df_point], ignore_index=True)",
"def modify_bidmc_table(df):\n df[\"UID\"] = df[\"UID\"].map(str)\n df = df.rename(columns={\"DischargeDateIndex\": \"date\"})\n df = df.drop(columns=[\"AdmitDateIndex\"])\n df[\"diag_cd\"] = df[\"diag_cd\"].map(lambda x: x.strip().upper())\n return df"
]
| [
"0.5149244",
"0.50546837",
"0.49958882",
"0.4830463",
"0.4723932",
"0.47156888",
"0.4679146",
"0.4667776",
"0.46540812",
"0.4605586",
"0.45829055",
"0.45716822",
"0.45474523",
"0.45360693",
"0.45358104",
"0.44724402",
"0.44665602",
"0.44596526",
"0.44342184",
"0.44335726",
"0.44201946",
"0.43978956",
"0.4393747",
"0.43881544",
"0.4385954",
"0.43845853",
"0.43801147",
"0.43678695",
"0.43420187",
"0.43276364"
]
| 0.65143573 | 0 |
Returns a dataframe with ages compressed into categorical groups. | def age_to_cat(data):
data['age'] = data.apply(assign_cats, axis=1)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _age_bins(df):\n df['age_9'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (4, 9)]].sum(axis=1))\n df['age_19'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (14, 17, 19)]].sum(axis=1))\n df['age_29'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (20, 21, 24, 29)]].sum(axis=1))\n df['age_39'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (34, 39)]].sum(axis=1))\n df['age_49'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (44, 49)]].sum(axis=1))\n df['age_59'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (54, 59)]].sum(axis=1))\n df['age_69'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (61, 64, 66, 69)]].sum(axis=1))\n df['age_79'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (74, 79)]].sum(axis=1))\n df['age_80_over'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (84, 99)]].sum(axis=1))\n\n df = df.drop([f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (4, 9, 14, 17, 19, 20, 21, 24, 29, 34, 39, 44, 49,\n 54, 61, 64, 66, 69, 74, 79, 84, 99)\n ], axis=1)\n\n return df",
"def _transform_age_feature(df):\n df = df.apply(_build_age_range, axis='columns')\n dummies_age = pd.get_dummies(df['Age'], prefix='Age')\n print(\"For dataset with shape {}, the dummies for 'Age' are: {}\".format(df.shape, dummies_age.columns))\n df = pd.concat([df, dummies_age], axis=1)\n\n # Ensure that all dummies are created and that 'Training' and 'Test' datasets will have same number of columns. In\n # our case, 'Age_8' will not be created for 'Test' dataset. We could create it by hand but it is more robust to test\n # all cases\n # For 'Age', range has been splitted in 8\n for i in range(8):\n if 'Age_{}'.format(i) not in df:\n df['Age_{}'.format(i)] = 0\n\n return df",
"def create_age_buckets(df):\r\n print(df['age'].min())\r\n print(df['age'].max())\r\n old_values = df['age'].values.tolist()\r\n new_age_list = []\r\n new_age = 0\r\n for age in old_values:\r\n if age in range(18, 30):\r\n new_age = 1\r\n elif age in range(30, 40):\r\n new_age = 2\r\n elif age in range(40, 50):\r\n new_age = 3\r\n elif age in range(50, 65):\r\n new_age = 4\r\n elif age in range(65, 70):\r\n new_age = 5\r\n else:\r\n new_age = 6\r\n new_age_list.append(new_age)\r\n age_df = DataFrame(new_age_list, columns=['Age_Buckets'])\r\n df = concat([age_df, df], axis=1)\r\n return df",
"def age_binning(df):\n\n # bins that age is sorted into\n age_bins = np.append(np.array([0,1,4]), np.arange(10, 116, 5)) # 0, 1, 4 do not follow the 5 year bin pattern.\n\n # labels for age columns are the lower and upper ages of bin\n age_start_list = np.append(np.array([0,1]), np.arange(5, 111, 5))\n age_end_list = np.append(np.array([1]), np.arange(4, 116, 5))\n\n # Create 2 new age columns\n df['age_start'] = pd.cut(df['age'], age_bins, labels = age_start_list, right=False)\n df['age_end'] = pd.cut(df['age'], age_bins, labels = age_end_list, right = False)\n\n # Drop age variable\n df.drop('age', 1, inplace=True)\n\n # return dataframe with age_start,age_end features\n return (df)",
"def categorical(df):\n\n # variables which need to be transformed to categorical\n categorical = [\"prop_country_id\", \"visitor_location_country_id\"]\n\n for var in categorical:\n df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)\n del df[var]\n\n return df",
"def age_indicators(self):\n # Load table #\n age_indicators = self.parent.database[\"tblAgeIndicators\"]\n classifr_coefs = self.parent.classifiers_coefs\n # Join\n df = (age_indicators\n .left_join(classifr_coefs, on='user_defd_class_set_id')\n )\n # Place classifiers first\n df = df.set_index(self.parent.classifiers_names).reset_index()\n return df",
"def aggregate_absolute_cases_by_age(df):\n df.drop([\"Meldedatum\", \"Landkreis\", \"IdBundesland\", \"Bundesland\", \"ObjectId\"], axis=1, inplace=True)\n df = df.groupby(['IdLandkreis', 'Altersgruppe']).sum()\n df.reset_index(inplace=True)\n return df",
"def transform_and_create_new_features(df):\n # 'GENDER' FEATURE MANAGEMENT\n # Transform 'Gender' feature (categorical) to numerical one\n df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # 'EMBARKED' FEATURE MANAGEMENT\n # 1st approach: df['Port'] = df['Embarked'].map({'C': 1, 'S': 2, 'Q': 3}).astype(int)\n # Extract from 'pycon UK Tutorial':\n # \"Replacing {C, S, Q} by {1, 2, 3} would seem to imply the ordering C < S < Q when in fact they are simply arranged\n # alphabetically. To avoid this problem, we create dummy variables. Essentially this involves creating new columns\n # to represent whether the passenger embarked at C with the value 1 if true, 0 otherwise.\"\n dummies_embarked = pd.get_dummies(df['Embarked'], prefix='Embarked')\n df = pd.concat([df, dummies_embarked], axis=1)\n\n # 'AGE' & 'FARE' FEATURES MANAGEMENT\n df = _transform_age_feature(df)\n df = _transform_fare_feature(df)\n\n # CREATION OF A NEW FEATURE: Family size + Alone or not ?\n df['Family'] = df['SibSp'] + df['Parch']\n df['Alone'] = 0\n df.loc[df['Family'] == 0, 'Alone'] = 1\n\n # Drop all columns that are now useless\n df = df.drop(['Sex', 'Age', 'Fare', 'Embarked', 'SibSp', 'Parch'], axis=1)\n print(df.head(10))\n\n return df",
"def pull_u5_age_groups_formatted():\n age_start_map = {\n 'Early Neonatal': 0,\n 'Late Neonatal': 7/365,\n 'Post Neonatal': 28/365,\n '1 to 4': 1\n }\n\n age_end_map = {\n 'Early Neonatal': 7/365,\n 'Late Neonatal': 28/365,\n 'Post Neonatal': 365/365,\n '1 to 4': 5\n }\n\n # pull age \n age_groups = get_ids(\"age_group\")\n age_groups = age_groups[age_groups.age_group_id.isin([2, 3, 4, 5])]\n age_groups['age_start'] = age_groups.age_group_name.map(age_start_map)\n age_groups['age_end'] = age_groups.age_group_name.map(age_end_map)\n\n return age_groups",
"def train_cats(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()",
"def train_cats(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()",
"def get_aggregated_interactions(self) -> DataFrame:\n\n agg_results = (\n self._graph.edges.select(\"src\", \"weight\", \"book\")\n .groupBy(\"src\", \"book\")\n .agg(sum(\"weight\").alias(\"weight_sum\"))\n )\n pivoted_result = (\n agg_results.groupBy(\"src\")\n .pivot(\"book\", BOOKS)\n .agg(sum(\"weight_sum\").alias(\"book\"))\n .na.fill(0)\n )\n total_sum_result = pivoted_result.withColumn(\n \"total\", reduce(add, [coalesce(col(name), lit(0)) for name in BOOKS])\n )\n renamed_df = reduce(\n lambda df, col_name: df.withColumnRenamed(col_name, \"book_\" + col_name),\n BOOKS,\n total_sum_result,\n )\n renamed_df = renamed_df.withColumnRenamed(\"src\", \"character\")\n result_df = renamed_df.orderBy(col(\"total\").desc())\n\n return result_df",
"def cat_converter(df):\n \n categoricals = df.columns[df.dtypes == object] \n \n for column in categoricals: \n df[column] = pd.Categorical(df[column])\n new_column = column + '_new'\n df[new_column] = df[column].cat.codes\n df = df.drop([column], axis = 1)\n \n return df",
"def __age_categorize(self, age):\r\n # Baby age category - most at risk, highest categorical denomination\r\n if age <= 4:\r\n self.__age = 4\r\n # Youth age category - second most at risk\r\n elif 5 <= age <= 14:\r\n self.__age = 3\r\n # Adult age category - least at risk\r\n elif 15 <= age <= 64:\r\n self.__age = 1\r\n # Elderly age category - second least at risk\r\n else:\r\n self.__age = 2",
"def compress(self):\n aggregation_functions = {}\n for attribute in self.__df.columns:\n aggregation_functions[attribute] = self.__aggregate\n grouped_df = self.__df.groupby(by=[self.__config.get_key_attribute()], as_index=False)\n self.__df = grouped_df.agg(aggregation_functions)\n self.__df = self.__df.astype(self.__config.get_data_types())",
"def ca_to_coils_second_df(agent_df):",
"def ex_eight_animals_data_table():\n data_dict = {'Calf': [4, 5, 6, 7, 8],\n 'Sire': [1, 3, 1, 4, 3],\n 'Dam': ['Unknown', 2, 2, 5, 6],\n 'Sex': ['Male', 'Female', 'Female', 'Male', 'Male'],\n 'WWG': [4.5, 2.9, 3.9, 3.5, 5.0]}\n\n df = pd.DataFrame(data_dict)\n\n return(df)",
"def label_encode(df):\n\n X = df.copy()\n for colname in X.select_dtypes([\"category\"]):\n X[colname] = X[colname].cat.codes\n return X",
"def stack_table(A: pd.DataFrame) -> pd.DataFrame:\r\n A = pd.DataFrame(A.stack(dropna=False))\r\n A.columns = ['factor']\r\n return A",
"def pre_process_data(df):\n # setting `passengerID` as Index since it wont be necessary for the analysis\n df = df.set_index(\"PassengerId\")\n\n # convert 'Sex' values\n df['gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # We see that 2 passengers embarked data is missing, we fill those in as the most common Embarked value\n df.loc[df.Embarked.isnull(), 'Embarked'] = df['Embarked'].mode()[0]\n\n # Replace missing age values with median ages by gender\n for gender in df['gender'].unique():\n median_age = df[(df['gender'] == gender)].Age.median()\n df.loc[(df['Age'].isnull()) & (df['gender'] == gender), 'Age'] = median_age\n\n # convert 'gender' values to new columns\n df = pd.get_dummies(df, columns=['gender'])\n\n # convert 'Embarked' values to new columns\n df = pd.get_dummies(df, columns=['Embarked'])\n\n # bin Fare into five intervals with equal amount of values\n df['Fare-bin'] = pd.qcut(df['Fare'], 5, labels=[1, 2, 3, 4, 5]).astype(int)\n\n # bin Age into seven intervals with equal amount of values\n # ('baby','child','teenager','young','mid-age','over-50','senior')\n bins = [0, 4, 12, 18, 30, 50, 65, 100]\n age_index = (1, 2, 3, 4, 5, 6, 7)\n df['Age-bin'] = pd.cut(df['Age'], bins, labels=age_index).astype(int)\n\n # create a new column 'family' as a sum of 'SibSp' and 'Parch'\n df['family'] = df['SibSp'] + df['Parch'] + 1\n df['family'] = df['family'].map(lambda x: 4 if x > 4 else x)\n\n # create a new column 'FTicket' as the first character of the 'Ticket'\n df['FTicket'] = df['Ticket'].map(lambda x: x[0])\n # combine smaller categories into one\n df['FTicket'] = df['FTicket'].replace(['W', 'F', 'L', '5', '6', '7', '8', '9'], '4')\n # convert 'FTicket' values to new columns\n df = pd.get_dummies(df, columns=['FTicket'])\n\n # get titles from the name\n df['title'] = df.apply(lambda row: re.split('[,.]+', row['Name'])[1], axis=1)\n\n # convert titles to values\n df['title'] = df['title'].map({' Capt': 'Other', ' Master': 'Master', ' Mr': 'Mr', ' Don': 'Other',\n ' Dona': 'Other', ' Lady': 'Other', ' Col': 'Other', ' Miss': 'Miss',\n ' the Countess': 'Other', ' Dr': 'Other', ' Jonkheer': 'Other', ' Mlle': 'Other',\n ' Sir': 'Other', ' Rev': 'Other', ' Ms': 'Other', ' Mme': 'Other', ' Major': 'Other',\n ' Mrs': 'Mrs'})\n # convert 'title' values to new columns\n df = pd.get_dummies(df, columns=['title'])\n\n df = df.drop(['Name', 'Ticket', 'Cabin', 'Sex', 'Fare', 'Age'], axis=1)\n\n return df",
"def add_category(df):\n df[\"category\"] = df.apply(lambda row: transform_cat(row), axis=1)\n df = drop_cols(df, [\"booking_bool\", \"click_bool\"])\n return df",
"def ex_pedigree_eight_animals():\n data_dict = {'Calf': [4, 5, 6, 7, 8],\n 'Sire': [1, 3, 1, 4, 3],\n 'Dam': ['Unknown', 2, 2, 5, 6]}\n\n A = pd.DataFrame(data_dict)\n return(A)",
"def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):",
"def transform(self, df):\n _df = df.copy()\n categorical = self.find_categorical(df)\n for cat in categorical:\n _df[cat] = pd.Categorical(_df[cat], categories=_test_categories[cat])\n\n return pd.get_dummies(_df[categorical]).as_matrix()",
"def zred_to_agebins(zred=0.0, agebins=[], **extras):\n tuniv = cosmo.age(zred).value * 1e9\n tbinmax = tuniv * 0.85\n ncomp = len(agebins)\n agelims = list(agebins[0]) + np.linspace(agebins[1][1], np.log10(tbinmax), ncomp-2).tolist() + [np.log10(tuniv)]\n return np.array([agelims[:-1], agelims[1:]]).T",
"def plots_data_age_sex(X):\n data_np =X\n bins = [54,59,64,69,74,79,84,92]\n data = pd.DataFrame(data=data_np, columns=['age', 'sex', 'agesex'])\n\n data['group_age']=np.digitize(data.age, bins, right=True)\n counts=data.groupby(['group_age', 'sex']).age.count().unstack()\n print(counts)\n\n counts.plot(kind='bar', stacked=True)\n plt.show()",
"def prepare_titanic_data(df):\n\n df.embark_town.fillna('Other', inplace=True)\n\n # Drop deck and embarked_town\n df.drop(columns=['deck', 'embark_town'], inplace=True)\n\n # Encoding: Objects (Categorical Variables) to Numeric\n # Use sklearn's LabelEncoder\n encoder = LabelEncoder()\n\n # Set Unknown and encode Embarked column to numbers\n # 2 == \"S\" == Southampton == 644 people\n # 0 == \"C\" == Cherbourg == 168 people\n # 1 == \"Q\" == Queenstown == 77 people\n # 3 == \"Unknown\" == 2 people\n df.embarked.fillna('Unknown', inplace=True)\n encoder.fit(df.embarked)\n df.embarked = encoder.transform(df.embarked)\n\n # Encode the Class (first class, second, etc...)\n # First class == 0\n # Second class == 1\n # Third class == 2\n encoder.fit(df[\"class\"])\n df[\"class_encoded\"] = encoder.transform(df[\"class\"])\n\n # Encode gender\n # male == 1 == 577 records\n # female == 0 == 314 records\n encoder.fit(df.sex)\n df[\"sex_encoded\"] = encoder.transform(df.sex)\n\n # Handle the 177 records with missing age values\n average_age = df.age.mean()\n df.age.fillna(average_age, inplace=True)\n\n scaler = MinMaxScaler()\n scaler.fit(df[['fare']])\n df[\"fare_scaled\"] = scaler.transform(df[['fare']])\n\n scaler = MinMaxScaler()\n scaler.fit(df[['age']])\n df[\"age_scaled\"] = scaler.transform(df[['age']])\n\n # Set the index to the passenger id\n df = df.set_index(\"passenger_id\")\n return df",
"def apply(self, df):\n encoded = []\n for feature_name, encoder in zip(self.feature_names, self.encoders):\n column = df[feature_name].to_numpy().reshape(-1, 1)\n encoded.append(pd.DataFrame(\n encoder.transform(column).todense(),\n index=df.index,\n columns=encoder.categories_[0]\n ))\n df = df.drop(columns=self.feature_names)\n df = pd.concat((df, *encoded), axis=1)\n return df",
"def top_cat(df_, feature, top=10):\n alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n labels = alphabet[:top]\n other = alphabet[top + 1]\n top_violation_codes = df_.groupby(feature)[feature].count().sort_values(ascending=False).head(\n top).index.tolist()\n map_values = {k: l for k, l in (zip(top_violation_codes, labels))} # [::-1]\n key_others = set(map_values.keys()) ^ (set(df_.loc[:, feature].values))\n map_others = {k: other for k in key_others}\n map_all = {**map_others, **map_values}\n df_.loc[:, feature] = df_.loc[:, feature].replace(map_all).astype('category')\n return df_",
"def top_cat(df_, feature, top=10):\n alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n labels = alphabet[:top]\n other = alphabet[top + 1]\n top_violation_codes = df_.groupby(feature)[feature].count().sort_values(ascending=False).head(\n top).index.tolist()\n map_values = {k: l for k, l in (zip(top_violation_codes, labels))} # [::-1]\n key_others = set(map_values.keys()) ^ (set(df_.loc[:, feature].values))\n map_others = {k: other for k in key_others}\n map_all = {**map_others, **map_values}\n df_.loc[:, feature] = df_.loc[:, feature].replace(map_all).astype('category')\n return df_"
]
| [
"0.63006103",
"0.6074821",
"0.5858649",
"0.5727977",
"0.5541528",
"0.55379224",
"0.5417069",
"0.53265554",
"0.5184373",
"0.5131714",
"0.5131714",
"0.51303464",
"0.50749356",
"0.5053229",
"0.50340205",
"0.5017828",
"0.501695",
"0.49977216",
"0.4987606",
"0.49444175",
"0.49178272",
"0.4916055",
"0.49137044",
"0.49134836",
"0.49075425",
"0.49039492",
"0.4889822",
"0.48572895",
"0.48556608",
"0.48556608"
]
| 0.6118041 | 1 |
Returns the dataframe with ethnicity compressed into only the majority groups, WHITE, ASIAN, HISPANIC/LATINO, BLACK_AFRICAN/OTHER and OTHER/UNKNOWN. | def compressing_ethnicity(data):
data.ethnicity = data.ethnicity.apply(lambda x: 'WHITE'
if ("WHITE" in x) else x)
data.ethnicity = data.ethnicity.apply(lambda x: "ASIAN"
if ("ASIAN" in x) else x)
data.ethnicity = data.ethnicity.apply(lambda x: "HISPANIC/LATINO"
if ("LATINA" in x) |
("HISPANIC" in x)
else x)
data.ethnicity = data.ethnicity.apply(lambda x: "OTHER/UNKNOWN"
if (x == "AMERICAN INDIAN/ALASKA NATIVE FEDERALLY RECOGNIZED TRIBE") |
(x == "SOUTH AMERICAN") |
(x == "CARIBBEAN ISLAND") |
(x == "NATIVE HAWAIIAN OR OTHER PACIFIC ISLANDER") |
(x == "AMERICAN INDIAN/ALASKA NATIVE") |
(x == "MIDDLE EASTERN") |
(x == "PORTUGUESE") |
(x == "MULTI RACE ETHNICITY") |
(x == "PATIENT DECLINED TO ANSWER") |
(x == "OTHER") |
("UNKNOWN" in x) |
("OBTAIN" in x)
else x)
data.ethnicity = data.ethnicity.apply(lambda x: "BLACK_AFRICAN/OTHER"
if ("BLACK" in x) else x)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def categories(df):\n print 'going to code categories'\n print_time()\n\n ethnicities = ['Mexican', 'Italian', 'American']\n df['num_categories'] = 0\n for ethnicity in ethnicities:\n df.loc[df['categories'].str.contains(ethnicity, flags=re.I, na=False, case=False), 'category'] = ethnicity\n df.loc[df['categories'].str.contains(ethnicity, flags=re.I, na=False, case=False), 'num_categories'] += 1\n df.loc[df['category'].isnull(), 'category'] = 'Other'\n df.loc[df['num_categories'] > 1, 'category'] = 'Multiple'\n df = df[df['category'].notnull()]\n return df",
"def compress_icd9_codes(data):\n\n data.icd9_code = data.icd9_code.apply(lambda x: '.1' if 'V' in x else x)\n data.icd9_code = data.icd9_code.apply(lambda x: '.8' if 'M' in x else x)\n data.icd9_code = data.icd9_code.apply(lambda x: '.5' if 'E' in x else x)\n data.icd9_code = data.icd9_code.apply(lambda x: x[:3] if ('E' not in x) &\n ('M' not in x) &\n ('V' not in x)\n else x)\n\n data.icd9_code = data.icd9_code.astype(float)\n\n\n data['diagnoses'] = data.apply(icd9_descriptions, axis=1)\n data = data.drop(columns=['icd9_code'])\n\n return data",
"def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Black, non-Hispanic\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of black ethnicity people group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas",
"def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n count_miss = df.isnull().sum(axis=0).values #find number of nans for each column\n count_miss = [val for val in count_miss]\n \n drop_cols = []\n\n for ind, val in enumerate(count_miss):\n if val > 200000:\n drop_cols.append(ind)\n \n df_drop_cols = list(azdias.columns[drop_cols])\n df = df.drop(df_drop_cols, axis=1)\n \n for col in range(df.shape[1]): #loop through columns\n column_name = df.columns[col] #get column name\n missing_list = feat_info.iloc[col,3] #get missing_or_unknown column from feature info\n missing_list = missing_list.replace('[','') #remove left bracket from string\n missing_list = missing_list.replace(']','') #remove right bracket from string\n missing_list = missing_list.split(',') #split into individual strings\n \n #find data that is natually missing and continue loop to omit\n if missing_list == ['']:\n continue\n \n else:\n for dat_type in missing_list: \n if df[column_name].dtype == 'object': #find values that contain x\n df.loc[df[column_name] == dat_type, column_name] = np.nan #replace x with nan\n \n else:\n dat_type = int(dat_type) #if no x, convert to integer and replace with nan\n df.loc[df[column_name] == dat_type, column_name] = np.nan\n \n # select, re-encode, and engineer column values.\n \n # encode OST_WEST_KZ\n df.loc[df['OST_WEST_KZ'] == 'W','OST_WEST_KZ'] = 0\n df.loc[df['OST_WEST_KZ'] == 'O','OST_WEST_KZ'] = 1\n \n # Re-encode categorical variable(s) to be kept in the analysis.\n \n \n #get list of attributes with type categorical\n feat_info[feat_info['type'] == 'categorical']\n \n cat_new_cols = [] #initialize\n for i in feat_info[feat_info['type'] == 'categorical']['attribute']:\n cat_new_cols.append(i)\n \n for cols in df.columns:\n if cols in cat_new_cols:\n if df[cols].nunique(dropna=True) > 2: #if the number of unique values is greater than 2 \n df = df.drop(cols, axis=1) #drop from the analysis\n print(\"more than 2 categories: {}\".format(cols))\n \n else:\n if not df[cols].unique()[0] > 0:\n #if not df[cols].unique()[0] > 0:\n dummies = pd.get_dummies(df[cols], prefix=cols)\n df = df.drop(cols, axis=1) #create dummy variable\n df = df.join(dummies)\n print(\"transformed to dummy variable: {}\".format(cols))\n \n # create variable: MOVEMENT\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]),'MOVEMENT'] = 1\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([2,4,6,7,9,11,13,15]),'MOVEMENT'] = 2\n \n #Capture Decade\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,2]), 'DECADE'] = 40\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([3,4]), 'DECADE'] = 50\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([5,6,7]), 'DECADE'] = 60\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([8,9]), 'DECADE'] = 70\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([10,11,12,13]), 'DECADE'] = 80\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([14,15]), 'DECADE'] = 90\n \n df['CAMEO_INTL_2015'] = df['CAMEO_INTL_2015'].astype(float)\n\n # create new variable: WEALTH\n df.loc[df['CAMEO_INTL_2015'].isin([51,52,53,54,55]), 'WEALTH'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([41,42,43,44,45]), 'WEALTH'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([31,32,33,34,35]), 'WEALTH'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([21,22,23,24,25]), 'WEALTH'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([11,12,13,14,15]), 'WEALTH'] = 5\n \n # create new variable: LIFE_STAGE\n df.loc[df['CAMEO_INTL_2015'].isin([11,21,31,41,51]),'LIFE_STAGE'] = 1\n 
df.loc[df['CAMEO_INTL_2015'].isin([12,22,32,42,52]),'LIFE_STAGE'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([13,23,33,43,53]),'LIFE_STAGE'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([14,24,34,44,54]),'LIFE_STAGE'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([15,25,35,45,55]),'LIFE_STAGE'] = 5\n \n # remove selected columns and rows, ...\n df = df.drop('PRAEGENDE_JUGENDJAHRE', axis=1)\n df = df.drop('CAMEO_INTL_2015',axis=1)\n \n # Return the cleaned dataframe.\n return df",
"def category_grouping(data):\n data['TrafficType'] = data['TrafficType'].apply(lambda x: 'Other' if x in\n ['7', '9', '12', '14',\n '15', '16', '17', '18',\n '19']\n else x)\n data['OperatingSystems'] = data['OperatingSystems'].apply(lambda x: 'Other'\n if x in\n ['4', '5', '6',\n '7', '8']\n else x)\n data['Browser'] = data['Browser'].apply(lambda x: 'Other' if x in\n ['3', '7', '9', '11', '12', '13']\n else x)\n data['VisitorType'] = data['VisitorType'].apply(lambda x: x if\n x == 'Returning_Visitor'\n else 'New_or_Other')\n return data",
"def fix_nationality_field(self):\n where = self.df[\"Nationality\"] == \"None\"\n where &= self.df[\"Nationality\"] == \"white\"\n self.df[\"Nationality\"].loc[where] = 0\n self.df[\"Nationality\"].loc[np.invert(where)] = 1",
"def get_China_exhubei(df) -> pandas.core.frame.DataFrame:\n return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \\\n & ~(df['city'].isnull())]",
"def dummify_all_categorical(df):\n\n df = pd.get_dummies(df)\n df = dummify(df, \"detailed industry recode\")\n df = dummify(df, \"detailed occupation recode\") ## add some variables that are encoded as int64 but that are in fact categorical\n return df",
"def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw",
"def cleanup_df(df: pd.DataFrame) -> pd.DataFrame:\n df = df.copy()\n \n # Map column names\n if \"PO2\" in df.columns:\n df[\"PO2_ATM\"] = df[\"PO2\"]\n\n # Fix fuzzy binary values\n _bm = {\"Tak\": 1, \"Nie\": 0, \"Nie wiadomo\": -1, np.nan: -1}\n\n def map_binary_values(x) -> int:\n \"\"\"Map binary values within the column.\"\"\"\n return _bm.get(x, -1)\n\n _bin_vars = [x for x in df.columns if df[x].isin([\"Tak\", \"Nie\", \"Nie wiadomo\"]).any()]\n for col in _bin_vars:\n df[col] = df[col].map(map_binary_values)\n \n # Fix numeric columns in biochem \n # We add \"*_out_of_bound\" to all columns for consistency with potential test data being OOB\n for col in basic_bio_codes:\n if df[col].dtype == 'object':\n df[f'{col}_out_of_bound'] = df[col].str.match('.*(<|>).*').fillna(False).astype(int)\n df[col] = df[col].str.replace(',', '.').str.replace('[^0-9.]', '', regex=True).astype('float64')\n else:\n warnings.warn(f\"Column within bounds: {col!r}\")\n df[f'{col}_out_of_bound'] = 0\n \n # Fix height and weight being swapped\n t1 = df[\"WZROST\"] < df[\"MASA_CIALA\"]\n df.loc[t1, ['WZROST', 'MASA_CIALA']] = df.loc[t1, ['MASA_CIALA', 'WZROST']].values\n df.loc[t1, \"BMI\"] = df.loc[t1, \"MASA_CIALA\"] / (df.loc[t1, \"WZROST\"]/100)**2\n \n # Fix out-of-bounds values - hacky but fast\n df.loc[df[\"MASA_CIALA\"] < 10, [\"MASA_CIALA\", \"BMI\"]] = np.nan\n df.loc[df[\"WZROST\"] < 50, [\"WZROST\", \"BMI\"]] = np.nan\n df.loc[df[\"ODDECH\"] > 60, \"ODDECH\"] = np.nan\n df.loc[df[\"PO2_ATM\"] < 10, \"PO2_ATM\"] = np.nan\n df.loc[df[\"C55.103.02_WBC\"] > 60, \"C55.103.02_WBC\"] = np.nan # unsure\n\n return df",
"def deal_with_ordinal(df):\n print(\" --- Dealing with Ordinals.\")\n thresh = get_min_filled_threshold(df)\n columns = df.columns\n ordinal = [x for x in columns if x.startswith('o_')]\n\n for col in ordinal:\n filled = get_non_missing_count(df[col])\n if filled < thresh:\n df[col] = df[col].fillna(-10)\n else:\n mode = df[col].value_counts().idxmax()\n df[col] = df[col].fillna(mode)\n median = df[col].median()\n df[col] = df[col].apply(lambda x: sigmoid(x-median))\n \n print(\" --- Finished dealing with Ordinals.\")\n return df",
"def compressing_marital_status(data):\n\n data.marital_status = data.marital_status.apply(lambda x: 'LIFE_PARTNER'\n if (x == 'MARRIED') |\n (x == 'LIFE PARTNER')\n else x)\n\n data.marital_status = data.marital_status.apply(lambda x: 'SINGLE'\n if (x == 'WIDOWED') |\n (x == 'DIVORCED') |\n (x == 'SEPARATED')\n else x)\n\n data.marital_status = data.marital_status.apply(lambda x: 'OTHER/UNKNOWN'\n if (x == 'UNKNOWN (DEFAULT)')\n else x)\n\n return data",
"def prepare_titanic_data(df):\n\n df.embark_town.fillna('Other', inplace=True)\n\n # Drop deck and embarked_town\n df.drop(columns=['deck', 'embark_town'], inplace=True)\n\n # Encoding: Objects (Categorical Variables) to Numeric\n # Use sklearn's LabelEncoder\n encoder = LabelEncoder()\n\n # Set Unknown and encode Embarked column to numbers\n # 2 == \"S\" == Southampton == 644 people\n # 0 == \"C\" == Cherbourg == 168 people\n # 1 == \"Q\" == Queenstown == 77 people\n # 3 == \"Unknown\" == 2 people\n df.embarked.fillna('Unknown', inplace=True)\n encoder.fit(df.embarked)\n df.embarked = encoder.transform(df.embarked)\n\n # Encode the Class (first class, second, etc...)\n # First class == 0\n # Second class == 1\n # Third class == 2\n encoder.fit(df[\"class\"])\n df[\"class_encoded\"] = encoder.transform(df[\"class\"])\n\n # Encode gender\n # male == 1 == 577 records\n # female == 0 == 314 records\n encoder.fit(df.sex)\n df[\"sex_encoded\"] = encoder.transform(df.sex)\n\n # Handle the 177 records with missing age values\n average_age = df.age.mean()\n df.age.fillna(average_age, inplace=True)\n\n scaler = MinMaxScaler()\n scaler.fit(df[['fare']])\n df[\"fare_scaled\"] = scaler.transform(df[['fare']])\n\n scaler = MinMaxScaler()\n scaler.fit(df[['age']])\n df[\"age_scaled\"] = scaler.transform(df[['age']])\n\n # Set the index to the passenger id\n df = df.set_index(\"passenger_id\")\n return df",
"def test_group_small_cols(self):\n taxa = DataTableFactory(PACKET_DIR).taxonomy()\n taxa = group_small_cols(taxa, top=2)\n self.assertEqual(taxa.shape[1], 3)",
"def clean_data(df):\r\n \r\n # list of categories to use as column names \r\n categories_cols = [names.split('-')[0] for names in df['categories'][0].split(';')]\r\n \r\n # creating 36 individual category columns\r\n for i in range(len(categories_cols)):\r\n df[categories_cols[i]] = [int(row.split(';')[i].split('-')[1]) for row in df['categories']]\r\n \r\n # labels 0 and 2 in 'related' class are similar (refer to notebook)\r\n # change 2s into 0s to make it more simple\r\n df['related'] = df['related'].map({0:0,1:1,2:0})\r\n \r\n # drop 'categories' column\r\n df.drop('categories', axis=1, inplace=True)\r\n \r\n # drop duplicates\r\n df.drop_duplicates(inplace=True)\r\n \r\n return df",
"def clean_up_df(df):\n df['Age'] = df['Age'].fillna(df['Age'].median())\n df['Gender'] = df['Sex'].map({'female':0, 'male':1}).astype(int)\n df['Family'] = df['Parch'] + df['SibSp']\n df['Fare'] = df['Fare'].fillna(df['Fare'].mean())\n df = df.drop(['SibSp','Parch','Sex','Name','Cabin','Embarked','Ticket'],axis=1)\n return df",
"def compressing_careunit(data):\n\n data.first_careunit = data.first_careunit.apply(lambda x: 'ICU' if\n (x == 'MICU') |\n (x == 'SICU') |\n (x == 'CCU') |\n (x == 'CSRU') |\n (x == 'TSICU')\n else x)\n return data",
"def categorical(df):\n\n # variables which need to be transformed to categorical\n categorical = [\"prop_country_id\", \"visitor_location_country_id\"]\n\n for var in categorical:\n df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)\n del df[var]\n\n return df",
"def ex_eight_animals_data_table():\n data_dict = {'Calf': [4, 5, 6, 7, 8],\n 'Sire': [1, 3, 1, 4, 3],\n 'Dam': ['Unknown', 2, 2, 5, 6],\n 'Sex': ['Male', 'Female', 'Female', 'Male', 'Male'],\n 'WWG': [4.5, 2.9, 3.9, 3.5, 5.0]}\n\n df = pd.DataFrame(data_dict)\n\n return(df)",
"def _clean_study_summary(df):\n # clean age, patientweight, patientheight columns\n for column in [\"age\", \"patientweight\", \"patientheight\"]:\n df[column] = df[column].replace(\"\", -1)\n\n for column in [\"studyidk\", \"age\"]:\n df[column] = pd.to_numeric(df[column], errors=\"coerce\").astype(int)\n\n for column in [\"patientweight\", \"patientheight\"]:\n df[column] = pd.to_numeric(df[column], errors=\"coerce\")\n\n # remove outliers\n for column in [\"age\", \"patientweight\", \"patientheight\"]:\n boxplot = plt.boxplot(df[column])\n outlier_min, outlier_max = [item.get_ydata()[0] for item in boxplot[\"caps\"]]\n df[column] = df[column].apply(lambda x: 1 if x > outlier_max else x)\n df[column] = df[column].apply(lambda x: 1 if x < outlier_min else x)\n\n # create BMI column and clean outliers\n # (formula from https://www.cdc.gov/nccdphp/dnpao/growthcharts/training/bmiage/page5_1.html)\n df[\"bmi\"] = df.apply(\n lambda x: ((x.patientweight / x.patientheight / x.patientheight) * 10000),\n axis=1,\n )\n boxplot = plt.boxplot(df[\"bmi\"])\n outlier_min, outlier_max = [item.get_ydata()[0] for item in boxplot[\"caps\"]]\n df[\"bmi\"] = df[\"bmi\"].apply(lambda x: 1 if x > outlier_max else x)\n df[\"bmi\"] = df[\"bmi\"].apply(lambda x: 1 if x < outlier_min else x)\n\n # clean gender column\n df[\"gender\"] = df[\"gender\"].replace(\"\", \"U\")\n\n # clean findingcode column\n df[\"findingcode\"] = df[\"findingcode\"].apply(lambda x: x.split(\",\"))\n\n return df",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def most_discriminating( features_df, labels_df, top=5):\n \n columns = features_df.shape[1]\n labels_df = labels_df[['file', 'candy_id']].set_index('file')\n qualities = np.zeros(columns)\n \n _left = 0\n _right = 1\n\n _c = 0\n _h = 1\n\n # globals\n cases = float(labels_df['candy_id'].count()) # total cases\n\n p_c_A = (labels_df['candy_id'] == 0).sum() / cases\n p_h_A = 1.0 - p_c_A\n\n\n for feature in range(columns):\n\n branch_cases = np.zeros(2) # total on each branch\n pi = np.zeros(2) # proportion on each branch\n\n split = np.array([\n #c, h\n [0, 0], #left\n [0, 0] #right\n ])\n\n for index, value in features_df[feature].iteritems():\n split[value][labels_df.loc[index][0]] += 1\n\n branch_cases[_left] = split[_left].sum()\n branch_cases[_right] = split[_right].sum()\n \n if branch_cases[_left] == 0.0 or branch_cases[_right] == 0.0:\n qualities[feature] = 0\n continue\n \n pi[_left] = branch_cases[_left] / cases\n pi[_right] = branch_cases[_right] / cases\n\n p_c_B = split[_left][_c] / branch_cases[_left]\n p_h_B = split[_left][_h] / branch_cases[_left]\n\n p_c_C = split[_right][_c] / branch_cases[_right]\n p_h_C = split[_right][_h] / branch_cases[_right]\n\n gini_tree = 1.0 - (math.pow(p_c_A, 2) + math.pow(p_h_A, 2))\n\n gini_left = 1.0 - (math.pow(p_c_B, 2) + math.pow(p_h_B, 2))\n gini_right = 1.0 - (math.pow(p_c_C, 2) + math.pow(p_h_C, 2))\n\n quality = gini_tree - pi[_left] * gini_left - pi[_right] * gini_right\n\n qualities[feature] = quality\n return list(reversed(qualities.argsort()))[:top]",
"def get_human_naive_airr(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.human_naive_path)",
"def aggregated_item_codes( dfs ):\n for (series, regexes) in [\n (\"inversion\" , ac.regexes_for_2_codes() )\n , (\"funcionamiento\" , ac.regexes_for_2_codes() )\n , (\"ingresos\" , ac.regexes_for_ingresos() ) ]:\n df = dfs[series]\n\n # build some columns\n (category, top, child) = regexes\n df[\"item categ\"] = (\n df[\"item code\"]\n . str.extract( category ) )\n df[\"item top\"] = ~ pd.isnull(\n df[\"item code\"]\n . str.extract( top ) )\n df[\"item child\"] = ~ pd.isnull(\n df[\"item code\"]\n . str.extract( child ) )\n\n df = ( # keep only rows labeled with top categories\n # or the first generation below the top categories\n df[ (df[\"item top\"])\n | (df[\"item child\"]) ] )\n\n # Verify that codigo-top is the boolean negative of codigo-child.\n # (That's not true before we drop rows categorized deeper than top or child.)\n assert ( len ( df[ ( (df[\"item top\"].astype(int)) +\n (df[\"item child\"]).astype(int) )\n != 1 ] )\n == 0 )\n df = df.drop( columns = [\"item child\"] )\n\n dfs[series] = df\n return dfs",
"def get_population_by_race_columns():\n return {\n 'DP05_0070E': 'Population (Total)',\n 'DP05_0071E': 'Population (Hispanic or Latino)',\n 'DP05_0077E': 'Population (White alone, Non-Hispanic)',\n 'DP05_0078E': 'Population (Black or African American alone, Non-Hispanic)',\n 'DP05_0080E': 'Population (Asian alone, Non-Hispanic)',\n\n # These will be grouped into an \"Other\" category\n 'DP05_0079E': 'Population (American Indian and Alaska Native alone, Non-Hispanic)',\n 'DP05_0081E': 'Population (Native Hawaiian and Other Pacific Islander alone, Non-Hispanic)',\n 'DP05_0082E': 'Population (Some other race alone, Non-Hispanic)',\n 'DP05_0083E': 'Population (Two or more races, Non-Hispanic)'\n }",
"def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):",
"def behav_data_inverted(df):\n # Apparently groupby with categorical dtype is broken\n # See https://github.com/pandas-dev/pandas/issues/22512#issuecomment-422422573\n df[\"class_\"] = df[\"class_\"].astype(str)\n inverted_map = (\n df[(df[\"morph_pos\"] == 1)]\n .groupby([\"subj\", \"morph_dim\"], observed=True)\n .agg(lambda x: x.iloc[0])[\"class_\"]\n == \"R\"\n )\n df = df.join(\n inverted_map.to_frame(name=\"inverted\"),\n on=(\"subj\", \"morph_dim\"),\n how=\"left\",\n sort=False,\n )\n df[\"greater_response\"] = (df[\"response\"] == \"R\") != (df[\"inverted\"])\n return df",
"def original_clean():\n dataset = pd.read_csv('Parliment-1984.csv')\n X = dataset.iloc[:, 1:].values\n y = dataset.iloc[:, 0].values\n\n for i in range(0, 434):\n if y[i] == 'democrat':\n y[i] = 0\n elif y[i] == 'republican':\n y[i] = 1\n y = y.astype(int)\n\n for a in range(0, 434):\n for b in range(0, 16):\n if 'y' in X[a][b]:\n X[a][b] = 1\n elif 'n' in X[a][b]:\n X[a][b] = 0\n\n medians = []\n for x in range(0, 16):\n acceptable = []\n for z in range(0, 434):\n if (X[z][x] == 1) or (X[z][x] == 0):\n acceptable.append(X[z][x])\n med = np.median(acceptable)\n medians.append(int(med))\n\n for c in range(0, 434):\n for d in range(0, 16):\n if (X[c][d] != 1) and (X[c][d] != 0):\n X[c][d] = medians[d]\n X = X.astype(float)\n X = normalize(X)\n return X, y",
"def get_zeng_labels(genes_of_interest, min_genes_per_group=5):\n # genes of interest should be a list of entrez_ids\n zeng = utils.prep_zeng()\n zeng = zeng.drop('cortical_marker', axis=1).drop_duplicates(\n subset=['gene_symbol', 'entrez_id'])\n\n zeng = zeng[zeng.entrez_id.isin(genes_of_interest)]\n\n all_dummies = []\n for col in ['pattern_description', 'level_description', 'expression_level', 'celltype_markers', 'layer_markers']:\n dummy_cols = pd.get_dummies(zeng.loc[:, col], prefix=col)\n all_dummies.append(dummy_cols)\n\n zeng_labels = pd.concat(all_dummies, axis=1)\n zeng_labels.index = zeng.entrez_id\n\n # return zeng_labels[zeng_labels.index.isin(genes_of_interest)]\n cols_to_drop = zeng_labels.sum()[zeng_labels.sum() < min_genes_per_group].index\n zeng_labels.drop(cols_to_drop, axis=1, inplace=True)\n zeng_labels = zeng_labels.add_prefix('zeng_')\n\n # return as boolean to be consistent with GOdf\n return zeng_labels.astype('bool')",
"def encode_nans_table(table):\n col_names = table.labels\n for col in col_names :\n table = encode_nans(table, col)\n\n return table"
]
| [
"0.5341186",
"0.5316503",
"0.52776337",
"0.52562404",
"0.51598245",
"0.5094428",
"0.5088444",
"0.5017636",
"0.50078815",
"0.49480984",
"0.49448717",
"0.4937764",
"0.4870405",
"0.4847865",
"0.48402515",
"0.48117942",
"0.47928178",
"0.4764874",
"0.47402084",
"0.4731294",
"0.46723574",
"0.46464145",
"0.46451807",
"0.46412012",
"0.46235162",
"0.46131217",
"0.45969528",
"0.4585478",
"0.45702663",
"0.45691314"
]
| 0.73783153 | 0 |
Returns the dataframe with marital status compressed to only LIFE_PARTNER, SINGLE, OTHER/UNKNOWN. | def compressing_marital_status(data):
data.marital_status = data.marital_status.apply(lambda x: 'LIFE_PARTNER'
if (x == 'MARRIED') |
(x == 'LIFE PARTNER')
else x)
data.marital_status = data.marital_status.apply(lambda x: 'SINGLE'
if (x == 'WIDOWED') |
(x == 'DIVORCED') |
(x == 'SEPARATED')
else x)
data.marital_status = data.marital_status.apply(lambda x: 'OTHER/UNKNOWN'
if (x == 'UNKNOWN (DEFAULT)')
else x)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAllStatus(self) -> DataFrame:\n return self.writer.getAllStatus()",
"def read_elia_activated_energy_volumes(filename,status):\r\n df = pd.read_excel(filename,skiprows=2,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if ((len(df.columns)<13) & (len(df.columns)>11)) :\r\n df.columns.values[0:13] = [\"NRV in MW\", \"GUV in MW\", \"IGCC+ in MW\", \"R2+ in MW\", \"Bids+ in MW\", \"R3+ in MW\", \"R3DP+ in MW\", \"GDV in MW\", \"IGCC- in MW\", \"R2- in MW\", \"Bids- in MW\", \"R3- in MW\"]\r\n if len(df.columns)<= 11:\r\n df.columns.values[0:12] = [\"NRV in MW\", \"GUV in MW\", \"IGCC+ in MW\", \"R2+ in MW\", \"Bids+ in MW\", \"R3+ in MW\", \"GDV in MW\", \"IGCC- in MW\", \"R2- in MW\", \"Bids- in MW\", \"R3- in MW\"]\r\n if len(df.columns)>14:\r\n df.columns.values[0:16] = [\"NRV in MW\", \"SR in MW\",\"GUV in MW\", \"IGCC+ in MW\",\"R2+ in MW\",\"Bids+ in MW\",\"R3 std in MW\",\"R3 flex in MW\",\"ICH in MW\",\"inter TSO import in MW\",\"GDV in MW\",\"IGCC- in MW\",\"R2- in MW\",\"Bids- in MW\",\"inter TSO export in MW\"]\r\n \r\n return df",
"def read_elia_imbalanceprices(filename,status):\r\n \r\n df = pd.read_excel(filename,skiprows=1,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if len(df.columns) == 3:\r\n df.columns.values[0:3] = [\"NRV in MW\",\"POS in euro/MWh\", \"NEG in euro/MWh\"]\r\n \r\n if len(df.columns) == 7:\r\n df.columns.values[0:7] = [\"NRV in MW\",\"SI in MW\",\"alpha in euro/MWh\",\"MIP in euro/MWh\", \"MDP in euro/MWh\",\"POS in euro/MWh\", \"NEG in euro/MWh\"]\r\n \r\n if len(df.columns) == 8:\r\n df.columns.values[0:8] = [\"NRV in MW\",\"SI in MW\",\"alpha in euro/MWh\",\"MIP in euro/MWh\", \"MDP in euro/MWh\",\"SR in euro/MWh\",\"POS in euro/MWh\", \"NEG in euro/MWh\"]\r\n\r\n return df",
"def behav_data_inverted(df):\n # Apparently groupby with categorical dtype is broken\n # See https://github.com/pandas-dev/pandas/issues/22512#issuecomment-422422573\n df[\"class_\"] = df[\"class_\"].astype(str)\n inverted_map = (\n df[(df[\"morph_pos\"] == 1)]\n .groupby([\"subj\", \"morph_dim\"], observed=True)\n .agg(lambda x: x.iloc[0])[\"class_\"]\n == \"R\"\n )\n df = df.join(\n inverted_map.to_frame(name=\"inverted\"),\n on=(\"subj\", \"morph_dim\"),\n how=\"left\",\n sort=False,\n )\n df[\"greater_response\"] = (df[\"response\"] == \"R\") != (df[\"inverted\"])\n return df",
"def read_elia_activated_energy_prices(filename,status):\r\n \r\n df = pd.read_excel(filename,skiprows=2,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if len(df.columns)>14:\r\n df.columns.values[0:16] = [\"NRV in MW\",\"SR in euro/MWh\",\"MIP in euro/MWh\",\"IGGC+ in euro/MWh\", \"R2+ in euro/MWh\",\"Bids+ in euro/MWh\",\"R3 std in euro/MWh\", \"R3 flex in euro/MWh\", \"ICH in euro/MWh\", \"inter TSO import in euro/MWh\", \"MDP in euro/MWh\", \"IGCC- in euro/MWh\", \"R2- in euro/MWh\", \"Bids- in euro/MWh\", \"R3- in euro/MWh\"]\r\n\r\n if len(df.columns)<12:\r\n df.columns.values[0:12] = [\"NRV in MW\",\"MIP in euro/MWh\",\"IGGC+ in euro/MWh\", \"R2+ in euro/MWh\",\"Bids+ in euro/MWh\", \"R3+ in euro/MWh\", \"MDP in euro/MWh\", \"IGCC- in euro/MWh\", \"R2- in euro/MWh\", \"Bids- in euro/MWh\", \"R3- in euro/MWh\"]\r\n\r\n return df",
"def get_human_naive_airr(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.human_naive_path)",
"def preprocess(self):\n\n print('[ INFO ]: Preprocessing abalone data...')\n\n # Rename headers of data frame\n abalone_data = pd.read_csv(self.abalone_path, header=None)\n abalone_data.columns = [\n 'sex','length','diameter','height','whole_weight','shucked_weight',\n 'viscera_weight','shell_weight','rings'\n ]\n categorical_features = [\n 'sex'\n ]\n continuous_features = [\n 'length','diameter','height','whole_weight','shucked_weight',\n 'viscera_weight','shell_weight'\n ]\n predictor = 'rings'\n\n df = alg.continuous_to_discrete(self, abalone_data, continuous_features)\n\n classes = abalone_data[predictor].unique().tolist()\n\n features = [df.columns[j] for j in range(len(df.columns)) if df.columns[j] != predictor]\n\n return df, features, predictor, classes",
"def MEGA_ANALYSIS(\n excel_data = \"D:\\\\Ali USB Backup\\\\1 PhD\\\\Mega Analysis\\\\4. SystReview Single Table (NEW CROSSTAB) March 2020.xlsx\",\n n_rows = 1045,\n usecols = \"A:DG\",\n header = 1,\n exclude_data=False, **kwargs):\n\n\n df = pd.read_excel(excel_data, nrows=n_rows, usecols=usecols, header=header, \n #index_col=[4,0] # only if you want multi-index\n )\n\n\n# 0. CLEANUPS: remove empty rows and columns\n print('\\n0. DataFrame pre-processing and cleaning:')\n df = cleaning(df)\n\n# 1. Exclusions\n if exclude_data:\n print('\\n\\n1. Excluding some data based on ground truths')\n if not kwargs:\n df = exclusions(df, \n POST_ictals=True,\n PET_hypermetabolism=True,\n SPECT_PET=False,\n CONCORDANCE=False)\n elif kwargs:\n df = exclusions(df, \n POST_ictals=kwargs['POST_ictals'],\n PET_hypermetabolism=kwargs['PET_hypermetabolism'],\n SPECT_PET=kwargs['SPECT_PET'],\n CONCORDANCE=kwargs['CONCORDANCE'])\n\n print('\\ndf.shape after exclusions: ', df.shape)\n else:\n print('1. No Exclusions.')\n\n# 2. checking for missing labels e.g. Semiology Categories Labels:\n print('\\n2. Checking for missing values for columns')\n missing_columns(df)\n \n print('\\n Checking for dtypes: first localisation_labels column is: ', df.columns[17], '...last one is', df.columns[88])\n # localisation_labels = df.columns[17:72] # old July 2019\n localisation_labels = df.columns[17:88] # news March 2020\n for col in df[localisation_labels]:\n for val in df[col]:\n if ( type(val) != (np.float) ) & ( type(val) != (np.int) ):\n print(type(val), col, val)\n \n # 3 ffill References:\n df.Reference.fillna(method='ffill', inplace=True)\n print('\\n3. forward filled references')\n\n # 4 check no other entries besides \"ES\" and \"y\" in list(df['sEEG and/or ES'].unique())\n # March 2020 updated for sEEG_ES = 'sEEG (y) and/or ES (ES)' # March 2020 version\n sEEG_ES = 'sEEG (y) and/or ES (ES)' # March 2020 version\n \n print(\"\\n4. 'sEEG and/or ES' column only contains ['ES', nan, 'y']: \")\n print(list(df[sEEG_ES].unique()) == ['ES', np.nan, 'y'])\n if not (list(df[sEEG_ES].unique()) == ['ES', np.nan, 'y']):\n print('the set includes:', list(df['sEEG and/or ES'].unique()) )\n\n # 5. print some basic progress stats:\n print('\\n\\n 5. BASIC PROGRESS:')\n print('Number of articles included in this analysis:', int( df['Reference'].nunique()) )\n print('Number of patients:', int( df['Tot Pt included'].sum()) )\n print('Number of lateralising datapoints:', df.Lateralising.sum())\n print('Number of localising datapoints:', df.Localising.sum())\n\n df_ground_truth = progress_stats(df)\n\n # plot progress by ground truth\n progress_venn(df_ground_truth, method='Lateralising')\n progress_venn(df_ground_truth, method='Localising')\n\n\n # 6. plot progress by study type (CS, SS, ET, Other)\n print(\"6. Venn diagrams by patient selection priors (study type)\")\n df_study_type = progress_study_type(df)\n progress_venn_2(df_study_type, method='Lateralising')\n progress_venn_2(df_study_type, method='Localising')\n\n print('Other criteria: ', df.loc[df['Other (e.g. Abs)'].notnull()]['Other (e.g. 
Abs)'].unique() )\n print('Lateralising Other Total/Exclusives: ', df_study_type.loc['OTHER', ('Lateralising Datapoints', 'Total')], '/',\n df_study_type.loc['OTHER', ('Lateralising Datapoints', 'Exclusive')] )\n print('Localising Other Total/Exclusives: ', df_study_type.loc['OTHER', ('Localising Datapoints', 'Total')], '/',\n df_study_type.loc['OTHER', ('Localising Datapoints', 'Exclusive')] )\n \n\n \n\n return df, df_ground_truth, df_study_type",
"def createFeatureFrame(mode):\r\n \r\n text = textFeature(mode)\r\n sentiment = clfFeature('sentiment', mode)\r\n actors = clfFeature('actors', mode)\r\n directors = clfFeature('directors', mode)\r\n genre = clfFeature('genre', mode)\r\n titles = clfFeature('titles', mode)\r\n featureframe = pd.concat([text, sentiment, actors, directors, genre, titles], axis=1)\r\n \r\n return featureframe",
"def get_annotation_dataframe_compact(self): \n temp_df = pd.DataFrame(self.annotation_line_list)\n # make a list with the annotations for each bbox (each row of the fata frame)\n temp_df['annon'] = list(zip(list(zip(temp_df['xmin'], temp_df['ymin'], temp_df['xmax'], temp_df['ymax'])), temp_df['class_name']))\n # group the df based on im_full_path\n grouped = temp_df.groupby(['img_full_path'])\n # create tuples of the grouped rows columns\n df_serie = grouped['annon'].aggregate(lambda x: tuple(x))\n return df_serie.to_frame()",
"def pre_process(self, frame: pd.DataFrame) -> pd.DataFrame:\n # frame = rows_filtering(frame)\n # frame = feature_dropping(frame)\n # frame = feature_values_fixing(frame)\n\n # frame = extreme_values_handling(frame, [])\n # missing_value_imputation(frame, [])\n\n # data_type_conversion(frame)\n # frame = feature_engineering(frame, self.GENERATE_USER_FEATURES)\n # feature_renaming(frame)\n\n return frame",
"def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)",
"def get_test_df_complete():\n import requests\n from zipfile import ZipFile\n import StringIO\n zip_to_download = \"https://resources.lendingclub.com/LoanStats3b.csv.zip\"\n r = requests.get(zip_to_download)\n zipfile = ZipFile(StringIO.StringIO(r.content))\n file_csv = zipfile.namelist()[0]\n # we are using the c parser for speed\n df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],\n parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])\n zipfile.close()\n df = df[:-2]\n nb_row = float(len(df.index))\n df['na_col'] = np.nan\n df['constant_col'] = 'constant'\n df['duplicated_column'] = df.id\n df['many_missing_70'] = np.nan\n df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1\n df['bad'] = 1\n index_good = df['loan_status'].isin(\n ['Fully Paid', 'Current', 'In Grace Period'])\n df.loc[index_good, 'bad'] = 0\n return df",
"def prepDataFrameForPreprocessor(df, drop_backers_count = True, drop_staff_pick = True):\n #df = pd.read_csv('data/df_clean.csv')\n df = dropCols(df,drop_backers_count,drop_staff_pick) #drop unnecessary columns\n df = dateTimeUpdate(df) #create time delta columns, add these and drop the original timesptamps\n #df.drop(df.columns[df.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)\n df = df[(df['state']=='failed') | (df['state']=='successful')]\n \n #labelencode y from string to int\n label_encoder = preprocessing.LabelEncoder()\n label_encoder.fit([\"failed\",\"successful\"])\n \n df[\"state\"] = label_encoder.transform(df.state)\n \n return df",
"def valid_statuses(self):\n return [\n \"dish_maintenance\",\n \"dish_ok\",\n \"RF_maintenance\",\n \"RF_ok\",\n \"digital_maintenance\",\n \"digital_ok\",\n \"calibration_maintenance\",\n \"calibration_ok\",\n \"calibration_triage\",\n ]",
"def training_features(training_data: pd.DataFrame):\n return pd.get_dummies(\n training_data.drop(columns=[\"outstanding_balance\", \"status\", \"account_no\"])\n )",
"def moa_df(self) -> pd.DataFrame:\n return (\n self.image_df[[\"compound\", \"concentration\", \"moa\"]]\n .drop_duplicates()\n .sort_values([\"compound\", \"concentration\"])\n .reset_index(drop=True)\n )",
"def format_tf_dataframe(self, file_path):\n xls = pd.ExcelFile(file_path)\n df = pd.read_excel(xls, \"2021 Prices\")\n df.replace(\"\", np.nan, inplace=True)\n # Make all online versions end in ' online'\n df[\"Journal Name \"] = df[\"Journal Name \"].str.replace(\n \"( online| Online| \\WOnline\\W | \\Wonline\\W)\", \" online\", regex=True\n )\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df[df[\"Journal Name \"].notna()]\n # Filter out non-online versions when an online version is present, otherwise keep the print version\n vals = df.loc[\n df[\"Journal Name \"].str.contains(\" online\"), \"Journal Name \"\n ].str.replace(\" online\", \"\")\n df = df[~df[\"Journal Name \"].isin(vals)]\n self.df = df",
"def format_tf_dataframe(self, file_path):\n xls = pd.ExcelFile(file_path)\n df = pd.read_excel(xls, \"2021 Prices\")\n df.replace(\"\", np.nan, inplace=True)\n # Make all online versions end in ' online'\n df[\"Journal Name \"] = df[\"Journal Name \"].str.replace(\n \"( online| Online| \\WOnline\\W | \\Wonline\\W)\", \" online\", regex=True\n )\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df[df[\"Journal Name \"].notna()]\n # Filter out non-online versions when an online version is present, otherwise keep the print version\n vals = df.loc[\n df[\"Journal Name \"].str.contains(\" online\"), \"Journal Name \"\n ].str.replace(\" online\", \"\")\n df = df[~df[\"Journal Name \"].isin(vals)]\n self.df = df",
"def data_preprocessor(df):\n #df.wine_type = df.wine_type.map({'white':0, 'red':1})\n return df",
"def CheckWarningFlags(data_table):\n\n df = pd.DataFrame.from_dict(data_table)\n reason_dict_list = []\n\n # Checking EPS Growth positive gradient\n df['EPS Growth'] = df['EPS Growth'].map(lambda x: x.rstrip('%')).astype(float) / 100\n if df.loc[df['EPS Growth'].diff(-1) < 0].Year.tolist():\n warning_data = df.loc[df['EPS Growth'].diff(-1) < 0].Year.tolist()\n eps_string = ''\n\n for year in range(len(warning_data)-1, -1, -1):\n eps_string = eps_string + str(warning_data[year]) + ', '\n\n reason_dict_list.append(dict(reason=f'Há redução na taxa de crescimento em {eps_string}'))\n\n # Checking ROE mean\n df['ROE'] = df['ROE'].map(lambda x: float(x))\n if df.ROE.mean() < 0.15:\n reason_dict_list.append(dict(reason=f'A média do ROE é de {df.ROE.mean():.2f}, menor que 0,15'))\n\n # Checking ROA mean\n df['ROA'] = df['ROA'].map(lambda x: float(x))\n if df.ROA.mean() < 0.07:\n reason_dict_list.append(dict(reason=f'A média do ROA é de {df.ROA.mean():.2f}, menor que 0,07'))\n\n # Checking Long Term Debt is < 5 * net income\n df['Total Long Term Debt'] = df['Total Long Term Debt'].map(lambda x: x.replace(',', '')).astype(float)\n df['Net Income'] = df['Net Income'].map(lambda x: x.replace(',', '')).astype(float)\n\n if df['Total Long Term Debt'].head(1).values[0] > 5 * df['Net Income'].head(1).values[0]:\n reason_dict_list.append(dict(reason=f'A Dívida de Longo Prazo maior que cinco vezes o Lucro Líquido.'))\n\n return reason_dict_list",
"def g_mob_preproc(g_mob: pd.DataFrame) -> pd.DataFrame:\n\n # Assert sub_region_2 exists\n assert (\n \"sub_region_2\" in g_mob.columns\n ), \"sub_region_2 no longer in google mobility data. Check renamed/redefined columns\"\n\n # Filter out county and country level aggregations\n g_mob = g_mob[g_mob[\"sub_region_2\"].isna()]\n g_mob.drop([\"census_fips_code\", \"sub_region_2\"], axis=1, inplace=True)\n\n return g_mob",
"def define_state_transition_SettlementStatusProcessingAdvice():\n\n state_chart_name = 'FSwiftSettStatusProcessAdviceIn'\n old_state_chart_name = ''\n state_chart = {\n 'Ready': {'Identified': 'Paired',\n 'NotIdentified': 'Unpaired'},\n\n 'Unpaired': {'Identified': 'Paired'},\n\n 'Paired': {'Acknowledge': 'Acknowledged',\n 'NoMatch': 'NotMatched',\n 'Match': 'Matched',\n 'Pending': 'PendingSettlement',\n 'Failing': 'FailingSettlement',\n 'Reject': 'Rejected',\n 'Cancel': 'Cancelled',},\n\n 'Acknowledged': {'NoMatch': 'NotMatched',\n 'Match': 'Matched',\n 'Pending': 'PendingSettlement',\n 'Failing': 'FailingSettlement',\n 'Cancel': 'Cancelled',\n 'Reject': 'Rejected',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Done':'Processed'},\n\n 'Matched': {'Pending': 'PendingSettlement',\n 'Cancel': 'Cancelled',\n 'Reject': 'Rejected',\n 'Failing': 'FailingSettlement',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Done':'Processed'\n },\n\n 'FailingSettlement':{'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Reject': 'Rejected',\n 'Cancel': 'Cancelled',\n 'Done':'Processed'},\n\n 'NotMatched': {'Match': 'Matched',\n 'Pending': 'PendingSettlement',\n 'Failing': 'FailingSettlement',\n 'Cancel': 'Cancelled',\n 'Reject': 'Rejected',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Done':'Processed'},\n\n 'AmendCancelRequested': {'AmndCancComplete':'AmendCancelCompleted',\n 'Done':'Processed'},\n\n 'AmendCancelPending': {'AmndCancComplete':'AmendCancelCompleted',\n 'Done':'Processed'},\n\n 'PendingSettlement': {'Failing': 'FailingSettlement',\n 'Cancel': 'Cancelled',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Reject': 'Rejected',\n 'Done':'Processed'},\n 'Cancelled' : { 'Done':'Processed'},\n 'AmendCancelCompleted' : { 'Done':'Processed'},\n\n 'Rejected': {'Cancel': 'Cancelled',\n 'Done': 'Processed'}\n\n }\n\n co_string = 'Paired,73,-105;Cancelled,676,329;Not Match,696,-223;Match,1271,173;AmendCancelPending,728,189;Matched,347,-133;Unpaired,-178,-351;Acknowledge,435,-110;Reject,229,538;AmendCancelCompleted,862,90;Rejected,177,338;NotMatched,432,-283;Acknowledged,549,-429;Processed,1072,-131;Pending Settlement,1193,495;PendingSettlement,304,22;FailingSettlement,255,179;Ready,-237,-98;AmendCancelRequested,737,0;'\n #state_charts_list.append({state_chart_name: [state_chart] + [co_string]})\n\n return state_chart_name, state_chart, old_state_chart_name, co_string",
"def _rm_air_temperature_status_flag(cube: Cube) -> Cube:\n coord_name = \"air_temperature status_flag\"\n try:\n coord = cube.coord(coord_name)\n except CoordinateNotFoundError:\n coord = None\n\n if coord:\n if coord.attributes != {\n \"flag_meanings\": \"above_surface_pressure below_surface_pressure\",\n \"flag_values\": np.array([0, 1], dtype=\"int8\"),\n }:\n raise ValueError(\n f\"'{coord_name}' coordinate is not of the expected form.\"\n )\n ncube = CubeList()\n for cc in cube.slices_over(\"realization\"):\n coord = cc.coord(coord_name)\n if np.ma.is_masked(coord.points):\n raise ValueError(\n f\"'{coord_name}' coordinate has unexpected mask values.\"\n )\n mask = np.asarray(coord.points)\n cc.data[mask.astype(bool)] = np.nan\n cc.remove_coord(coord_name)\n ncube.append(cc)\n cube = ncube.merge_cube()\n return cube",
"def build_all_skill_frame(self, binary=True, top_effect=10):\n self.count_words(top_only=False)\n array = scipy.sparse.lil_matrix((len(self.data_profile), len(self.feature)))\n effect = 1\n for i in tqdm(range(len(self.data_profile))):\n rang = np.zeros(len(self.feature))\n if 'skills' in self.data_profile[i].keys():\n for skills in self.data_profile[i]['skills']:\n for j in range(len(self.data_profile[i]['skills'])):\n if self.data_profile[i]['skills'][j]['title'] == 'Top Skills':\n effect = top_effect\n else:\n effect = 1\n for skill in self.data_profile[i]['skills'][j]['skills']:\n if skill['title'] in self.feature:\n if 'endoresementCount' in skill.keys():\n if '+' in skill['endoresementCount']:\n count = 100\n else:\n count = int(skill['endoresementCount'])\n index = self.feature.index(skill['title'])\n array[i,index] = count * self.coff[index] * effect\n self.df = pd.DataFrame(data=array.A, columns=self.feature)\n if binary:\n self.df = (self.df != 0).astype('int')\n return self.df",
"def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw",
"def getBaseMonetaria(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-factores-explicacion-base-monetaria\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp",
"def _preprocess(self):\n\n self.df = self.df[(self.df['days_b_screening_arrest'] <= 30)\n & (self.df['days_b_screening_arrest'] >= -30)\n & (self.df['is_recid'] != -1)\n & (self.df['c_charge_degree'] != 'O')\n & (self.df['score_text'] != 'N/A')]\n\n self.df['c_jail_out'] = pd.to_datetime(self.df['c_jail_out'])\n self.df['c_jail_in'] = pd.to_datetime(self.df['c_jail_in'])\n self.df['length_of_stay'] = (self.df['c_jail_out']\n - self.df['c_jail_in'])\n\n self.df['score_factor'] = np.where(self.df['score_text']\n != 'Low',\n 'HighScore', 'LowScore')\n self.df['y_pred'] = (self.df['score_factor'] == 'HighScore')",
"def prediction_features(prediction_data: pd.DataFrame):\n return pd.get_dummies(\n prediction_data.drop(columns=[\"outstanding_balance\", \"status\", \"account_no\"])\n )",
"def __init__(self):\n self.tables = pd.DataFrame({\n \"tables\": TABLES,\n \"year\": GREEN_YEARS + YELLOW_YEARS,\n \"color\": [\"green\" for i in GREEN_YEARS] +\n [\"yellow\" for i in YELLOW_YEARS]\n })"
]
| [
"0.4965136",
"0.47967532",
"0.4701401",
"0.4634161",
"0.45349148",
"0.45303738",
"0.45190105",
"0.44369784",
"0.44259185",
"0.43993515",
"0.4398487",
"0.4366172",
"0.43481007",
"0.43333754",
"0.4299631",
"0.42878827",
"0.42877072",
"0.42802107",
"0.42802107",
"0.42614394",
"0.42581704",
"0.42552498",
"0.42547774",
"0.4253712",
"0.423732",
"0.4224297",
"0.4220774",
"0.42176774",
"0.4217254",
"0.4212805"
]
| 0.6573321 | 0 |
Returns the dataframe with admit location compressed to only ER_ADMIT, REFERRAL, TRANSFER, and OTHER/UNKNOWN. | def compressing_admit_location(data):
data.admission_location = data.admission_location.apply(lambda x: 'ER_ADMIT'
if (x == 'EMERGENCY ROOM ADMIT ')
else x)
data.admission_location = data.admission_location.apply(lambda x: 'REFERRAL'
if (x == 'HMO REFERRAL/SICK') |
(x == 'PHYS REFERRAL/NORMAL DELI') |
(x == 'CLINIC REFERRAL/PREMATURE')
else x)
data.admission_location = data.admission_location.apply(lambda x: 'TRANSFER'
if (x == 'TRANSFER FROM HOSP/EXTRAM') |
(x == 'TRANSFER FROM SKILLED NUR') |
(x == 'TRANSFER FROM OTHER HEALT') |
(x == 'TRSF WITHIN THIS FACILITY')
else x)
data.admission_location = data.admission_location.apply(lambda x: 'OTHER/UNKNOWN'
if (x == '** INFO NOT AVAILABLE **')
else x)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cardiac_arrest_patients(con) -> pd.DataFrame:\n combined_diagnoses = get_reason_for_admission(con)\n cardiacarrest = combined_diagnoses[\n (\n (combined_diagnoses['surgical'] == 1)\n & (combined_diagnoses['diagnosis'].str.contains(re_cardiacarrest_surg, na=False, flags=re.IGNORECASE))\n ) | (\n (combined_diagnoses['surgical'] == 0)\n & (combined_diagnoses['diagnosis'].str.contains(re_cardiacarrest_med, na=False, flags=re.IGNORECASE))\n )\n ]\n\n return cardiacarrest",
"def admit_from_er(df):\n from_er = pd.read_csv(\n f\"{raw_data}\\\\er_adm.csv\",\n usecols=[\"MemberID\", \"AdmissionDate\", \"Facility\"],\n parse_dates=[\"AdmissionDate\"],\n )\n\n from_er = cognify_facility_changes(from_er, \"Facility\")\n\n from_er[\"merge\"] = (\n from_er[\"MemberID\"].astype(str)\n + from_er[\"AdmissionDate\"].astype(str)\n + from_er[\"Facility\"]\n )\n\n df[\"merge\"] = (\n df[\"member_id\"].astype(str) + df[\"admission_date\"].astype(str) + df[\"facility\"]\n )\n df[\"er\"] = np.where(df[\"merge\"].isin(from_er[\"merge\"].tolist()), 1, 0)\n\n df.drop([\"merge\"], axis=1, inplace=True)\n return df",
"def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df",
"def internal_external(self) -> pd.DataFrame:\n # load the report folder internal-external trips trip list\n trips = pd.read_csv(\n os.path.join(self.scenario_path,\n \"report\",\n \"internalExternalTrips.csv\"),\n usecols=[\"personID\", # person id\n \"tripID\", # unique trip id\n \"tripMode\", # trip mode\n \"weightTrip\", # trip weight\n \"weightPersonTrip\", # person trip weight\n \"timeTotal\", # total trip time\n \"distanceDrive\", # driven distance\n \"distanceDriveTransit\", # driven distance to transit\n \"distanceTotal\", # total trip distance\n \"costTotal\", # total trip cost\n \"inbound\", # direction of trip\n \"departTimeFiveTod\"]) # abm five time of day\n\n # change inbound from true/false to 1/0 to match other trip files\n trips[\"inbound\"] = trips[\"inbound\"].astype(int)\n\n # add Not Applicable trip purpose as model has no trip purposes\n # trips[\"tripPurposeDestination\"] = \"Not Applicable\"\n trips[\"purpose\"] = \"Not Applicable\"\n\n # create trip vmt\n trips[\"vmt\"] = (trips[\"distanceDrive\"].fillna(0) +\n trips[\"distanceDriveTransit\"].fillna(0))\n\n # telecommute status\n trips = trips.merge(self.person_data, left_on=\"personID\", right_on=\"person_id\")\n\n # return fields of interest\n return trips[[\"personID\",\n \"tripID\",\n \"tripMode\",\n \"weightTrip\",\n \"weightPersonTrip\",\n \"timeTotal\",\n \"distanceTotal\",\n \"costTotal\",\n \"departTimeFiveTod\",\n \"purpose\",\n \"telestatus\",\n \"vmt\"]]",
"def get_reason_for_admission(con) -> pd.DataFrame:\n\n # gets the SQL source file\n dirname = os.path.dirname(os.path.abspath(__file__))\n filename = './sql/diagnosis/reason_for_admission.sql'\n sql_filename = os.path.join(dirname, filename)\n\n with open(sql_filename, 'r') as file:\n sql = file.read()\n result = read_sql(sql, con)\n\n return result",
"def split_inpatient(df):\n skilled_mask = (\n df[\"admit_reason\"]\n .str.lower()\n .str.contains(\"skilled|rehab|pt|ot|skil|restorative\")\n ) & (\n (df[\"admission_type\"] == \"Nursing Home\")\n | (df[\"admission_type\"] == \"Rehab Unit / Facility\")\n )\n\n respite_mask = (\n df[\"admit_reason\"].str.lower().str.contains(\"respite|resp|behavior\")\n ) & (\n (df[\"admission_type\"] == \"Nursing Home\")\n | (df[\"admission_type\"] == \"Rehab Unit / Facility\")\n )\n\n custodial_mask = (\n df[\"admit_reason\"]\n .str.lower()\n .str.contains(\n \"custodial|cust|long term|eol|end of life|hosp|permanent|functional decline|cutodial|ltc|hospic\"\n )\n ) & (\n (df[\"admission_type\"] == \"Nursing Home\")\n | (df[\"admission_type\"] == \"End of Life\")\n | (df[\"admission_type\"] == \"Rehab Unit / Facility\")\n )\n\n df[\"admit_reason\"] = np.where(skilled_mask, \"skilled\", df[\"admit_reason\"])\n df[\"admit_reason\"] = np.where(respite_mask, \"respite\", df[\"admit_reason\"])\n df[\"admit_reason\"] = np.where(custodial_mask, \"custodial\", df[\"admit_reason\"])\n\n # break up by admit type\n acute_mask = df[\"admission_type\"] == \"Acute Hospital\"\n\n psych_mask = df[\"admission_type\"] == \"Psych Unit / Facility\"\n\n nf_mask = df[\"admission_type\"].isin(\n [\"Nursing Home\", \"Rehab Unit / Facility\", \"End of Life\"]\n )\n\n df[\n (-df[\"admit_reason\"].isin([\"skilled\", \"respite\", \"custodial\"])) & (nf_mask)\n ].to_csv(f\"{output_folder}\\\\nf_missing_reason.csv\", index=False)\n\n acute = df[acute_mask].copy()\n psych = df[psych_mask].copy()\n nf = df[nf_mask].copy()\n\n assert df.shape[0] == (acute.shape[0] + psych.shape[0] + nf.shape[0])\n return acute, psych, nf",
"def external_internal(self) -> pd.DataFrame:\n # load the report folder external-internal trips trip list\n trips = pd.read_csv(\n os.path.join(self.scenario_path,\n \"report\",\n \"externalInternalTrips.csv\"),\n usecols=[\"tripID\", # unique trip id\n \"tripMode\", # trip mode\n \"weightTrip\", # trip weight\n \"weightPersonTrip\", # person trip weight\n \"timeTotal\", # total trip time\n \"distanceTotal\", # total trip distance\n \"costTotal\", # total trip cost\n \"tripPurpose\"]) # trip purpose\n\n trips[\"purpose\"] = trips[\"tripPurpose\"]\n\n # return fields of interest\n return trips[[\"tripID\",\n \"tripMode\",\n \"weightTrip\",\n \"weightPersonTrip\",\n \"timeTotal\",\n \"distanceTotal\",\n \"costTotal\",\n \"purpose\"]]",
"def compressing_admission_type(data):\n\n data.admission_type = data.admission_type.apply(lambda x: 'EMERGENCY' if x\n == 'URGENT' else x)\n return data",
"def view_restrns_date(self):\n \n for count,restrn in enumerate(self.final_dataframe.keys()):\n \n if count in (2,3,4,5):\n \n \"\"\"\n 2 == FTNT, 3 == FARERULE, 4 == ALTRULE, 5 == GENRULE\n \"\"\"\n \n # View columns - pick only those are applicable to FTNT,FR, AGR and GR\n cols = list(self.final_dataframe['View'].loc[:,'RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']) + list(self.final_dataframe['View'].loc[:,'RESTRICTION_SRC':'UNAVAIL'])\n \n # Prepare New dataframe for each restriction in displayed in view\n self.view_dict[restrn] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_SRC'] == restrn,cols]\n \n # Reset index for newly created dataframe\n self.view_dict[restrn].reset_index(drop =True,inplace =True)\n \n # Drop duplicate from newly created dataframe if any\n self.view_dict[restrn].drop_duplicates(inplace = True )\n \n # capture load and expire trans/date for each restriction & keep in restriction date dataframe\n self.view_restrn_date[restrn] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_SRC'] == restrn,'RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']\n \n # Reset index for newly created dataframe\n self.view_restrn_date[restrn].reset_index(drop =True,inplace =True)\n \n #print(self.view_restrn_date[restrn])\n \n elif count == 6:\n \n \"\"\"\"\n Repeat above step for \"No Key Found\" if any\n \"\"\"\n self.view_dict['No Key Found'] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_KEY'] == 'No Key Found','RESTRICTION_SRC':'UNAVAIL']\n self.view_dict['No Key Found'].reset_index(drop =True,inplace =True)\n self.view_restrn_date['No Key Found'] = self.final_dataframe['View'].loc[self.final_dataframe['View']['RESTRICTION_KEY'] == 'No Key Found','RESTRICTION_LOAD_TRANS':'RESTRICTION_EXPIRE_TRANS']\n #self.view_restrn_date['No Key Found'].set_index(keys = ['RESTRICTION_KEY'],drop =True,inplace =True)",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def order_by_col_from_union(self):\n return exclusions.open()",
"def map_admissions(df): \r\n\r\n df['tmp'] = np.nan\r\n col = 'admission_source_id'\r\n df.loc[((df[col].between(4,6)) | (df[col] == 10) | (df[col] == 18) | (df[col] == 22) | (df[col].between(25,26))), 'tmp'] = \"Transfer_Source\"\r\n df.loc[df[col].between(1,3), 'tmp'] = \"Referral_Source\"\r\n df.loc[((df[col].between(11,14))| (df[col].between(23,24))), 'tmp'] = \"Birth_Source\"\r\n df.loc[df[col] == 7, 'tmp'] = \"Emergency_Source\"\r\n df.loc[((df[col] == 8) | (df[col]==19)), 'tmp'] = \"Other\"\r\n \r\n df['tmp'].fillna(value = \"Unknown\", inplace=True)\r\n df[col] = df['tmp']\r\n df.drop(columns=['tmp'], inplace=True)\r\n\r\n\r\n ##mapping admission type_id\r\n df['tmp'] = np.nan\r\n col = 'admission_type_id'\r\n df.loc[df[col] == 1, 'tmp'] = 'Emergency_Type'\r\n df.loc[df[col] == 2, 'tmp'] = 'Urgent_Type'\r\n df.loc[df[col] == 3, 'tmp'] = 'Elective_Type'\r\n df.loc[df[col] == 7, 'tmp'] = 'Trauma_Type'\r\n df.loc[df[col] == 4, 'tmp'] = 'Newborn_Type'\r\n\r\n df['tmp'].fillna(value = \"Unknown\", inplace=True)\r\n df[col] = df['tmp']\r\n df.drop(columns=['tmp'], inplace=True)\r\n\r\n return df",
"def get_antireqs(self, condition = \"\"):\n columns = [\"index\", \"course_code\", \"antireq\", \"extra_info\"]\n data = self.select(\"*\", self.antireqs_table, condition)\n df = pd.DataFrame(data, columns=columns)\n df.set_index(\"index\", inplace=True)\n return df",
"def external_external(self) -> pd.DataFrame:\n # load the report folder external-external trips trip list\n trips = pd.read_csv(\n os.path.join(self.scenario_path,\n \"report\",\n \"externalExternalTrips.csv\"),\n usecols=[\"tripID\", # unique trip id\n \"tripMode\", # trip mode\n \"weightTrip\", # trip weight\n \"weightPersonTrip\", # person trip weight\n \"timeTotal\", # total trip time\n \"distanceTotal\", # total trip distance\n \"costTotal\"]) # total trip cost\n\n # add Not Applicable trip purpose as model has no trip purposes\n trips[\"purpose\"] = \"Not Applicable\"\n\n # return fields of interest\n return trips[[\"tripID\",\n \"tripMode\",\n \"weightTrip\",\n \"weightPersonTrip\",\n \"timeTotal\",\n \"distanceTotal\",\n \"costTotal\",\n \"purpose\"]]",
"def _ea_in_disposition_col0_and_empty_ipr_d_col():\n for disposition_row in unused_list:\n # Check disposition\n ddi_index = views_index[disposition_row[15]]\n # Checks disposition column value and checks for IPR D value.\n # If no IPR D in extattrs dict stores the src data for updates.\n if disposition_row[0] in ea_ipr_d_values and 'IPR Designation' not\\\n in ddi_data[ddi_index][disposition_row[1]]['extattrs']:\n import_merge_disposition.append(\n [disposition_row[15],\n disposition_row[1],\n disposition_row[14],\n disposition_row[0]])",
"def get_special_mappings_df() -> pd.DataFrame:\n return pd.read_csv(SPECIAL_MAPPINGS_PATH, sep='\\t')",
"def _sort_dataframe(self, dataframe):\r\n print('Not allowed')",
"def get_infection_patients(con) -> pd.DataFrame:\n print('Querying reason for admission...')\n combined_diagnoses = get_reason_for_admission(con)\n print('Selecting patients with presumed infection...')\n infection = combined_diagnoses[\n (\n (\n # use reasons for admission\n # surgical admissions with sepsis\n (combined_diagnoses['surgical'] == 1)\n & (combined_diagnoses['diagnosis'].str.contains(re_sepsis_surg, na=False, flags=re.IGNORECASE))\n ) | (\n # medical admissions with sepsis\n (combined_diagnoses['surgical'] == 0)\n & (combined_diagnoses['diagnosis'].str.contains(re_sepsis_med, na=False, flags=re.IGNORECASE))\n ) | (\n # uses documentation at admission form (Early Goal Directed Therapy)\n (combined_diagnoses['sepsis_at_admission'] == 1)\n ) | (\n # uses administered (therapeutic) antibiotics for determining sepsis\n (combined_diagnoses['sepsis_antibiotics_bool'] == 1)\n ) | (\n # uses combination of administered antibiotics (that sometimes are used as prophylaxis) AND\n # drawn cultures for determining sepsis\n (combined_diagnoses['other_antibiotics_bool'] == 1)\n & (combined_diagnoses['sepsis_cultures_bool'] == 1)\n )\n ) & ~((combined_diagnoses['sepsis_at_admission'] == 0).fillna(False))\n # exclude all diagnoses where explicitly 'no sepsis' was documented, forces comparing `pd.NA`\n # to be considered False\n ]\n return infection",
"def airport_san(self) -> pd.DataFrame:\n # load the report folder airport trips SAN trip list\n trips = pd.read_csv(\n os.path.join(self.scenario_path,\n \"report\",\n \"airportSANTrips.csv\"),\n usecols=[\"tripID\", # unique trip id\n \"tripMode\", # trip mode\n \"weightTrip\", # trip weight\n \"weightPersonTrip\", # person trip weight\n \"timeTotal\", # total trip time\n \"distanceTotal\", # total trip distance\n \"costTotal\", # total trip cost\n \"tripPurpose\"]) # trip purpose\n\n trips[\"purpose\"] = trips[\"tripPurpose\"]\n\n # return fields of interest\n return trips[[\"tripID\",\n \"tripMode\",\n \"weightTrip\",\n \"weightPersonTrip\",\n \"timeTotal\",\n \"distanceTotal\",\n \"costTotal\",\n \"purpose\"]]",
"def get_human_naive_airr(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.human_naive_path)",
"def clean_up(df: pd.DataFrame) -> pd.DataFrame:\n\n # Hereafter df is sorted by date, which is helpful as it allows using .iloc[-1]\n # to get current (or most recent known) situation per location\n # (Otherwise we'd have to groupby agg -> min date, and then filter)\n df = df.sort_values(\n [Columns.LOCATION_NAME, Columns.DATE, Columns.CASE_TYPE], ascending=True\n )\n\n return df",
"def wrangle_religion():\n df = pd.read_excel(\"U_S_Religion_Census_2010 _clean_data.xlsx\")\n df.columns = df.columns.str.replace(\" \", \"_\")\n df.columns = df.columns.str.replace(\",\", \"\")\n return df",
"def _prepare(self):\n\n if os.path.isfile(\"DATA/diabetes/admission_type_id.csv\") == False:\n download_data()\n\n id_mapping = pd.read_csv(\"DATA/diabetes/admission_type_id.csv\", index_col = 0)\n data = pd.read_csv(\"DATA/diabetes/diabetic_data.csv\")\n\n # binarize admission type\n admdf = pd.DataFrame()\n for adtype, ad_id in zip(id_mapping.description, id_mapping.index):\n admdf[adtype] = (data.admission_type_id == ad_id)\n\n # binarize categorical text columns\n catdf = pd.DataFrame()\n dtype = data.race.dtype # grab datatype\n features = [\"race\", \"gender\", \"age\", \"diabetesMed\", \"insulin\", \"change\", \"readmitted\"]\n for feature in features:\n if data[feature].dtype == dtype:\n catdf = pd.concat([catdf, binarize(data[feature])], axis = 1)\n else:\n catdf = pd.concat([catdf, data[feature]], axis = 1)\n\n # choose non-binary columns\n nonbindf = data[[\"num_medications\", \"num_procedures\", \"num_lab_procedures\", \"number_outpatient\", \n \"number_emergency\", \"number_inpatient\", \"number_diagnoses\"]]\n\n self.data = pd.concat([catdf, admdf, nonbindf], axis = 1)",
"def clean_location(df):\n \n local = df['location'].astype(str)\n \n #geocoders read X St at Y St better than X & Y or X/Y\n local = local.str.replace(\"&\", \"at\")\n local = local.str.replace(\"/\", \"at\")\n \n #OpenAddress dataset has addresses in title case\n local = local.str.title()\n\n return df.assign(location=local.values)",
"def modify_bidmc_table(df):\n df[\"UID\"] = df[\"UID\"].map(str)\n df = df.rename(columns={\"DischargeDateIndex\": \"date\"})\n df = df.drop(columns=[\"AdmitDateIndex\"])\n df[\"diag_cd\"] = df[\"diag_cd\"].map(lambda x: x.strip().upper())\n return df",
"def _rearrange_columns(self, df):\n if self.all_columns is None:\n content_columns = [c for c in df.columns if not c.startswith(\"_\")]\n indicator_columns = [\"__in_{}\".format(t) for t in self.table_names\n ] if self.add_full_join_indicators else []\n fanout_columns = _get_fanout_columns(\n self.table_info) if self.add_full_join_fanouts else []\n self.all_columns = content_columns + indicator_columns + fanout_columns\n df = df[self.all_columns]\n if not self.disambiguate_column_names:\n df.columns = [\n c if c.startswith(\"_\") else c.split(\":\")[1] for c in df.columns\n ]\n return df",
"def get_tr_list(slot, br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"tc\"]\r\n br_data_df = br_data_df.reset_index(drop=True)\r\n to = str()\r\n if slot == 1:\r\n ca_location_1 = agent_df.loc[0, 'location_1']\r\n br_data_df['location_ca'] = str(ca_location_1) ### location 1!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_1 # location 1!!!!!\r\n elif slot == 2:\r\n ca_location_2 = agent_df.loc[0, 'location_2']\r\n br_data_df['location_ca'] = str(ca_location_2) ### location 2!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_2 # location 2!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n tr_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in tr_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = active_users_location_df.rename(columns={'from_to': 'segment'})\r\n #print(f'active_users_location_df: {active_users_location_df}')\r\n #print(f'segment_df: {segment_df}')\r\n results = active_users_location_df.merge(segment_df, on='segment')\r\n\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results",
"def read_life_expectancy() -> pd.DataFrame:\n\n life_df = pd.read_csv(\"data/API_SP.DYN.LE00.IN_DS2_en_csv_v2_988752.csv\",\n header=2, usecols=[0,62], names=[\"Country\", \"Life expectancy\"])\n\n index = life_df[life_df[\"Country\"]==\"Iran, Islamic Rep.\"].index.values[0]\n life_df.loc[index, \"Country\"] = \"Iran\"\n index = life_df[life_df[\"Country\"] == \"United States\"].index.values[0]\n life_df.loc[index, \"Country\"] = \"US\"\n index = life_df[life_df[\"Country\"] == \"Russian Federation\"].index.values[0]\n life_df.loc[index, \"Country\"] = \"Russia\"\n\n life_df = life_df.dropna()\n\n return life_df",
"def addMasking(self):\n self.abundance_df['masked'] = [False]*len(self.abundance_df.index)\n self.abundance_df['colour'] = ['undefined']*len(self.abundance_df.index)",
"def exploratoryAnalysis(df):\r\n # applying uniform names to the item fat content categories\r\n df['Item_Fat_Content'].replace(['LF', 'reg', 'low fat'], ['Low Fat', 'Regular', 'Low Fat'], inplace=True)\r\n\r\n # creating a new feature based on the store age rather than the establishment year\r\n df['Outlet_Age'] = df['Outlet_Establishment_Year'].apply(lambda x: 2021 - x)\r\n df.drop('Outlet_Establishment_Year', axis=1, inplace=True)\r\n\r\n # dropping non-numerical Identifier columns which do not provide any usage for regression analysis\r\n df.drop('Item_Identifier', axis=1, inplace=True)\r\n df.drop('Outlet_Identifier', axis=1, inplace=True)\r\n\r\n return df"
]
| [
"0.5126708",
"0.48463076",
"0.47908324",
"0.4612375",
"0.45886907",
"0.45694908",
"0.45675093",
"0.45248556",
"0.44767952",
"0.44722673",
"0.4440027",
"0.44355112",
"0.4434323",
"0.44185796",
"0.4376863",
"0.4371849",
"0.43587345",
"0.43450913",
"0.4340943",
"0.43282348",
"0.43215725",
"0.43043125",
"0.42988405",
"0.4287358",
"0.42622328",
"0.42590147",
"0.42530248",
"0.42469925",
"0.42449084",
"0.42322475"
]
| 0.64302427 | 0 |
Returns the dataframe with the 6000 unique ICD9 codes reduced into 17 diagnoses categories based on standard definitions. A new column is created to contain diagnoses. | def compress_icd9_codes(data):
data.icd9_code = data.icd9_code.apply(lambda x: '.1' if 'V' in x else x)
data.icd9_code = data.icd9_code.apply(lambda x: '.8' if 'M' in x else x)
data.icd9_code = data.icd9_code.apply(lambda x: '.5' if 'E' in x else x)
data.icd9_code = data.icd9_code.apply(lambda x: x[:3] if ('E' not in x) &
('M' not in x) &
('V' not in x)
else x)
data.icd9_code = data.icd9_code.astype(float)
data['diagnoses'] = data.apply(icd9_descriptions, axis=1)
data = data.drop(columns=['icd9_code'])
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dummify_diagnoses(df,unique_diag,diagnosis_col='Diagnosis_1'):\n header=unique_diag.tolist().append('patient_link')\n dummy_diag=pd.DataFrame(columns=header)\n\n for row in range(df.shape[0]):\n pat_diag=lower_errors(df.iloc[row][diagnosis_col]).split(' , ')\n # print(pat_diag)\n dict_dummy_diag=dict(zip(unique_diag,np.zeros(len(unique_diag))))\n # dict_dummy_diag['patient_link']=df.iloc[row]['patient_link']\n #pd.DataFrame(np.zeros(len(unique_diag)).reshape(-1),columns=unique_diag)\n for diag in pat_diag:\n if diag in unique_diag:\n dict_dummy_diag[diag]=1\n else:\n continue\n tmp_dummy_diag=pd.DataFrame(dict_dummy_diag, index=[row])\n tmp_dummy_diag['patient_link']=df.iloc[row]['patient_link']\n dummy_diag = pd.concat([dummy_diag,tmp_dummy_diag], axis=0)\n\n return dummy_diag",
"def parsediag(dfin,col_icd,col_id,icd_version):\n df = dfin.copy()\n \n if icd_version==9:\n toccs = icd9toccs\n elif icd_version==10:\n toccs = icd10toccs\n else:\n print('ERROR: Please set icd_version to 9 or 10')\n return None\n \n df = toccs(df,col_icd=col_icd)\n \n if icd_version==9:\n tocci = icd9tocci\n elif icd_version==10:\n tocci = icd10tocci\n \n df = tocci(df,col_icd=col_icd)\n \n df = icdtoelixcomo(df,col_icd=col_icd)\n \n df = df.loc[:,list(dfin.columns)+['CCS CATEGORY','CCS CATEGORY DESCRIPTION','CHRONIC','ElixComo','ElixComoScore']]\n \n \n return df",
"def map_diagnosis(df):\r\n\r\n diagnosis_cols = ['diag_1', 'diag_2', 'diag_3']\r\n\r\n for col in diagnosis_cols:\r\n df['tmp'] = np.nan\r\n df.loc[(df[col].str.contains(\"250\")), col] = '250'\r\n df.loc[(df[col].str.startswith('V')) | (df[col].str.startswith('E')), col] = '-999' \r\n\r\n df[col] = df[col].astype(float)\r\n \r\n #convert the correct ranges based on values given in paper\r\n df.loc[(((df[col] >=390) & (df[col]<=460)) | (df[col] == 785)), 'tmp'] = 'Circulatory'\r\n df.loc[(((df[col] >=460) & (df[col]<=519)) | (df[col] == 786)), 'tmp'] = 'Respiratory'\r\n df.loc[(((df[col] >=520) & (df[col]<=579)) | (df[col] == 787)), 'tmp'] = 'Digestive'\r\n df.loc[(((df[col] >=580) & (df[col]<=629)) | (df[col] == 788)), 'tmp'] = 'Genitourinary'\r\n df.loc[((df[col] >=800) & (df[col]<=999)), 'tmp'] = 'Injury'\r\n df.loc[((df[col] >=710) & (df[col]<=739)), 'tmp'] = 'Musculoskeletal'\r\n df.loc[((df[col] >=140) & (df[col]<=239)), 'tmp'] = 'Neoplasms'\r\n df.loc[(df[col] == 250), 'tmp'] = 'Diabetes'\r\n \r\n df['tmp'].fillna(value = \"Other\", inplace=True)\r\n \r\n df[col] = df['tmp']\r\n df.drop(columns=['tmp'], inplace=True)\r\n\r\n return df",
"def convert_physician_diagnoses_code(diagnoses_code):\n if diagnoses_code in ICD_9_DEFAULT_CODES_FOR_DIAGNOSIS:\n diagnoses_icd_9_code = \\\n ICD_9_DEFAULT_CODES_FOR_DIAGNOSIS.get(diagnoses_code)\n if diagnoses_icd_9_code in \\\n (\"Blank\", \"Blank diagnosis\", \"Diagnosis of 'none'\",\n \"Noncodable diagnosis\", \"Noncodable\", \"Illegible diagnosis\"):\n return \"\"\n return diagnoses_icd_9_code\n\n # 1975-76 - Instead of a \"Y\" to prefix codes in the supplementary\n # classification, an ampersand (&) was used\n # 1977 - 78 - Same as above, except that the prefix character is a dash(-)\n # For year 1973 till 1978 `diagnoses_code` is 4 length character\n if len(diagnoses_code) < 5 and (\n diagnoses_code.startswith(\"&\") or diagnoses_code.startswith(\"-\")\n or diagnoses_code.startswith(\"Y\")\n ):\n diagnoses_code = \"V{}\".format(diagnoses_code[1:])\n\n # Character format\n # For inapplicable fourth or fifth digits, a dash is inserted.\n # 0010[-] - V829[-] = 001.0[0]-V82.9[0]\n elif \"-\" in diagnoses_code[3:]:\n diagnoses_code = diagnoses_code.replace(\"-\", \"0\")\n # Reference from documentation:\n # -9 = Blank\n elif \"-00009\" in diagnoses_code:\n return \"\"\n\n # The prefix “1” preceding the 3-digit diagnostic codes represents\n # diagnoses 001-999, e.g. ‘1381’=’381’=otitis media. And “138100”=”381.00”\n if diagnoses_code.startswith(\"1\"):\n diagnoses_code = diagnoses_code.lstrip(\"1\")\n\n # The prefix “2” preceding the 3 - digit diagnostic codes represents \"V\"\n # code diagnoses VO1 - V82, e.g., ‘2010’=’V10’ and “201081” = “V10.81”\n elif diagnoses_code.startswith(\"2\"):\n if diagnoses_code.startswith(\"20\"):\n diagnoses_code = \"V{}\".format(diagnoses_code[2:])\n else:\n diagnoses_code = \"V{}\".format(diagnoses_code[1:])\n\n # There is an implied decimal between the third and fourth digits\n diagnoses_icd_9_code = \"{}.{}\".format(\n diagnoses_code[:3], diagnoses_code[3:]\n )\n\n return diagnoses_icd_9_code",
"def modify_phc_tabe(df):\n df[\"UID\"] = df[\"UID\"].map(str)\n df = df.rename(columns={\"DiagnosisDateIndex\": \"date\"})\n df[\"diag_cd\"] = df[\"Code\"].map(\n lambda x: str(x).strip().replace(\".\", \"\").upper())\n return df",
"def icd9toccs(df,col_icd='icd9'):\n ccs9 = load_ccs9()\n output = df.merge(ccs9,how='left',left_on=col_icd,right_on='ICD-9-CM CODE')\n if col_icd!='ICD-9-CM CODE':\n output.drop('ICD-9-CM CODE',axis=1,inplace=True)\n return output",
"def find_unique_diag(df_diag_column):\n all_diag=df_diag_column.apply(lambda x: lower_errors(x)).unique()\n all_diag[7].split(' , ')\n unique_diag=[]\n for diag in all_diag:\n if len(diag)==0:\n continue\n else:\n unique_diag.append(diag.split(' , '))\n flat_list = [item for sublist in unique_diag for item in sublist]\n unique_diag=pd.Series(flat_list).unique()\n return unique_diag",
"def naics_expansion(facility_NAICS):\n\n # load naics 2 to naics 6 crosswalk\n cw_load = load_sector_length_crosswalk_w_nonnaics()\n cw = cw_load[['NAICS_4', 'NAICS_5', 'NAICS_6']]\n\n # subset the naics 4 and 5 columns\n cw4 = cw_load[['NAICS_4', 'NAICS_5']]\n cw4 = cw4.drop_duplicates(subset=['NAICS_4'], keep=False).reset_index(drop=True)\n naics4 = cw4['NAICS_4'].values.tolist()\n\n # subset the naics 5 and 6 columns\n cw5 = cw_load[['NAICS_5', 'NAICS_6']]\n cw5 = cw5.drop_duplicates(subset=['NAICS_5'], keep=False).reset_index(drop=True)\n naics5 = cw5['NAICS_5'].values.tolist()\n\n # for loop in reverse order longest length naics minus 1 to 2\n # appends missing naics levels to df\n for i in range(4, 6):\n if i == 4:\n sector_list = naics4\n sector_merge = \"NAICS_4\"\n sector_add = \"NAICS_5\"\n elif i == 5:\n sector_list = naics5\n sector_merge = \"NAICS_5\"\n sector_add = \"NAICS_6\"\n\n # subset df to NAICS with length = i \n df_subset = facility_NAICS.loc[facility_NAICS[\"NAICS\"].apply(lambda x: len(x) == i)]\n\n # subset the df to the rows where the tmp sector columns are in naics list\n df_subset = df_subset.loc[(df_subset['NAICS'].isin(sector_list))]\n\n # merge the naics cw\n new_naics = pd.merge(df_subset, cw[[sector_merge, sector_add]],\n how='left', left_on=['NAICS'], right_on=[sector_merge])\n # drop columns and rename new sector columns\n new_naics['NAICS'] = new_naics[sector_add]\n new_naics = new_naics.drop(columns=[sector_merge, sector_add])\n\n # drop records with NAICS that have now been expanded\n facility_NAICS = facility_NAICS[~facility_NAICS['NAICS'].isin(sector_list)]\n\n # append new naics to df\n facility_NAICS = pd.concat([facility_NAICS, new_naics], sort=True)\n\n return facility_NAICS",
"def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full",
"def icd9tocci(df,col_icd='icd9'):\n cci9 = load_cci9()\n return df.merge(cci9,how='left',left_on=col_icd,right_on='ICD-9-CM CODE')",
"def icd9_descriptions(row):\n\n if 1 <= row['icd9_code'] <= 139:\n val = 'Parasitic_Disease'\n elif 140 <= row['icd9_code'] <= 239:\n val = 'Neoplasm'\n elif 240 <= row['icd9_code'] <= 279:\n val = 'Endocrine'\n elif 280 <= row['icd9_code'] <= 289:\n val = \"Blood\"\n elif 290 <= row['icd9_code'] <= 319:\n val = \"Mental_Disorder\"\n elif 320 <= row['icd9_code'] <= 389:\n val = \"Nervous_System\"\n elif 390 <= row['icd9_code'] <= 459:\n val = \"Circulatory_System\"\n elif 460 <= row['icd9_code'] <= 519:\n val = \"Respiratory_System\"\n elif 520 <= row['icd9_code'] <= 579:\n val = \"Digestive_System\"\n elif 580 <= row['icd9_code'] <= 629:\n val = \"Genitourinary_System\"\n elif 630 <= row['icd9_code'] <= 679:\n val = \"Pregnancy\"\n elif 680 <= row['icd9_code'] <= 709:\n val = \"Skin\"\n elif 710 <= row['icd9_code'] <= 739:\n val = \"Musculoskeletal\"\n elif 740 <= row['icd9_code'] <= 759:\n val = \"Congenital_Anomalies\"\n elif 760 <= row['icd9_code'] <= 779:\n val = \"Perinatal\"\n elif 780 <= row['icd9_code'] <= 799:\n val = \"Ill-Defined\"\n elif 800 <= row['icd9_code'] <= 999:\n val = \"Injury/Poison\"\n elif row['icd9_code'] < .4:\n val = \"Supplemental_factors\"\n elif .4 <= row['icd9_code'] < .7:\n val = \"External_Cause_Inj_Poison\"\n elif .7 <= row['icd9_code'] < .9:\n val = \"Morphology_of_Neoplasms\"\n else:\n val = row['icd9_code']\n\n return val",
"def modify_bidmc_table(df):\n df[\"UID\"] = df[\"UID\"].map(str)\n df = df.rename(columns={\"DischargeDateIndex\": \"date\"})\n df = df.drop(columns=[\"AdmitDateIndex\"])\n df[\"diag_cd\"] = df[\"diag_cd\"].map(lambda x: x.strip().upper())\n return df",
"def icd10toccs(df,col_icd='icd10'):\n ccs10 = load_ccs10()\n output = df.merge(ccs10,how='left',left_on=col_icd,right_on='ICD-10-CM CODE')\n if col_icd!='ICD-10-CM CODE':\n output.drop('ICD-10-CM CODE',axis=1,inplace=True)\n return output",
"def get_cardiothoracic_surgery_patients(con) -> pd.DataFrame:\n combined_diagnoses = get_reason_for_admission(con)\n return combined_diagnoses[\n (combined_diagnoses['surgical'] == 1)\n & (combined_diagnoses['diagnosis'].str.contains(re_cardiosurg, na=False, flags=re.IGNORECASE))]",
"def add_climatology_cols(df):\n return df",
"def _build_datasets_sis3305(self):\n bc_arr = np.where(self._active_brdch[\"SIS 3305\"])\n\n for board, channel in zip(bc_arr[0], bc_arr[1]):\n brd = board + 1\n ch = channel + 1\n slot = self.get_slot(brd, \"SIS 3305\")\n if 1 <= ch <= 4:\n fpga_str = \"FPGA 1\"\n else:\n fpga_str = \"FPGA 2\"\n ch = ch - 4\n\n for cname in self._active_config:\n # create main dataset\n dset_name = f\"{cname} [Slot {slot}: SIS 3305 {fpga_str} ch {ch}]\"\n shape = (self._sn_size, self._nt)\n data = np.empty(shape=shape, dtype=np.int16)\n self.create_dataset(dset_name, data=data)\n\n # create header dataset\n hdset_name = f\"{dset_name} headers\"\n shape = (self._sn_size,)\n dtype = np.dtype(\n [\n (\"Shot number\", np.int32),\n (\"Scale\", np.float32),\n (\"Offset\", np.float32),\n (\"Min\", np.uint16),\n (\"Max\", np.uint16),\n (\"Clipped\", np.int8),\n ]\n )\n dheader = np.empty(shape=shape, dtype=dtype)\n dheader[\"Shot number\"] = np.arange(\n 1, shape[0] + 1, 1, dtype=dheader[\"Shot number\"].dtype\n )\n dheader[\"Scale\"] = 0.0019550342\n dheader[\"Offset\"] = -1.0\n dheader[\"Min\"] = data.min(axis=1)\n dheader[\"Max\"] = data.max(axis=1)\n dheader[\"Clipped\"] = 0\n self.create_dataset(hdset_name, data=dheader)",
"def load_cci10():\n \n cci10 = pd.read_csv(pkg_resources.resource_filename(__name__,'cci_icd10cm_2019_1.csv'))\n \n cci10.columns = [i.strip('\\'') for i in cci10.columns]\n \n for col in cci10.columns:\n cci10.loc[:,col] = cci10[col].str.strip('\\'')\n cci10 = cci10.replace(r'^\\s*$', np.nan, regex=True)\n cci10.columns = [i.replace('CHRONIC INDICATOR','CHRONIC') for i in cci10.columns]\n \n dict_bodysystem = [\n ('1','Infectious and parasitic disease'),\n ('2','Neoplasms'),\n ('3','Endocrine, nutritional, and metabolic diseases and immunity disorders'),\n ('4','Diseases of blood and blood-forming organs'),\n ('5','Mental disorders'),\n ('6','Diseases of the nervous system and sense organs'),\n ('7','Diseases of the circulatory system'),\n ('8','Diseases of the respiratory system'),\n ('9','Diseases of the digestive system'),\n ('10','Diseases of the genitourinary system'),\n ('11','Complications of pregnancy, childbirth, and the puerperium'),\n ('12','Diseases of the skin and subcutaneous tissue'),\n ('13','Diseases of the musculoskeletal system'),\n ('14','Congenital anomalies'),\n ('15','Certain conditions originating in the perinatal period'),\n ('16','Symptoms, signs, and ill-defined conditions'),\n ('17','Injury and poisoning'),\n ('18','Factors influencing health status and contact with health services'),\n ]\n \n cci10 = cci10.merge(pd.DataFrame(dict_bodysystem,columns=['BODY SYSTEM','BODY SYSTEM DESCRIPTION']),how='left',on='BODY SYSTEM')\n \n cci10.loc[:,'ICD-10-CM CODE'] = cci10['ICD-10-CM CODE'].str.replace(' ','')\n \n return cci10",
"def create_crime_cat(df):\n df['CrimeType'] = ''\n for ct in crime_type_cols:\n c_int = df[ct].astype('int32')\n sub = c_int[c_int == 1]\n df.CrimeType.iloc[sub.index] = ct",
"def categories(df):\n print 'going to code categories'\n print_time()\n\n ethnicities = ['Mexican', 'Italian', 'American']\n df['num_categories'] = 0\n for ethnicity in ethnicities:\n df.loc[df['categories'].str.contains(ethnicity, flags=re.I, na=False, case=False), 'category'] = ethnicity\n df.loc[df['categories'].str.contains(ethnicity, flags=re.I, na=False, case=False), 'num_categories'] += 1\n df.loc[df['category'].isnull(), 'category'] = 'Other'\n df.loc[df['num_categories'] > 1, 'category'] = 'Multiple'\n df = df[df['category'].notnull()]\n return df",
"def diag(diagnoal):\n raise NotImplementedError",
"def load_cci9():\n cci9 = pd.read_csv(pkg_resources.resource_filename(__name__,'cci2015.csv'),skiprows=1)\n cci9.columns = [i.strip('\\'') for i in cci9.columns]\n \n for col in cci9.columns:\n cci9.loc[:,col] = cci9[col].str.strip('\\'')\n cci9 = cci9.replace(r'^\\s*$', np.nan, regex=True)\n cci9.columns=[i.replace('CATEGORY DESCRIPTION','CHRONIC') for i in cci9.columns]\n\n dict_bodysystem=[\n ('1' ,'Infectious and parasitic disease'),\n ('2' ,'Neoplasms'),\n ('3' ,'Endocrine, nutritional, and metabolic diseases and immunity disorders'),\n ('4' ,'Diseases of blood and blood-forming organs'),\n ('5' ,'Mental disorders'),\n ('6' ,'Diseases of the nervous system and sense organs'),\n ('7' ,'Diseases of the circulatory system'),\n ('8' ,'Diseases of the respiratory system'),\n ('9' ,'Diseases of the digestive system'),\n ('10','Diseases of the genitourinary system'),\n ('11','Complications of pregnancy, childbirth, and the puerperium'),\n ('12','Diseases of the skin and subcutaneous tissue'),\n ('13','Diseases of the musculoskeletal system'),\n ('14','Congenital anomalies'),\n ('15','Certain conditions originating in the perinatal period'),\n ('16','Symptoms, signs, and ill-defined conditions'),\n ('17','Injury and poisoning'),\n ('18','Factors influencing health status and contact with health service'),\n ]\n \n cci9 = cci9.merge(pd.DataFrame(dict_bodysystem,columns=['BODY SYSTEM','BODY SYSTEM DESCRIPTION']),how='left',on='BODY SYSTEM')\n \n cci9.loc[:,'ICD-9-CM CODE'] = cci9['ICD-9-CM CODE'].str.replace(' ','')\n \n return cci9",
"def _tranform_idjj(df, age1720=False, exit=False):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n df.columns = ['age', 'year', 'fk_simplecount_county'] + df.columns.tolist()[3:]\n\n if not age1720:\n indicator_list = [701, 702, 703, 710, 711, 720, 721, 722, 730, 731, 732, 733, 734, 740, 741]\n c_age = df['age'].isin(range(13, 16+1))\n else:\n indicator_list = [704, 705, 706, 712, 713, 723, 724, 725, 735, 736, 737, 738, 739, 742, 743]\n c_age = df['age'].isin(range(17, 20+1))\n\n if exit:\n indicator_list = [i + 50 for i in indicator_list]\n \n c_new = df['admtypo'].isin(['CE', 'CER', 'DR', 'IC', 'MVN', 'PVN', 'RAM'])\n c_ce = df['admtypo'] == 'CE'\n c_tv = df['admtypo'].isin(['TMV', 'TPV'])\n c_male = df['sex'] == 'M'\n c_female = ~c_male\n c_whi = df['race'] == 'WHI'\n c_blk = df['race'] == 'BLK'\n c_hsp = df['race'] == 'HSP'\n c_pers = df['offtype9'] == 1\n c_prop = df['offtype9'] == 2\n c_drug = df['offtype9'] == 3\n c_weap = df['offtype9'] == 4\n c_sex = df['offtype9'] == 5\n c_felo = df['hclass'].isin(['M','X',1,2,3,4])\n c_misd = ~c_felo\n\n c_first3 = [c_new, c_ce, c_tv]\n c_others = [c_male, c_female, c_whi, c_blk, c_hsp, c_pers, c_prop, c_drug, c_weap, c_sex, c_felo, c_misd]\n \n def helper(c, indicator_id, first3):\n df['fk_simplecount_indicator'] = indicator_id\n g = ['fk_simplecount_indicator', 'year', 'fk_simplecount_county']\n if first3:\n return df[c_age & c].groupby(g).size().reset_index(name='value')\n else:\n return df[c_age & c_new & c].groupby(g).size().reset_index(name='value')\n\n out = pd.DataFrame()\n for i in range(3):\n out = out.append(helper(c_first3[i], indicator_list[i], first3=True))\n \n for i in range(len(c_others)):\n out = out.append(helper(c_others[i], indicator_list[i+3], first3=False))\n \n out = out[out['fk_simplecount_county'].isin(range(1,102+1))]\n return out[_SIMPLECOUNT_COLUMNS]\n except:\n raise",
"def icdconvert(df,col_icd='icd',icd_version=9):\n if icd_version==9:\n source='icd9'\n target='icd10'\n df_gem=load_icd9to10()\n elif icd_version==10:\n source='icd10'\n target='icd9'\n df_gem=load_icd10to9()\n else:\n print('invalid icd version {}, please set curicd to 9 or 10'.format(icd_version))\n return None\n \n print('Comment: because of the discrepancy between icd9 and icd10, diagnosis codes may be mapped to many codes or no codes')\n \n output=df.merge(df_gem,how='left',left_on=col_icd,right_on=source)\n print('{:.2f}% mapped'.format(output[target].notnull().mean()*100))\n if 'flag' in output.columns:\n output.drop('flag',axis=1,inplace=True)\n if col_icd!=source in output.columns:\n output.drop(source,axis=1,inplace=True)\n \n return output",
"def build_dataframe() -> pd.DataFrame:\n df = pd.DataFrame(\n np.random.randint(0, 1000, size=(1000, 6)), columns=list(\"ABCDEF\")\n )\n\n return df",
"def ndc2rxcui(df_med,col_ndc='ndc'):\n print('Converting NDC to RXCUI')\n output=[]\n ndclist=df_med[col_ndc].unique()\n lenndc = len(ndclist)\n for i in range(0,len(ndclist)):\n print('{}/{}, {:.2f}% complete'.format((i+1),lenndc,(i+1)/lenndc*100), end='\\r', flush=True)\n curndc=ndclist[i]\n r=requests.get('https://rxnav.nlm.nih.gov/REST/ndcstatus.json?ndc='+str(curndc)).json()['ndcStatus']\n if 'ndcHistory' in r:\n for entry in r['ndcHistory']:\n output.append({\n 'ndc':curndc,\n 'rxcui':entry['activeRxcui'],\n 'start':pd.to_datetime(entry['startDate']+'01'),\n 'end':pd.to_datetime(entry['endDate']+'01'),\n })\n else:\n print('NDC code [{}] was not able to be mapped to rxcui'.format(curndc))\n time.sleep(1/20)\n output=pd.DataFrame(output).replace({r'^\\s*$':None}, regex=True).dropna()\n return output",
"def build_dispersion_diags(self):\n N = self.N\n j = self._j # Index of the mid-point\n diags = np.zeros((2*self._j+1, self.N))\n\n dx3 = np.power(self.dx, 3.)\n\n ## This tells us how the diagonal matrix construction works\n #diags[j-2,:-4] = np.arange(1,N+1)[4:]\n #diags[j-1,:-2] = np.arange(1,N+1)[2:]\n #diags[j+1,:] = np.arange(1,N+1)\n #diags[j+2,:] = np.arange(1,N+1)\n\n #diags[j,0:2] = 11\n #diags[j+1,1:3] = 12\n #diags[j+2,2:4] = 13\n #diags[j+3,3:5]= 14\n\n\n # Original method had assymmetric diagonals\n #cff = -self.beta/(2*dx3)\n #diags[j-2,:] += -1*cff\n #diags[j-1,:] += 2*cff\n #diags[j+1,:] += -2*cff\n #diags[j+2,:] += 1*cff\n\n # Need to stagger these diagonals so lower and upper bands are symmetric\n cff = -1/(2*dx3)\n beta = self.beta\n diags[j-2,:-4] += -1*cff*beta[4:]\n diags[j-1,:-2] += 2*cff*beta[2:]\n diags[j+1,:] += -2*cff*beta\n diags[j+2,:] += 1*cff*beta\n\n ## Left boundary - use forward differencing\n diags[j-1,0] = 0\n diags[j,0:2] = -2*cff*beta[0]\n diags[j+1,1:3] = 6*cff*beta[0]\n diags[j+2,2:4] = -6*cff*beta[0]\n diags[j+3,3:5] = 2*cff*beta[0]\n\n # Zero first two points\n #diags[j-1,0] = 0\n #diags[j,0:2] = 0 \n #diags[j+1,1:3] = 0 \n #diags[j+2,2:4] = 0 \n #if self._j>2:\n # diags[j+3,3:5] = 0 \n\n return diags",
"def disease_descriptors(civic_did8):\n return [civic_did8]",
"def _build_datasets_sis3302(self):\n bc_arr = np.where(self._active_brdch[\"SIS 3302\"])\n\n for board, channel in zip(bc_arr[0], bc_arr[1]):\n brd = board + 1\n ch = channel + 1\n slot = self.get_slot(brd, \"SIS 3302\")\n\n for cname in self._active_config:\n # create main dataset\n dset_name = f\"{cname} [Slot {slot}: SIS 3302 ch {ch}]\"\n shape = (self._sn_size, self._nt)\n data = np.empty(shape=shape, dtype=np.int16)\n self.create_dataset(dset_name, data=data)\n\n # create header dataset\n hdset_name = f\"{dset_name} headers\"\n shape = (self._sn_size,)\n dtype = np.dtype(\n [\n (\"Shot number\", np.int32),\n (\"Scale\", np.float32),\n (\"Offset\", np.float32),\n (\"Min\", np.uint16),\n (\"Max\", np.uint16),\n (\"Clipped\", np.int8),\n ]\n )\n dheader = np.empty(shape=shape, dtype=dtype)\n dheader[\"Shot number\"] = np.arange(\n 1, shape[0] + 1, 1, dtype=dheader[\"Shot number\"].dtype\n )\n dheader[\"Scale\"] = 7.7241166e-5\n dheader[\"Offset\"] = -2.531\n dheader[\"Min\"] = data.min(axis=1)\n dheader[\"Max\"] = data.max(axis=1)\n dheader[\"Clipped\"] = 0\n self.create_dataset(hdset_name, data=dheader)",
"def main():\n diagonals_in_hd()",
"def clinical_concept_mapper(self) -> pd.DataFrame:\n\n level_maps = []\n\n if self.ancestor_codes is not None:\n levels = {'concept': {'codes': self.concept_codes, 'strings': self.concept_strings},\n 'ancestor': {'codes': self.ancestor_codes,\n 'strings': self.ancestor_strings}}\n else:\n levels = {'concept': {'codes': self.concept_codes, 'strings': self.concept_strings}}\n\n for level in levels.keys():\n print('\\n*** Annotating Level: {}'.format(level))\n primary_key, data = self.primary_key, self.clinical_data.copy()\n code_level, code_strings = levels[level]['codes'][0], levels[level]['strings'] # type: ignore\n if level == 'ancestor' or any(x for x in data[code_level] if '|' in x):\n data = column_splitter(data, primary_key, [code_level], '|')[[primary_key] + [code_level]]\n data[code_level] = normalizes_source_codes(data[code_level].to_frame(), self.source_code_map)\n else:\n data[code_level] = normalizes_source_codes(data[code_level].to_frame(), self.source_code_map)\n\n # STEP 1: UMLS CUI + SEMANTIC TYPE ANNOTATION\n print('Performing UMLS CUI + Semantic Type Annotation')\n if self.umls_cui_data is not None and self.umls_tui_data is not None:\n umls_map = self.umls_cui_annotator(data.copy(), primary_key, code_level)\n sub = [code_level, 'UMLS_CODE', 'UMLS_CUI']\n data_stacked = data_frame_subsetter(umls_map[[primary_key] + sub], primary_key, sub)\n else:\n print('Did not provide MRCONSO and MRSTY Files -- Skipping UMLS Annotation Step')\n umls_map, clinical_subset = None, data[[primary_key, code_level]]\n data_stacked = data_frame_subsetter(clinical_subset, primary_key, [code_level])\n\n # STEP 2 - DBXREF ANNOTATION\n print('Performing DbXRef Annotation')\n stacked_dbxref = self.dbxref_mapper(data_stacked.copy(), primary_key, level)\n # files = 'resources/mappings/' + level + '_dbXRef_Mappings.csv'\n # stacked_dbxref.to_csv(files, sep=',', index=False, header=True)\n\n # STEP 3 - EXACT STRING MAPPING\n print('Performing Exact String Mapping')\n clinical_strings = self.clinical_data.copy()[[primary_key] + code_strings] # type: ignore\n split_strings = column_splitter(clinical_strings, primary_key, code_strings, '|') # type: ignore\n split_strings = split_strings[[primary_key] + code_strings] # type: ignore\n split_strings_stacked = data_frame_subsetter(split_strings, primary_key, code_strings) # type: ignore\n stacked_strings = self.exact_string_mapper(split_strings_stacked, primary_key, level)\n # files_str = 'resources/mappings/' + level + '_String_Mappings.csv'\n # stacked_strings.to_csv(files_str, sep=',', index=False, header=True)\n\n # STEP 4 - COMBINE RESULTS\n print('Aggregating Mapping Results')\n # dbXRef annotations\n if len(stacked_dbxref) != 0:\n ont_type_column = [col for col in stacked_dbxref.columns if 'TYPE' in col][0]\n dbxrefs = data_frame_grouper(stacked_dbxref.copy(), primary_key, ont_type_column,\n aggregates_column_values)\n else:\n dbxrefs = None\n\n # exact string annotations\n if len(stacked_strings) != 0:\n ont_type_column = [col for col in stacked_strings.columns if 'TYPE' in col][0]\n strings = data_frame_grouper(stacked_strings.copy(), primary_key, ont_type_column,\n aggregates_column_values)\n else:\n strings = None\n\n # umls annotations\n if umls_map is not None:\n umls, agg_cols = umls_map[[primary_key, 'UMLS_CUI', 'UMLS_SEM_TYPE']], ['UMLS_CUI', 'UMLS_SEM_TYPE']\n umls = aggregates_column_values(umls.copy(), primary_key, agg_cols, ' | ')\n umls.columns = [primary_key] + [level.upper() + '_' + x for x in umls.columns if x != primary_key]\n else:\n 
umls = None\n\n # combine annotations\n dfs = [x for x in [dbxrefs, strings, umls] if x is not None]\n if len(dfs) > 1:\n level_maps.append(reduce(lambda x, y: pd.merge(x, y, how='outer', on=primary_key), dfs))\n else:\n level_maps.append(dfs[0])\n\n # STEP 5 - COMBINE CONCEPT AND ANCESTOR DATA\n print('Combining Concept and Ancestor Maps')\n full_map = reduce(lambda x, y: pd.merge(x, y, how='outer', on=self.primary_key), level_maps)\n complete_map = pd.merge(self.clinical_data, full_map, how='left', on=self.primary_key)\n complete_map.columns = [x.upper() for x in complete_map.columns]\n complete_map.fillna('', inplace=True)\n\n return complete_map"
]
| [
"0.6540436",
"0.64982927",
"0.63286495",
"0.5763419",
"0.5753276",
"0.5722193",
"0.5579839",
"0.54484886",
"0.5376999",
"0.53520346",
"0.53453666",
"0.52551186",
"0.5247551",
"0.52435637",
"0.5228034",
"0.52043056",
"0.52031446",
"0.52030957",
"0.52011234",
"0.5195381",
"0.51915485",
"0.51558334",
"0.5139482",
"0.50938815",
"0.50865936",
"0.5027843",
"0.49897116",
"0.49880576",
"0.49801397",
"0.49682823"
]
| 0.67526495 | 0 |
This will take the parsed file and write out the python classes for it and any of its included files. | def createClassFile( p ):
create_modules( p["package"] )
name = p["protocol"]["name"]
    name = name.lower()
path = os.path.join( *p["package"].split( "." ) )
with open( "./%s/%s.py" % ( path, name ), "w" ) as f:
for i in p["imports"]:
createClassFile( i )
c = Klass( package=p["package"], includes=p["imports"], **p["protocol"] )
f.write( c.generate() ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def analyze(self):\n for f in self.files:\n tokenizer = Tokenizer(f)\n self.write_tokens(tokenizer)\n compilation_engine = CompilationEngine(tokenizer, f)\n compilation_engine.compile()\n self.write_syntax_tree(compilation_engine)\n compilation_engine.VMwriter.create_file(f[:-5])",
"def generate_wrapper(self):\n\n # If there is an input file, parse it\n if self.package_info_path is not None:\n info_parser = PackageInfoParser(self.package_info_path,\n self.source_root)\n info_parser.parse()\n self.package_info = info_parser.package_info\n else:\n pass\n\n # Generate a header collection\n self.collect_source_hpp_files()\n\n # Attempt to assign source paths to each class, assuming the containing \n # file name is the class name\n for eachModule in self.package_info.module_info:\n for eachClass in eachModule.class_info:\n for eachPath in self.package_info.source_hpp_files:\n base = ntpath.basename(eachPath)\n if eachClass.name == base.split('.')[0]:\n eachClass.source_file_full_path = eachPath\n if eachClass.source_file is None:\n eachClass.source_file = base\n\n # Attempt to automatically generate template args for each class\n for eachModule in self.package_info.module_info:\n info_generator = CppInfoHelper(eachModule)\n for eachClass in eachModule.class_info:\n info_generator.expand_templates(eachClass, \"class\")\n\n # Generate the header collection\n header_collection_path = self.generate_header_collection()\n\n # Parse the header collection\n self.parse_header_collection(header_collection_path)\n\n # Update the Class and Free Function Info from the parsed code\n self.update_class_info()\n self.update_free_function_info()\n self.update_enum_info()\n\n # Write the modules\n for eachModule in self.package_info.module_info:\n module_writer = CppModuleWrapperWriter(self.global_ns,\n self.source_ns,\n eachModule,\n self.get_wrapper_template(),\n self.wrapper_root)\n module_writer.write()",
"def visit(self, file_object):\n\n module = ast.parse(file_object.read())\n # TODO remove prefixes such as C:\\Users\\...\n module_name = file_object.name.rstrip(\".py\").replace(\"/\", \".\")\n\n exec(\"import %s\"%module_name)\n self._module = eval(module_name)\n\n self.found = {\"vars\":[], \"classes\":[], \"funcs\":[]}\n self.found_classes = set()\n\n self._explorer(self, [module_name]).visit(module)",
"def process_dart(self):\n self.dartfiles = set()\n self.jsfiles = set()\n self.htmlfiles = set()\n self.cssfiles = set()\n self.otherfiles = set()\n for src in self.source:\n if isinstance(src,str):\n node = self.path.find_node(src)\n else:\n node = src\n if node.suffix() == '.dart':\n self.dartfiles.add(node)\n elif node.suffix() == '.js':\n self.jsfiles.add(node)\n elif node.suffix() == '.html':\n self.htmlfiles.add(node)\n elif node.suffix() == '.css':\n self.cssfiles.add(node)\n else:\n self.otherfiles.add(node)\n self.source = []\n self.outdir = self.path.make_node(self.target + '/').get_bld()\n self.outdir.mkdir()",
"def crunch(self):\n while True:\n lst = self.want_line(r'\\s*\\.file\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.globl\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.ident\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.section\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.type\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.size\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(bss)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(data)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(text)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n break\n if osarch_is_amd64():\n self.crunch_amd64(lst)\n elif osarch_is_ia32():\n self.crunch_ia32(lst)\n self.__tag = None",
"def compile_class(self):\n\t\t\n\t\txml = '<class>\\n' + self.tokenizer.keyword() + self.tokenizer.identifier() + self.tokenizer.symbol()\n\n\t\tself.outfile.write(xml)",
"def processFile (filename, ostream):\n\n istream = open(filename)\n header = CHeader(istream)\n istream.close()\n\n processClassDocs(ostream, header.classDocs)\n processClasses(ostream, header.classes)\n processFunctions(ostream, header.functions)\n\n ostream.flush()",
"def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")",
"def _generate_objects_file(self):\n xmls = glob(f'{ROOT}/Annotations/**/*.xml', recursive=True)",
"def genCode(self, fileName, allowedTypes, genGraph = 1, isRootNode = 0, \r\n metaModelName = None, export = 0, newTypes = None, \r\n nodesToGenList = [], openModelStringList=[], attrGenFix=False):\r\n file = open(fileName, \"w+t\" )\r\n\r\n dir, fil = os.path.split(fileName)\r\n funcName = string.split (fil, \".\")\t\t\t\t\t# compose class name\r\n\r\n if export == 0:\r\n file.write('\"\"\"\\n')\r\n file.write(\"__\"+ fil +\"_____________________________________________________\\n\")\r\n file.write(\"\\n\") \r\n file.write(\"Automatically generated AToM3 Model File (Do not modify directly)\\n\")\r\n file.write(\"Author: \"+USER_NAME+\"\\n\")\r\n file.write(\"Modified: \"+time.asctime()+\"\\n\") \r\n file.write(\"__\"+ len(fil)*\"_\" +\"_____________________________________________________\\n\")\r\n file.write('\"\"\"\\n')\r\n #file.write('from graph_ASG_ERmetaMetaModel import *\\n')\t\t# just for the case!\r\n file.write('from stickylink import *\\n')\t\t\t\t# necessary if we describe some graphLinks...\r\n file.write('from widthXfillXdecoration import *\\n')\t\t\t# necessary if we describe some graphLinks...\r\n\r\n # import the subclass ...\r\n if( self.getClass() not in self.nodeTypes ):\r\n file.write('from '+self.getClass()+' import *\\n')\r\n \r\n # import all the node types...\r\n for nodetype in self.nodeTypes:\r\n if( self.listNodes[nodetype] != [] ): \r\n file.write('from '+nodetype+' import *\\n') \r\n \r\n # Import all the graphical appearences of the node types... that\r\n # are actually used! \r\n # Added by Denis Dube, last modified on Sept. 9, 2004\r\n if( genGraph ): \r\n # STEP 1: Find all graphObjects used in the model\r\n graph_objectDict = dict()\r\n for nodetype in self.listNodes.keys():\r\n for node in self.listNodes[nodetype]:\r\n if( node.graphClass_ ):\r\n graph_objectDict[ node.graphObject_.getGraphClassName() ]=1\r\n # STEP 2: Create the import statements for each graphObject\r\n for graphObject in graph_objectDict.keys():\r\n file.write('from '+graphObject+' import *\\n')\r\n # NOTE: I think the next two statements are caution overkill...\r\n #file.write('try: from '+graphObject+' import *\\n')\r\n #file.write('except: print \"WARNING: unable to load the graphical appearence file: '+graphObject+'.py\" \\n')\r\n \r\n # import the basic types...\r\n for typ in allowedTypes.keys():\r\n typeInstance, params = allowedTypes[typ]\r\n typeName = typeInstance.__name__\r\n file.write('from '+typeName+' import *\\n')\r\n \r\n # Generate the ASG constructor\r\n if( attrGenFix ):\r\n self.__genASGconstructor( file, funcName ) \r\n else:\r\n # Old way\r\n file.write('\\ndef '+funcName[0]+'(self, rootNode):\\n')\r\n \r\n # Generate code for the ASGroot attributes\r\n if( isRootNode ): \r\n # Should attrGenFix be always true? 
More testing required\r\n #todo: attrGenFix == True always?\r\n if( attrGenFix ): self.__genAttributesROOT( file )\r\n else: self.genAttributesCode(file, genGraph, \"rootNode\")\r\n\r\n self.writeGraph2File(file, genGraph, isRootNode, None, \" \", 1, funcName[0], nodesToGenList=nodesToGenList)\r\n\r\n # generate code for the sub-models\r\n counter = 0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\r\n for node in self.listNodes[nodetype]: \r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n else: \r\n for node in nodesToGenList:\r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n \r\n\r\n if isRootNode:\r\n hierarchical = self.isHierarchical()\r\n if export == 0:\r\n if hierarchical:\r\n file.write('def main'+funcName[0]+'(self, ASGroot):\\n')\r\n # file.write(' self.ASGroot = '+self.getClass()+'(self)\\n')\r\n file.write(' self.'+funcName[0]+'(self, ASGroot)\\n\\n')\r\n file.write(' self.'+funcName[0]+'_connections(self, ASGroot)\\n\\n')\r\n file.write('newfunction = main'+funcName[0]+'\\n\\n')\r\n else:\r\n file.write('newfunction = '+funcName[0]+'\\n\\n')\r\n if newTypes and len(newTypes)>0: # generate a list of newly added types\r\n file.write('loadedTypes = [')\r\n counter = 0\r\n for nt in newTypes:\r\n if counter > 0: file.write(',')\r\n file.write(str(nt))\r\n counter = counter + 1\r\n file.write(']\\n')\r\n \r\n self.genLoadedMMName( file )\r\n if( attrGenFix ): file.write( '\\natom3version = \\'0.3\\'\\n' )\r\n file.close()\r\n return funcName[0] \t\t\t\t# this indicates that we've done something\r",
"def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)",
"def list_classes(filename, output_file):\n file_pointer = open(filename)\n file_split = filename.replace(\"/\",\".\")\n file_split = file_split.split(\".\")\n\n class_re = re.compile(\"^class ([A-Za-z]+[^\\(:]*)\")\n method_re = re.compile(\"^ def ([a-z][a-z_]*)\")\n # remove_self_re = re.compile(r\"self(, )?\")\n first = True\n\n for line in file_pointer:\n\n class_names = class_re.findall(line)\n if len(class_names) > 0:\n if first:\n first = False\n output_file.write(\"Classes\\n\")\n output_file.write(\"^^^^^^^\\n\")\n output_file.write(\"- \")\n module = file_split[4]\n class_name = class_names[0]\n output_file.write(f\":class:`~arcade.{module}.{class_name}`\")\n output_file.write(\"\\n\")\n\n method_names = method_re.findall(line)\n for method_name in method_names:\n # method_name = name[2]\n output_file.write(f\" - :func:`~arcade.{module}.{class_name}.{method_name}`\\n\")\n # name = remove_self_re.sub(\"\", name)\n\n if not first:\n output_file.write(\"\\n\")",
"def write_models(in_directory: Union[str, Path],\n out_directory: Union[str, Path],\n *,\n max_lines: int = None\n ) -> None:\n # Ensure directories are of type 'Path'\n in_directory = Path(in_directory)\n out_directory = Path(out_directory)\n\n module_class = []\n\n # Write models file\n for csvfile in in_directory.glob('*.*sv'):\n info = _get_info_from_filename(csvfile.name)\n model_name = info['name']\n dialect = get_dialect_from_suffix(info['format'])\n print(f\"Reading from {csvfile}\")\n module_name = snake_case(model_name)\n class_name = snake_to_capwords(module_name)\n module_class.append((module_name, class_name))\n write_model(out_directory / f'{module_name}.py',\n class_name,\n read_xsv_file(csvfile, encoding='utf-8', dialect=dialect, load_at_most=max_lines))\n print(f\"Writing to {(out_directory / f'{snake_case(model_name)}.py')}\\n\")\n\n # Check for required files\n has_base = False\n for pyfile in out_directory.glob('*.py'):\n if pyfile.name == 'base.py':\n has_base = True\n\n # Write required files\n if not has_base:\n print(f'base.py not detected in {out_directory}, writing one')\n write_base((out_directory / 'base.py'))\n\n print(f'__init__.py generated.')\n lines = ['# import modules to run it through declarative base'] + \\\n [f'from .{module_name} import {class_name}' for module_name, class_name in module_class] + \\\n ['']\n lines += [f\"models = [{', '.join(class_name for _, class_name in module_class)}]\",\n '']\n open_and_write_file((out_directory / '__init__.py'), '\\n'.join(lines))",
"def process(txt, out):\n r = re.compile(\"<py (.*) >\")\n txt = txt.split('\\n')\n for line in txt:\n res = r.match(line)\n if res: out.write(eval(res.group(1)))\n else: out.write(line + '\\n')",
"def process_class_list(self, module, classes):",
"def _write_tables(cls):\n path = inspect.getfile(cls)\n parent = os.path.split(path)[0]\n # Need to change directories to get the file written at the right\n # location.\n cwd = os.getcwd()\n os.chdir(parent)\n tabname = cls._table_name('lex', relative=True)\n lex.lex(object=cls, lextab=tabname, optimize=True, debug=False)\n tabname = cls._table_name('tab', relative=True)\n yacc.yacc(module=cls, tabmodule=tabname, optimize=True, debug=False)\n os.chdir(cwd)",
"def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()",
"def parseClasses(file_name):\n\tlines = file(file_name).read().strip().split('\\n')\n\tlines = [x.strip() for x in lines if len(x.strip()) > 0]\n\tclasses = []\n\tfor l in lines:\n\t\tclasses = classes + [clean(x) for x in l.split(',')]\n\treturn classes",
"def post_move_class_propagation(token_stream, parse_tree, args):\n has_import = False\n has_exact_import = False\n\n file_to_check = open(file=args.file, mode='r')\n for line in file_to_check.readlines():\n text_line = line.replace('\\n', '').replace('\\r', '').strip()\n if (text_line.startswith('import') and text_line.endswith(source_package + '.' + class_identifier + ';')) \\\n or (text_line.startswith('import') and text_line.endswith(source_package + '.*;')):\n has_import = True\n break\n if (text_line.startswith('import') and text_line.endswith(target_package + '.' + class_identifier + ';')) \\\n or (text_line.startswith('import') and text_line.endswith(target_package + '.*;')):\n has_exact_import = True\n break\n\n if not has_exact_import:\n print(f\"Start checking file \\\"{file_to_check.name}\\\" *** {file_counter}/100\")\n\n replace_dependent_object_listener = ReplaceDependentObjectsListener(\n common_token_stream=token_stream, source_package=source_package, target_package=target_package,\n class_identifier=class_identifier, filename=args.file, has_import=has_import\n )\n walker = ParseTreeWalker()\n walker.walk(t=parse_tree, listener=replace_dependent_object_listener)\n\n with open(args.file, mode='w', newline='') as f:\n f.write(replace_dependent_object_listener.token_stream_rewriter.getDefaultText().replace(\"\\r\", \"\"))\n\n print(f\"Finish checking file \\\"{file_to_check.name}\\\" *** {file_counter}/100\")",
"def update_class_info(self):\n\n for eachModule in self.package_info.module_info:\n if eachModule.use_all_classes:\n classes = self.source_ns.classes(allow_empty=True)\n for eachClass in classes:\n if eachModule.is_decl_in_source_path(eachClass):\n class_info = CppClassInfo(eachClass.name)\n class_info.module_info = eachModule\n class_info.decl = eachClass\n eachModule.class_info.append(class_info)\n else:\n for eachClass in eachModule.class_info:\n classes = self.source_ns.classes(eachClass.name,\n allow_empty=True)\n if len(classes) == 1:\n eachClass.decl = classes[0]",
"def generate(self):\n try:\n self._parse_groups()\n self._parse_types()\n self._parse_enums()\n self._parse_features()\n self._parse_extensions()\n self._add_extra_enums()\n self._parse_and_build_commands()\n self._build_all_enums()\n self._build_enum_groups()\n self._generate_files()\n except Exception as exception:\n print('Generate failed: {}'.format(str(exception)))\n raise",
"def Build(self, out_file):\n raise NotImplementedError",
"def updateIncludeFiles(self):\n for filename, filetype in self._get_include_files():\n lines = open(filename).readlines()\n found_version_line = False\n\n if self.Verbose:\n print 'Reading %s' % filename\n \n if filetype is 'PyRex':\n lines, write_out = self._update_pyrex_file(lines, filename)\n elif filetype is 'Header':\n lines, write_out = self._update_header_file(lines, filename)\n else:\n raise TypeError, \"Unknown include file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)",
"def GetClassesFromFile(self,file_path):\n classes = []\n try:\n fl = open(file_path,\"r\")\n for line in fl.readlines():\n if \"class\" in line and \":\" in line:\n line = line.strip(\"class \")\n line2 = \"\"\n for i in line:\n if i!=\":\": line2+=i\n\n classes.append(line2)\n if classes:\n return classes\n else:\n return False\n fl.close()\n except:\n return False",
"def process_input_file(filename):\n\n # Parse the input file\n try:\n ast = parser.parse(open(filename, 'r').read())\n except pyparsing.ParseBaseException as e:\n print \"Parse error in %s: %s\" % (os.path.basename(filename), str(e))\n sys.exit(1)\n\n ofinput = of_g.OFInput()\n\n # Now for each structure, generate lists for each member\n for s in ast:\n if s[0] == 'struct':\n name = s[1].replace(\"ofp_\", \"of_\", 1)\n members = [dict(m_type=x[0], name=x[1]) for x in s[2]]\n ofinput.classes[name] = members\n ofinput.ordered_classes.append(name)\n if name in type_maps.inheritance_map:\n # Clone class into header class and add to list\n ofinput.classes[name + \"_header\"] = members[:]\n ofinput.ordered_classes.append(name + \"_header\")\n elif s[0] == 'metadata':\n if s[1] == 'version':\n log(\"Found version: wire version \" + s[2])\n if s[2] == 'any':\n ofinput.wire_versions.update(of_g.wire_ver_map.keys())\n elif int(s[2]) in of_g.supported_wire_protos:\n ofinput.wire_versions.add(int(s[2]))\n else:\n debug(\"Unrecognized wire protocol version\")\n sys.exit(1)\n found_wire_version = True\n\n if not ofinput.wire_versions:\n debug(\"Missing #version metadata\")\n sys.exit(1)\n\n return ofinput",
"def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()",
"def create_modules( package ):\n\n #we need to make the package directory.\n #we need to make the folder that this\n #parsed file will live in.\n # currentPath + package\n paths = package.split( \".\" )\n package = os.path.join( \"./\", os.path.join( *paths ) )\n os.makedirs( package )\n\n #Create the __init__.py files\n temp = \"./\"\n for p in paths:\n temp = os.path.join( temp, p )\n open( \"%s/__init__.py\" % temp, \"a\" ).close()",
"def process_unittest(filename):\n print(\"Generating {}\".format(filename))\n nb = 0\n nb_err = 0\n _main_root = os.path.dirname(filename)\n _VFS = {}\n for _mydir in (\"Lib\",):\n for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):\n if 'unittest' not in _root:\n if 'test' not in _root:\n continue\n\n if '__pycache__' in _root:\n continue\n\n for _file in _files:\n _ext = os.path.splitext(_file)[1]\n if _ext not in ('.py'):\n continue\n nb += 1\n\n file_name = os.path.join(_root, _file)\n encoding = \"utf-8\"\n try:\n src = open(file_name, encoding=encoding).read()\n except:\n encoding = \"iso-8859-1\"\n src = open(file_name, encoding=encoding).read()\n\n if _ext.lower() == '.py':\n try:\n _data = python_minifier.minify(src)\n except Exception as error:\n print(error)\n nb_err += 1\n\n _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')\n _vfs_filename = _vfs_filename.replace(\"\\\\\", \"/\")\n\n mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')\n mod_name, ext = os.path.splitext(mod_name)\n is_package = mod_name.endswith('__init__')\n if is_package:\n mod_name = mod_name[:-9]\n _VFS[mod_name] = [_data, 1]\n else:\n _VFS[mod_name] = [_data]\n print((\"Adding %s %s\" % (mod_name, _vfs_filename)))\n print('%s files, %s errors' % (nb, nb_err))\n\n with open(filename, \"w\") as file_to_write_VFS:\n file_to_write_VFS.write('__BRYTHON__.libs = __BRYTHON__.libs || {};\\n')\n file_to_write_VFS.write(\"__BRYTHON__.=libs['unittest']=%s;\\n\\n\" % json.dumps(_VFS))\n\n file_to_write_VFS.write(\"\"\"\n __BRYTHON__.import_from_unittest function(mod_name){\n var stored = __BRYTHON__.libs['unittest'][mod_name]\n if(stored!==undefined){\n var module_contents = stored[0]\n var $is_package = stored[1]\n var path = 'py_unittest'\n var module = {name:mod_name,__class__:$B.$ModuleDict,$is_package:$is_package}\n if($is_package){var package=mod_name}\n else{\n var elts = mod_name.split('.')\n elts.pop()\n var package = elts.join('.')\n }\n $B.modules[mod_name].$package = $is_package\n $B.modules[mod_name].__package__ = package\n\n run_py(module,path,module_contents)\n return true\n }\n return null\n}\n// add this import function to brython by doing the following:\n// <body onload=\"brython({custom_import_funcs:[__BRYTHON__.import_from_unittest]})\">\n// this will allow us to import unittest modules.\n\"\"\")",
"def makeLibrary(self):\n #------------------------------------------ Instance for the output file\n outputFile = open(\"%s/%s\" % (self.sceneryPath,self.libTxtFileName),\"w\")\n #------------------------------------------------------ write the header\n for line in self.header:\n outputFile.write(\"%s\\n\" % (line))\n #------------------------------------------------- Loop over all folders\n packageContent = os.walk(self.sceneryPath)\n for folder in packageContent:\n for fileName in folder[2]:\n fileType = fileName.split(\".\")[-1]\n if fileType in self.objectTypes:\n realPath = folder[0][len(self.sceneryPath)+1:].replace(\"\\\\\",\"/\")\n filePath = \"%s/%s\" % (realPath,fileName)\n print filePath\n outputFile.write(\"EXPORT %s%s %s%s\\n\" % (self.libPrefix,filePath,self.realPathPrefix,filePath))\n outputFile.close()",
"def main():\n parse_file(sys.argv[1])"
]
| [
"0.60291487",
"0.5969035",
"0.5952592",
"0.59292203",
"0.5850286",
"0.5848573",
"0.57895297",
"0.57641435",
"0.5670383",
"0.563218",
"0.56036806",
"0.5602101",
"0.5512467",
"0.5472885",
"0.54719174",
"0.54622746",
"0.54340696",
"0.54271436",
"0.54252",
"0.5416181",
"0.5411878",
"0.5403243",
"0.54029244",
"0.5397627",
"0.5395578",
"0.53834856",
"0.53605187",
"0.5358485",
"0.53341615",
"0.530367"
]
| 0.6426552 | 0 |
Regularize alpha' and beta' to get alpha and beta. | def _get_alpha_beta(self):
alpha = tf.nn.softplus(self.alpha_prime)
beta = -alpha + tf.nn.softplus(self.beta_prime)
return alpha, beta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_alpha_beta(self, a, b):\n beta = a / b\n alpha = a * beta\n return alpha, beta",
"def getBeta(self, alpha):\n return 2.0*(2.0-alpha) + -4.0*np.sqrt(1.0-alpha)",
"def B(alpha, beta):\n return math.gamma(apha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def beta(self) -> bool:\n return \"b\" in self.modifier if self.modifier else \"beta\" in self.string",
"def betaln(alpha, beta):\n ln_beta = gammaln(alpha) + gammaln(beta) - gammaln(alpha + beta)\n return ln_beta",
"def RestrictionAlphaOrBeta(self, alphaCompId, betaCompId):\n self.problem.logger.debug(\"RestrictionAlphaOrBeta: alphaCompId={}, betaCompId={}\".format(alphaCompId, betaCompId))\n if self.solverTypeOptimize:\n\n bvars1 = [self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]\n #self.solver.add(PbEq([(x, 1) for x in bvars1], 0))\n\n bvars2 = [self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]\n #self.solver.add(PbGe([(x, 1) for x in bvars2], 1))\n\n\n self.solver.add(Or(PbEq([(x, 1) for x in bvars1], 0),\n PbGe([(x, 1) for x in bvars2], 1))\n )\n else:\n self.solver.assert_and_track(\n Or(sum([If(self.a[betaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) == 0,\n sum([If(self.a[betaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) >= 1),\n \"LabelAlphaOrBeta: \" + str(self.labelIdx))\n self.labelIdx += 1\n if self.solverTypeOptimize:\n\n bvars1 = [self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]\n # self.solver.add(PbEq([(x, 1) for x in bvars1], 0))\n\n bvars2 = [self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]\n # self.solver.add(PbGe([(x, 1) for x in bvars2], 1))\n\n self.solver.add(Or(PbEq([(x, 1) for x in bvars1], 0),\n PbGe([(x, 1) for x in bvars2], 1))\n )\n else:\n self.solver.assert_and_track(\n Or(sum([If(self.a[alphaCompId * self.nrVM + j], 1 , 0) for j in range(self.nrVM)]) == 0,\n sum([If(self.a[alphaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) >= 1), \"LabelAlphaOrBeta: \" + str(self.labelIdx))\n self.labelIdx += 1\n\n if self.solverTypeOptimize:\n\n bvars1 = [self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]\n bvars2 = [self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]\n bvars = bvars1 + bvars2\n\n self.solver.add(PbGe([(x, 1) for x in bvars], 1))\n else:\n self.solver.assert_and_track(sum([If(self.a[betaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) +\n sum([If(self.a[alphaCompId * self.nrVM + j], 1, 0) for j in range(self.nrVM)]) >= 1, \"LabelAlphaOrBeta: \" + str(self.labelIdx))\n self.labelIdx += 1",
"def alpha_beta(returns, factor_returns):\n\n ret_index = returns.index\n beta, alpha = sp.sp.stats.linregress(factor_returns.loc[ret_index].values,\n returns.values)[:2]\n\n return alpha * APPROX_BDAYS_PER_YEAR, beta",
"def choose_alpha(self, val_X, val_Y, possible_alpha):\n best_acc = -1.\n best_alpha = 0.\n for alpha in possible_alpha:\n cur_acc = self.validate(val_X, val_Y, alpha)\n if self.verbose:\n print(f\"When alpha = {alpha}, accuracy is {cur_acc}\")\n if cur_acc > best_acc:\n best_acc = cur_acc\n best_alpha = alpha\n return best_alpha",
"def B(alpha: float, beta: float) -> float:\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def validate_batch_comparison(beta, alpha):\n return beta >= 1 / alpha",
"def beta_r(r, beta):\n return beta",
"def beta(alpha, aw, ap):\n if alpha == 0:\n return np.zeros_like(aw)\n elif alpha == 1:\n return np.ones_like(aw)\n else:\n return 1-(1 / (ap - aw) * (-aw + np.sqrt((1-alpha)*ap**2 + alpha*aw**2)))",
"def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df",
"def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]",
"def boost(beta):\n if abs(beta) >= 1:\n print('***ERROR in SpecialRelativity.boost, beta is {:.3f} .'.format(beta) )\n return( np.array( [ [1.0, 0], [0, 1.0] ] ) )\n gamma = 1.0/np.sqrt( 1.0-beta*beta )\n ch = gamma\n sh = gamma*beta\n return( np.array( [ [ch, -sh], [-sh, ch] ] ) )",
"def get_ralpha(self, alpha, xalpha):\n A = self.A\n b = self.b\n ralpha = dot(A, xalpha - b)\n return ralpha",
"def getGamma(self, alpha, beta):\n return np.power(beta,2.0)/2.0/alpha",
"def monoclinic(cls, a, b, c, beta):\r\n return cls.from_parameters(a, b, c, 90, beta, 90)",
"def _se_beta(self):\n return _handle_ab(self._se_all, self.use_const)[1]",
"def litBetaAlpha(inc,wave,m,d):\n psi = blazeYaw(inc,wave,m,d)\n beta1 = cos(inc)*cos(psi)\n alpha1 = cos(inc)*sin(psi)-m*wave/d\n return beta1,alpha1",
"def __evalAlphas(self):\n #breit wheeler\n self.__alphaObjBW = alpha(self.getMomenta('bw'),self.__config)\n self.__alphaBW = [self.__alphaObjBW(index) for index in [1,2,3]]\n #compton\n self.__alphaObjC = alpha(self.getMomenta('c'),self.__config)\n self.__alphaC = [self.__alphaObjC(index) for index in [1,2,3]]\n #breit wheeler exchange\n self.__alphaObjBWx = alpha(self.getMomenta('bwx'),self.__config)\n self.__alphaBWx = [self.__alphaObjBWx(index) for index in [1,2,3]]\n #compton exchange\n self.__alphaObjCx = alpha(self.getMomenta('cx'),self.__config)\n self.__alphaCx = [self.__alphaObjCx(index) for index in [1,2,3]]\n self.__allAlphas = [self.__alphaBW,self.__alphaC,self.__alphaBWx,self.__alphaCx]",
"def alpha(self, tokens):\n if not tokens:\n tokens = tuple()\n\n A_set = self.A(tokens)\n result = 1\n # heuristic, way more efficient\n if len(A_set):\n result = self.beta * len(A_set) / self.count(tuple(tokens))\n return result",
"def test_additive_alpha_beta(self):\n np.random.seed(912838)\n\n param_pairs = [(1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]\n tmax = 4*self.dt\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n W0 = np.copy(self.syns.W)\n changes = []\n\n for params in param_pairs:\n self.rule.alpha = params[0]\n self.rule.beta = params[1]\n\n self.syns.W = np.copy(W0)\n sim.run(tmax)\n\n changes.append(self.syns.W - W0)\n\n self.assertTrue(np.allclose(changes[-1], changes[0] + changes[1]))",
"def test_additive_alpha_beta(self):\n np.random.seed(912838)\n\n param_pairs = [(1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]\n tmax = 4*self.dt\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n W0 = np.copy(self.syns.W)\n changes = []\n\n for params in param_pairs:\n self.rule.alpha = params[0]\n self.rule.beta = params[1]\n\n self.syns.W = np.copy(W0)\n sim.run(tmax)\n\n changes.append(self.syns.W - W0)\n\n self.assertTrue(np.allclose(changes[-1], changes[0] + changes[1]))",
"def test_beta_bounds(self):\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[0], 13.44239853)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[1], 21.66666666)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[2], 29.89093480)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[3], 5.0)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[0], 13.44239853)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[1], 21.66666666)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[2], 29.89093480)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[3], 5.0)",
"def alph_bet(self, x, y, min_ar, max_ar):\n alpha=[]\n beta=[]\n for i in range(x.shape[-1]):\n alpha.append(1 / (max_ar[i] - min_ar[i]))\n beta.append(min_ar[i])\n for i in range(y.shape[-1]):\n alpha.append(1 / (max_ar[i+x.shape[-1]] - min_ar[i+x.shape[-1]]))\n beta.append(min_ar[i+x.shape[-1]])\n self.alpha.data=tr.tensor(alpha)\n self.beta.data=tr.tensor(beta)",
"def get_alphas(portfolio_returns,risk_free,market_returns,betas):\r\n \r\n R = portfolio_returns\r\n Rf = risk_free\r\n Beta = betas\r\n Rm = market_returns\r\n alpha = R - Rf - (Beta*(Rm-Rf))\r\n \r\n return alpha"
]
| [
"0.6881302",
"0.57053274",
"0.56613654",
"0.565913",
"0.565913",
"0.565913",
"0.5611329",
"0.5522475",
"0.54939735",
"0.5400471",
"0.53844523",
"0.5373884",
"0.5366616",
"0.5361639",
"0.53530854",
"0.53477436",
"0.53169733",
"0.5291241",
"0.52699775",
"0.5250733",
"0.5244091",
"0.52252007",
"0.5217217",
"0.521713",
"0.5164862",
"0.5093193",
"0.5093193",
"0.5076991",
"0.5030103",
"0.50274616"
]
| 0.62813294 | 1 |
Computes the log det Jacobian, as per the paper. | def _inverse_log_det_jacobian(self, x):
alpha, beta = self._get_alpha_beta()
diff = x - self.x0
r = tf.linalg.norm(diff, axis=-1, keepdims=True)
h = 1. / (alpha + r)
h_prime = -(h ** 2)
beta_h = beta * h
log_det_jacobian = tf.reduce_sum(
(self.dim - 1) * tf.math.log1p(beta_h)
+ tf.math.log1p(beta_h + beta * h_prime * r), axis=-1)
return log_det_jacobian | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)",
"def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_",
"def pddet(A):\r\n L = jitchol(A)\r\n logdetA = 2*sum(np.log(np.diag(L)))\r\n return logdetA",
"def log_abs_det_jacobian(self, z):\n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(self.w, dim=1) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(self.a)\n logj = torch.log(torch.sum(torch.exp(logj)))#,2).sum(2)\n logdet = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return sum_dims(logdet)",
"def log_abs_det_jacobian(self, x, y):\n if self.training:\n var = torch.var(y, dim=0, keepdim=True)\n else:\n # NOTE: You wouldn't typically run this function in eval mode, but included for gradient tests\n var = self.moving_variance\n return (-self.constrained_gamma.log() + 0.5 * torch.log(var + self.epsilon))",
"def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n logdet = self.bn_arn(x)[1]\n return logdet.sum(-1)\n else:\n logdet = intermediates\n return logdet.sum(-1)",
"def log_jacobian_tensor(self, x):\n pass",
"def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n log_scale = self.arn(x)[1]\n log_scale = _clamp_preserve_gradients(\n log_scale, self.log_scale_min_clip, self.log_scale_max_clip\n )\n return log_scale.sum(-1)\n else:\n log_scale = intermediates\n return log_scale.sum(-1)",
"def log_det_Jxz(self):\n return self.log_det_xz",
"def log_det_Jxz(self):\n return self.log_det_xz",
"def log_det_Jxz(self):\n return self.log_det_xz",
"def log_det_Jxz(self):\n return self.log_det_xz",
"def log_det_Jzx(self):\n #return self.log_det_zx.output\n log_det_Jzxs = []\n for l in self.layers:\n if hasattr(l, 'log_det_Jzx'):\n log_det_Jzxs.append(l.log_det_Jzx)\n if len(log_det_Jzxs) == 0:\n return tf.ones((self.output_x.shape[0],))\n if len(log_det_Jzxs) == 1:\n return log_det_Jzxs[0]\n return tf.reduce_sum(log_det_Jzxs, axis=0, keepdims=False)",
"def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld",
"def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S",
"def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z",
"def log_pdf_derivative(x):\n return gs.autodiff.jacobian(log_pdf_at_x(x))(base_point)",
"def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)",
"def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)",
"def transform_and_compute_jacobian(self, xj):\n x = xj[:, :self.d].detach()\n log_j = xj[:, -1]\n\n x.requires_grad = True\n y = self.flow_(x)\n\n n_batch = xj.shape[0]\n\n jx = torch.zeros(n_batch, self.d, self.d).to(log_j.device)\n directions = torch.eye(self.d).to(log_j).unsqueeze(0).repeat(n_batch, 1, 1)\n\n for i in range(self.d):\n jx[:, i, :] = torch.autograd.grad(y, x, directions[:, i, :],\n allow_unused=True, create_graph=True, retain_graph=True)[0]\n x.requires_grad = False\n x.grad = None\n\n log_det_j = torch.log(torch.abs(torch.det(jx)))\n return torch.cat([y.detach(), (log_j + log_det_j).unsqueeze(1)], 1)",
"def log_det_Jxz(self):\n #return self.log_det_xz.output\n log_det_Jxzs = []\n for l in self.layers:\n if hasattr(l, 'log_det_Jxz'):\n log_det_Jxzs.append(l.log_det_Jxz)\n if len(log_det_Jxzs) == 0:\n return tf.ones((self.output_z.shape[0],))\n if len(log_det_Jxzs) == 1:\n return log_det_Jxzs[0]\n return tf.reduce_sum(log_det_Jxzs, axis=0, keepdims=False)",
"def log_den(self, X):\n raise NotImplementedError()",
"def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad",
"def log_density(\n self,\n theta_unc: FloatArray,\n *,\n propto: bool = True,\n jacobian: bool = True,\n ) -> float:\n lp = ctypes.pointer(ctypes.c_double())\n err = ctypes.pointer(ctypes.c_char_p())\n rc = self._log_density(\n self.model, int(propto), int(jacobian), theta_unc, lp, err\n )\n if rc:\n raise self._handle_error(err.contents, \"log_density\")\n return lp.contents.value",
"def log_det_Jzx(self):\n return self.log_det_zx",
"def log_det_Jzx(self):\n return self.log_det_zx",
"def log_det_Jzx(self):\n return self.log_det_zx",
"def log_det_Jzx(self):\n return self.log_det_zx",
"def jacobian(self, dt):\n raise NotImplementedError",
"def log_likelihood(self, theta, x, **kwargs):\n\n u, logdet_dudx, log_a = self.forward(theta, x, **kwargs)\n\n constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))\n # log_likelihood = torch.log(torch.sum(torch.exp(log_a - 0.5 * u ** 2 + logdet_dudx), dim=2))\n log_likelihood = torch.logsumexp(log_a - 0.5 * u**2 + logdet_dudx, dim=2)\n log_likelihood = constant + torch.sum(log_likelihood, dim=1)\n\n return u, log_likelihood"
]
| [
"0.80300593",
"0.7394804",
"0.73900914",
"0.72244877",
"0.70630896",
"0.6860125",
"0.6753442",
"0.67231876",
"0.66373676",
"0.66373676",
"0.66373676",
"0.66373676",
"0.65847796",
"0.654238",
"0.6513903",
"0.6488694",
"0.6466396",
"0.6461488",
"0.6461488",
"0.644598",
"0.64187604",
"0.64055586",
"0.62978065",
"0.6277813",
"0.6250764",
"0.6250764",
"0.6250764",
"0.6250764",
"0.6248207",
"0.62266886"
]
| 0.7451525 | 1 |
Builds a deep flow of the specified type. | def _make_deep_flow(flow_type, flow_depth, flow_width, dim):
if flow_type not in ['maf', 'radial', 'affine']:
raise ValueError(f'Flow type {flow_type} is not maf, radial, or affine.')
if flow_type == 'maf':
return _make_maf_flow(flow_depth, flow_width)
elif flow_type == 'radial':
return _make_radial_flow(dim, flow_depth)
elif flow_type == 'affine':
return _make_affine_flow(dim, flow_depth) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _copy_node_type_with_flowrules (cls, type_iter, target, log):\n for obj in type_iter:\n if obj.id not in target:\n c_obj = target.add_node(deepcopy(obj))\n log.debug(\"Copy NFFG node: %s\" % c_obj)\n else:\n for p in obj.ports:\n if p.id not in target.network.node[obj.id].ports:\n new_port = target.network.node[obj.id].add_port(id=p.id,\n properties=p.properties)\n log.debug(\"Copy port %s to NFFG element %s\" % (p, obj))\n if hasattr(p, 'flowrules'):\n log.debug(\"Merging flowrules of port %s of node %s\" %\n (p.id, obj.id))\n for fr in p.flowrules:\n if fr.id not in (f.id for f in new_port.flowrules):\n new_port.flowrules.append(copy.deepcopy(fr))\n else:\n old_port = target.network.node[obj.id].ports[p.id]\n for fr in p.flowrules:\n if fr.id not in (f.id for f in old_port.flowrules):\n old_port.flowrules.append(copy.deepcopy(fr))\n return target",
"def _make_radial_flow(dim, flow_depth):\n bijectors = []\n bijectors.append(tfp.bijectors.BatchNormalization())\n for _ in range(flow_depth):\n bijectors.append(ReversedRadialFlow(dim))\n bijectors.append(tfp.bijectors.BatchNormalization())\n return tfp.bijectors.Chain(list(reversed(bijectors)))",
"def build(self, input_shape):\n with tf.name_scope(self.name):\n # Using the PyTorch default hyperparameters.\n self.batch_norm = tf.keras.layers.BatchNormalization(epsilon=1e-5,\n momentum=0.9)\n self.latent_dim = input_shape[-1]\n if self.flow_width is None:\n self.flow_width = 2 * self.latent_dim\n self.flows = []\n for _ in range(self.num_classes):\n flow = tfp.distributions.TransformedDistribution(\n distribution=tfp.distributions.MultivariateNormalDiag(\n loc=tf.zeros(self.latent_dim, dtype=tf.float32),\n scale_diag=tf.ones(self.latent_dim, dtype=tf.float32)),\n bijector=_make_deep_flow(self.flow_type,\n self.flow_depth,\n self.flow_width,\n self.latent_dim))\n self.flows.append(flow)",
"def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths",
"def build(self, backend: Optional[str] = 'thread', copy_flow: bool = False, *args, **kwargs) -> 'Flow':\n\n op_flow = self._build_graph(copy_flow)\n\n if not backend:\n op_flow.logger.warning('no specified backend, build_level stays at %s, '\n 'and you can not run this flow.' % op_flow._build_level)\n elif backend in {'thread', 'process'}:\n op_flow._service_contexts.clear()\n for v in op_flow._service_nodes.values():\n p_args = v['parsed_args']\n p_args.parallel_backend = backend\n # for thread and process backend which runs locally, host_in and host_out should not be set\n p_args.host_in = BaseService.default_host\n p_args.host_out = BaseService.default_host\n op_flow._service_contexts.append((Flow._service2builder[v['service']], p_args))\n op_flow._build_level = Flow.BuildLevel.RUNTIME\n else:\n raise NotImplementedError('backend=%s is not supported yet' % backend)\n\n return op_flow",
"def build_net(env, seeds, model_type, hidden_size, noise_std, action_type):\r\n torch.manual_seed(seeds[0])\r\n if action_type == gym.spaces.box.Box:\r\n \taction_space = env.action_space.shape[0] \r\n else:\r\n action_space = env.action_space.n\r\n if model_type == \"ff\":\r\n net = model.Model_FF(env.observation_space.shape[0], action_space, hidden_size, action_type)\r\n elif model_type == \"cnn\":\r\n net = model.Model_CNN1D(env.observation_space.shape[0], action_space, hidden_size)\r\n else:\r\n net = model.Model_LSTM(env.observation_space.shape[0], action_space, hidden_size, action_type)\r\n for seed in seeds[1:]:\r\n net = mutate(net, seed, noise_std, copy_net=False)\r\n\r\n return net",
"def _copy_node_type (cls, type_iter, target, log):\n for obj in type_iter:\n if obj.id not in target:\n c_obj = target.add_node(deepcopy(obj))\n log.debug(\"Copy NFFG node: %s\" % c_obj)\n else:\n for p in obj.ports:\n if p.id not in target.network.node[obj.id].ports:\n target.network.node[obj.id].add_port(id=p.id,\n properties=p.properties)\n # TODO: Flowrules are not copied!\n log.debug(\"Copy port %s to NFFG element %s\" % (p, obj))\n return target",
"def deep_type(obj, depth = None, max_sample = None, get_type = None):\n return _deep_type(obj, [], 0, depth, max_sample, get_type)",
"def from_flow_spec(cls, flow_spec=None, deep_copy_tasks=True):\n flow = Flow(**Flow.sanitize_flow_kwargs(flow_spec))\n for i, task in enumerate(flow_spec.get('tasks', [])):\n if deep_copy_tasks:\n task = copy.deepcopy(task)\n flow.add_task(task=task)\n return flow",
"def construct_time_invariant_flow(flow_dict, D_Z, T):\n\n layer_ind = 1\n layers = []\n TIF_flow_type = flow_dict[\"TIF_flow_type\"]\n repeats = flow_dict[\"repeats\"]\n\n if TIF_flow_type == \"ScalarFlowLayer\":\n flow_class = ElemMultLayer\n name_prefix = \"ScalarFlow_Layer\"\n\n elif TIF_flow_type == \"FullyConnectedFlowLayer\":\n flow_class = FullyConnectedFlowLayer\n name_prefix = FullyConnectedFlow_Layer\n\n elif TIF_flow_type == \"AffineFlowLayer\":\n flow_class = AffineFlowLayer\n name_prefix = \"AffineFlow_Layer\"\n\n elif TIF_flow_type == \"StructuredSpinnerLayer\":\n flow_class = StructuredSpinnerLayer\n name_prefix = \"StructuredSpinner_Layer\"\n\n elif TIF_flow_type == \"StructuredSpinnerTanhLayer\":\n flow_class = StructuredSpinnerTanhLayer\n name_prefix = \"StructuredSpinnerTanh_Layer\"\n\n elif TIF_flow_type == \"PlanarFlowLayer\":\n flow_class = PlanarFlowLayer\n name_prefix = \"PlanarFlow_Layer\"\n\n elif TIF_flow_type == \"RadialFlowLayer\":\n flow_class = RadialFlowLayer\n name_prefix = \"RadialFlow_Layer\"\n\n elif TIF_flow_type == \"TanhLayer\":\n flow_class = TanhLayer\n name_prefix = \"Tanh_Layer\"\n\n else:\n raise NotImplementedError()\n\n if flow_dict[\"scale_layer\"]:\n layers.append(ElemMultLayer(\"ScalarFlow_Layer_0\", D_Z))\n layer_ind += 1\n\n for i in range(repeats):\n layers.append(flow_class(\"%s%d\" % (name_prefix, layer_ind), D_Z))\n layer_ind += 1\n\n return layers",
"def build_folded_type(ctx, state, const):\n\n def typeconst(t):\n \"\"\"Create a constant purely to hold types for a recursive call.\"\"\"\n return _Constant(t, None, None, const.op)\n\n def build_pyval(state, const):\n if const.value is not None and const.tag in ('prim', 'tuple'):\n return state, ctx.convert.constant_to_var(const.value)\n else:\n return build_folded_type(ctx, state, const)\n\n def expand(state, elements):\n vs = []\n for e in elements:\n state, v = build_pyval(state, e)\n vs.append(v)\n return state, vs\n\n def join_types(state, ts):\n xs = [typeconst(t) for t in ts]\n state, vs = expand(state, xs)\n val = ctx.convert.build_content(vs)\n return state, val\n\n def collect(state, convert_type, params):\n state, t = join_types(state, params)\n ret = ctx.convert.build_collection_of_type(state.node, convert_type, t)\n return state, ret\n\n def collect_tuple(state, elements):\n state, vs = expand(state, elements)\n return state, ctx.convert.build_tuple(state.node, vs)\n\n def collect_list(state, params, elements):\n if elements is None:\n return collect(state, ctx.convert.list_type, params)\n elif len(elements) < MAX_VAR_SIZE:\n state, vs = expand(state, elements)\n return state, ctx.convert.build_list(state.node, vs)\n else:\n # Without constant folding we construct a variable wrapping every element\n # in the list and store it; however, we cannot retrieve them all. So as an\n # optimisation, we will add the first few elements as pyals, then add one\n # element for every contained type, and rely on the fact that the tail\n # elements will contribute to the overall list type, but will not be\n # retrievable as pyvals.\n # TODO(b/175443170): We should use a smaller MAX_SUBSCRIPT cutoff; this\n # behaviour is unrelated to MAX_VAR_SIZE (which limits the number of\n # distinct bindings for the overall typevar).\n n = MAX_VAR_SIZE - len(params) - 1\n elts = elements[:n] + tuple(typeconst(t) for t in params)\n state, vs = expand(state, elts)\n return state, ctx.convert.build_list(state.node, vs)\n\n def collect_map(state, params, elements):\n m_var = ctx.convert.build_map(state.node)\n m = m_var.data[0]\n # Do not forward the state while creating dict literals.\n node = state.node\n # We want a single string type to store in the Dict.K type param.\n # Calling set_str_item on every k/v pair will lead to a type param with a\n # lot of literal strings as bindings, causing potentially severe performance\n # issues down the line.\n str_key = ctx.convert.str_type.instantiate(node)\n if elements is not None and len(elements) < MAX_VAR_SIZE:\n for (k, v) in elements.items():\n _, v = build_pyval(state, v)\n k_var = ctx.convert.constant_to_var(k)\n m.setitem(node, k_var, v)\n if isinstance(k, str):\n m.merge_instance_type_params(node, str_key, v)\n else:\n m.merge_instance_type_params(node, k_var, v)\n else:\n # Treat a too-large dictionary as {Union[keys] : Union[vals]}. 
We could\n # store a subset of the k/v pairs, as with collect_list, but for\n # dictionaries it is less obvious which subset we should be storing.\n # Perhaps we could create one variable per unique value type, and then\n # store every key in the pyval but reuse the value variables.\n k_types, v_types = params\n _, v = join_types(state, v_types)\n for t in k_types:\n _, k = build_folded_type(ctx, state, typeconst(t))\n m.setitem(node, k, v)\n m.merge_instance_type_params(node, k, v)\n return state, m_var\n\n tag, params = const.typ\n if tag == 'prim':\n if const.value:\n return state, ctx.convert.constant_to_var(const.value)\n else:\n val = ctx.convert.primitive_class_instances[params]\n return state, val.to_variable(state.node)\n elif tag == 'list':\n return collect_list(state, params, const.elements)\n elif tag == 'set':\n return collect(state, ctx.convert.set_type, params)\n elif tag == 'tuple':\n # If we get a tuple without const.elements, construct it from the type.\n # (e.g. this happens with a large dict with tuple keys)\n if not const.elements:\n elts = tuple(typeconst(t) for t in params)\n else:\n elts = const.elements\n return collect_tuple(state, elts)\n elif tag == 'map':\n return collect_map(state, params, const.elements)\n else:\n assert False, ('Unexpected type tag:', const.typ)",
"def _build(layer, height):\n if len(layer) == 1:\n return layer\n odd = None\n if len(layer) % 2:\n # promote to higher level\n odd = layer.pop(-1)\n # layer.append(layer[-1])\n new_layer = []\n for idx in range(0, len(layer), 2):\n node = Node(layer[idx].val + layer[idx + 1].val)\n node.h = height + 1\n node.l, node.r = layer[idx], layer[idx + 1]\n layer[idx].p, layer[idx + 1].p = node, node\n new_layer.append(node)\n if odd:\n odd.h += 1\n new_layer.append(odd)\n return new_layer",
"def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()",
"def build_downstream(\n self,\n build_downstream: dict,\n downstream_input_size: int,\n downstream_output_size: int,\n downstream_input_stride: int,\n ):\n return FrameLevelLinear(\n downstream_input_size, downstream_output_size, **build_downstream\n )",
"def _make_maf_flow(flow_depth, flow_width):\n # If not otherwise specified, make the hidden layers of the flow twice\n # as wide as the latent dimension, to make them expressive enough to\n # parameterize a shift and scale for each dimension.\n bijectors = []\n bijectors.append(tfp.bijectors.BatchNormalization())\n # Build the deep MAF flow.\n # Each layer outputs two params per dimension, for shift and scale.\n bijectors.append(\n tfp.bijectors.MaskedAutoregressiveFlow(\n tfp.bijectors.AutoregressiveNetwork(\n params=2, hidden_units=[flow_width]*flow_depth,\n activation='relu')))\n # For numerical stability of training, we need these batch norms.\n bijectors.append(tfp.bijectors.BatchNormalization())\n return tfp.bijectors.Chain(list(reversed(bijectors)))",
"def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')",
"def depth(self, create, depth, **kwargs): # pylint: disable=unused-argument\r\n # pylint: disable=no-member\r\n if depth == 0:\r\n self.load_item.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)\r\n else:\r\n self.load_item.side_effect = lambda x: ContainerModuleFactory(descriptor_cls=VerticalDescriptor, depth=depth - 1)",
"def create_flow(self, conf, dpid, params):\n\t\tpass",
"def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)",
"def build_fully_connected_graph(input_dim, output_dim, num_intermediate):\n from .models import DAG\n num_emit, num_rec = num_intermediate + input_dim, num_intermediate + output_dim\n activations = torch.zeros(num_rec, dtype=torch.long)\n connections = torch.zeros(num_rec, num_emit, dtype=torch.long)\n for i in range(num_rec):\n num_anc = min(input_dim + i, num_emit)\n connections[i, :num_anc] = 1\n return DAG(input_dim, output_dim, num_intermediate, connections, activations, check_valid=True)",
"def create_pipeline_flow(\n self, cmp_deriv_subject_directory, nipype_deriv_subject_directory\n ):\n acquisition_model = self.stages[\"Diffusion\"].config.diffusion_imaging_model\n recon_tool = self.stages[\"Diffusion\"].config.recon_processing_tool\n\n recon_model = \"DTI\"\n\n if acquisition_model == \"DSI\":\n recon_model = \"SHORE\"\n else:\n if recon_tool == \"Dipy\" and self.stages[\"Diffusion\"].config.dipy_recon_config.local_model:\n recon_model = \"CSD\"\n elif recon_tool == \"MRtrix\" and self.stages[\"Diffusion\"].config.mrtrix_recon_config.local_model:\n recon_model = \"CSD\"\n\n tracking_model = self.stages[\"Diffusion\"].config.diffusion_model\n\n if tracking_model == \"Deterministic\":\n tracking_model = \"DET\"\n elif tracking_model == \"Probabilistic\":\n tracking_model = \"PROB\"\n\n if self.parcellation_scheme == \"Lausanne2018\":\n bids_atlas_label = \"L2018\"\n elif self.parcellation_scheme == \"NativeFreesurfer\":\n bids_atlas_label = \"Desikan\"\n elif self.parcellation_scheme == \"Custom\":\n bids_atlas_label = self.custom_atlas_name\n if self.custom_atlas_res is not None and self.custom_atlas_res != \"\":\n bids_atlas_label += f'_res-{self.custom_atlas_res}'\n\n # Clear previous outputs\n self.clear_stages_outputs()\n\n # Create diffusion workflow with input and output Identityinterface nodes\n diffusion_flow = pe.Workflow(\n name=\"diffusion_pipeline\",\n base_dir=os.path.abspath(nipype_deriv_subject_directory),\n )\n\n diffusion_inputnode = pe.Node(\n interface=util.IdentityInterface(\n fields=[\n \"diffusion\",\n \"bvecs\",\n \"bvals\",\n \"T1\",\n \"aseg\",\n \"aparc_aseg\",\n \"brain\",\n \"T2\",\n \"brain_mask\",\n \"wm_mask_file\",\n \"roi_volumes\",\n \"roi_graphMLs\",\n \"subjects_dir\",\n \"subject_id\",\n \"parcellation_scheme\",\n ]\n ),\n name=\"inputnode\",\n )\n diffusion_inputnode.inputs.parcellation_scheme = self.parcellation_scheme\n diffusion_inputnode.inputs.atlas_info = self.atlas_info\n\n diffusion_outputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"connectivity_matrices\"]),\n name=\"outputnode\",\n )\n\n diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])\n\n # Data import\n datasource = self.create_datagrabber_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label\n )\n\n # Data sinker for output\n sinker = self.create_datasinker_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label,\n recon_model=recon_model,\n tracking_model=tracking_model\n )\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, diffusion_inputnode, [(\"diffusion\", \"diffusion\"),\n (\"bvecs\", \"bvecs\"),\n (\"bvals\", \"bvals\"),\n (\"T1\", \"T1\"),\n (\"aseg\", \"aseg\"),\n (\"aparc_aseg\", \"aparc_aseg\"),\n (\"brain\", \"brain\"),\n (\"brain_mask\", \"brain_mask\"),\n (\"wm_mask_file\", \"wm_mask_file\")]),\n ]\n )\n # fmt:on\n\n merge_roi_volumes = pe.Node(interface=Merge(5), name=\"merge_roi_volumes\")\n merge_roi_graphmls = pe.Node(interface=Merge(5), name=\"merge_roi_graphmls\")\n\n def remove_non_existing_scales(roi_volumes):\n \"\"\"Returns a list which do not contained any empty element.\n\n Parameters\n ----------\n roi_volumes : list\n A list of output parcellations that might contain empty element\n in the case of the monoscale Desikan scheme for instance\n\n Returns\n -------\n out_roi_volumes : list\n The list with no empty element\n \"\"\"\n out_roi_volumes = []\n for vol in roi_volumes:\n if vol is not None:\n out_roi_volumes.append(vol)\n return 
out_roi_volumes\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, merge_roi_volumes, [(\"roi_volume_s1\", \"in1\"),\n (\"roi_volume_s2\", \"in2\"),\n (\"roi_volume_s3\", \"in3\"),\n (\"roi_volume_s4\", \"in4\"),\n (\"roi_volume_s5\", \"in5\")]),\n (datasource, merge_roi_graphmls, [(\"roi_graphml_s1\", \"in1\"),\n (\"roi_graphml_s2\", \"in2\"),\n (\"roi_graphml_s3\", \"in3\"),\n (\"roi_graphml_s4\", \"in4\"),\n (\"roi_graphml_s5\", \"in5\")]),\n (merge_roi_volumes, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_volumes\")],),\n (merge_roi_graphmls, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_graphMLs\")],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Preprocessing\"].enabled:\n preproc_flow = self.create_stage_flow(\"Preprocessing\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, preproc_flow, [(\"diffusion\", \"inputnode.diffusion\"),\n (\"brain\", \"inputnode.brain\"),\n (\"aseg\", \"inputnode.aseg\"),\n (\"aparc_aseg\", \"inputnode.aparc_aseg\"),\n (\"brain_mask\", \"inputnode.brain_mask\"),\n (\"wm_mask_file\", \"inputnode.wm_mask_file\"),\n (\"roi_volumes\", \"inputnode.roi_volumes\"),\n (\"bvecs\", \"inputnode.bvecs\"),\n (\"bvals\", \"inputnode.bvals\"),\n (\"T1\", \"inputnode.T1\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Registration\"].enabled:\n reg_flow = self.create_stage_flow(\"Registration\")\n # fmt:off\n diffusion_flow.connect(\n [\n # (diffusion_inputnode,reg_flow,[('T2','inputnode.T2')]),\n (preproc_flow, reg_flow, [(\"outputnode.T1\", \"inputnode.T1\"),\n (\"outputnode.act_5TT\", \"inputnode.act_5TT\"),\n (\"outputnode.gmwmi\", \"inputnode.gmwmi\"),\n (\"outputnode.bvecs_rot\", \"inputnode.bvecs\"),\n (\"outputnode.bvals\", \"inputnode.bvals\"),\n (\"outputnode.wm_mask_file\", \"inputnode.wm_mask\"),\n (\"outputnode.partial_volume_files\", \"inputnode.partial_volume_files\",),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes\"),\n (\"outputnode.brain\", \"inputnode.brain\"),\n (\"outputnode.brain_mask\", \"inputnode.brain_mask\"),\n (\"outputnode.brain_mask_full\", \"inputnode.brain_mask_full\"),\n (\"outputnode.diffusion_preproc\", \"inputnode.target\"),\n (\"outputnode.dwi_brain_mask\", \"inputnode.target_mask\")]),\n (preproc_flow, sinker, [(\"outputnode.bvecs_rot\", \"dwi.@bvecs_rot\"),\n (\"outputnode.diffusion_preproc\", \"dwi.@diffusion_preproc\"),\n (\"outputnode.dwi_brain_mask\", \"dwi.@diffusion_brainmask\")]),\n ]\n )\n # fmt:on\n if self.stages[\"Registration\"].config.registration_mode == \"BBregister (FS)\":\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, reg_flow, [(\"subjects_dir\", \"inputnode.subjects_dir\"), (\"subject_id\", \"inputnode.subject_id\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Diffusion\"].enabled:\n diff_flow = self.create_stage_flow(\"Diffusion\")\n # fmt:off\n diffusion_flow.connect(\n [\n (preproc_flow, diff_flow, [(\"outputnode.diffusion_preproc\", \"inputnode.diffusion\")]),\n (reg_flow, diff_flow, [(\"outputnode.wm_mask_registered_crop\", \"inputnode.wm_mask_registered\",),\n (\"outputnode.brain_mask_registered_crop\", \"inputnode.brain_mask_registered\",),\n (\"outputnode.partial_volumes_registered_crop\", \"inputnode.partial_volumes\",),\n (\"outputnode.roi_volumes_registered_crop\", \"inputnode.roi_volumes\",),\n (\"outputnode.act_5tt_registered_crop\", \"inputnode.act_5tt_registered\",),\n (\"outputnode.gmwmi_registered_crop\", \"inputnode.gmwmi_registered\",),\n (\"outputnode.grad\", \"inputnode.grad\"),\n (\"outputnode.bvals\", 
\"inputnode.bvals\"),\n (\"outputnode.bvecs\", \"inputnode.bvecs\")]),\n (reg_flow, sinker, [(\"outputnode.target_epicorrected\", \"dwi.@bdiffusion_reg_crop\",),\n (\"outputnode.grad\", \"dwi.@diffusion_grad\"),\n (\"outputnode.affine_transform\", \"xfm.@affine_transform\"),\n (\"outputnode.warp_field\", \"xfm.@warp_field\"),\n (\"outputnode.T1_registered_crop\", \"anat.@T1_reg_crop\"),\n (\"outputnode.act_5tt_registered_crop\", \"anat.@act_5tt_reg_crop\",),\n (\"outputnode.gmwmi_registered_crop\", \"anat.@gmwmi_reg_crop\"),\n (\"outputnode.brain_registered_crop\", \"anat.@brain_reg_crop\"),\n (\"outputnode.brain_mask_registered_crop\", \"anat.@brain_mask_reg_crop\",),\n (\"outputnode.wm_mask_registered_crop\", \"anat.@wm_mask_reg_crop\",),\n (\"outputnode.roi_volumes_registered_crop\", \"anat.@roivs_reg_crop\",),\n (\"outputnode.partial_volumes_registered_crop\", \"anat.@pves_reg_crop\",)],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Connectome\"].enabled:\n self.stages[\"Connectome\"].config.probtrackx = False\n self.stages[\"Connectome\"].config.subject = self.global_conf.subject\n con_flow = self.create_stage_flow(\"Connectome\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, con_flow, [(\"parcellation_scheme\", \"inputnode.parcellation_scheme\"),\n (\"atlas_info\", \"inputnode.atlas_info\"),\n (\"roi_graphMLs\", \"inputnode.roi_graphMLs\")]),\n (diff_flow, con_flow, [(\"outputnode.track_file\", \"inputnode.track_file\"),\n (\"outputnode.FA\", \"inputnode.FA\"),\n (\"outputnode.ADC\", \"inputnode.ADC\"),\n (\"outputnode.AD\", \"inputnode.AD\"),\n (\"outputnode.RD\", \"inputnode.RD\"),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes_registered\",),\n (\"outputnode.skewness\", \"inputnode.skewness\"),\n (\"outputnode.kurtosis\", \"inputnode.kurtosis\"),\n (\"outputnode.P0\", \"inputnode.P0\"),\n (\"outputnode.mapmri_maps\", \"inputnode.mapmri_maps\"),\n (\"outputnode.shore_maps\", \"inputnode.shore_maps\")]),\n (con_flow, diffusion_outputnode, [(\"outputnode.connectivity_matrices\", \"connectivity_matrices\")]),\n (diff_flow, sinker, [(\"outputnode.fod_file\", \"dwi.@fod_file\"),\n (\"outputnode.FA\", \"dwi.@FA\"),\n (\"outputnode.ADC\", \"dwi.@ADC\"),\n (\"outputnode.AD\", \"dwi.@AD\"),\n (\"outputnode.RD\", \"dwi.@RD\"),\n (\"outputnode.skewness\", \"dwi.@skewness\"),\n (\"outputnode.kurtosis\", \"dwi.@kurtosis\"),\n (\"outputnode.P0\", \"dwi.@P0\"),\n (\"outputnode.mapmri_maps\", \"dwi.@mapmri_maps\"),\n (\"outputnode.shore_maps\", \"dwi.@shore_maps\")]),\n (con_flow, sinker, [(\"outputnode.streamline_final_file\", \"dwi.@streamline_final_file\"),\n (\"outputnode.connectivity_matrices\", \"dwi.@connectivity_matrices\")]),\n ]\n )\n # fmt:on\n\n return diffusion_flow",
"def depth(self, create, depth, **kwargs): # pylint: disable=unused-argument\r\n # pylint: disable=no-member\r\n if depth == 0:\r\n self.get_module.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)\r\n else:\r\n self.get_module.side_effect = lambda x: ContainerModuleFactory(descriptor_cls=VerticalDescriptor, depth=depth - 1)",
"def build_wall(self, type, pos1, pos2, thickness=1):\n raise NotImplementedError",
"def create_target_direction(net, net2, dir_type='states'):\n\n assert (net2 is not None)\n # direction between net2 and net\n if dir_type == 'weights':\n w = get_weights(net)\n w2 = get_weights(net2)\n direction = get_diff_weights(w, w2)\n elif dir_type == 'states':\n s = net.state_dict()\n s2 = net2.state_dict()\n direction = get_diff_states(s, s2)\n\n return direction",
"def flow(self, n):\n pn_link_flow(self._impl, n)",
"def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError",
"def makeSiteList(chain,type,depth):\n\n\t# make a list of the available sites open to bind a given binderType\n\tsites = []\n\t\t\n\tfor i in range(len(chain)):\n\t\tif (chain[i]['depth'] >= depth):\n\t\t\tif (chain[i]['type'] in type['binds']):\n\t\t\t\tfor j in range(len(chain[i]['sites'])):\n\t\t\t\t\tif (chain[i]['sites'][j] == None):\n\t\t\t\t\t\tsites.append( [i,j] )\n\t\n\treturn sites",
"def _make_affine_flow(dim, flow_depth):\n bijectors = []\n bijectors.append(tfp.bijectors.BatchNormalization())\n for _ in range(flow_depth):\n bijectors.append(\n tfp.bijectors.Shift(tf.Variable(tf.zeros(dim), trainable=True)))\n bijectors.append(\n tfp.bijectors.ScaleMatvecDiag(\n tf.Variable(tf.ones(dim), trainable=True)))\n bijectors.append(tfp.bijectors.BatchNormalization())\n return tfp.bijectors.Chain(list(reversed(bijectors)))",
"def create_reference_step(step_structure: tree.Structure[Any]) -> ReferenceStep:\n return tree.unflatten_as(\n step_structure,\n [_RefNode(x) for x in range(len(tree.flatten(step_structure)))])",
"def compute_flow(self, loads):\n assert len(loads) == self.n_nodes\n flow = np.zeros([self.n_nodes, self.n_nodes])\n for k, l in self.lines:\n flow[k, l] = self.line_flow(k, l, loads)\n return flow"
]
| [
"0.59155285",
"0.5603136",
"0.5450468",
"0.54186976",
"0.5205651",
"0.5181642",
"0.49135038",
"0.48851934",
"0.48439947",
"0.4814305",
"0.48012322",
"0.4797545",
"0.47965562",
"0.47371408",
"0.47015324",
"0.46939322",
"0.46512794",
"0.46494555",
"0.46481785",
"0.46470216",
"0.46390012",
"0.46338424",
"0.46322963",
"0.46135142",
"0.46089804",
"0.45861837",
"0.4568892",
"0.45643017",
"0.45601752",
"0.45561066"
]
| 0.79175943 | 0 |
Builds a deep stack of radial flows. | def _make_radial_flow(dim, flow_depth):
bijectors = []
bijectors.append(tfp.bijectors.BatchNormalization())
for _ in range(flow_depth):
bijectors.append(ReversedRadialFlow(dim))
bijectors.append(tfp.bijectors.BatchNormalization())
return tfp.bijectors.Chain(list(reversed(bijectors))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_deep_flow(flow_type, flow_depth, flow_width, dim):\n if flow_type not in ['maf', 'radial', 'affine']:\n raise ValueError(f'Flow type {flow_type} is not maf, radial, or affine.')\n if flow_type == 'maf':\n return _make_maf_flow(flow_depth, flow_width)\n elif flow_type == 'radial':\n return _make_radial_flow(dim, flow_depth)\n elif flow_type == 'affine':\n return _make_affine_flow(dim, flow_depth)",
"def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths",
"def _compute_flows(paths):\n assert isinstance(paths, list)\n\n # allocate memory\n flows = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 2],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)-1):\n im1 = skimage.io.imread(paths[i])\n im2 = skimage.io.imread(paths[i+1])\n\n im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)\n im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)\n im1 = skimage.transform.resize(\n im1, [FLAGS.target_height, FLAGS.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n im2 = skimage.transform.resize(\n im2, [FLAGS.target_height, FLAGS.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n flow = cv2.calcOpticalFlowFarneback(\n im1, im2, flow=None, pyr_scale=0.5, levels=3, winsize=15,\n iterations=3, poly_n=5, poly_sigma=1.2, flags=0)\n\n # store images\n flows[i] = flow\n pbar.update(i)\n\n # Replicate the flow for last frame\n flows[-1] = flow\n return flows",
"def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up",
"def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass",
"def build_pipeline(roots):\n if not isinstance(roots, list):\n roots = [roots]\n\n build_graph = BuildGraph()\n build_graph.add_targets(roots)\n\n return build_graph",
"def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. 
TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def expand(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n preds = list(set(network.getrow(neighb).indices) & self.genes) \n if len(preds)>2:\n pass\n dist_seed = self.dist[preds].min() + 1\n if dist_seed > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n next_pattern.edges |= set((pred, neighb) for pred in preds) \n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_seed\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]",
"def _build_forward_graph(self):\n\n print('[*] Building a Neural Turing Machine.')\n\n self._initalize_state()\n\n # present start token\n controller_out = self.controller.emit_feature_vector(self.start_token, self.r_t[0], reuse=None)\n self._read_write(controller_out, reuse=None)\n\n # present inputs\n print('Input chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.inputs[t], self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present end token\n controller_out = self.controller.emit_feature_vector(self.end_token, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present outputs\n print('Output chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.zeros, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n reuse = None if t == 0 else True\n self.outputs.append(self._decode_read_vector(self.r_t[-1], reuse=reuse))\n print('Done.')",
"def __init__(self, stacks):\n self.logger = logging.getLogger(__name__)\n self.__graph = nx.DiGraph()\n self.__generate_graph(stacks)",
"def build_graph(self):\n pass",
"def generate_dag_graph(self):\n # generate ranom graph\n G = nx.DiGraph()\n G.add_nodes_from(range(self.nodes))\n return self.fix_graph(G)",
"def _build_graph(self):\n pass",
"def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)",
"def _build_reprojection_graph(self):\n EPS = 1e-8\n depths = self.depths_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n batch, num, ht, wd = tf.unstack(tf.shape(depths), num=4)\n Ts = VideoSE3Transformation(matrix=poses)\n intrinsics = intrinsics_vec_to_matrix(intrinsics)\n\n ii, jj = tf.meshgrid(tf.range(0, num), tf.range(num, num+1))\n ii = tf.reshape(ii, [-1])\n jj = tf.reshape(jj, [-1])\n\n Tij = Ts.gather(jj) * Ts.gather(ii).inv()\n X0 = projective_ops.backproject(depths, intrinsics)\n X1 = Tij(X0)\n\n coords = projective_ops.project(X1, intrinsics)\n depths = X1[..., 2]\n\n indicies = tf.cast(coords[..., ::-1] + .5, tf.int32)\n indicies = tf.reshape(indicies, [-1, 2])\n depths = tf.reshape(depths, [-1])\n\n depth = tf.scatter_nd(indicies, depths, [ht, wd])\n count = tf.scatter_nd(indicies, tf.ones_like(depths), [ht, wd])\n\n depth = depth / (count + EPS)\n self.outputs['depth_reprojection'] = depth",
"def stack(self):\n # Fetch the zeroth layer data, which is the original input\n \tdata = self.data_container[0]\n # Initialize network that will contain the stack\n \tself.init_stacked_net(data)\n # Add the weights layer by layer from the individual networks.\n \t# The weights container has [(I_1,O_1),(I_2,O_2),...(I_n,O_n)],\n \t# you need to unfold it as I_1,I_2...I_n:O_n,...O_2,O_1\n \tself.stacked_net.weights = [a[0] for a \\\n in self.weights_container] + [a[1] for a \\\n in self.weights_container][::-1]\n \tself.stacked_net.biases = [a[0] for a in self.bias_container]\\\n + [a[1] for a in self.bias_container][::-1]",
"def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)",
"def generate_schedule(top_length, top_depth, left_length, left_depth):\n # The process of calculating the schedule starts from the leftmost\n # topmost element which is active from 0..top_depth timesteps.\n out = np.zeros((left_length, top_length, top_depth), dtype=\"i\")\n out[0][0] = np.arange(top_depth)\n\n # Fill the first col: Every column runs one \"step\" behind the column on\n # its left.\n for col in range(1, top_length):\n out[0][col] = out[0][col - 1] + 1\n\n # Fill the remaining rows. Similarly, all rows run one \"step\" behind the\n # row on their top.\n for row in range(1, left_length):\n out[row][0] = out[row - 1][0] + 1\n for col in range(1, top_length):\n out[row][col] = out[row][col - 1] + 1\n\n return out",
"def create_nodes(self):\n # Create a 'cylinder' of nodes for each layer\n for layer in range(1, self.layers+1):\n diameter = self.dia_dict[layer-1] # Calculate the diameter of the current ring\n n = self.ring_n\n # Calculate the angle by dividing a full circle by the number of nodes\n steps = (2*np.pi) / n\n\n # Create a list of node names for each layer apart for easy access later\n node_layer = []\n\n # Now for the depth of the cylinder we loop over the height\n for h in range(self.height):\n # For the first layer we also create a center\n if layer == 1:\n c_name = f'N.{h}.c'\n self.fem.AddNode(c_name, 0, 0, h)\n self.center_layer.append(c_name)\n\n # Then for each step in the 'ring' we create a node based on it's geometry\n for c in range(n):\n x = np.cos(c*steps) * (diameter/2)\n y = np.sin(c*steps) * (diameter/2)\n name = f'R.{layer}.{h}.{c}'\n self.fem.AddNode(name, x, y, h)\n node_layer.append(name)\n\n # Then the new layer of nodes is saved\n if layer == 1:\n self.node_layers.append(self.center_layer)\n self.node_layers.append(node_layer)",
"def make_dag(self, expand=set()):\n G = nx.DiGraph()\n\n ## Inputs-to-Functions\n for f in self.functions:\n # Expand composed models\n if isinstance(f, FunctionModel) and (f.name in expand):\n G_ref = f.model.make_dag(expand=expand - {f})\n G_sub = nx.DiGraph()\n # Add nodes\n G_sub.add_node(f.name + \".var\")\n G_sub.add_node(f.name + \".out\")\n for g in f.model.functions:\n G_sub.add_node(f.name + \".\" + g.name)\n # Add node metadata\n nx.set_node_attributes(G_sub, f.name, \"parent\")\n\n # Add edges\n for u, v, d in G_ref.edges(data=True):\n # Add renamed edge\n if u == \"(var)\":\n G_sub.add_edge(f.name + \".var\", f.name + \".\" + v, **d)\n elif v == \"(out)\":\n G_sub.add_edge(f.name + \".\" + u, f.name + \".out\", **d)\n else:\n G_sub.add_edge(f.name + \".\" + u, f.name + \".\" + v, **d)\n\n # Compose the graphs\n G = nx.compose(G, G_sub)\n\n i_var = set(self.var).intersection(set(f.var))\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(\"(var)\", f.name + \".var\", label=s_var)\n else:\n G.add_edge(\"(var)\", f.name, label=s_var)\n\n ## Function-to-Function\n for i0 in range(len(self.functions)):\n for i1 in range(i0 + 1, len(self.functions)):\n f0 = self.functions[i0]\n f1 = self.functions[i1]\n i_var = set(f0.out).intersection(set(f1.var))\n\n ## If connected\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n ## Handle composed models\n if isinstance(f0, FunctionModel) and (f0.name in expand):\n name0 = f0.name + \".out\"\n else:\n name0 = f0.name\n if isinstance(f1, FunctionModel) and (f1.name in expand):\n name1 = f1.name + \".out\"\n else:\n name1 = f1.name\n\n G.add_edge(name0, name1, label=s_var)\n\n ## Functions-to-Outputs\n for f in self.functions:\n i_out = set(self.out).intersection(set(f.out))\n\n if len(i_out) > 0:\n s_out = \"{}\".format(i_out)\n ## Target composed model's out\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(f.name + \".out\", \"(out)\", label=s_out)\n ## An ordinary function\n else:\n G.add_edge(f.name, \"(out)\", label=s_out)\n\n # Add node metadata\n nx.set_node_attributes(G, {f.name: {\"parent\": self.name}})\n\n # Final metadata\n nx.set_node_attributes(G, {\"(var)\": {\"parent\": self.name}})\n nx.set_node_attributes(G, {\"(out)\": {\"parent\": self.name}})\n\n return G",
"def expand2(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n dist_from_pattern = self.dist[network.getrow(neighb).indices] \n dist_of_added = dist_from_pattern[dist_from_pattern > -1].min() + 1\n if dist_of_added > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((pred, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_of_added\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]",
"def make_layers(self, n_repetitions: int = 1) -> List[List[tuple]]:\n if n_repetitions <= 0:\n raise ValueError(\"The number of repetitions must be positve\")\n\n root = [self.items]\n graph_layers = [root] + [[]] * (self.depth * 2)\n\n for _ in range(n_repetitions):\n layers = self.random_layers()\n for h in range(1, len(layers)):\n graph_layers[h] = graph_layers[h] + layers[h]\n\n return graph_layers",
"def expandX(self, network, radius):\n expansion = []\n for gene in self.genes:\n for neighb in network.getrow(gene).indices:\n if neighb in self.genes:\n continue\n if self.dist[gene]+1 > radius:\n continue\n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((gene, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = self.dist[gene] + 1\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]",
"def make_stack(self):\n self.snapshot = Snapshot()\n self.snapshot.clean(self.ref)\n \n self.values = {}\n self.classes = []\n self.stack = Stack(self, self.snapshot)",
"def _load_flows(paths):\n assert isinstance(paths, list)\n # allocate memory\n flows = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 2],\n dtype=np.float32)\n\n # load all flows\n pbar = ProgressBar(max_value=len(paths)-1)\n for i in range(len(paths)):\n # Load flow\n flow = np.load(paths[i])\n\n # Central crop flow\n h, w, _ = flow.shape\n left = (w - FLAGS.target_width) // 2\n top = (h - FLAGS.target_height) // 2\n right = left + FLAGS.target_width\n bottom = top + FLAGS.target_height\n flow = flow[left:right, top:bottom, :]\n\n # Store flow\n flows[i] = flow\n pbar.update(i)\n return flows",
"def build_graph(self):\n raise NotImplementedError",
"def flow_model():\n return {\n 'nodes': [\n {\n 'id': 'left_root',\n 'name': 'left_root',\n 'value': 3.5,\n 'rank': 'l',\n },\n {\n 'id': 'right_root',\n 'name': 'right_root',\n 'value': 3.5,\n 'rank': 'r',\n },\n ], 'edges': [\n {\n 'source': 'left_root',\n 'target': 'right_root',\n 'value': 1.0,\n },\n ]\n }",
"def generate_possible_moves_rr(self, game):\n gp1_games = []\n gp2_games = []\n for color in game.paths:\n # TODO clean up expression\n path = game.paths[color]\n # if path.is_complete():\n # continue\n\n # Get grow points and points adjacent to them\n gp1, gp2 = path.get_grow_points()\n adj2gp1 = utils.get_adjacent_points(gp1)\n adj2gp2 = utils.get_adjacent_points(gp2)\n\n # Handle adding adjacent points to grow points separately - in order to maintain RR order.\n for possible in adj2gp1:\n if path.can_be_added_to_path(possible, 1):\n copy_game = deepcopy(game)\n \"\"\":type: Flow\"\"\"\n copy_game.paths[color].add_to_path(possible, 1)\n gp1_games.append(copy_game)\n\n for possible in adj2gp2:\n if path.can_be_added_to_path(possible, 1):\n copy_game = deepcopy(game)\n \"\"\":type: Flow\"\"\"\n copy_game.paths[color].add_to_path(possible, 2)\n gp2_games.append(copy_game)\n\n # Add games to queue\n self.queue += gp1_games + gp2_games",
"def _build_graph(self,tree):\n list_val = [] # input of computation graph\n list_h = [] # output of computation graph\n def build_cell(node, children_h):\n if (node.has_child()):\n assert len(children_h) > 0\n val, h = self.builder.get_recursive_module(children_h)\n else:\n val, h = self.builder.get_leaf_module()\n return val, h\n\n def recursive_build_graph(node):\n if(node.has_child()):\n h_1 = recursive_build_graph(node.children[0])\n h_2 = recursive_build_graph(node.children[1])\n input_val, h = build_cell(node, [h_1, h_2])\n else:\n input_val, h = build_cell(node, None)\n list_val.append(input_val)\n list_h.append(h)\n return h\n\n recursive_build_graph(tree)\n return list_val, list_h"
]
| [
"0.5709303",
"0.56593513",
"0.5271387",
"0.52517676",
"0.5136963",
"0.5127356",
"0.5101479",
"0.50962454",
"0.50852436",
"0.50673264",
"0.5063419",
"0.50551564",
"0.5041631",
"0.50215495",
"0.5019181",
"0.5014093",
"0.5013853",
"0.4989534",
"0.49769366",
"0.49767402",
"0.4942842",
"0.49395412",
"0.49213716",
"0.49114132",
"0.49102357",
"0.48979747",
"0.4883703",
"0.48662496",
"0.48606625",
"0.48568183"
]
| 0.7035504 | 0 |
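The flow-builder documents in the adjacent rows (`_make_radial_flow` above and `_make_affine_flow` in the next row) both return a `tfp.bijectors.Chain` bracketed by `BatchNormalization` bijectors, and the `_make_deep_flow` snippet listed among the negatives dispatches between the maf, radial, and affine variants by `flow_type`. Below is a minimal usage sketch, not part of the dataset, showing how such a chain is typically wrapped in a `TransformedDistribution`; it assumes eager TF2 with `tensorflow_probability` available and `_make_affine_flow` in scope as listed, while `ReversedRadialFlow` from the radial variant is a project-specific bijector that is not reproduced here, and `dim`/`flow_depth` are hypothetical sizes.

```python
# Sketch only: wire one of the flow builders into a trainable density.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

dim, flow_depth = 4, 2  # hypothetical sizes, not values from this dataset
bijector = _make_affine_flow(dim, flow_depth)  # or _make_radial_flow(dim, flow_depth)

# Standard-normal base pushed through the bijector chain.
base = tfd.MultivariateNormalDiag(loc=tf.zeros(dim))
flow = tfd.TransformedDistribution(distribution=base, bijector=bijector)

samples = flow.sample(8)            # shape [8, dim]
log_probs = flow.log_prob(samples)  # shape [8]
```

The builders reverse the list before chaining because `tfp.bijectors.Chain` applies the last bijector in its list first on the forward pass, so `Chain(list(reversed(bijectors)))` runs the layers in the order they were appended.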
Builds a deep stack of affine flows. | def _make_affine_flow(dim, flow_depth):
bijectors = []
bijectors.append(tfp.bijectors.BatchNormalization())
for _ in range(flow_depth):
bijectors.append(
tfp.bijectors.Shift(tf.Variable(tf.zeros(dim), trainable=True)))
bijectors.append(
tfp.bijectors.ScaleMatvecDiag(
tf.Variable(tf.ones(dim), trainable=True)))
bijectors.append(tfp.bijectors.BatchNormalization())
return tfp.bijectors.Chain(list(reversed(bijectors))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_deep_flow(flow_type, flow_depth, flow_width, dim):\n if flow_type not in ['maf', 'radial', 'affine']:\n raise ValueError(f'Flow type {flow_type} is not maf, radial, or affine.')\n if flow_type == 'maf':\n return _make_maf_flow(flow_depth, flow_width)\n elif flow_type == 'radial':\n return _make_radial_flow(dim, flow_depth)\n elif flow_type == 'affine':\n return _make_affine_flow(dim, flow_depth)",
"def stack(self):\n # Fetch the zeroth layer data, which is the original input\n \tdata = self.data_container[0]\n # Initialize network that will contain the stack\n \tself.init_stacked_net(data)\n # Add the weights layer by layer from the individual networks.\n \t# The weights container has [(I_1,O_1),(I_2,O_2),...(I_n,O_n)],\n \t# you need to unfold it as I_1,I_2...I_n:O_n,...O_2,O_1\n \tself.stacked_net.weights = [a[0] for a \\\n in self.weights_container] + [a[1] for a \\\n in self.weights_container][::-1]\n \tself.stacked_net.biases = [a[0] for a in self.bias_container]\\\n + [a[1] for a in self.bias_container][::-1]",
"def make_dag(self, expand=set()):\n G = nx.DiGraph()\n\n ## Inputs-to-Functions\n for f in self.functions:\n # Expand composed models\n if isinstance(f, FunctionModel) and (f.name in expand):\n G_ref = f.model.make_dag(expand=expand - {f})\n G_sub = nx.DiGraph()\n # Add nodes\n G_sub.add_node(f.name + \".var\")\n G_sub.add_node(f.name + \".out\")\n for g in f.model.functions:\n G_sub.add_node(f.name + \".\" + g.name)\n # Add node metadata\n nx.set_node_attributes(G_sub, f.name, \"parent\")\n\n # Add edges\n for u, v, d in G_ref.edges(data=True):\n # Add renamed edge\n if u == \"(var)\":\n G_sub.add_edge(f.name + \".var\", f.name + \".\" + v, **d)\n elif v == \"(out)\":\n G_sub.add_edge(f.name + \".\" + u, f.name + \".out\", **d)\n else:\n G_sub.add_edge(f.name + \".\" + u, f.name + \".\" + v, **d)\n\n # Compose the graphs\n G = nx.compose(G, G_sub)\n\n i_var = set(self.var).intersection(set(f.var))\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(\"(var)\", f.name + \".var\", label=s_var)\n else:\n G.add_edge(\"(var)\", f.name, label=s_var)\n\n ## Function-to-Function\n for i0 in range(len(self.functions)):\n for i1 in range(i0 + 1, len(self.functions)):\n f0 = self.functions[i0]\n f1 = self.functions[i1]\n i_var = set(f0.out).intersection(set(f1.var))\n\n ## If connected\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n ## Handle composed models\n if isinstance(f0, FunctionModel) and (f0.name in expand):\n name0 = f0.name + \".out\"\n else:\n name0 = f0.name\n if isinstance(f1, FunctionModel) and (f1.name in expand):\n name1 = f1.name + \".out\"\n else:\n name1 = f1.name\n\n G.add_edge(name0, name1, label=s_var)\n\n ## Functions-to-Outputs\n for f in self.functions:\n i_out = set(self.out).intersection(set(f.out))\n\n if len(i_out) > 0:\n s_out = \"{}\".format(i_out)\n ## Target composed model's out\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(f.name + \".out\", \"(out)\", label=s_out)\n ## An ordinary function\n else:\n G.add_edge(f.name, \"(out)\", label=s_out)\n\n # Add node metadata\n nx.set_node_attributes(G, {f.name: {\"parent\": self.name}})\n\n # Final metadata\n nx.set_node_attributes(G, {\"(var)\": {\"parent\": self.name}})\n nx.set_node_attributes(G, {\"(out)\": {\"parent\": self.name}})\n\n return G",
"def compute_flow(self, loads):\n assert len(loads) == self.n_nodes\n flow = np.zeros([self.n_nodes, self.n_nodes])\n for k, l in self.lines:\n flow[k, l] = self.line_flow(k, l, loads)\n return flow",
"def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def make_stack(self):\n self.snapshot = Snapshot()\n self.snapshot.clean(self.ref)\n \n self.values = {}\n self.classes = []\n self.stack = Stack(self, self.snapshot)",
"def pack_experience(states, actions, rewards, next_states, dones):\n\n# pdb.set_trace()\n \n return (states.flatten(),\n actions.flatten(),\n rewards,\n next_states.flatten(),\n dones)",
"def get_flow_4frames(self, flows_forward, flows_backward):\n d = flows_forward[0].shape[1]\n flows_backward2 = []\n for flows in flows_backward:\n flow_list = []\n for i in range(d - 1, 0, -1):\n flow_n1 = flows[:, i - 1, :, :, :]\n flow_n2 = flows[:, i, :, :, :]\n flow_list.insert(0, flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))\n flows_backward2.append(torch.stack(flow_list, 1))\n flows_forward2 = []\n for flows in flows_forward:\n flow_list = []\n for i in range(1, d):\n flow_n1 = flows[:, i, :, :, :]\n flow_n2 = flows[:, i - 1, :, :, :]\n flow_list.append(flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))\n flows_forward2.append(torch.stack(flow_list, 1))\n return flows_backward2, flows_forward2",
"def _compute_flows(paths):\n assert isinstance(paths, list)\n\n # allocate memory\n flows = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 2],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)-1):\n im1 = skimage.io.imread(paths[i])\n im2 = skimage.io.imread(paths[i+1])\n\n im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)\n im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)\n im1 = skimage.transform.resize(\n im1, [FLAGS.target_height, FLAGS.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n im2 = skimage.transform.resize(\n im2, [FLAGS.target_height, FLAGS.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n flow = cv2.calcOpticalFlowFarneback(\n im1, im2, flow=None, pyr_scale=0.5, levels=3, winsize=15,\n iterations=3, poly_n=5, poly_sigma=1.2, flags=0)\n\n # store images\n flows[i] = flow\n pbar.update(i)\n\n # Replicate the flow for last frame\n flows[-1] = flow\n return flows",
"def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()",
"def execute_inner_graph(*args):\r\n # Check if you need to go back in time over the sequences (the\r\n # first argument is n_steps, the second is go_backwards)\r\n nsteps = args[0]\r\n invert = False\r\n if args[1]:\r\n nsteps = nsteps * -1\r\n if nsteps < 0:\r\n new_ins = [x[::-1] for x in args[2: 2 + n_ins]]\r\n else:\r\n new_ins = [x for x in args[2: 2 + n_ins]]\r\n nsteps = abs(nsteps)\r\n # Simplify the inputs by slicing them according to the taps\r\n nw_inputs = []\r\n for inp, info in zip(new_ins, inputs_info):\r\n taps = [x['tap'] for x in info]\r\n\r\n if numpy.min(taps) < 0:\r\n _offset = abs(numpy.min(taps))\r\n else:\r\n _offset = 0\r\n nw_inputs += [inp[_offset + k:] for k in taps]\r\n # Simplify the states by slicing them according to the taps.\r\n # Note that if the memory buffer for the inputs and outputs is\r\n # the same, by changing the outputs we also change the outputs\r\n nw_states_inputs = []\r\n nw_states_outs = []\r\n for st, info in zip(args[2 + n_ins:2 + n_ins + n_states],\r\n states_info):\r\n taps = [x['tap'] for x in info]\r\n\r\n membuf = numpy.zeros((nsteps + abs(numpy.min(taps)), 4))\r\n if abs(numpy.min(taps)) != 1:\r\n membuf[:abs(numpy.min(taps))] = st[:abs(numpy.min(taps))]\r\n else:\r\n membuf[:abs(numpy.min(taps))] = st\r\n\r\n nw_states_inputs += [membuf[abs(numpy.min(taps)) + k:]\r\n for k in taps]\r\n nw_states_outs.append(membuf[abs(numpy.min(taps)):])\r\n\r\n parameters_vals = args[2 + n_ins + n_states:]\r\n out_mem_buffers = [numpy.zeros((nsteps, 4)) for k in\r\n xrange(n_outputs)]\r\n shared_values = [x.copy() for x in original_shared_values]\r\n\r\n for step in xrange(nsteps):\r\n arg_pos = 0\r\n to_add = None\r\n for in_info in inputs_info:\r\n for info in in_info:\r\n arg = nw_inputs[arg_pos][step]\r\n arg_pos += 1\r\n # Construct dummy graph around input\r\n if info['use']:\r\n if to_add is None:\r\n to_add = arg * 2\r\n else:\r\n to_add = to_add + arg * 2\r\n arg_pos = 0\r\n for dx, st_info in enumerate(states_info):\r\n if to_add is not None:\r\n nw_states_outs[dx][step] = to_add\r\n for info in st_info:\r\n arg = nw_states_inputs[arg_pos][step]\r\n arg_pos += 1\r\n if info['use']:\r\n nw_states_outs[dx][step] += arg * 3\r\n for arg, info in zip(parameters_vals, parameters_info):\r\n if info['use']:\r\n if to_add is None:\r\n to_add = arg * 4\r\n else:\r\n to_add = to_add + arg * 4\r\n if to_add is not None:\r\n shared_values = [sh * 5 + to_add for sh in shared_values]\r\n for state in nw_states_outs:\r\n state[step] += to_add\r\n for out in out_mem_buffers:\r\n out[step] = to_add ** 2\r\n else:\r\n shared_values = [sh * 5 for sh in shared_values]\r\n for out in out_mem_buffers:\r\n out[step] = 2\r\n return nw_states_outs + out_mem_buffers, shared_values",
"def get_flow_6frames(self, flows_forward, flows_backward, flows_forward2, flows_backward2):\n d = flows_forward2[0].shape[1]\n flows_backward3 = []\n for flows, flows2 in zip(flows_backward, flows_backward2):\n flow_list = []\n for i in range(d - 1, 0, -1):\n flow_n1 = flows2[:, i - 1, :, :, :]\n flow_n2 = flows[:, i + 1, :, :, :]\n flow_list.insert(0, flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))\n flows_backward3.append(torch.stack(flow_list, 1))\n flows_forward3 = []\n for flows, flows2 in zip(flows_forward, flows_forward2):\n flow_list = []\n for i in range(2, d + 1):\n flow_n1 = flows2[:, i - 1, :, :, :]\n flow_n2 = flows[:, i - 2, :, :, :]\n flow_list.append(flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))\n flows_forward3.append(torch.stack(flow_list, 1))\n return flows_backward3, flows_forward3",
"def _make_maf_flow(flow_depth, flow_width):\n # If not otherwise specified, make the hidden layers of the flow twice\n # as wide as the latent dimension, to make them expressive enough to\n # parameterize a shift and scale for each dimension.\n bijectors = []\n bijectors.append(tfp.bijectors.BatchNormalization())\n # Build the deep MAF flow.\n # Each layer outputs two params per dimension, for shift and scale.\n bijectors.append(\n tfp.bijectors.MaskedAutoregressiveFlow(\n tfp.bijectors.AutoregressiveNetwork(\n params=2, hidden_units=[flow_width]*flow_depth,\n activation='relu')))\n # For numerical stability of training, we need these batch norms.\n bijectors.append(tfp.bijectors.BatchNormalization())\n return tfp.bijectors.Chain(list(reversed(bijectors)))",
"def _build_forward_graph(self):\n\n print('[*] Building a Neural Turing Machine.')\n\n self._initalize_state()\n\n # present start token\n controller_out = self.controller.emit_feature_vector(self.start_token, self.r_t[0], reuse=None)\n self._read_write(controller_out, reuse=None)\n\n # present inputs\n print('Input chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.inputs[t], self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present end token\n controller_out = self.controller.emit_feature_vector(self.end_token, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n # present outputs\n print('Output chain: ')\n for t in range(0, self.sequence_length):\n print_progress(float(t + 1) / self.sequence_length)\n\n controller_out = self.controller.emit_feature_vector(self.zeros, self.r_t[-1], reuse=True)\n self._read_write(controller_out, reuse=True)\n\n reuse = None if t == 0 else True\n self.outputs.append(self._decode_read_vector(self.r_t[-1], reuse=reuse))\n print('Done.')",
"def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3,\n SE=False, expansion=3, stride=1):\n\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n\n # if stride > 1\n # or if block input planes != block output planes (only possible for first block in stack)\n # downsamples skip connection by 1x1-conv filter\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(\n conv1x1(inplanes, outplanes, stride=stride),\n norm_layer(outplanes)\n )\n\n layers = []\n\n # first block in stack can have stride > 1\n layers.append(block(inplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=stride, dropout=self._dropout, downsample=downsample,\n norm_layer=norm_layer, act_layer=act_layer))\n\n # other layers in stack\n # for each layer: inplanes = outplanes, stride=1, downsample=None\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=1, dropout=self._dropout, norm_layer=norm_layer,\n act_layer=act_layer))\n\n return nn.Sequential(*layers)",
"def propagate(self, feats, flows, module_name, updated_flows=None):\n n, t, _, h, w = flows.size()\n if 'backward' in module_name:\n flow_idx = range(0, t + 1)[::-1]\n clip_idx = range(0, (t + 1) // self.clip_size)[::-1]\n else:\n flow_idx = range(-1, t)\n clip_idx = range(0, (t + 1) // self.clip_size)\n if '_1' in module_name:\n updated_flows[f'{module_name}_n1'] = []\n updated_flows[f'{module_name}_n2'] = []\n feat_prop = torch.zeros_like(feats['shallow'][0])\n if self.cpu_cache:\n feat_prop = feat_prop\n last_key = list(feats)[-2]\n for i in range(0, len(clip_idx)):\n idx_c = clip_idx[i]\n if i > 0:\n if '_1' in module_name:\n flow_n01 = flows[:, flow_idx[self.clip_size * i - 1], :, :, :]\n flow_n12 = flows[:, flow_idx[self.clip_size * i], :, :, :]\n flow_n23 = flows[:, flow_idx[self.clip_size * i + 1], :, :, :]\n flow_n02 = flow_n12 + flow_warp(flow_n01, flow_n12.permute(0, 2, 3, 1))\n flow_n13 = flow_n23 + flow_warp(flow_n12, flow_n23.permute(0, 2, 3, 1))\n flow_n03 = flow_n23 + flow_warp(flow_n02, flow_n23.permute(0, 2, 3, 1))\n flow_n1 = torch.stack([flow_n02, flow_n13], 1)\n flow_n2 = torch.stack([flow_n12, flow_n03], 1)\n if self.cpu_cache:\n flow_n1 = flow_n1\n flow_n2 = flow_n2\n else:\n module_name_old = module_name.replace('_2', '_1')\n flow_n1 = updated_flows[f'{module_name_old}_n1'][i - 1]\n flow_n2 = updated_flows[f'{module_name_old}_n2'][i - 1]\n if self.cpu_cache:\n if 'backward' in module_name:\n feat_q = feats[last_key][idx_c].flip(1)\n feat_k = feats[last_key][clip_idx[i - 1]].flip(1)\n else:\n feat_q = feats[last_key][idx_c]\n feat_k = feats[last_key][clip_idx[i - 1]]\n elif 'backward' in module_name:\n feat_q = feats[last_key][idx_c].flip(1)\n feat_k = feats[last_key][clip_idx[i - 1]].flip(1)\n else:\n feat_q = feats[last_key][idx_c]\n feat_k = feats[last_key][clip_idx[i - 1]]\n feat_prop_warped1 = flow_warp(feat_prop.flatten(0, 1), flow_n1.permute(0, 1, 3, 4, 2).flatten(0, 1)).view(n, feat_prop.shape[1], feat_prop.shape[2], h, w)\n feat_prop_warped2 = flow_warp(feat_prop.flip(1).flatten(0, 1), flow_n2.permute(0, 1, 3, 4, 2).flatten(0, 1)).view(n, feat_prop.shape[1], feat_prop.shape[2], h, w)\n if '_1' in module_name:\n feat_prop, flow_n1, flow_n2 = self.deform_align[module_name](feat_q, feat_k, feat_prop, [feat_prop_warped1, feat_prop_warped2], [flow_n1, flow_n2], True)\n updated_flows[f'{module_name}_n1'].append(flow_n1)\n updated_flows[f'{module_name}_n2'].append(flow_n2)\n else:\n feat_prop = self.deform_align[module_name](feat_q, feat_k, feat_prop, [feat_prop_warped1, feat_prop_warped2], [flow_n1, flow_n2], False)\n if 'backward' in module_name:\n feat = [feats[k][idx_c].flip(1) for k in feats if k not in [module_name]] + [feat_prop]\n else:\n feat = [feats[k][idx_c] for k in feats if k not in [module_name]] + [feat_prop]\n if self.cpu_cache:\n feat = [f for f in feat]\n feat_prop = feat_prop + self.backbone[module_name](torch.cat(feat, dim=2))\n feats[module_name].append(feat_prop)\n if self.cpu_cache:\n feats[module_name][-1] = feats[module_name][-1].cpu()\n torch.cuda.empty_cache()\n if 'backward' in module_name:\n feats[module_name] = feats[module_name][::-1]\n feats[module_name] = [f.flip(1) for f in feats[module_name]]\n return feats",
"def _make_stack(self, block, planes, blocks, stride=1, dilate=False):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n\n # use dilation instead of striding if true\n if dilate:\n self.dilation *= stride\n stride = 1\n\n # apply conv-1x1 to input identity if stride > 1 or output channels != input channels for dim. matching\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion)\n )\n\n layers = []\n # first layer\n # input = batch_size x self.inplanes x H x H\n # output = batch_size x planes * block.expansion x H/stride x H/stride\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n # subsequent layers\n for _ in range(1, blocks):\n # input = output = batch_size x planes * block.expansion x H' x H'\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)",
"def _build(self):\n flat_initial_state = nest.flatten(self._initial_state)\n if self._mask is not None:\n flat_mask = nest.flatten(self._mask)\n flat_learnable_state = [\n _single_learnable_state(state, state_id=i, learnable=mask)\n for i, (state, mask) in enumerate(zip(flat_initial_state, flat_mask))]\n else:\n flat_learnable_state = [_single_learnable_state(state, state_id=i)\n for i, state in enumerate(flat_initial_state)]\n\n return nest.pack_sequence_as(structure=self._initial_state,\n flat_sequence=flat_learnable_state)",
"def generate_schedule(top_length, top_depth, left_length, left_depth):\n # The process of calculating the schedule starts from the leftmost\n # topmost element which is active from 0..top_depth timesteps.\n out = np.zeros((left_length, top_length, top_depth), dtype=\"i\")\n out[0][0] = np.arange(top_depth)\n\n # Fill the first col: Every column runs one \"step\" behind the column on\n # its left.\n for col in range(1, top_length):\n out[0][col] = out[0][col - 1] + 1\n\n # Fill the remaining rows. Similarly, all rows run one \"step\" behind the\n # row on their top.\n for row in range(1, left_length):\n out[row][0] = out[row - 1][0] + 1\n for col in range(1, top_length):\n out[row][col] = out[row][col - 1] + 1\n\n return out",
"def get_aligned_feature_6frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...])]\n for i in range(n + 1, 2, -1):\n x_i = x[:, i - 2, ...]\n flow1 = flows_backward[0][:, i - 3, ...]\n if i == n + 1:\n x_ii = torch.zeros_like(x[:, -1, ...])\n flow2 = torch.zeros_like(flows_backward[1][:, -1, ...])\n x_iii = torch.zeros_like(x[:, -1, ...])\n flow3 = torch.zeros_like(flows_backward[2][:, -1, ...])\n elif i == n:\n x_ii = x[:, i - 1, ...]\n flow2 = flows_backward[1][:, i - 3, ...]\n x_iii = torch.zeros_like(x[:, -1, ...])\n flow3 = torch.zeros_like(flows_backward[2][:, -1, ...])\n else:\n x_ii = x[:, i - 1, ...]\n flow2 = flows_backward[1][:, i - 3, ...]\n x_iii = x[:, i, ...]\n flow3 = flows_backward[2][:, i - 3, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_iii_warped = flow_warp(x_iii, flow3.permute(0, 2, 3, 1), 'bilinear')\n x_backward.insert(0, self.pa_deform(torch.cat([x_i, x_ii, x_iii], 1), [x_i_warped, x_ii_warped, x_iii_warped], x[:, i - 3, ...], [flow1, flow2, flow3]))\n x_forward = [torch.zeros_like(x[:, 0, ...])]\n for i in range(0, n - 1):\n x_i = x[:, i, ...]\n flow1 = flows_forward[0][:, i, ...]\n if i == 0:\n x_ii = torch.zeros_like(x[:, 0, ...])\n flow2 = torch.zeros_like(flows_forward[1][:, 0, ...])\n x_iii = torch.zeros_like(x[:, 0, ...])\n flow3 = torch.zeros_like(flows_forward[2][:, 0, ...])\n elif i == 1:\n x_ii = x[:, i - 1, ...]\n flow2 = flows_forward[1][:, i - 1, ...]\n x_iii = torch.zeros_like(x[:, 0, ...])\n flow3 = torch.zeros_like(flows_forward[2][:, 0, ...])\n else:\n x_ii = x[:, i - 1, ...]\n flow2 = flows_forward[1][:, i - 1, ...]\n x_iii = x[:, i - 2, ...]\n flow3 = flows_forward[2][:, i - 2, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_iii_warped = flow_warp(x_iii, flow3.permute(0, 2, 3, 1), 'bilinear')\n x_forward.append(self.pa_deform(torch.cat([x_i, x_ii, x_iii], 1), [x_i_warped, x_ii_warped, x_iii_warped], x[:, i + 1, ...], [flow1, flow2, flow3]))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]",
"def TransformerEncoderStack(self, name, num_layers=1):\n blocks = [\n self.TransformerEncoderLayer(name='iter_{:0>3d}'.format(d))\n for d in range(num_layers)\n ]\n return self.Stack(name, blocks)",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def Stack(self, name, blocks, output_all_layer_hiddens=False):\n if output_all_layer_hiddens:\n graph_inputs = ['input']\n graph_outputs = []\n graph_modules = []\n layer_input = 'input'\n for idx, block in enumerate(blocks):\n layer_output = 'output_{}'.format(idx)\n graph_modules.append((f'{layer_input}->{layer_output}', block))\n graph_outputs.append(layer_output)\n layer_input = layer_output\n return self._Graph(name, graph_inputs, graph_outputs, *graph_modules)\n else:\n return self._MaybeSplit(name, blocks) or self._Seq(name, *blocks)",
"def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. 
TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)",
"def __init__(\n self,\n input_dim: int,\n output_dim: int,\n nr_params: int,\n generic_architecture: bool,\n num_stacks: int,\n num_blocks: int,\n num_layers: int,\n layer_widths: List[int],\n expansion_coefficient_dim: int,\n trend_polynomial_degree: int,\n batch_norm: bool,\n dropout: float,\n activation: str,\n **kwargs,\n ):\n super().__init__(**kwargs)\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.nr_params = nr_params\n self.input_chunk_length_multi = self.input_chunk_length * input_dim\n self.target_length = self.output_chunk_length * input_dim\n self.dropout = dropout\n self.batch_norm = batch_norm\n self.activation = activation\n\n if generic_architecture:\n self.stacks_list = [\n _Stack(\n num_blocks,\n num_layers,\n layer_widths[i],\n nr_params,\n expansion_coefficient_dim,\n self.input_chunk_length_multi,\n self.target_length,\n _GType.GENERIC,\n batch_norm=(\n self.batch_norm and i == 0\n ), # batch norm only on first block of first stack\n dropout=self.dropout,\n activation=self.activation,\n )\n for i in range(num_stacks)\n ]\n else:\n num_stacks = 2\n trend_stack = _Stack(\n num_blocks,\n num_layers,\n layer_widths[0],\n nr_params,\n trend_polynomial_degree + 1,\n self.input_chunk_length_multi,\n self.target_length,\n _GType.TREND,\n batch_norm=self.batch_norm,\n dropout=self.dropout,\n activation=self.activation,\n )\n seasonality_stack = _Stack(\n num_blocks,\n num_layers,\n layer_widths[1],\n nr_params,\n -1,\n self.input_chunk_length_multi,\n self.target_length,\n _GType.SEASONALITY,\n batch_norm=self.batch_norm,\n dropout=self.dropout,\n activation=self.activation,\n )\n self.stacks_list = [trend_stack, seasonality_stack]\n\n self.stacks = nn.ModuleList(self.stacks_list)\n\n # setting the last backcast \"branch\" to be not trainable (without next block/stack, it doesn't need to be\n # backpropagated). Removing this lines would cause logtensorboard to crash, since no gradient is stored\n # on this params (the last block backcast is not part of the final output of the net).\n self.stacks_list[-1].blocks[-1].backcast_linear_layer.requires_grad_(False)\n self.stacks_list[-1].blocks[-1].backcast_g.requires_grad_(False)",
"def create_stack():\n\n return Stack()",
"def calc_layer_stack(layers, num_points, tol=1e-5, max_iterations=inf,\r\n Evac_start=None, Evac_end=None):\r\n total_thickness = sum(layer.lz for layer in layers)\r\n points = np.linspace(0, total_thickness, num=num_points)\r\n # Note: layer_list is NOT the same as layers = [layer0, layer1, ...],\r\n # layer_list is [layer0, layer0, ... layer1, layer1, ... ], i.e. the\r\n # layer of each successive point.\r\n layer_list = [where_am_I(layers, pt)['current_layer']\r\n for pt in points]\r\n matl_list = [layer.matl for layer in layer_list]\r\n eps_0 = np.array([matl.eps_0 for matl in matl_list])\r\n charge_from_dopants = np.zeros(num_points)\r\n for i in range(num_points):\r\n if layer_list[i].n_or_p == 'n':\r\n charge_from_dopants[i] = layer_list[i].dope\r\n elif layer_list[i].n_or_p == 'p':\r\n charge_from_dopants[i] = -layer_list[i].dope\r\n elif layer_list[i].n_or_p == 'i':\r\n charge_from_dopants[i] = 0\r\n else:\r\n raise ValueError(\"n_or_p should be either 'n' or 'p'!\")\r\n ni = np.array([matl.ni for matl in matl_list])\r\n Evac_minus_Ei = np.array([matl.Evac_minus_Ei for matl in matl_list])\r\n \r\n Evac = calc_core(points, eps_0, charge_from_dopants, Evac_minus_Ei, ni,\r\n tol=tol, max_iterations=max_iterations,\r\n Evac_start=Evac_start, Evac_end=Evac_end)\r\n return {'points':points, 'Evac':Evac}",
"def build_fully_connected_graph(input_dim, output_dim, num_intermediate):\n from .models import DAG\n num_emit, num_rec = num_intermediate + input_dim, num_intermediate + output_dim\n activations = torch.zeros(num_rec, dtype=torch.long)\n connections = torch.zeros(num_rec, num_emit, dtype=torch.long)\n for i in range(num_rec):\n num_anc = min(input_dim + i, num_emit)\n connections[i, :num_anc] = 1\n return DAG(input_dim, output_dim, num_intermediate, connections, activations, check_valid=True)",
"def __init__(self):\n self.input_stack = []\n self.output_stack = []"
]
| [
"0.59337586",
"0.5399486",
"0.5390985",
"0.5343883",
"0.53140175",
"0.51704687",
"0.5104873",
"0.50996596",
"0.5072876",
"0.50484717",
"0.5034771",
"0.50327533",
"0.5028579",
"0.50236404",
"0.4965254",
"0.4961731",
"0.49526247",
"0.49449387",
"0.4935633",
"0.49265182",
"0.49210018",
"0.48729357",
"0.4867129",
"0.4856851",
"0.48390326",
"0.48256308",
"0.48191965",
"0.4800067",
"0.47875008",
"0.47824576"
]
| 0.6329806 | 0 |
function used to create suffixes for all word, tag pairs | def __wordsToSuffixes__(self):
suffixes = defaultdict(int)
for word, tag in self.getWordTagDict():
for suffix in self.getSuffixesForWord(word):
suffixes[(suffix, tag)] += 1
return suffixes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSuffixesForWord(self, word):\n suffixes = self.word_suffixes.get(word, False)\n if suffixes is not False:\n return suffixes\n suffixes = []\n if word.isalpha():\n boundary = min(5, len(word))\n for i in range(1, boundary):\n suffixes.append(word[-i:])\n suffixes = tuple(suffixes)\n self.word_suffixes[word] = suffixes\n return suffixes",
"def suffixes(word: str) -> Iterator[str]:\n if not word:\n return\n for i, _ in enumerate(word):\n yield word[i:]",
"def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist",
"def suffixDict():\n return {'trpk': 'tpke', 'forges': 'frgs', 'bypas': 'byp', 'mnr': 'mnr', 'viaduct': 'via', 'mnt': 'mt',\n 'lndng': 'lndg', 'vill': 'vlg', 'aly': 'aly', 'mill': 'ml', 'pts': 'pts', 'centers': 'ctrs', 'row': 'row', 'cnter': 'ctr',\n 'hrbor': 'hbr', 'tr': 'trl', 'lndg': 'lndg', 'passage': 'psge', 'walks': 'walk', 'frks': 'frks', 'crest': 'crst', 'meadows': 'mdws',\n 'freewy': 'fwy', 'garden': 'gdn', 'bluffs': 'blfs', 'vlg': 'vlg', 'vly': 'vly', 'fall': 'fall', 'trk': 'trak', 'squares': 'sqs',\n 'trl': 'trl', 'harbor': 'hbr', 'frry': 'fry', 'div': 'dv', 'straven': 'stra', 'cmp': 'cp', 'grdns': 'gdns', 'villg': 'vlg',\n 'meadow': 'mdw', 'trails': 'trl', 'streets': 'sts', 'prairie': 'pr', 'hts': 'hts', 'crescent': 'cres', 'pass': 'pass',\n 'ter': 'ter', 'port': 'prt', 'bluf': 'blf', 'avnue': 'ave', 'lights': 'lgts', 'rpds': 'rpds', 'harbors': 'hbrs',\n 'mews': 'mews', 'lodg': 'ldg', 'plz': 'plz', 'tracks': 'trak', 'path': 'path', 'pkway': 'pkwy', 'gln': 'gln',\n 'bot': 'btm', 'drv': 'dr', 'rdg': 'rdg', 'fwy': 'fwy', 'hbr': 'hbr', 'via': 'via', 'divide': 'dv', 'inlt': 'inlt',\n 'fords': 'frds', 'avenu': 'ave', 'vis': 'vis', 'brk': 'brk', 'rivr': 'riv', 'oval': 'oval', 'gateway': 'gtwy',\n 'stream': 'strm', 'bayoo': 'byu', 'msn': 'msn', 'knoll': 'knl', 'expressway': 'expy', 'sprng': 'spg',\n 'flat': 'flt', 'holw': 'holw', 'grden': 'gdn', 'trail': 'trl', 'jctns': 'jcts', 'rdgs': 'rdgs',\n 'tunnel': 'tunl', 'ml': 'ml', 'fls': 'fls', 'flt': 'flt', 'lks': 'lks', 'mt': 'mt', 'groves': 'grvs',\n 'vally': 'vly', 'ferry': 'fry', 'parkway': 'pkwy', 'radiel': 'radl', 'strvnue': 'stra', 'fld': 'fld',\n 'overpass': 'opas', 'plaza': 'plz', 'estate': 'est', 'mntn': 'mtn', 'lock': 'lck', 'orchrd': 'orch',\n 'strvn': 'stra', 'locks': 'lcks', 'bend': 'bnd', 'kys': 'kys', 'junctions': 'jcts', 'mountin': 'mtn',\n 'burgs': 'bgs', 'pine': 'pne', 'ldge': 'ldg', 'causway': 'cswy', 'spg': 'spg', 'beach': 'bch', 'ft': 'ft',\n 'crse': 'crse', 'motorway': 'mtwy', 'bluff': 'blf', 'court': 'ct', 'grov': 'grv', 'sprngs': 'spgs',\n 'ovl': 'oval', 'villag': 'vlg', 'vdct': 'via', 'neck': 'nck', 'orchard': 'orch', 'light': 'lgt',\n 'sq': 'sq', 'pkwy': 'pkwy', 'shore': 'shr', 'green': 'grn', 'strm': 'strm', 'islnd': 'is',\n 'turnpike': 'tpke', 'stra': 'stra', 'mission': 'msn', 'spngs': 'spgs', 'course': 'crse',\n 'trafficway': 'trfy', 'terrace': 'ter', 'hway': 'hwy', 'avenue': 'ave', 'glen': 'gln',\n 'boul': 'blvd', 'inlet': 'inlt', 'la': 'ln', 'ln': 'ln', 'frst': 'frst', 'clf': 'clf',\n 'cres': 'cres', 'brook': 'brk', 'lk': 'lk', 'byp': 'byp', 'shoar': 'shr', 'bypass': 'byp',\n 'mtin': 'mtn', 'ally': 'aly', 'forest': 'frst', 'junction': 'jct', 'views': 'vws', 'wells': 'wls', 'cen': 'ctr',\n 'exts': 'exts', 'crt': 'ct', 'corners': 'cors', 'trak': 'trak', 'frway': 'fwy', 'prarie': 'pr', 'crossing': 'xing',\n 'extn': 'ext', 'cliffs': 'clfs', 'manors': 'mnrs', 'ports': 'prts', 'gatewy': 'gtwy', 'square': 'sq', 'hls': 'hls',\n 'harb': 'hbr', 'loops': 'loop', 'mdw': 'mdw', 'smt': 'smt', 'rd': 'rd', 'hill': 'hl', 'blf': 'blf',\n 'highway': 'hwy', 'walk': 'walk', 'clfs': 'clfs', 'brooks': 'brks', 'brnch': 'br', 'aven': 'ave',\n 'shores': 'shrs', 'iss': 'iss', 'route': 'rte', 'wls': 'wls', 'place': 'pl', 'sumit': 'smt', 'pines': 'pnes',\n 'trks': 'trak', 'shoal': 'shl', 'strt': 'st', 'frwy': 'fwy', 'heights': 'hts', 'ranches': 'rnch',\n 'boulevard': 'blvd', 'extnsn': 'ext', 'mdws': 'mdws', 'hollows': 'holw', 'vsta': 'vis', 'plains': 'plns',\n 'station': 'sta', 'circl': 'cir', 'mntns': 'mtns', 'prts': 'prts', 'shls': 
'shls', 'villages': 'vlgs',\n 'park': 'park', 'nck': 'nck', 'rst': 'rst', 'haven': 'hvn', 'turnpk': 'tpke', 'expy': 'expy', 'sta': 'sta',\n 'expr': 'expy', 'stn': 'sta', 'expw': 'expy', 'street': 'st', 'str': 'st', 'spurs': 'spur', 'crecent': 'cres',\n 'rad': 'radl', 'ranch': 'rnch', 'well': 'wl', 'shoals': 'shls', 'alley': 'aly', 'plza': 'plz', 'medows': 'mdws',\n 'allee': 'aly', 'knls': 'knls', 'ests': 'ests', 'st': 'st', 'anx': 'anx', 'havn': 'hvn', 'paths': 'path', 'bypa': 'byp',\n 'spgs': 'spgs', 'mills': 'mls', 'parks': 'park', 'byps': 'byp', 'flts': 'flts', 'tunnels': 'tunl', 'club': 'clb', 'sqrs': 'sqs',\n 'hllw': 'holw', 'manor': 'mnr', 'centre': 'ctr', 'track': 'trak', 'hgts': 'hts', 'rnch': 'rnch', 'crcle': 'cir', 'falls': 'fls',\n 'landing': 'lndg', 'plaines': 'plns', 'viadct': 'via', 'gdns': 'gdns', 'gtwy': 'gtwy', 'grove': 'grv', 'camp': 'cp', 'tpk': 'tpke',\n 'drive': 'dr', 'freeway': 'fwy', 'ext': 'ext', 'points': 'pts', 'exp': 'expy', 'ky': 'ky', 'courts': 'cts', 'pky': 'pkwy', 'corner': 'cor',\n 'crssing': 'xing', 'mnrs': 'mnrs', 'unions': 'uns', 'cyn': 'cyn', 'lodge': 'ldg', 'trfy': 'trfy', 'circle': 'cir', 'bridge': 'brg',\n 'dl': 'dl', 'dm': 'dm', 'express': 'expy', 'tunls': 'tunl', 'dv': 'dv', 'dr': 'dr', 'shr': 'shr', 'knolls': 'knls', 'greens': 'grns',\n 'tunel': 'tunl', 'fields': 'flds', 'common': 'cmn', 'orch': 'orch', 'crk': 'crk', 'river': 'riv', 'shl': 'shl', 'view': 'vw',\n 'crsent': 'cres', 'rnchs': 'rnch', 'crscnt': 'cres', 'arc': 'arc', 'btm': 'btm', 'blvd': 'blvd', 'ways': 'ways', 'radl': 'radl',\n 'rdge': 'rdg', 'causeway': 'cswy', 'parkwy': 'pkwy', 'juncton': 'jct', 'statn': 'sta', 'gardn': 'gdn', 'mntain': 'mtn',\n 'crssng': 'xing', 'rapid': 'rpd', 'key': 'ky', 'plns': 'plns', 'wy': 'way', 'cor': 'cor', 'ramp': 'ramp', 'throughway': 'trwy',\n 'estates': 'ests', 'ck': 'crk', 'loaf': 'lf', 'hvn': 'hvn', 'wall': 'wall', 'hollow': 'holw', 'canyon': 'cyn', 'clb': 'clb',\n 'cswy': 'cswy', 'village': 'vlg', 'cr': 'crk', 'trce': 'trce', 'cp': 'cp', 'cv': 'cv', 'ct': 'cts', 'pr': 'pr', 'frg': 'frg',\n 'jction': 'jct', 'pt': 'pt', 'mssn': 'msn', 'frk': 'frk', 'brdge': 'brg', 'cent': 'ctr', 'spur': 'spur', 'frt': 'ft', 'pk': 'park',\n 'fry': 'fry', 'pl': 'pl', 'lanes': 'ln', 'gtway': 'gtwy', 'prk': 'park', 'vws': 'vws', 'stravenue': 'stra', 'lgt': 'lgt',\n 'hiway': 'hwy', 'ctr': 'ctr', 'prt': 'prt', 'ville': 'vl', 'plain': 'pln', 'mount': 'mt', 'mls': 'mls', 'loop': 'loop',\n 'riv': 'riv', 'centr': 'ctr', 'is': 'is', 'prr': 'pr', 'vl': 'vl', 'avn': 'ave', 'vw': 'vw', 'ave': 'ave', 'spng': 'spg',\n 'hiwy': 'hwy', 'dam': 'dm', 'isle': 'isle', 'crcl': 'cir', 'sqre': 'sq', 'jct': 'jct', 'jctn': 'jct', 'mountain': 'mtn',\n 'keys': 'kys', 'parkways': 'pkwy', 'drives': 'drs', 'tunl': 'tunl', 'jcts': 'jcts', 'knl': 'knl', 'center': 'ctr',\n 'driv': 'dr', 'tpke': 'tpke', 'sumitt': 'smt', 'canyn': 'cyn', 'ldg': 'ldg', 'harbr': 'hbr', 'rest': 'rst', 'shoars': 'shrs',\n 'vist': 'vis', 'gdn': 'gdn', 'islnds': 'iss', 'hills': 'hls', 'cresent': 'cres', 'point': 'pt', 'lake': 'lk', 'vlly': 'vly',\n 'strav': 'stra', 'crossroad': 'xrd', 'bnd': 'bnd', 'strave': 'stra', 'stravn': 'stra', 'knol': 'knl', 'vlgs': 'vlgs',\n 'forge': 'frg', 'cntr': 'ctr', 'cape': 'cpe', 'height': 'hts', 'lck': 'lck', 'highwy': 'hwy', 'trnpk': 'tpke', 'rpd': 'rpd',\n 'boulv': 'blvd', 'circles': 'cirs', 'valleys': 'vlys', 'vst': 'vis', 'creek': 'crk', 'mall': 'mall', 'spring': 'spg',\n 'brg': 'brg', 'holws': 'holw', 'lf': 'lf', 'est': 'est', 'xing': 'xing', 'trace': 'trce', 'bottom': 'btm',\n 'streme': 
'strm', 'isles': 'isle', 'circ': 'cir', 'forks': 'frks', 'burg': 'bg', 'run': 'run', 'trls': 'trl',\n 'radial': 'radl', 'lakes': 'lks', 'rue': 'rue', 'vlys': 'vlys', 'br': 'br', 'cors': 'cors', 'pln': 'pln',\n 'pike': 'pike', 'extension': 'ext', 'island': 'is', 'frd': 'frd', 'lcks': 'lcks', 'terr': 'ter',\n 'union': 'un', 'extensions': 'exts', 'pkwys': 'pkwy', 'islands': 'iss', 'road': 'rd', 'shrs': 'shrs',\n 'roads': 'rds', 'glens': 'glns', 'springs': 'spgs', 'missn': 'msn', 'ridge': 'rdg', 'arcade': 'arc',\n 'bayou': 'byu', 'crsnt': 'cres', 'junctn': 'jct', 'way': 'way', 'valley': 'vly', 'fork': 'frk',\n 'mountains': 'mtns', 'bottm': 'btm', 'forg': 'frg', 'ht': 'hts', 'ford': 'frd', 'hl': 'hl',\n 'grdn': 'gdn', 'fort': 'ft', 'traces': 'trce', 'cnyn': 'cyn', 'cir': 'cir', 'un': 'un', 'mtn': 'mtn',\n 'flats': 'flts', 'anex': 'anx', 'gatway': 'gtwy', 'rapids': 'rpds', 'villiage': 'vlg', 'flds': 'flds',\n 'coves': 'cvs', 'rvr': 'riv', 'av': 'ave', 'pikes': 'pike', 'grv': 'grv', 'vista': 'vis', 'pnes': 'pnes',\n 'forests': 'frst', 'field': 'fld', 'branch': 'br', 'grn': 'grn', 'dale': 'dl', 'rds': 'rds', 'annex': 'anx',\n 'sqr': 'sq', 'cove': 'cv', 'squ': 'sq', 'skyway': 'skwy', 'ridges': 'rdgs', 'hwy': 'hwy', 'tunnl': 'tunl',\n 'underpass': 'upas', 'cliff': 'clf', 'lane': 'ln', 'land': 'land', 'bch': 'bch', 'dvd': 'dv', 'curve': 'curv',\n 'cpe': 'cpe', 'summit': 'smt', 'gardens': 'gdns'}",
"def FindSuffix(self):\n self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1",
"def suffix_replace(original, old, new):\n ...",
"def disambiguateWordsOld(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\t\t\t\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=zip(word_list,tag_list);\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tif i+1<=len(wordtaglist):\n\t\t\t\t\t# do tests with next word\n\t\t\t\t\t# إذا كانت الكلمة الحالية \"أن\" تكون \"أنْ\" حرف نصب إذا سبقت فعلا\n\t\t\t\t\t# وتكون أنّ، من أخوات إنّ إذا كان ما بعدها اسما\n\t\t\t\t\tif wordtaglist[i][0]==u'أن' and self.tagger.isVerbTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case1';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنْ','t');\n\t\t\t\t\telif wordtaglist[i][0]==u'أن' and self.tagger.isNounTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case 2';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنَّ','t');\n\t\t\t\tnewwordlist.append(wordtaglist[i][0]);\n\t\t\treturn newwordlist;",
"def feature(root, suffix):\r\n if suffix == '$':\r\n return ('$', suffix)\r\n return (root[-1], suffix[0])",
"def test_get_suffixes(self):\n\n ans = self.short_sf.get_suffixes()\n\n self.assertEqual(ans, [(0, 0), (1, 1), (0, 1), (0, 2), (1, 0), (1, 2)])",
"def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def tag_bioes(tags, match_index, term_length):\n\n if term_length == 1:\n tags[match_index] = \"S\"\n else:\n for i in range(term_length):\n if i == 0:\n tags[match_index + i] = \"B\"\n elif i == term_length - 1:\n tags[match_index + i] = \"E\"\n else:\n tags[match_index + i] = \"I\"\n return tags",
"def build_suffix_array(text):\r\n # Initialization with one char suffixes.\r\n order = sortCharacters(text)\r\n classes = computeCharClasses(text, order)\r\n L = 1\r\n while L < len(text):\r\n order = sortDoubled(text, L, order, classes)\r\n classes = updateClasses(order, classes, L)\r\n L = 2*L\r\n return order",
"def make_tags(tag, word):\n tag1 = \"<{}>\".format(tag)\n tag2 = \"</{}>\".format(tag)\n final = tag1 + word + tag2\n return final",
"def _get_manber_myers_suffixes(self, seq:str=None) -> List:\n if not seq: seq = self.seq\n return self._sort_manber_myers([i for i in range(len(seq))])",
"def _parse_suffix(i, doc):\n\n i, = parse_pattern(i, doc, \"// <<< end of configuration section >>>\")\n i, = parse_pattern(i, doc, \"#endif\")\n\n return parse_blanks(i, doc)",
"def _read_suffixes(lookup, suffixes):\n for uid in suffixes:\n d = suffixes[uid]\n s = lookup[uid] # suffixes keys are ids, so get suffix component\n for key in d: # set values from value dict\n try:\n kc = lookup[int(key)] # use int because json turn keys to string\n except KeyError:\n continue\n s[kc] = d[key]",
"def _applySuffixes(self, value, suffixes):\n\t\tif suffixes:\n\t\t\tfor args in suffixes:\n\t\t\t\tname=args[0]\n\t\t\t\tif (name == u'Invocation'):\n\t\t\t\t\tif ((type(args[1]) == list) or (type(args[1]) == tuple)):\n\t\t\t\t\t\tvalue = F.invoke_args(value, args[1])\n\t\t\t\t\telif True:\n\t\t\t\t\t\tvalue = F.invoke(value, args[1])\n\t\t\t\telif (name == u'ComputationInfix'):\n\t\t\t\t\top=self.normalizeOperator(args[1])\n\t\t\t\t\tif (op == u'..'):\n\t\t\t\t\t\trvalue=args[2]\n\t\t\t\t\t\tif (isinstance(rvalue, interfaces.IIteration) and (not rvalue.hasAnnotation(u'parens'))):\n\t\t\t\t\t\t\tvalue = F.enumerate(value, rvalue.getLeftOperand().detach())\n\t\t\t\t\t\t\trvalue.setLeftOperand(value)\n\t\t\t\t\t\t\tvalue = rvalue\n\t\t\t\t\t\telif True:\n\t\t\t\t\t\t\tvalue = F.enumerate(value, args[2])\n\t\t\t\t\telif True:\n\t\t\t\t\t\tvalue = F.compute(F._op(op, self.getOperatorPriority(op)), value, args[2])\n\t\t\t\telif (name == u'Decomposition'):\n\t\t\t\t\tfor _ in args[1]:\n\t\t\t\t\t\tvalue = F.resolve(_, value)\n\t\t\t\telif (name == u'Access'):\n\t\t\t\t\tvalue = F.access(value, args[1])\n\t\t\t\telif (name == u'Slice'):\n\t\t\t\t\tvalue = F.slice(value, args[1], args[2])\n\t\t\t\telif (name == u'IterationSuffix'):\n\t\t\t\t\tvalue = args[1](value, args[2])\n\t\t\t\telif (name == u'Chain'):\n\t\t\t\t\tif (type(value) is list):\n\t\t\t\t\t\tipdb.set_trace()\n\t\t\t\t\talloc=value\n\t\t\t\t\tref=None\n\t\t\t\t\tif (not isinstance(alloc, interfaces.IAllocation)):\n\t\t\t\t\t\tname=((u'_c' + str(self.varcounter)) + u'_')\n\t\t\t\t\t\tself.varcounter = (self.varcounter + 1)\n\t\t\t\t\t\tslot=F._slot(name)\n\t\t\t\t\t\talloc=F.allocate(slot, value)\n\t\t\t\t\t\tref = F._ref(name)\n\t\t\t\t\telif True:\n\t\t\t\t\t\tref = alloc.getSlot()\n\t\t\t\t\tres=[alloc]\n\t\t\t\t\tfor g in (args[1] or []):\n\t\t\t\t\t\tres.append(self._applySuffixes(ref.copy(), g))\n\t\t\t\t\tvalue = res\n\t\t\t\telif True:\n\t\t\t\t\traise Exception((((u'sugar2.writer._applySuffixes: Suffix not supported yet: ' + str(name)) + u' in ') + str(args)))\n\t\treturn value",
"def get_suffix_configuration(lst):\n suffix_conf = ''\n for elem in lst: \n suffix_conf += '_'\n if type(elem) != str: \n elem = str(elem)\n suffix_conf += elem\n return suffix_conf",
"def gen_word(prefixes, suffixes):\n word = ''\n for time in range(random.choice([1, 2, 3])):\n word += random.choice(prefixes)[0]\n word += random.choice(suffixes)[0]\n return word",
"def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest",
"def disambiguate(label, labels):\n label = label.replace(' ', '_')\n if label not in labels:\n return label\n suffix = 1\n while label + ('_%i' % suffix) in labels:\n suffix += 1\n return label + ('_%i' % suffix)",
"def get_suffix_ml_model():\n suffix = ''\n \n # consider if the model uses tail or not\n if gml.USE_TAIL: \n suffix += '_use_tail'\n else: \n suffix += '_no_tail'\n\n # consider the way of picking target variable for the model\n if gml.WAY_MODEL_TGT == 'absolute':\n suffix += '_absolute'\n elif gml.WAY_MODEL_TGT == 'relative':\n suffix += '_relative'\n else: \n exit('error on the function that gets suffix')\n\n return suffix",
"def join_mwp(tags: List[str]) -> List[str]:\n ret = []\n verb_flag = False\n for tag in tags:\n if \"V\" in tag:\n # Create a continuous 'V' BIO span\n prefix, _ = tag.split(\"-\", 1)\n if verb_flag:\n # Continue a verb label across the different predicate parts\n prefix = \"I\"\n ret.append(f\"{prefix}-V\")\n verb_flag = True\n else:\n ret.append(tag)\n verb_flag = False\n\n return ret",
"def construct_suffix_array(T, SA, n, sigma):\n if len(T) == 1: # special case\n SA[0] = 0\n return SA\n\n t = classify(T, n) # step 1: classification\n lms = find_lms_suffixes(t, n) # step 2: finding the indices of LMS suffixes\n m = len(lms)\n\n # print_types(t)\n\n alpha, sizes = buckets(T, sigma) # finding the bucket sizes and alphabet of T\n heads, tails = bucket_intervals(alpha, sizes, sigma)\n induced_sorting(\n lms, tails, heads, SA, t, T, n, m, alpha, sizes, sigma\n ) # first induced sort\n\n ordered_lms = array(\"L\", [0] * len(lms))\n\n reduced_text, blocks_unique, sigma_reduced = get_reduced_substring(\n t, SA, lms, ordered_lms, T, n, m\n )\n reduced_SA = array(\"l\", [-1] * m) # reduced SA\n if blocks_unique: # base case\n # compute suffix array manually\n for i in range(m):\n reduced_SA[reduced_text[i]] = i\n else:\n construct_suffix_array(reduced_text, reduced_SA, m, sigma_reduced)\n\n # use the suffix array to sort the LMS suffixes\n for i in range(m):\n ordered_lms[i] = lms[reduced_SA[i]]\n\n heads, tails = bucket_intervals(alpha, sizes, sigma) # reset bucket tails and heads\n for i in range(n):\n SA[i] = 0 # clear suffix array\n induced_sorting(ordered_lms, tails, heads, SA, t, T, n, m, alpha, sizes, sigma)",
"def w2f(sents,i,j,filename,freq):\n w = sents[i][j][0] #current word\n pos = sents[i][j][1] #POS of current word\n f = [ \n 'bias', #non-contextual feature \n 'w=' + w, #current word \n 'w.istitle=%s' % w.istitle(), #first letter - capitalized\n 'pos=' + pos, # POS tag\n 'w.intitle=%s' % contained_in_title(w, filename), # w matches title\n 'w.lowtitle=%s' % lower_in_title(w, filename), # w lower matches title\n 'w.freq=%s' % frequency(w, freq), # freq of w \n 'w.stopword=%s' % stop_word(w), # # stop word\n ]\n \n # previous word features\n if j>0:\n pw = sents[i][j-1][0] #previous word\n ppos = sents[i][j-1][1] #POS of previous word\n f.extend([ \n 'pw=' + pw, # previous word \n 'pw.istitle=%s' % pw.istitle(), #first letter - capitalized\n 'ppos=' + ppos, # POS tag\n 'pw.intitle=%s' % contained_in_title(pw, filename), # w matches title\n 'pw.lowtitle=%s' % lower_in_title(pw,filename), # w lower matches title\n 'pw.freq=%s' % frequency(pw, freq), # freq of w\n 'pw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('BOS') #first word of a sentence\n\n # next word features\n if j<len(sents[i])-1:\n nw = sents[i][j+1][0] #next word\n npos = sents[i][j+1][1] #POS of next word\n f.extend([ \n 'nw=' + nw, # previous word\n 'nw.istitle=%s' % nw.istitle(), #first letter - capitalized\n 'npos=' + npos, #POS tag\n 'nw.intitle=%s' % contained_in_title(nw, filename), # w matches title\n 'nw.lowtitle=%s' % lower_in_title(nw,filename), # w lower matches title\n 'nw.freq=%s' % frequency(nw, freq), # freq of w\n 'nw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('EOS') # last word of a sentence\n\n #if j>1: ...\n #if j<len(sents[i])-2: ...\n #if j>0 and j<len(sents[i])-1: ...\n return f",
"def suffixes(self):\n suffixes = []\n for constraint, suffix in self.conf.get(\"suffixes\", {}).items():\n if constraint in self.spec:\n suffixes.append(suffix)\n suffixes = list(dedupe(suffixes))\n if self.hash:\n suffixes.append(self.hash)\n return suffixes",
"def _build_tag_suffix() -> str:\n now = datetime.datetime.now(tz=datetime.timezone.utc).astimezone()\n return now.strftime(\".%Y%m%d.0\")",
"def _gen_words(sentence, labels):\r\n word = \"\"\r\n words = []\r\n for token, label in zip(sentence, labels):\r\n word += token\r\n if label in [1, 3]:\r\n words.append(word)\r\n word = \"\"\r\n return words",
"def generate_good_suffix(self, pattern: str, suffix, prefix):\n M = self.M\n # i is the index of scanning index in pattern, range from [0,M-2], to scan substring before pattern[M-1]\n for i in range(M - 1):\n # j is the index of suffix index\n j = i\n # k is the suffix array length, also index, should range from [1,M-1]\n k = 0\n while j >= 0 and pattern[j] == pattern[M - 1 - k]:\n j -= 1\n k += 1\n suffix[k] = j + 1\n if j == -1:\n prefix[k] = True"
]
| [
"0.69789803",
"0.64744955",
"0.64021295",
"0.63340497",
"0.61450887",
"0.6061978",
"0.59539205",
"0.5947468",
"0.5944209",
"0.5911151",
"0.582733",
"0.5802634",
"0.5780319",
"0.572288",
"0.5706446",
"0.5699643",
"0.56922",
"0.5691394",
"0.5691183",
"0.5689901",
"0.5635199",
"0.5593269",
"0.5578803",
"0.5516348",
"0.5512248",
"0.55070275",
"0.5501541",
"0.5500442",
"0.54867584",
"0.5467996"
]
| 0.73820376 | 0 |
function used to create prefixes for all word, tag pairs | def __wordsToPrefixes__(self):
prefixes = defaultdict(int)
for word, tag in self.getWordTagDict():
for prefix in self.getPrefixesForWord(word):
prefixes[(prefix, tag)] += 1
return prefixes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _addPrefixes(data):\n prevTags = None\n newData = []\n\n for n, (token, tags) in enumerate(data):\n\n newTags = []\n\n for t in tags:\n p = \"B\" if ((prevTags is None) or (t not in prevTags)) else \"I\"\n newTags.append(\"%s-%s\" % (p, t))\n\n newData.append((token, newTags))\n prevTags = tags\n\n return newData",
"def getPrefixesForWord(self, word):\n prefixes = self.word_prefixes.get(word, False)\n if prefixes is not False:\n return prefixes\n prefixes = []\n if word.isalpha():\n boundary = min(5, len(word))\n for i in range(2, boundary):\n prefixes.append(word[:i])\n prefixes = tuple(prefixes)\n self.word_prefixes[word] = prefixes\n return prefixes",
"def _prefixes(self, title, filter_stopwords=False):\n for word in self._clean_words(title, filter_stopwords=filter_stopwords):\n prefixer = partial(word.__getslice__, 0)\n for prefix in imap(prefixer, range(1, len(word) + 1)):\n yield prefix",
"def _build_prefix(self):\r\n pattern = self.string2\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def prefix_replace(original, old, new):\n ...",
"def apply_prefix(string):\n for short, long in PREFIXES.items():\n if string.startswith(long):\n return short + ':' + string[len(long):]\n return string",
"def prefix(prefix_list):\n def add_attribute(func):\n if not hasattr(func, \"prefix\"):\n func.prefix = []\n func.prefix.append(prefix_list)\n return func\n return add_attribute",
"def add_prefix(prefix = \"Peptides\"):\n var_list = gen_cell_lines_states_replicates()\n prefix = prefix\n res_list = []\n for i in var_list:\n unit_str = prefix + \" \"\n unit_str += i\n res_list.append(unit_str)\n return res_list",
"def prefixes(s):\n if s:\n yield from prefixes(s[:-1])\n yield s",
"def build_prefix(self):\r\n pattern = self.pattern\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def prefixes(s):\n output = ''\n for i in range(len(s) + 1):\n add = s[0:i]\n output += add\n return output",
"def create_prefixes(audio_uploads_path: Path, transcription_uploads_path: Path, prefix_information,\n base_path: Path, prefix_name: str) -> set:\n prefixes = set()\n count = 0\n for data in prefix_information:\n count += 1\n label_filename = data.utterance.transcription.file_info.name\n\n # using the prefix of the label file to specify the prefix\n prefix, extension = os.path.splitext(label_filename)\n cleaned_prefix = strip_unsafe_characters(prefix)\n prefixes.add(cleaned_prefix)\n\n # copy transcription to \"/label\" directory\n label_src_path = transcription_uploads_path / label_filename\n label_dest_path = base_path / \"label\" / (cleaned_prefix+extension)\n copyfile(str(label_src_path), str(label_dest_path))\n\n # copy audio to \"/wav\" directory\n audio_filename = data.utterance.audio.file_info.name\n audio_src_path = audio_uploads_path / audio_filename\n audio_dest_path = base_path / \"wav\" / (cleaned_prefix+\".wav\")\n copyfile(str(audio_src_path), str(audio_dest_path))\n\n if len(prefixes) != count:\n raise ValueError(\"Duplicate prefix found\")\n\n prefix_file_path = base_path / prefix_name\n with prefix_file_path.open(mode='w') as pf:\n for prefix in prefixes:\n pf.write(prefix)\n pf.write(os.linesep)\n return prefixes",
"def genPrefixAntString(self,estimatedVar,prefix=\"_\"):\n self.prefixAntString = self.antString\n for name in estimatedVar:\n self.prefixAntString = replaceVariable(self.prefixAntString,\n name,prefix+name)",
"def add_sister_prefixes_helper(a, ephrases, enode, i):\n\n j = i+enode.length\n if logger.level >= 3:\n logger.write(\"(i,j) = %s\\n\" % ((i,j),))\n x = enode.label\n j1 = i\n for ci in range(len(enode.children)):\n child = enode.children[ci]\n j1 += child.length\n if logger.level >= 3:\n logger.write(\"(i,j1) = %s\\n\" % ((i,j1),))\n if j1 < j and (i,j1) in ephrases:\n\n # constprefix3:\n #x1 = sym.fromtag(\"%s*\" % x)\n\n # subcat-lr2:\n #subcat = [sister.label for sister in enode.children[ci+1:] if sister.required]\n #x1 = sym.fromtag(\"/\".join([\"%s*\"%x]+subcat))\n \n # markov1:\n x1 = sym.fromtag(\"%s/%s\" % (x, enode.children[ci+1].label))\n\n # markov2:\n #x1 = sym.fromtag(\"%s(%s)\" % (x, enode.children[ci].label))\n \n a.espans.setdefault((i,j1),[]).append(x1)\n prefix_labels.add(x1)\n \n for child in enode.children:\n add_sister_prefixes_helper(a, ephrases, child, i)\n i += child.length",
"def neuronyms(input_str, k):\n n = len(input_str)\n result = []\n\n for length in range(k, n-k+1):\n for start in range (1, n - length):\n prefix = input_str[:start]\n suffix = input_str[(start+length):]\n res_str = prefix+str(length)+suffix\n result.append(res_str)\n\n return result",
"def _prefixed_items_from_list(items: List[namedtuple], item_prefix, prefix, tag_names: Set[str] = set([])):\n result = {}\n for index, nt in enumerate(items):\n result[\"%s%d\" % (item_prefix, index)] = _parse(nt, prefix, tag_names)\n return result",
"def s0_full_definition(array_with_prefixes, array2_with_prefixless_objects):\n prefixes = []\n prefix = str(\"\")\n for i in range(len(array_with_prefixes)):\n is_end_of_word = False\n for char in array_with_prefixes[i]:\n if char == \"(\":\n prefixes.append(prefix)\n prefix = str(\"\")\n is_end_of_word = True\n elif is_end_of_word == False:\n #concatinate prefix and char as long as it isn't the end of the word\n prefix += char\n \n #Iterate through the prefixes array and assign the corresponding objects to those prefixes\n for i in range(len(prefixes)):\n if prefixes[i] == \"CLEAR\":\n single_string = get_clear_parameter(array_with_prefixes[i])\n for i in range(len(array2_with_prefixless_objects)):\n if single_string == array2_with_prefixless_objects[i].object_name:\n single_parameter = array2_with_prefixless_objects[i]\n single_parameter.on = None\n \n elif prefixes[i] == \"ON\":\n double_string = get_on_parameters(array_with_prefixes[i])\n parameters_array = []\n for i in range(len(array2_with_prefixless_objects)):\n if double_string[0] == array2_with_prefixless_objects[i].object_name:\n parameters_array.append(array2_with_prefixless_objects[i])\n \n for i in range(len(array2_with_prefixless_objects)):\n if double_string[1] == array2_with_prefixless_objects[i].object_name:\n parameters_array.append(array2_with_prefixless_objects[i])\n parameters_array[1].on = parameters_array[0].object_name\n\n elif prefixes[i] == \"HEAVIER\":\n parameters_array = []\n double_string = get_heavier_parameters(array_with_prefixes[i])\n \n for i in range(len(array2_with_prefixless_objects)):\n if double_string[0] == array2_with_prefixless_objects[i].object_name:\n parameters_array.append(array2_with_prefixless_objects[i])\n \n for i in range(len(array2_with_prefixless_objects)):\n if double_string[1] == array2_with_prefixless_objects[i].object_name:\n parameters_array.append(array2_with_prefixless_objects[i])\n parameters_array[0].is_heavier_than.append(parameters_array[1].object_name)",
"def prefixCombiner(prefix, itemlist, glue=''):\n result = []\n for item in itemlist:\n result.append(prefix + glue + item)\n return result",
"def add_prefix(inputs, prefix):\n\n outputs = dict()\n for name, value in inputs.items():\n outputs[f\"{prefix}.{name}\"] = value\n\n return outputs",
"def test_ipam_prefixes_create(self):\n pass",
"async def prefix(self, _bot, message: discord.Message):\n mention = [self.user.mention + ' ', f'<@!{self.user.id}> ']\n additional_prefixes = await self.get_prefixes(message.guild)\n return self.cfg['bot']['prefixes'] + mention + additional_prefixes",
"def prefixes(s):\r\n if s:\r\n yield from prefixes(s[:-1]) # First yield prefixes from s[:-1], then yield the last one s.\r\n yield s",
"def make_tags(tag, word):\n tag1 = \"<{}>\".format(tag)\n tag2 = \"</{}>\".format(tag)\n final = tag1 + word + tag2\n return final",
"def _gennames(prefix, base, number):\n for index in xrange(number):\n yield \"%s%d\" % (prefix, base + index)",
"def generate_word(model, whitelist, topk, radix=\"\"):\n if whitelist.empty():\n yield \"\", 0\n else:\n for prefix, prefix_proba in iterate_continutations(model, radix, whitelist, topk):\n if prefix is None:\n continue\n for suffix, suffix_proba in generate_word(\n model,\n whitelist.sub(LetterBag(prefix)),\n topk,\n radix + prefix):\n if suffix is None:\n continue\n yield prefix + suffix, prefix_proba + suffix_proba\n yield None, 0",
"async def process_prefix_list(\n guild: disnake.Guild,\n ctx: commands.Context = None,\n inter: AppCmdInter = None,\n allowed_mentions=None,\n):\n await create_guild_model(guild)\n guild = await Guild.get(guild.id)\n msg = f\"The following are the custom prefixes for {guild.name}:\\n\" + \", \".join(\n guild.prefixes\n )\n await send_message(msg=msg, ctx=ctx, inter=inter, allowed_mentions=allowed_mentions)",
"def _generate_new_prefix(current_prefix, class_name):\n return (\n \"_\".join((current_prefix, class_name)).upper()\n if current_prefix\n else class_name.upper()\n )",
"def _prefixed(nt: namedtuple, prefix):\n result = {}\n for key, value in nt._asdict().items():\n result[prefix + key] = value\n return result",
"def prefixer(self,sn):\n\n\t\t#---\"spot\" is a tuple of spotname and the part name\n\t\t#---namer takes the spotname (called spot in the yaml defn of namer) and the simulation name\n\t\t#---we include the partname when accessing self.spots\n\t\ttry: \n\t\t\tspot = spotname,partname = (self.spotname_lookup(sn),self.trajectory_format)\n\t\t\tprefix = self.spots[spot]['namer'](spotname,sn)\n\t\texcept: raise Exception('[ERROR] prefixer failure on simulation \"%s\" (check your namer)'%sn)\n\t\treturn prefix"
]
| [
"0.73429704",
"0.66815627",
"0.6533916",
"0.6423588",
"0.6234334",
"0.6187238",
"0.6106463",
"0.61048114",
"0.6104211",
"0.6086887",
"0.60644424",
"0.6063431",
"0.6009029",
"0.6005014",
"0.5957649",
"0.5849945",
"0.582023",
"0.58040166",
"0.58022004",
"0.5770319",
"0.57546115",
"0.57479507",
"0.5739384",
"0.5733679",
"0.5715238",
"0.57057655",
"0.56935525",
"0.5667215",
"0.56433815",
"0.56234604"
]
| 0.71576387 | 1 |
function used to generate suffixes for a given word | def getSuffixesForWord(self, word):
suffixes = self.word_suffixes.get(word, False)
if suffixes is not False:
return suffixes
suffixes = []
if word.isalpha():
boundary = min(5, len(word))
for i in range(1, boundary):
suffixes.append(word[-i:])
suffixes = tuple(suffixes)
self.word_suffixes[word] = suffixes
return suffixes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def suffixes(word: str) -> Iterator[str]:\n if not word:\n return\n for i, _ in enumerate(word):\n yield word[i:]",
"def FindSuffix(self):\n self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1",
"def gen_word(prefixes, suffixes):\n word = ''\n for time in range(random.choice([1, 2, 3])):\n word += random.choice(prefixes)[0]\n word += random.choice(suffixes)[0]\n return word",
"def __wordsToSuffixes__(self):\n suffixes = defaultdict(int)\n for word, tag in self.getWordTagDict():\n for suffix in self.getSuffixesForWord(word):\n suffixes[(suffix, tag)] += 1\n return suffixes",
"def suffix():\r\n\r\n return _random.choice(\r\n [\r\n 'Sr.', 'Jr.', 'II', 'III', 'IV', 'V'\r\n ]\r\n )",
"def suffix(rem):\n if rem == 0:\n suf = ''\n else:\n if rem <= 600: #Class A suffix -- only letters.\n rem = rem - 1\n suf = base34[rem // 25]\n if rem % 25 > 0:\n suf = suf + base34[rem % 25 - 1]# second class A letter, if present.\n else: #rems > 600 : First digit of suffix is a number. Second digit may be blank, letter, or number.\n rem = rem - 601\n suf = base10[rem // 35]\n if rem % 35 > 0:\n suf = suf + base34[rem % 35 - 1]\n return suf",
"def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest",
"def removesuffix(self, x) -> String:\n pass",
"def _get_manber_myers_suffixes(self, seq:str=None) -> List:\n if not seq: seq = self.seq\n return self._sort_manber_myers([i for i in range(len(seq))])",
"def generate_good_suffix(self, pattern: str, suffix, prefix):\n M = self.M\n # i is the index of scanning index in pattern, range from [0,M-2], to scan substring before pattern[M-1]\n for i in range(M - 1):\n # j is the index of suffix index\n j = i\n # k is the suffix array length, also index, should range from [1,M-1]\n k = 0\n while j >= 0 and pattern[j] == pattern[M - 1 - k]:\n j -= 1\n k += 1\n suffix[k] = j + 1\n if j == -1:\n prefix[k] = True",
"def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement",
"def suffixDict():\n return {'trpk': 'tpke', 'forges': 'frgs', 'bypas': 'byp', 'mnr': 'mnr', 'viaduct': 'via', 'mnt': 'mt',\n 'lndng': 'lndg', 'vill': 'vlg', 'aly': 'aly', 'mill': 'ml', 'pts': 'pts', 'centers': 'ctrs', 'row': 'row', 'cnter': 'ctr',\n 'hrbor': 'hbr', 'tr': 'trl', 'lndg': 'lndg', 'passage': 'psge', 'walks': 'walk', 'frks': 'frks', 'crest': 'crst', 'meadows': 'mdws',\n 'freewy': 'fwy', 'garden': 'gdn', 'bluffs': 'blfs', 'vlg': 'vlg', 'vly': 'vly', 'fall': 'fall', 'trk': 'trak', 'squares': 'sqs',\n 'trl': 'trl', 'harbor': 'hbr', 'frry': 'fry', 'div': 'dv', 'straven': 'stra', 'cmp': 'cp', 'grdns': 'gdns', 'villg': 'vlg',\n 'meadow': 'mdw', 'trails': 'trl', 'streets': 'sts', 'prairie': 'pr', 'hts': 'hts', 'crescent': 'cres', 'pass': 'pass',\n 'ter': 'ter', 'port': 'prt', 'bluf': 'blf', 'avnue': 'ave', 'lights': 'lgts', 'rpds': 'rpds', 'harbors': 'hbrs',\n 'mews': 'mews', 'lodg': 'ldg', 'plz': 'plz', 'tracks': 'trak', 'path': 'path', 'pkway': 'pkwy', 'gln': 'gln',\n 'bot': 'btm', 'drv': 'dr', 'rdg': 'rdg', 'fwy': 'fwy', 'hbr': 'hbr', 'via': 'via', 'divide': 'dv', 'inlt': 'inlt',\n 'fords': 'frds', 'avenu': 'ave', 'vis': 'vis', 'brk': 'brk', 'rivr': 'riv', 'oval': 'oval', 'gateway': 'gtwy',\n 'stream': 'strm', 'bayoo': 'byu', 'msn': 'msn', 'knoll': 'knl', 'expressway': 'expy', 'sprng': 'spg',\n 'flat': 'flt', 'holw': 'holw', 'grden': 'gdn', 'trail': 'trl', 'jctns': 'jcts', 'rdgs': 'rdgs',\n 'tunnel': 'tunl', 'ml': 'ml', 'fls': 'fls', 'flt': 'flt', 'lks': 'lks', 'mt': 'mt', 'groves': 'grvs',\n 'vally': 'vly', 'ferry': 'fry', 'parkway': 'pkwy', 'radiel': 'radl', 'strvnue': 'stra', 'fld': 'fld',\n 'overpass': 'opas', 'plaza': 'plz', 'estate': 'est', 'mntn': 'mtn', 'lock': 'lck', 'orchrd': 'orch',\n 'strvn': 'stra', 'locks': 'lcks', 'bend': 'bnd', 'kys': 'kys', 'junctions': 'jcts', 'mountin': 'mtn',\n 'burgs': 'bgs', 'pine': 'pne', 'ldge': 'ldg', 'causway': 'cswy', 'spg': 'spg', 'beach': 'bch', 'ft': 'ft',\n 'crse': 'crse', 'motorway': 'mtwy', 'bluff': 'blf', 'court': 'ct', 'grov': 'grv', 'sprngs': 'spgs',\n 'ovl': 'oval', 'villag': 'vlg', 'vdct': 'via', 'neck': 'nck', 'orchard': 'orch', 'light': 'lgt',\n 'sq': 'sq', 'pkwy': 'pkwy', 'shore': 'shr', 'green': 'grn', 'strm': 'strm', 'islnd': 'is',\n 'turnpike': 'tpke', 'stra': 'stra', 'mission': 'msn', 'spngs': 'spgs', 'course': 'crse',\n 'trafficway': 'trfy', 'terrace': 'ter', 'hway': 'hwy', 'avenue': 'ave', 'glen': 'gln',\n 'boul': 'blvd', 'inlet': 'inlt', 'la': 'ln', 'ln': 'ln', 'frst': 'frst', 'clf': 'clf',\n 'cres': 'cres', 'brook': 'brk', 'lk': 'lk', 'byp': 'byp', 'shoar': 'shr', 'bypass': 'byp',\n 'mtin': 'mtn', 'ally': 'aly', 'forest': 'frst', 'junction': 'jct', 'views': 'vws', 'wells': 'wls', 'cen': 'ctr',\n 'exts': 'exts', 'crt': 'ct', 'corners': 'cors', 'trak': 'trak', 'frway': 'fwy', 'prarie': 'pr', 'crossing': 'xing',\n 'extn': 'ext', 'cliffs': 'clfs', 'manors': 'mnrs', 'ports': 'prts', 'gatewy': 'gtwy', 'square': 'sq', 'hls': 'hls',\n 'harb': 'hbr', 'loops': 'loop', 'mdw': 'mdw', 'smt': 'smt', 'rd': 'rd', 'hill': 'hl', 'blf': 'blf',\n 'highway': 'hwy', 'walk': 'walk', 'clfs': 'clfs', 'brooks': 'brks', 'brnch': 'br', 'aven': 'ave',\n 'shores': 'shrs', 'iss': 'iss', 'route': 'rte', 'wls': 'wls', 'place': 'pl', 'sumit': 'smt', 'pines': 'pnes',\n 'trks': 'trak', 'shoal': 'shl', 'strt': 'st', 'frwy': 'fwy', 'heights': 'hts', 'ranches': 'rnch',\n 'boulevard': 'blvd', 'extnsn': 'ext', 'mdws': 'mdws', 'hollows': 'holw', 'vsta': 'vis', 'plains': 'plns',\n 'station': 'sta', 'circl': 'cir', 'mntns': 'mtns', 'prts': 'prts', 'shls': 
'shls', 'villages': 'vlgs',\n 'park': 'park', 'nck': 'nck', 'rst': 'rst', 'haven': 'hvn', 'turnpk': 'tpke', 'expy': 'expy', 'sta': 'sta',\n 'expr': 'expy', 'stn': 'sta', 'expw': 'expy', 'street': 'st', 'str': 'st', 'spurs': 'spur', 'crecent': 'cres',\n 'rad': 'radl', 'ranch': 'rnch', 'well': 'wl', 'shoals': 'shls', 'alley': 'aly', 'plza': 'plz', 'medows': 'mdws',\n 'allee': 'aly', 'knls': 'knls', 'ests': 'ests', 'st': 'st', 'anx': 'anx', 'havn': 'hvn', 'paths': 'path', 'bypa': 'byp',\n 'spgs': 'spgs', 'mills': 'mls', 'parks': 'park', 'byps': 'byp', 'flts': 'flts', 'tunnels': 'tunl', 'club': 'clb', 'sqrs': 'sqs',\n 'hllw': 'holw', 'manor': 'mnr', 'centre': 'ctr', 'track': 'trak', 'hgts': 'hts', 'rnch': 'rnch', 'crcle': 'cir', 'falls': 'fls',\n 'landing': 'lndg', 'plaines': 'plns', 'viadct': 'via', 'gdns': 'gdns', 'gtwy': 'gtwy', 'grove': 'grv', 'camp': 'cp', 'tpk': 'tpke',\n 'drive': 'dr', 'freeway': 'fwy', 'ext': 'ext', 'points': 'pts', 'exp': 'expy', 'ky': 'ky', 'courts': 'cts', 'pky': 'pkwy', 'corner': 'cor',\n 'crssing': 'xing', 'mnrs': 'mnrs', 'unions': 'uns', 'cyn': 'cyn', 'lodge': 'ldg', 'trfy': 'trfy', 'circle': 'cir', 'bridge': 'brg',\n 'dl': 'dl', 'dm': 'dm', 'express': 'expy', 'tunls': 'tunl', 'dv': 'dv', 'dr': 'dr', 'shr': 'shr', 'knolls': 'knls', 'greens': 'grns',\n 'tunel': 'tunl', 'fields': 'flds', 'common': 'cmn', 'orch': 'orch', 'crk': 'crk', 'river': 'riv', 'shl': 'shl', 'view': 'vw',\n 'crsent': 'cres', 'rnchs': 'rnch', 'crscnt': 'cres', 'arc': 'arc', 'btm': 'btm', 'blvd': 'blvd', 'ways': 'ways', 'radl': 'radl',\n 'rdge': 'rdg', 'causeway': 'cswy', 'parkwy': 'pkwy', 'juncton': 'jct', 'statn': 'sta', 'gardn': 'gdn', 'mntain': 'mtn',\n 'crssng': 'xing', 'rapid': 'rpd', 'key': 'ky', 'plns': 'plns', 'wy': 'way', 'cor': 'cor', 'ramp': 'ramp', 'throughway': 'trwy',\n 'estates': 'ests', 'ck': 'crk', 'loaf': 'lf', 'hvn': 'hvn', 'wall': 'wall', 'hollow': 'holw', 'canyon': 'cyn', 'clb': 'clb',\n 'cswy': 'cswy', 'village': 'vlg', 'cr': 'crk', 'trce': 'trce', 'cp': 'cp', 'cv': 'cv', 'ct': 'cts', 'pr': 'pr', 'frg': 'frg',\n 'jction': 'jct', 'pt': 'pt', 'mssn': 'msn', 'frk': 'frk', 'brdge': 'brg', 'cent': 'ctr', 'spur': 'spur', 'frt': 'ft', 'pk': 'park',\n 'fry': 'fry', 'pl': 'pl', 'lanes': 'ln', 'gtway': 'gtwy', 'prk': 'park', 'vws': 'vws', 'stravenue': 'stra', 'lgt': 'lgt',\n 'hiway': 'hwy', 'ctr': 'ctr', 'prt': 'prt', 'ville': 'vl', 'plain': 'pln', 'mount': 'mt', 'mls': 'mls', 'loop': 'loop',\n 'riv': 'riv', 'centr': 'ctr', 'is': 'is', 'prr': 'pr', 'vl': 'vl', 'avn': 'ave', 'vw': 'vw', 'ave': 'ave', 'spng': 'spg',\n 'hiwy': 'hwy', 'dam': 'dm', 'isle': 'isle', 'crcl': 'cir', 'sqre': 'sq', 'jct': 'jct', 'jctn': 'jct', 'mountain': 'mtn',\n 'keys': 'kys', 'parkways': 'pkwy', 'drives': 'drs', 'tunl': 'tunl', 'jcts': 'jcts', 'knl': 'knl', 'center': 'ctr',\n 'driv': 'dr', 'tpke': 'tpke', 'sumitt': 'smt', 'canyn': 'cyn', 'ldg': 'ldg', 'harbr': 'hbr', 'rest': 'rst', 'shoars': 'shrs',\n 'vist': 'vis', 'gdn': 'gdn', 'islnds': 'iss', 'hills': 'hls', 'cresent': 'cres', 'point': 'pt', 'lake': 'lk', 'vlly': 'vly',\n 'strav': 'stra', 'crossroad': 'xrd', 'bnd': 'bnd', 'strave': 'stra', 'stravn': 'stra', 'knol': 'knl', 'vlgs': 'vlgs',\n 'forge': 'frg', 'cntr': 'ctr', 'cape': 'cpe', 'height': 'hts', 'lck': 'lck', 'highwy': 'hwy', 'trnpk': 'tpke', 'rpd': 'rpd',\n 'boulv': 'blvd', 'circles': 'cirs', 'valleys': 'vlys', 'vst': 'vis', 'creek': 'crk', 'mall': 'mall', 'spring': 'spg',\n 'brg': 'brg', 'holws': 'holw', 'lf': 'lf', 'est': 'est', 'xing': 'xing', 'trace': 'trce', 'bottom': 'btm',\n 'streme': 
'strm', 'isles': 'isle', 'circ': 'cir', 'forks': 'frks', 'burg': 'bg', 'run': 'run', 'trls': 'trl',\n 'radial': 'radl', 'lakes': 'lks', 'rue': 'rue', 'vlys': 'vlys', 'br': 'br', 'cors': 'cors', 'pln': 'pln',\n 'pike': 'pike', 'extension': 'ext', 'island': 'is', 'frd': 'frd', 'lcks': 'lcks', 'terr': 'ter',\n 'union': 'un', 'extensions': 'exts', 'pkwys': 'pkwy', 'islands': 'iss', 'road': 'rd', 'shrs': 'shrs',\n 'roads': 'rds', 'glens': 'glns', 'springs': 'spgs', 'missn': 'msn', 'ridge': 'rdg', 'arcade': 'arc',\n 'bayou': 'byu', 'crsnt': 'cres', 'junctn': 'jct', 'way': 'way', 'valley': 'vly', 'fork': 'frk',\n 'mountains': 'mtns', 'bottm': 'btm', 'forg': 'frg', 'ht': 'hts', 'ford': 'frd', 'hl': 'hl',\n 'grdn': 'gdn', 'fort': 'ft', 'traces': 'trce', 'cnyn': 'cyn', 'cir': 'cir', 'un': 'un', 'mtn': 'mtn',\n 'flats': 'flts', 'anex': 'anx', 'gatway': 'gtwy', 'rapids': 'rpds', 'villiage': 'vlg', 'flds': 'flds',\n 'coves': 'cvs', 'rvr': 'riv', 'av': 'ave', 'pikes': 'pike', 'grv': 'grv', 'vista': 'vis', 'pnes': 'pnes',\n 'forests': 'frst', 'field': 'fld', 'branch': 'br', 'grn': 'grn', 'dale': 'dl', 'rds': 'rds', 'annex': 'anx',\n 'sqr': 'sq', 'cove': 'cv', 'squ': 'sq', 'skyway': 'skwy', 'ridges': 'rdgs', 'hwy': 'hwy', 'tunnl': 'tunl',\n 'underpass': 'upas', 'cliff': 'clf', 'lane': 'ln', 'land': 'land', 'bch': 'bch', 'dvd': 'dv', 'curve': 'curv',\n 'cpe': 'cpe', 'summit': 'smt', 'gardens': 'gdns'}",
"def get_suffix(self,prefix):\n return random.choice(self[prefix])",
"def test_get_suffixes(self):\n\n ans = self.short_sf.get_suffixes()\n\n self.assertEqual(ans, [(0, 0), (1, 1), (0, 1), (0, 2), (1, 0), (1, 2)])",
"def stem(self, word):\n word = word.lower()\n\n if word in self.stopwords:\n return word\n\n step1_success = False\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if not (word.endswith(suffix) and rv.endswith(suffix)):\n continue\n\n if (\n rv[: -len(suffix)].endswith(\n (\n \"ando\",\n \"ar\",\n \"er\",\n \"iendo\",\n \"ir\",\n )\n )\n ) or (\n rv[: -len(suffix)].endswith(\"yendo\")\n and word[: -len(suffix)].endswith(\"uyendo\")\n ):\n\n word = self.__replace_accented(word[: -len(suffix)])\n r1 = self.__replace_accented(r1[: -len(suffix)])\n r2 = self.__replace_accented(r2[: -len(suffix)])\n rv = self.__replace_accented(rv[: -len(suffix)])\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if not word.endswith(suffix):\n continue\n\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\", \"ad\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\n \"adora\",\n \"ador\",\n \"acion\",\n \"adoras\",\n \"adores\",\n \"aciones\",\n \"ante\",\n \"antes\",\n \"ancia\",\n \"ancias\",\n ):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logias\"):\n word = suffix_replace(word, suffix, \"log\")\n rv = suffix_replace(rv, suffix, \"log\")\n\n elif suffix in (\"ucion\", \"uciones\"):\n word = suffix_replace(word, suffix, \"u\")\n rv = suffix_replace(rv, suffix, \"u\")\n\n elif suffix in (\"encia\", \"encias\"):\n word = suffix_replace(word, suffix, \"ente\")\n rv = suffix_replace(rv, suffix, \"ente\")\n\n elif suffix == \"mente\":\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith((\"ante\", \"able\", \"ible\")):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"idad\", \"idades\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n for pre_suff in (\"abil\", \"ic\", \"iv\"):\n if r2.endswith(pre_suff):\n word = word[: -len(pre_suff)]\n rv = rv[: -len(pre_suff)]\n\n elif suffix in (\"ivo\", \"iva\", \"ivos\", \"ivas\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2a: Verb suffixes beginning 'y'\n if not step1_success:\n for suffix in self.__step2a_suffixes:\n if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == \"u\":\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2b: Other verb suffixes\n for suffix in self.__step2b_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if suffix in (\"en\", \"es\", \"eis\", \"emos\"):\n if word.endswith(\"gu\"):\n word = word[:-1]\n\n if rv.endswith(\"gu\"):\n rv = rv[:-1]\n break\n\n # STEP 3: Residual suffix\n for suffix in self.__step3_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n if suffix in (\"e\", \"\\xE9\"):\n rv = rv[: -len(suffix)]\n\n if word[-2:] == \"gu\" and rv.endswith(\"u\"):\n word = word[:-1]\n 
break\n\n word = self.__replace_accented(word)\n\n return word",
"def test_get_suffixes_random(self):\n\n ans = self.random_sf.get_suffixes()\n\n # check there are the right number of suffixes\n expected_num = sum([len(chstring) for chstring in self.rand_gs.values()])\n actual_num = len(ans)\n self.assertEqual(actual_num, expected_num)\n\n # check that the order is correct\n last_glidx, last_tidx = ans[0]\n last = self.random_sf.data[last_glidx][last_tidx:]\n for glyph_idx, tok_idx in ans[1:]:\n current = self.random_sf.data[glyph_idx][tok_idx:]\n self.assertTrue(last <= current)",
"def suffix_replace(original, old, new):\n ...",
"def build_suffix_array(text):\r\n # Initialization with one char suffixes.\r\n order = sortCharacters(text)\r\n classes = computeCharClasses(text, order)\r\n L = 1\r\n while L < len(text):\r\n order = sortDoubled(text, L, order, classes)\r\n classes = updateClasses(order, classes, L)\r\n L = 2*L\r\n return order",
"def get_suffix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n start = len(word) - length\n return word[start:]\n else:\n return word.rjust(length, \"*\")",
"def get_suffix_ml_model():\n suffix = ''\n \n # consider if the model uses tail or not\n if gml.USE_TAIL: \n suffix += '_use_tail'\n else: \n suffix += '_no_tail'\n\n # consider the way of picking target variable for the model\n if gml.WAY_MODEL_TGT == 'absolute':\n suffix += '_absolute'\n elif gml.WAY_MODEL_TGT == 'relative':\n suffix += '_relative'\n else: \n exit('error on the function that gets suffix')\n\n return suffix",
"def stem(s):\n special = {'appall', 'kill', 'stroll', 'kiss', 'thrill', 'chugg', 'dress', 'err', 'express', 'fall', 'free', 'gall', 'add','cross', 'impress', 'inn', 'call', 'ball', 'bill', 'buzz'} \n ie_words = {'vying', 'lying', 'dying', 'tying'}\n short_ing = {'bring','sling','sping', 'bring', 'sing', 'ring', 'king', 'cling' ,'fling', 'wing', 'ding', 'ping', 'ting'}\n c_k_words = {'kick', 'muck', 'lock','pick', 'back', 'mock', 'peck', 'lock', 'nick'}\n\n if len(s) <= 3:\n return s\n if s[-3:] == 'ing' or s[-4:] == 'ings': \n if s in short_ing:\n return s\n elif s in special:\n return s[:-3]\n elif s[:-3] not in special and s[-4] == s[-5]:\n return s[:-4]\n elif s[:-3] not in c_k_words and s[-4] == 'k':\n return s[:-4]\n elif s == 'everything' or s == 'anything' or s == 'something':\n return s[:-5]\n elif s in ie_words:\n return s[0] + 'ie'\n else:\n return s[:-3]\n elif s[-3:] == 'ers':\n return s[:-3]\n elif s[-2:] == 'es':\n return s[:-2]\n elif s[-2:] == 'en':\n return s[:-2]\n elif s[-2:] == 'er':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2] \n elif s[-2:] == 'ed':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2]\n elif s[-3:] == 'ies':\n return s[:-2]\n elif s[-1:] == 's':\n return s[:-1]\n elif s[-1:] == 'e' and s not in ie_words:\n return s[:-1]\n elif s[-3:] == 'ful':\n return s[:-3]\n elif s[:2] == 'de':\n return s[2:]\n elif len(s) > 4 and s[-4:] == 'able' or s[-4] == 'ible':\n return s[:-4]\n elif s[:2] == 'in' or s[:2] == 'il' or s[:2] == 'ir':\n return s[2:]\n elif s[-1:] == 'y':\n return s[:-1] + 'i'\n else:\n return s",
"def stem(s):\n #special words\n double_cons = ['see', 'off', 'egg'] \n\n vowels = ['a','e', 'i', 'o', 'u']\n\n suffix = [['s', 'y'],\n ['ly', 'ic', 'er', 'or', 'ed', 'al', 'ny'],\n ['ial', 'ful', 'ing', 'ion', 'ity', 'ive', 'ous', 'ies', 'ier', 'ily',\n 'ish', 'ism', 'dom', 'ist', 'ate', 'men'],\n ['able', 'ible', 'tion', 'less', 'ment', 'ness', 'eous', 'ious', 'ical', 'ship']]\n\n if len(s) <= 4:\n return s\n\n #last four\n if len(s) > 5 and s[-4:] in suffix[3]:\n if s[-5] == s[-6]:\n if s[-5] == 's': #\"ss\"\n return s[:-4]\n return s[:-5]\n else:\n s = s[:-4]\n return s\n \n #last three\n elif s[-3:] in suffix[2]:\n if s[-4] == s[-5]:\n if s[-4] in ['s', 'l', 'z', 'f']:\n return s[:-3]\n elif s[:-3] in double_cons:\n return s[:-3]\n return s[:-4]\n else:\n return s[:-3]\n #last two\n elif s[-2:] in suffix[1]:\n if s[-3] == s[-4]:\n if s[-3] in ['s', 'l', 'z', 'f']:\n return s[:-2]\n return s[:-3]\n else:\n return s[:-2]\n\n #plurals and y\n if s[-1] in suffix[0]:\n if s[-2:] == 'es':\n return s[:-2]\n if s[-2] == 's':\n return s\n if s[-2] in vowels: #y\n return s\n elif s[-1] == 's':\n s = stem(s[:-1])\n return s\n\n return s",
"def is_suffix(suffix: str, word: str):\n return word.endswith(suffix)",
"def _get_bwt_seq(self, seq:str=None, suffixes:List=None) -> str:\n if not seq: seq = self.seq\n if not suffixes: suffixes = self.suffixes\n bwt_seq = \"\"\n if self.debug: print(f\"_get_bwt_seq function: sequence: {seq}, suffixes: {suffixes}\") \n for i in suffixes:\n if self.debug: print(f\"_get_bwt_seq function: suffix: {i}, sequence letter: {seq[(i-1 % len(seq))]}\") \n bwt_seq += seq[(i-1 % len(seq))]\n if self.debug: print(f\"_get_bwt_seq function: bwt sequence: {bwt_seq}\\n\") \n return bwt_seq",
"def suffix(sequence, l):\n if l > len(sequence):\n return sequence\n else:\n return sequence[-l:]",
"def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)",
"def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()",
"def suffix_tree_from_seq(self, text):\n t = text+\"$\" #adiciona ao final da sequecia o simbolo $ que mostra que e' o fim da sequencia\n for i in range(len(t)): #percorremos toda a sequencia mas sempre a avançar uma casa\n self.add_suffix(t[i:], i) #e' cada vez uma sequencoa mais pequena que sera adicionada, ou seja sao os sufixos da sequencia",
"def generateWord2(self, parameters=None):\n\n\t\t##Initial set-up\n\t\t#A syllable consists of an optional onset, a nucleus, and an optional coda\n\t\t#Sources:\n\t\t# http://en.wikipedia.org/wiki/English_phonology#Phonotactics\n\t\t# http://en.wiktionary.org/wiki/Appendix:English_pronunciation\n\t\tonsets = [\"ch\", \"pl\", \"bl\", \"cl\", \"gl\", \"pr\", \"br\", \"tr\", \"dr\", \"cr\", \"gr\", \"tw\", \"dw\", \"qu\", \"pu\",\n\t\t\t\t \"fl\", \"sl\", \"fr\", \"thr\", \"shr\", \"wh\", \"sw\",\n\t\t\t\t \"sp\", \"st\", \"sk\", \"sm\", \"sn\", \"sph\", \"spl\", \"spr\", \"str\", \"scr\", \"squ\", \"sm\"] #Plus the normal consonants\n\t\tnuclei = [\"ai\", \"ay\", \"ea\", \"ee\", \"y\", \"oa\", \"au\", \"oi\", \"oo\", \"ou\"] #Plus the normal vowels\n\t\tcodas = [\"ch\", \"lp\", \"lb\", \"lt\", \"ld\", \"lch\", \"lg\", \"lk\", \"rp\", \"rb\", \"rt\", \"rd\", \"rch\", \"rk\", \"lf\", \"lth\",\n\t\t\t\t \"lsh\", \"rf\", \"rth\", \"rs\", \"rsh\", \"lm\", \"ln\", \"rm\", \"rn\", \"rl\", \"mp\", \"nt\", \"nd\", \"nch\", \"nk\", \"mph\",\n\t\t\t\t \"mth\", \"nth\", \"ngth\", \"ft\", \"sp\", \"st\", \"sk\", \"fth\", \"pt\", \"ct\", \"kt\", \"pth\", \"ghth\", \"tz\", \"dth\",\n\t\t\t\t \"ks\", \"lpt\", \"lfth\", \"ltz\", \"lst\", \"lct\", \"lx\",\"rmth\", \"rpt\", \"rtz\", \"rst\", \"rct\",\"mpt\", \"dth\",\n\t\t\t\t \"nct\", \"nx\", \"xth\", \"xt\"] #Plus normal consonants\n\n\t\tsimpleLetterChance = 65 #percent, whether a single letter is chosen instead of an onset/nucleus/coda\n\t\tbasicLetterChance = 75 #percent, whether a simple consonant/vowel is chosen over a more rare one\n\n\t\t#Prevent unnecessary and ugly code repetition\n\n\t\t#Start the word\n\t\trepeats = 1\n\t\tif parameters and len(parameters) > 0:\n\t\t\trepeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25)\n\n\t\twords = []\n\t\tfor i in xrange(0, repeats):\n\t\t\tsyllableCount = 2\n\t\t\tif random.randint(1, 100) <= 50:\n\t\t\t\tsyllableCount -= 1\n\t\t\tif random.randint(1, 100) <= 35:\n\t\t\t\tsyllableCount += 1\n\n\t\t\tword = u\"\"\n\t\t\tfor j in range(0, syllableCount):\n\t\t\t\t#In most cases, add an onset\n\t\t\t\tif random.randint(1, 100) <= 75:\n\t\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"consonant\", basicLetterChance)\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(onsets)\n\n\t\t\t\t#Nucleus!\n\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"vowel\", basicLetterChance)\n\t\t\t\telse:\n\t\t\t\t\tword += random.choice(nuclei)\n\n\t\t\t\t#Add a coda in most cases (Always add it if this is the last syllable of the word and it'd be too short otherwise)\n\t\t\t\tif (j == syllableCount - 1 and len(word) < 3) or random.randint(1, 100) <= 75:\n\t\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"consonant\", basicLetterChance)\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(codas)\n\n\t\t\tword = word[0].upper() + word[1:]\n\t\t\twords.append(word)\n\n\t\treturn u\", \".join(words)",
"def word_clean(self, word):\n word_ori = word\n if word not in self.vocab_list: # if the word is not in the vocabulary\n word = word.strip(\",.!?\") # delete punctuation, such as periods, commas\n for i in range(len(suffix_list)):\n (match, string) = rchop(word, suffix_list[i])\n if match:\n (_, suffix) = lchop(word_ori, string)\n return string, suffix\n\n (_, suffix) = lchop(word_ori, word)\n return word, suffix"
]
| [
"0.7386224",
"0.7214886",
"0.7020202",
"0.68448174",
"0.65395445",
"0.65102875",
"0.64363563",
"0.637313",
"0.63590467",
"0.6350543",
"0.63384783",
"0.63234866",
"0.6239201",
"0.62335086",
"0.6231242",
"0.6175014",
"0.6174881",
"0.6156801",
"0.6153791",
"0.6138962",
"0.6123187",
"0.6099947",
"0.603906",
"0.59440595",
"0.5936286",
"0.5934056",
"0.5907113",
"0.58731264",
"0.5854857",
"0.5845251"
]
| 0.7573023 | 0 |
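A minimal standalone sketch of the suffix-caching logic shown in the row above, written as a plain function rather than a class method; the function name and the module-level cache dict are illustrative assumptions, not part of the original code.

# Illustrative sketch: standalone version of the suffix generator above,
# caching results per word the same way the method does.
_word_suffixes = {}  # assumed module-level cache standing in for self.word_suffixes

def get_suffixes_for_word(word):
    cached = _word_suffixes.get(word)
    if cached is not None:
        return cached
    suffixes = []
    if word.isalpha():
        boundary = min(5, len(word))      # suffixes of length 1..4 at most
        for i in range(1, boundary):
            suffixes.append(word[-i:])    # last i characters of the word
    suffixes = tuple(suffixes)
    _word_suffixes[word] = suffixes
    return suffixes

# e.g. get_suffixes_for_word("running") -> ('g', 'ng', 'ing', 'ning')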
function used to generate prefixes for a given word | def getPrefixesForWord(self, word):
prefixes = self.word_prefixes.get(word, False)
if prefixes is not False:
return prefixes
prefixes = []
if word.isalpha():
boundary = min(5, len(word))
for i in range(2, boundary):
prefixes.append(word[:i])
prefixes = tuple(prefixes)
self.word_prefixes[word] = prefixes
return prefixes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prefixes(s):\n output = ''\n for i in range(len(s) + 1):\n add = s[0:i]\n output += add\n return output",
"def replace_prefix(word, prefix):\r\n length_prefix = len(prefix)\r\n length_word = len(word)\r\n \r\n if length_prefix > length_word:\r\n return prefix\r\n\r\n #print(word[:length_prefix])\r\n word = prefix + word[length_prefix:]\r\n\r\n return word",
"def gen_word(prefixes, suffixes):\n word = ''\n for time in range(random.choice([1, 2, 3])):\n word += random.choice(prefixes)[0]\n word += random.choice(suffixes)[0]\n return word",
"def apply_prefix(string):\n for short, long in PREFIXES.items():\n if string.startswith(long):\n return short + ':' + string[len(long):]\n return string",
"def prefix(num):\n # determine which range it lies in, r1/r2 means reduction 1 or reduction 2\n divisors = [1e-24 * pow(10, 3 * x) for x in range(17)]\n prefixes = list(reversed(['Yotta (Y)', 'Zetta (Z)', 'Exa (E)', 'Peta (P)', 'Tera (T)', 'Giga (G)', 'Mega (M)',\n 'Kilo (K)', '', 'Milli (m)', 'Micro ($\\mu$)', 'Nano (n)', 'Pico (p)', 'Femto (f)',\n 'Atto (a)', 'Zepto (z)', 'Yocto (y)']))\n exp = np.floor(np.log10(np.abs(num)))\n if exp < 0:\n exp -= 3\n expIndex = int(exp / 3) + 8\n expIndex = 0 if expIndex < 0 else expIndex\n expIndex = len(prefixes)-1 if expIndex >= len(prefixes) else expIndex\n r1 = prefixes[expIndex]\n num1 = num / divisors[expIndex]\n if expIndex != len(prefixes):\n r2 = prefixes[expIndex + 1]\n num2 = num / divisors[expIndex + 1]\n else:\n num2 = None\n retStr = str(num1) + ' ' + r1\n if num2 is not None:\n retStr += '\\nor\\n' + str(num2) + ' ' + r2\n return retStr",
"def neuronyms(input_str, k):\n n = len(input_str)\n result = []\n\n for length in range(k, n-k+1):\n for start in range (1, n - length):\n prefix = input_str[:start]\n suffix = input_str[(start+length):]\n res_str = prefix+str(length)+suffix\n result.append(res_str)\n\n return result",
"def _build_prefix(self):\r\n pattern = self.string2\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def __wordsToPrefixes__(self):\n prefixes = defaultdict(int)\n for word, tag in self.getWordTagDict():\n for prefix in self.getPrefixesForWord(word):\n prefixes[(prefix, tag)] += 1\n return prefixes",
"def prefixes(s):\n if s:\n yield from prefixes(s[:-1])\n yield s",
"def _prefixes(self, title, filter_stopwords=False):\n for word in self._clean_words(title, filter_stopwords=filter_stopwords):\n prefixer = partial(word.__getslice__, 0)\n for prefix in imap(prefixer, range(1, len(word) + 1)):\n yield prefix",
"def generate_word(model, whitelist, topk, radix=\"\"):\n if whitelist.empty():\n yield \"\", 0\n else:\n for prefix, prefix_proba in iterate_continutations(model, radix, whitelist, topk):\n if prefix is None:\n continue\n for suffix, suffix_proba in generate_word(\n model,\n whitelist.sub(LetterBag(prefix)),\n topk,\n radix + prefix):\n if suffix is None:\n continue\n yield prefix + suffix, prefix_proba + suffix_proba\n yield None, 0",
"def get_prefix(word, length):\n if length <= 0:\n return \"\"\n if length <= len(word):\n return word[:length]\n else:\n return word.ljust(length, \"*\")",
"def _generate_new_prefix(current_prefix, class_name):\n return (\n \"_\".join((current_prefix, class_name)).upper()\n if current_prefix\n else class_name.upper()\n )",
"def prefix(pattern):\r\n return pattern[0:len(pattern)-1]",
"async def prefix(self, _bot, message: discord.Message):\n mention = [self.user.mention + ' ', f'<@!{self.user.id}> ']\n additional_prefixes = await self.get_prefixes(message.guild)\n return self.cfg['bot']['prefixes'] + mention + additional_prefixes",
"def prefix(pattern):\n return pattern[0:len(pattern)-1]",
"def get_shared_prefix(word1: str, word2: str) -> str:\n shared_prefix = \"\"\n for char1, char2 in zip(word1, word2):\n if char1 == char2:\n shared_prefix += char1\n else:\n break\n return shared_prefix",
"def prefix_replace(original, old, new):\n ...",
"def genPrefixAntString(self,estimatedVar,prefix=\"_\"):\n self.prefixAntString = self.antString\n for name in estimatedVar:\n self.prefixAntString = replaceVariable(self.prefixAntString,\n name,prefix+name)",
"def _gennames(prefix, base, number):\n for index in xrange(number):\n yield \"%s%d\" % (prefix, base + index)",
"def remove_prefix(word, prefixes=[]):\n result = word\n # Convert prefixes to list if user give string\n if not isinstance(prefixes, list):\n prefixes = [prefixes]\n for prefix in prefixes:\n if prefix == word[:len(prefix)]:\n result = word[len(prefix):]\n break\n return result",
"def build_prefix(self):\r\n pattern = self.pattern\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def add_prefix(prefix = \"Peptides\"):\n var_list = gen_cell_lines_states_replicates()\n prefix = prefix\n res_list = []\n for i in var_list:\n unit_str = prefix + \" \"\n unit_str += i\n res_list.append(unit_str)\n return res_list",
"def _prefix(self):\n name = self.__class__.__name__\n return name[:2] + ''.join(c for c in name if c.isupper())[1:]",
"def prefix_fun(prefix_str: str) -> list:\n\n\n\t\tf = []\n\n\t\tfor i in range(len(prefix_str)):\n\t\t\tj = i\n\t\t\toffset = 0\n\t\t\twhile j > 0:\n\t\t\t\tj -= 1\n\t\t\t\t# print(i, j, offset, prefix_str[j], prefix_str[i-offset])\n\t\t\t\tif prefix_str[j] == prefix_str[i - offset]:\n\t\t\t\t\toffset += 1\n\t\t\t\telse:\n\t\t\t\t\tj += offset\n\t\t\t\t\toffset = 0\n\t\t\tf.append(offset)\n\t\t\t# print('append', offset)\n\n\t\treturn f",
"def prefixes(s):\r\n if s:\r\n yield from prefixes(s[:-1]) # First yield prefixes from s[:-1], then yield the last one s.\r\n yield s",
"def _addPrefixes(data):\n prevTags = None\n newData = []\n\n for n, (token, tags) in enumerate(data):\n\n newTags = []\n\n for t in tags:\n p = \"B\" if ((prevTags is None) or (t not in prevTags)) else \"I\"\n newTags.append(\"%s-%s\" % (p, t))\n\n newData.append((token, newTags))\n prevTags = tags\n\n return newData",
"def gen_prefix(pow1000):\n # Prefixes according to Table 5 of [BIPM 2006] (excluding hecto,\n # deca, deci, and centi).\n if pow1000 < -8 or pow1000 > 8:\n raise ValueError(\n \"Value is out of the range covered by the SI prefixes.\")\n return ['Y', # yotta (10^24)\n 'Z', # zetta (10^21)\n 'E', # exa (10^18)\n 'P', # peta (10^15)\n 'T', # tera (10^12)\n 'G', # giga (10^9)\n 'M', # mega (10^6)\n 'k', # kilo (10^3)\n '', # (10^0)\n 'm', # milli (10^-3)\n r'$\\mu$', # micro (10^-6)\n 'n', # nano (10^-9)\n 'p', # pico (10^-12)\n 'f', # femto (10^-15)\n 'a', # atto (10^-18)\n 'z', # zepto (10^-21)\n 'y'][8 - pow1000] # yocto (10^-24)",
"def prefix(name):\n def rule(symbol):\n return symbol.startswith(name) or None\n return rule"
]
| [
"0.6942218",
"0.683397",
"0.6781327",
"0.6771648",
"0.6712492",
"0.6617514",
"0.66067314",
"0.65857637",
"0.6574446",
"0.65668625",
"0.650434",
"0.64508694",
"0.6390748",
"0.63864726",
"0.63603944",
"0.6359629",
"0.6357503",
"0.63356817",
"0.6329151",
"0.63057876",
"0.627801",
"0.6258042",
"0.6254074",
"0.6183854",
"0.6171879",
"0.6161445",
"0.6153159",
"0.61101174",
"0.60828245",
"0.60658485"
]
| 0.7233872 | 0 |
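As with the suffix case, a hedged standalone sketch of the prefix logic in the row above; the name and cache dict are assumptions made for illustration only.

# Illustrative sketch: standalone version of the prefix generator above.
_word_prefixes = {}  # assumed cache standing in for self.word_prefixes

def get_prefixes_for_word(word):
    cached = _word_prefixes.get(word)
    if cached is not None:
        return cached
    prefixes = []
    if word.isalpha():
        boundary = min(5, len(word))      # prefixes of length 2..4 at most
        for i in range(2, boundary):
            prefixes.append(word[:i])     # first i characters of the word
    prefixes = tuple(prefixes)
    _word_prefixes[word] = prefixes
    return prefixes

# e.g. get_prefixes_for_word("running") -> ('ru', 'run', 'runn')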
function to retrieve the 2 previous tags for a word in a sentence | def __get_previous_tags__(self, tags):
if len(self.tags) == 0:
return None, None
if self.index == 1:
return BEGIN, tags[self.index-1]
elif self.index == 0:
return BEGIN, BEGIN
else:
return tags[self.index-2], tags[self.index-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pos_tag(self,sentence):\n tagged = self.brill_tagger.tag(sentence.split())\n tagged_sentence = \" \".join([nltk.tag.tuple2str(tok) for tok in tagged])\n print tagged_sentence\n\n tag_list = [(each.split(\"/\")[0],each.split(\"/\")[1]) for each in tagged_sentence.split()]\n return tag_list",
"def previous(self, type=None):\n i = self.index - 1\n s = self.sentence\n while i > 0:\n if type in (s[i].type, None):\n return s[i]\n i -= 1",
"def tags_with(self, word):\n return tags_with_word",
"def tag(text, pos_tagger):\n features = [get_crf_features([word for word in sent]) for sent in text]\n tags = pos_tagger.predict(features)\n tagged_text = []\n for i in range(len(text)):\n tagged_sent = []\n for j in range(len(text[i])):\n tagged_sent.append((text[i][j], tags[i][j]))\n tagged_text.append(tagged_sent)\n #print(tags)\n return tags, tagged_text",
"def _get_sentence(sentence_data):\n return \" \".join([word for word, ne_tag in sentence_data])",
"def pos_tag(self, sentence):\n tags = []\n tokens = sentence.split(\" \")\n for i in range(len(tokens)):\n tags.append('')\n for i in range (len(tokens)):\n feat = []\n feat.append(self.features(tokens,tags,i))\n tag_predicted = self.postagger.predict(feat)[0]\n tags[i] = tag_predicted\n return tags",
"def previous(self, type=None):\n i = self.start - 1\n s = self.sentence\n while i > 0:\n if s[i].chunk is not None and type in (s[i].chunk.type, None):\n return s[i].chunk\n i -= 1",
"def mutate_tag_seq(words, seq1, seq2):\n if len(seq1) > len(words):\n return None\n seq_start = index_tag_seq(words, seq1)\n if seq_start > -1:\n pre = words[:seq_start]\n post = words[seq_start+len(seq1):]\n mutated = []\n for x in seq2:\n for j in range(len(seq1)): \n if x == words[seq_start+j].tag:\n mutated.append(words[seq_start+j])\n return pre + mutated + post\n return None",
"def features(self, sentence, tags, index):\n return{\n 'word': sentence[ index ],\n 'prevWord': '' if index == 0 else sentence[ index - 1 ],\n 'nextWord': '' if index == len( sentence ) -1 else sentence[ index + 1 ],\n 'isFirst': index == 0,\n 'isLast': index == len( sentence ) - 1,\n 'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],\n 'isAllCaps': sentence[ index ].upper() == sentence[ index ],\n 'isAllLowers': sentence[ index ].lower() == sentence[ index ],\n 'prefix-1': sentence[ index ][ 0 ],\n 'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],\n 'prefix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][:3],\n 'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],\n 'suffix-1': sentence[ index ][ -1 ],\n 'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],\n 'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],\n 'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],\n 'tag-1': '' if index == 0 else tags[ index - 1 ],\n 'tag-2': '' if index < 2 else tags[ index - 2 ]\n }",
"def __findPrevWord(self):\n self.activeWindow().searchCurrentWordBackward()",
"def retag_full_sentence(self, timexes, sentence, tagged):\n \n timexStrings = [t[0] for t in timexes]\n \n tokens, indices = util.tokenize_with_reserved_strings(sentence, timexStrings) \n\n tags = []\n ip = 0\n curchar = 0\n for i, w in enumerate(tokens):\n pos = sentence[curchar:].find(w)\n tPos = pos + curchar\n curchar = tPos + len(w)\n \n if w in timexStrings:\n tags.append((w, 'Timex', tPos))\n elif w in [',', '.', ';']:\n tags.append((w, 'Punctuation', tPos))\n elif self.re_signal.findall(w):\n tags.append((w, 'TimexSignal', tPos))\n else:\n t = [(i, tg) for i, tg in enumerate(tagged[ip:]) if tg[0]==w.lower()]\n if t:\n tags.append((t[0][1][0],t[0][1][1], tPos))\n ip = t[0][0] \n else:\n tags.append((w, 'unimportant', tPos)) \n \n return tags",
"def full_pos_tag(self, sentence):\n tags = self.pos_tag(sentence)\n for i in range(len( tags)):\n tags[i] = self.get_complete_tag(tags[i])\n return tags",
"def findTags(user_input, tagged_text):\n result = []\n for item in tagged_text:\n for w in user_input:\n if w[WORD] == item[WORD]:\n tup = (w[WORD], item[TAG])\n result.append(tup)\n continue\n\n return result",
"def entity_tag_sentence(sentence):\n return ne_chunk(sentence)",
"def _get_sentence(ne_tagged_line):\n return \" \".join([word for word, tag in ne_tagged_line])",
"def searchbrown_word(tag):\n brown_tagged_words = brown.tagged_words(categories='news')\n hitwords = []\n for i in range(len(brown_tagged_words)):\n if tag == brown_tagged_words[i][1]:\n hitwords.append(brown_tagged_words[i][0].lower())\n return hitwords",
"def get_tag_for_word(self, word: str):\n doc = self.model(word)\n for token in doc:\n return token.pos_",
"def find_text_in_tag(st, tag):\n if tag == \"e1\":\n st = st.replace(\"<e2>\", \"\")\n st = st.replace(\"</e2>\", \"\")\n elif tag == \"e2\":\n st = st.replace(\"<e1>\", \"\")\n st = st.replace(\"</e1>\", \"\")\n\n for i in range(len(st) - (len(tag)+2) + 1): # +2 is for < and >\n if st[i:i+len(tag)+2] == \"<\" + tag + \">\":\n for j in range(i+1, len(st) - (len(tag)+3) + 1):\n if st[j:j+len(tag)+3] == \"</\" + tag + \">\":\n return st[i+len(tag)+2:j], i - 1\n\n print(\"ERROR: tag \\\"{}\\\" in string \\\"{}\\\" not found!\".format(tag, st))",
"def get_prev_word(self, index, orignal=False):\n try:\n\n word = self.df.iloc[index][1]\n if word[-1] == \".\":\n if orignal:\n return word[:-1]\n return self.label_encoder.transform([word[:-1]])[0]\n else:\n # NOT A PERIOD\n # I think it would be better to return a <NAP> token\n # This might also help in cleaning the data\n # If orignal is true return word as is...\n if orignal:\n return word\n return self.label_encoder.transform([\"<NAP>\"])[0]\n except ValueError:\n # Returning -1 for unseen words\n return -1\n except IndexError:\n if orignal:\n return \"<START>\"\n return self.label_encoder.transform([\"<START>\"])[0]",
"def get_sentence(self):",
"def searchbrown_phrase(tags):\n l = len(tags)\n brown_tagged_words = brown.tagged_words(categories='news')\n hitwords = []\n for i in range(len(brown_tagged_words)-l+1):\n searchtags = [tag for _,tag in brown_tagged_words[i:i+l]]\n if tags == searchtags:\n hitwords.append(tuple([w.lower()\n for w,_ in brown_tagged_words[i:i+l]]))\n return hitwords",
"def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def retagger(tags):\n if tags == 'Positive':\n return 'pos'\n else:\n return 'neg'",
"def tags():",
"def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]",
"def getSentenceInfo(sentence):\n\tpass",
"def parse_sentence(self, text):\n l = []\n tokens = word_tokenize(text)\n print(tokens)\n skip = 0\n i = -1 # index of token in tokens list\n for token in tokens:\n i += 1\n if skip:\n skip -= 1\n # CORONA TERMS:\n elif token.lower() in corona_words:\n l.append('covid')\n elif is_flag_emoji(token):\n try:\n l.append(flag.ISO3166[flag.dflagize(token)[1:3]])\n except:\n continue\n # HASHTAGS:\n elif token == '#' and i+1 < len(tokens):\n parse_hashtage(tokens[i+1], l, tokens)\n skip += 1\n # TAGS:\n elif token == '@' and i+1 < len(tokens):\n parst_tag(tokens[i+1], l)\n skip = True\n # Size AS A WORD:\n elif token.lower() in sizes.keys():\n l.append(parse_number('1', token))\n elif check_if_term_is_fraction(token):\n if i < len(tokens)-1 and tokens[i+1].lower() in percent:\n l.append(token + '%')\n skip += 1\n else:\n l.append(token)\n # NUMBERS:\n elif isNumber(token):\n token = clean_number(token)\n if (i < len(tokens) - 2) and (tokens[i+1].lower() in sizes.keys()) and (tokens[i+2].lower() in percent):\n l.append(parse_number(token, tokens[i+1]) + '%')\n skip += 2\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in percent:\n l.append(parse_number(token) + '%')\n skip += 1\n elif (i < len(tokens) - 1) and tokens[i+1].lower() in sizes.keys():\n l.append(parse_number(token, tokens[i+1]))\n skip += 1\n elif (i < len(tokens) - 1) and check_if_term_is_fraction(tokens[i+1]):\n l.append(token +' '+ tokens[i+1])\n skip += 1\n else:\n l.append(parse_number(token))\n elif isNumber(token[0:len(token) - 1]) and token[len(token)-1].lower() in sizes:\n tokens.append(token[0:len(token) - 1])\n tokens.append(token[len(token)-1])\n # OTHER TOKENS:\n else:\n cleaning(token, tokens, l)\n\n text_tokens_without_stopwords = [w for w in l if w.lower() not in stop_words]\n print(text_tokens_without_stopwords)\n return text_tokens_without_stopwords",
"def untag(tagged_sentence):\n return [w for (w, t) in tagged_sentence]",
"def disambiguateWordsOld(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\t\t\t\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=zip(word_list,tag_list);\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tif i+1<=len(wordtaglist):\n\t\t\t\t\t# do tests with next word\n\t\t\t\t\t# إذا كانت الكلمة الحالية \"أن\" تكون \"أنْ\" حرف نصب إذا سبقت فعلا\n\t\t\t\t\t# وتكون أنّ، من أخوات إنّ إذا كان ما بعدها اسما\n\t\t\t\t\tif wordtaglist[i][0]==u'أن' and self.tagger.isVerbTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case1';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنْ','t');\n\t\t\t\t\telif wordtaglist[i][0]==u'أن' and self.tagger.isNounTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case 2';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنَّ','t');\n\t\t\t\tnewwordlist.append(wordtaglist[i][0]);\n\t\t\treturn newwordlist;",
"def parse(sentence,label_sentence,sign):\n span = []\n start = None\n for index, word in enumerate(sentence):\n if word==B_token:\n start = index\n elif word==S_token:\n # if ''.join(label_sentence[index:index+1]) in kb_set: ## 在数据库中发现实体名\n # span.append((index, index+1))\n # start = None\n # else:\n # start = None\n span.append((index, index + 1))\n start = None\n elif word==E_token and start is not None:\n end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n span.append((start, end + 1))\n start = None\n # 相邻两entity可以合并则合并\n if len(span) <= 1 or sign == 'label':\n return span\n new_span = []\n for i in range(len(span)-1):\n if span[i][1]==span[i+1][0] and ''.join(label_sentence[span[i][0]:span[i+1][1]]) in kb_set:\n new_span.append((span[i][0], span[i+1][1]))\n if i == len(span)-2:\n return new_span\n else:\n new_span.append((span[i][0], span[i][1]))\n new_span.append((span[-1][0], span[-1][1]))\n return new_span"
]
| [
"0.6204437",
"0.61179423",
"0.6117627",
"0.60937554",
"0.60808796",
"0.6065707",
"0.60322285",
"0.6024134",
"0.5931161",
"0.5899085",
"0.5876507",
"0.58554846",
"0.5827743",
"0.5814774",
"0.581072",
"0.58079094",
"0.5801397",
"0.5755522",
"0.5753671",
"0.5709807",
"0.56989205",
"0.5595925",
"0.5577545",
"0.55727893",
"0.55565244",
"0.5546206",
"0.55423456",
"0.5513395",
"0.5509501",
"0.5503587"
]
| 0.6646759 | 0 |
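A small usage sketch of the previous-tag lookup in the row above, written as a plain function; `BEGIN` as the sentence-start marker and the `index` argument mirror the attributes used in the original method, but the names here are assumptions for illustration.

# Illustrative sketch: the same tag-context lookup as a standalone function.
BEGIN = "*"  # assumed sentence-start tag

def previous_two_tags(tags, index):
    # Returns (tag at index-2, tag at index-1), padding with BEGIN near the start.
    if index == 0:
        return BEGIN, BEGIN
    if index == 1:
        return BEGIN, tags[0]
    return tags[index - 2], tags[index - 1]

# e.g. previous_two_tags(["DT", "NN", "VB"], 2) -> ("DT", "NN")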
function to return the set of tags that are possible for a given word, according to tags which were observed in the data for that word. cutoff parameter determines how many tags will be returned at most. add_common determines if tags which were not observed for the word will be added to the set. | def getPossibleTagSet(self, data, cutoff=None, add_common=False):
full_tag_set_size = data.getTagSetSize()
if cutoff is None:
cutoff = ceil(full_tag_set_size * DEFAULT_CUTOFF_FRACT)
elif cutoff >= full_tag_set_size:
return data.getTagSet()
word = self.getWord()
tags_dict = data.getWordDict().get(word, False)
if tags_dict is False:
sorted_tags_list = data.getSortedTagsList()
else:
sorted_tags_list = sorted(tags_dict, key=tags_dict.get, reverse=True)
if data.isNumberWord(word) and "CD" not in sorted_tags_list[:cutoff]:
sorted_tags_list.insert(0, "CD")
remainder = cutoff - len(sorted_tags_list)
if remainder < 0:
return tuple(sorted_tags_list[:cutoff])
elif add_common is True and remainder > 0:
top_candidate_tags = data.getSortedTagsList()
sorted_tags_set = set(sorted_tags_list)
candidate_set = set(top_candidate_tags) - sorted_tags_set
while remainder > 0:
tag_candidate = top_candidate_tags.pop(0)
if tag_candidate in candidate_set:
sorted_tags_list.append(tag_candidate)
remainder -= 1
return tuple(sorted_tags_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_from_word_edges(self, word: str) -> Set[str]:\n all_edges = set()\n\n for def_dict in self.word_dictionary[word]:\n processed_def = self.get_filtered_set_tokens(\n definition=def_dict[\"definition\"]\n )\n\n if self.drop_self_cycles:\n if word not in processed_def:\n all_edges = all_edges.union(processed_def)\n else:\n all_edges = all_edges.union(processed_def)\n\n return all_edges",
"def get_rare_words(flag, threshold=5):\n word_freq = defaultdict(int)\n f = open(\"./gene.train\", \"r\")\n lines = f.readlines()\n for line in lines:\n entities = line.strip().split()\n for e in entities:\n word_freq[e] += 1\n\n word_freq_sorted = sorted(word_freq.items(), key=lambda x: x[1])\n rare_words_dict = {}\n for word, freq in word_freq_sorted:\n if freq < threshold:\n rare_words_dict[word] = get_token(word, flag)\n else:\n break\n f.close()\n return rare_words_dict",
"def load_common_words(language: str, tot_num: int) -> ty.Set[str]:\n logg = logging.getLogger(f\"c.{__name__}.load_common_words\")\n logg.setLevel(\"DEBUG\")\n logg.debug(\"Start load_common_words\")\n\n lang = pycountry.languages.get(name=language)\n lang_alpha2_tag = lang.alpha_2\n\n common_words_folder = get_package_folders(\"common_words\")\n common_words_path = common_words_folder / f\"{lang_alpha2_tag}.txt\"\n\n common_words = set()\n with common_words_path.open() as common_words_file:\n for line in common_words_file:\n common_words.add(line.strip())\n if len(common_words) == tot_num:\n break\n\n logg.debug(f\"common_words: {common_words}\")\n\n return common_words",
"def generate_wordnet_candidates(self, word):\n candidates = set()\n if self.check_if_replacable(word):\n for synset in wordnet.synsets(word):\n for lemma in synset.lemmas():\n converted = convert(lemma.name().lower(), word)\n if converted != word and converted != None:\n try:\n w1 = wordnet.synsets(word)[0]\n w2 = wordnet.synsets(converted)[0]\n similarity = w1.wup_similarity(w2)\n if isinstance(similarity,float) and w1.wup_similarity(w2) >0.6 :\n candidates.add(converted)\n except:\n pass\n # print(\"candidate\",word,candidates)\n return candidates",
"def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))",
"def chunked_tags(train):\n cfdist = nltk.ConditionalFreqDist()\n for t in train:\n for word, tag, chtag in tree2conlltags(t):\n if chtag == \"O\":\n cfdist[tag].inc(False)\n else:\n cfdist[tag].inc(True)\n return [tag for tag in cfdist.conditions() if cfdist[tag].max() == True]",
"def set_common_words(x_train):\n vectorizer = CountVectorizer(tokenizer=simple_tokenizer)\n x_train_fit = vectorizer.fit_transform(x_train)\n \n words= vectorizer.get_feature_names()\n\n word_count = []\n for i in range(x_train_fit.shape[1]):\n count = np.sum(x_train_fit.getcol(i))\n tup = (count, words[i])\n word_count.append(tup)\n\n def comparator(tupEl):\n # sort on the count\n return tupEl[0]\n\n word_count.sort(key=comparator)\n for i in range(1,number_of_common_words):\n common_words.append(word_count[-i][1])",
"def gen_vocab(text_list, cutoff, stopwords):\n word_counts = Counter()\n for text in text_list:\n word_counts.update(word for word in text.split())\n # using dropwhile takes advantage of ordering\n for key, count in dropwhile(lambda key_count: key_count[1] >= cutoff, word_counts.most_common()):\n del word_counts[key]\n return list(set(word_counts.keys()) - set(stopwords))",
"def build(corpus: List[List[str]], size=5000, freq_cutoff=5):\n vocab = VocabEntry()\n word2freq = Counter(chain(*corpus))\n word2freq = {word: freq for word, freq in word2freq.items() if freq > freq_cutoff}\n words_selected = sorted(word2freq.keys(), key=lambda w: word2freq[w], reverse=True)[:size]\n for w in words_selected:\n vocab.add(w)\n print(\"vocabulary constructing completed, %d/%d words included......\" % (len(words_selected), len(word2freq)))\n return vocab",
"def common_words(words: Dict[str, int], count: int) -> None:\n \n original_order = list(words.values())\n max_to_min = list(reversed(sorted(words.values())))\n temp_lst1 = []\n temp_lst2 = []\n \n \n if count == len(words):\n words = words\n \n \n for num in range(len(max_to_min)):\n if len(temp_lst1) < count:\n temp_lst1.append(max_to_min[num])\n \n for nums in range(len(temp_lst1)):\n max_to_min.remove(temp_lst1[nums])\n \n \n temp_lst2 = remove_repeats(max_to_min, temp_lst1)\n \n a = reformat_order(original_order, temp_lst2)\n \n words_to_counts = combine_list(format_dict(words, a), a)\n \n words.clear()\n \n for keys in words_to_counts:\n words[keys] = words_to_counts[keys]",
"def build_cooccurrence(self, tag='N', targetList='resources/wordlist.owl.pkl', output=None, order=False, case=False, threshold=5):\n\n\t\t## load target word list\n\t\tlogging.debug('load targetList from %s' % (targetList))\n\t\twlist = set(pickle.load(open(targetList)))\n\n\t\t## occurrence of words (post-based)\n\t\tself.Occur = Counter()\n\t\t## co-occurrence of words (post-based)\n\t\tself.Cooccur = Counter()\n\n\t\tlogging.info('calculate occurrence and co-occurrence')\n\t\t## post-based\n\t\tfor pid in self.AllPairs:\n\n\t\t\tdist = self.AllPairs[pid]\n\n\t\t\t## filter out words\n\t\t\twords = set([w for w,p in dist.keys() if p.startswith(tag)])\n\n\t\t\t## intersection with ontology words\n\t\t\tinter = [w for w in words if w in wlist]\n\n\t\t\t## pairwise <inter-words>\n\t\t\tpairs = [ (m,n) for m in inter for n in words if m != n]\n\n\t\t\t## update co-occurrence\n\t\t\tfor pair in pairs:\n\n\t\t\t\tpair = map(lambda x:x.lower(), pair) if not case else pair\n\n\t\t\t\tkey = tuple(sorted(pair)) if not order else pair\n\t\t\t\t\n\t\t\t\tself.Cooccur[ key ] += 1\n\n\t\t\t## update occurrence\n\t\t\tfor word in words:\n\t\t\t\tword = word.lower() if not case else word\n\t\t\t\tself.Occur[ word ] += 1\n\n\t\t## pruning\n\t\tOccurPrune = Counter()\n\t\tfor w in self.Occur:\n\t\t\tOccurPrune[w] = self.Occur[w]\n\t\tself.Occur = OccurPrune\n\n\t\tCooccurPrune = Counter()\n\t\t#######################################\n\t\tfor key, val in self.Cooccur.items():\n\t\t\tif val > threshold:\n\t\t\t\tCooccurPrune[key] = val\n\t\t#######################################\n\t\tself.Cooccur = CooccurPrune\n\n\t\tif output:\n\t\t\tocDict = {\n\t\t\t\t'occur': self.Occur,\n\t\t\t\t'cooccur': self.Cooccur\n\t\t\t}\n\t\t\t## output could be `bk.cooccur.pkl`\n\t\t\t## add threshold infomation: `bk.cooccur.5.pkl`\n\t\t\t# if threshold:\n\t\t\t\t# output = output.replace('.pkl', '.'+str(threshold)+'.pkl')\n\n\t\t\tlogging.info('save co/occurrence to %s' % (output))\n\t\t\tpickle.dump(ocDict, open(output, 'w'))",
"def get_filtered_set_tokens(self, definition: str) -> Set[str]:\n\n doc = self.ppl(definition)\n\n if self.lemm_always:\n tokens = [word.lemma for sent in doc.sentences for word in sent.words]\n tokens = [t.lower() for t in tokens if t.lower() in self.word_dictionary]\n tokens = set(tokens)\n else:\n tokens = set()\n for sent in doc.sentences:\n for word in sent:\n if word.text.lower() in self.word_dictionary:\n tokens.add(word.text.lower())\n elif word.lemma.lower() in self.word_dictionary:\n tokens.add(word.lemma.lower())\n\n return tokens",
"def get_common(self, other, mapping):\n\n self_oov = defaultdict(lambda: 0)\n other_oov = defaultdict(lambda: 0)\n self_word_id = deepcopy(self.word_id)\n other_word_id = deepcopy(other.word_id)\n new_words = []\n map_ = mapping.map\n for i, w in enumerate(self.word_id):\n if w not in map_:\n self_oov[w] += 1\n del self_word_id[w]\n continue\n\n if map_[w] not in other.word_id:\n other_oov[map_[w]] += 1\n del self_word_id[w]\n\n for i, w in enumerate(other.word_id):\n if w not in map_:\n del other_word_id[w]\n\n logging.info(\"We could not find {} {} words in our dictionary.\".format(\n len(self_oov), self.lang))\n logging.info(\"We could not find {} {} words in our target words.\".format(\n len(other_oov), other.lang))\n logging.info(\"Our {} vocabulary has {} valid words.\".format(\n self.lang, len(self_word_id)))\n\n sorted_self_word_id = Embeddings.sorted_words(self_word_id)\n self_vectors = asarray([self.vectors[i] for w, i in sorted_self_word_id])\n self_words = [w for w, i in sorted_self_word_id]\n new_self = Embeddings(lang=self.lang, vectors=self_vectors, words=self_words)\n\n sorted_other_word_id = Embeddings.sorted_words(other_word_id)\n other_vectors = asarray([other.vectors[i] for w, i in sorted_other_word_id])\n other_words = [w for w, i in sorted_other_word_id]\n new_other = Embeddings(lang=self.lang, vectors=other_vectors, words=other_words)\n\n return (new_self, new_other)",
"def _usable_word(self, filtered_words):\n usable = set()\n for word in filtered_words:\n counter = 0\n for x in range(0, len(self._to_word)):\n if word[x] == self._from_word[x]:\n counter += 1\n if counter == len(self._to_word) - 1:\n usable.add(word)\n return usable",
"def _known_in(self, words):\n return set(word for word in words if self._word_2_frequency.get(word))",
"def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n word.get_definition(definer=self.definer)",
"def generate_words(combo,scrabble_words_dict):\n word_set = set()\n for w in itertools.permutations(combo):\n word = ''.join(w)\n if word in scrabble_words_dict:\n word_set.add(word)\n return word_set",
"def new_words_only(base_words, window_words):\n new_words = []\n if base_words:\n for wword in window_words:\n dont_include = False\n for bword in base_words:\n if CharNetRunner._do_words_intersect(wword, bword):\n dont_include = True\n break\n if not dont_include:\n new_words.append(wword)\n else:\n new_words = window_words\n\n return new_words",
"def common_words(self):\n order_centroids = self.model.cluster_centers_.argsort()[:, ::-1]\n clusters = self.model.labels_.tolist()\n vocab = self.vectorizer.vocabulary_\n return [ [vocab.keys()[vocab.values().index(i)] for i in\n order_centroids[cluster, :10]] for cluster in sorted(set(clusters))]",
"def candidates(self, min_count, stops=None, tags={\"NN\", \"NNS\", \"NNP\"}):\n if stops is None:\n stops = []\n candidates = set()\n for word_i, word_j in self.bigrams():\n # Filter out bigrams with stopwords.\n if word_i not in stops and word_j not in stops:\n # Make sure bigrams are alphabetical.\n if self.is_lexical(word_i, word_j):\n # Filter out infrequent bigrams.\n if self.bigrams()[word_i, word_j] >= min_count:\n if self.has_relevant_tag((word_i, word_j), tags):\n candidates.add((word_i, word_j))\n return candidates",
"def get_vocab(data_set):\n vocab = {'PADDING': 0, 'PUNCT': 1}\n inv_vocab = {0: 'PADDING', 1: 'PUNCT'}\n wid = 2\n max_len = -1\n for record in data_set:\n assert 'words' in record\n words = record['words']\n if len(words) > max_len:\n max_len = len(words)\n for w in words:\n if w not in vocab:\n vocab[w] = wid\n inv_vocab[wid] = w\n wid += 1\n print(\"The maximum length of the sentence is %d\" % max_len)\n print(\"Find %s different words in the dataset\" % len(vocab))\n char_string = ''\n for w in vocab:\n char_string += w\n chars = list(set(char_string))\n cid, char_vocab = 0, {}\n for ch in chars:\n if ch not in char_vocab:\n char_vocab[ch] = cid\n cid += 1\n print(\"Find %s different chars in the dataset\" % len(char_vocab))\n return vocab, char_vocab, max_len",
"def common_words(self, n=10):\n # remove some really common words\n ignore = ['a', 'i', 'it', 'the', 'and', 'in', 'he', 'she', 'to', 'at', 'of', 'that', 'as', 'is', 'his', 'my', 'for', 'was', 'me', 'we', 'be', 'on', 'so']\n filtered = [s for s in self.text if s not in ignore and len(s) >=3]\n dat = Counter(filtered)\n return dat.most_common(n)",
"def Viterbi_Most_Common_Tag(tagged_seq:Sequence[str],most_common_tag:str, train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n V = list(set([pair[0] for pair in train_bag]))\n words = [pair[0] for pair in tagged_seq]\n tags = [pair[1] for pair in tagged_seq]\n\n for word_index, word in enumerate(words):\n if word not in V:\n tags[word_index] = most_common_tag\n\n return list(zip(words, tags))",
"def MostCommonClassBaseline(training_set, test_set):\n pos_counts_dict = defaultdict(dict)\n max_pos_dict = dict()\n test_common_tags = []\n\n # Dictionary \"pos_counts_dict\" stores a dictionary for each word that stores counts of each pos of the word\n # This loop runs for each sentence (word, pos) in \"training_set\"\n for sentence in training_set:\n # This loop runs for each tuple (word, pos) in sentence\n for word_pos in sentence:\n # if word (word_pos[0]) not in \"pos_counts_dict\"\n if word_pos[0] not in pos_counts_dict:\n pos_counts_dict[word_pos[0]] = defaultdict(int)\n # increment for each tuple (word, pos) in sentence\n pos_counts_dict[word_pos[0]][word_pos[1]] += 1\n\n # Find most frequent tag associated to each word and store it in \"max_pos_dict\"\n # This loop runs for each word in \"pos_counts_dict\"\n for word in pos_counts_dict:\n count = 0\n tag = str()\n\n # This loop runs for each tag of the word\n for pos in pos_counts_dict[word]:\n if pos_counts_dict[word][pos] > count:\n count = pos_counts_dict[word][pos]\n tag = pos\n max_pos_dict[word] = tag\n\n # Match tag in \"max_pos_dict\" for each word of \"test_set\" and store in \"test_common_tags\"\n\n # This loop runs for each sentence (word, pos) in \"test_set\"\n for sentence in test_set:\n temp_sentence = []\n # This loop runs for no. of tuples (word, pos) in sentence\n for i in xrange(len(sentence)):\n # if word is in \"pos_counts_dict\" then store tuple (word, max count) in \"temp_sentence\"\n if sentence[i][0] not in pos_counts_dict:\n print \"Word not in training_set:\", tup[0]\n else:\n temp_sentence.append((sentence[i][0], max_pos_dict[sentence[i][0]]))\n test_common_tags.append(temp_sentence)\n\n return test_common_tags",
"def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2",
"def select_relevant_pos_words(self, words):\n tagged_words = nltk.pos_tag(words)\n return [word for word, tag in tagged_words if tag in CONCEPT_TAGS]",
"def eliminate_common(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Remove the upper case words\n # Remove common words\n # Remove stopwords\n # TODO: maybe check just for nouns / verbs ???\n text = set(w for w in text if w == w.lower() and\n w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english'))\n\n return text",
"def get_related_topics(self,keyword,cut=0.5):\n \n ret = []\n\n if type(keyword) == str:\n if keyword in self.topic_map.keys():\n ret = [(keyword,1.0)]\n keyword = \"\"\n else:\n _keyword = []\n for k in keyword:\n if k in self.topic_map.keys():\n ret.append((k,1.0))\n else:\n _keyword.append(k) \n keyword = _keyword\n\n keyword_rels = set(self.get_related_keywords(keyword,self.keyword_map_rel,_score=False))\n\n if len(keyword_rels) > 0:\n for topic,topic_rels in self.topic_map.items():\n alike = keyword_rels.intersection(topic_rels)\n score = (len(alike) * (100/len(keyword_rels)))/100\n ret.append((topic,round(score,3)))\n ret.sort(key=lambda x : x[1], reverse=True)\n ret = [t for t in ret if t[1] >= cut]\n \n return ret",
"def get_valid_hot_words(self, start: float, end: float):\n\n valid_hot_words = []\n\n subs_length = len(self.re_subs)\n logger.debug(f'Subs Length: {subs_length}')\n\n for sub in range(1, subs_length):\n\n # Get the subtitles by index\n (subtitles, subtitles_start, subtitles_end) = self.get_subtitles(sub)\n\n # Skip to the start time\n if(subtitles_start < start):\n continue\n\n # Reached the end\n if(subtitles_end > end):\n break\n\n # Don't check empty subtitles (e.g.: {Quack})\n try:\n hot_word = subtitles.split()[0]\n except:\n continue\n\n # Don't take numbers as hot words\n if(hot_word.replace('.', '', 1).isdigit()): # The replace is if the number is a float\n continue\n\n # If no translation is needed -> Append the word and continue.\n # if(self.audio_language == self.subtitles_language):\n valid_hot_words.append({\n 'id': f'{hot_word}-{uuid.uuid4().hex[:6]}',\n 'hot_word': hot_word,\n 'subtitles': subtitles,\n 'start': subtitles_start,\n 'end': subtitles_end\n })\n \n if(self.subtitles_language != self.audio_language):\n valid_hot_words = self.translate_hot_words(valid_hot_words)\n\n logger.debug(\n f'Hot words before filtering and sorting: {valid_hot_words}')\n valid_hot_words = self.filter_hot_words(valid_hot_words)\n return valid_hot_words",
"def _ConsolidateKnownOverlappingTags(self, typ_tags: FrozenSet[str]\n ) -> FrozenSet[str]:\n return typ_tags"
]
| [
"0.5789491",
"0.5776199",
"0.57674026",
"0.5759853",
"0.56918997",
"0.55795056",
"0.55754375",
"0.5563476",
"0.5506468",
"0.5406385",
"0.5379914",
"0.5298454",
"0.5290626",
"0.52878016",
"0.52353746",
"0.52101964",
"0.520336",
"0.51952004",
"0.5188733",
"0.5182906",
"0.5138632",
"0.51347893",
"0.5129989",
"0.51292795",
"0.51218796",
"0.51192",
"0.51179874",
"0.50921756",
"0.50908554",
"0.5087924"
]
| 0.81583047 | 0 |
function used to split an iterable into N batches according to the NUM_THREADS parameter; used to split processing of a large set into smaller sets that will be run in threads | def split_iterable_to_batches(iterable):
iterable_length = len(iterable)
batch_size = int(ceil(iterable_length/NUM_THREADS))
for i in range(0, iterable_length, batch_size):
yield iterable[i:i + batch_size] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def chunks_threads(li, n):\n\tindex = int(len(li) / n + 0.5)\n\tfor i in range(n-1):\n\t\tyield li[i*index:i*index + index]\n\tyield li[n*index - index:]",
"def batch(size, iterable):\r\n return list(xbatch(size, iterable))",
"def split_to_batches(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]",
"def batch_split(self, batch_text, threads=8):\n pass",
"def batches(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield chain([next(batchiter)], batchiter)",
"def split_calculation_to_threads(iterable, func, args):\n args_list = []\n batches = list(split_iterable_to_batches(iterable))\n for batch in batches:\n temp = list(args)\n temp.insert(0, batch)\n args_list.append(tuple(temp))\n with Pool(NUM_THREADS) as p:\n results = p.starmap(func, args_list)\n return results",
"def grouped(iterable, n):\n batch_window = [None for _ in range(n)]\n cur_size = 0\n for item in iterable:\n batch_window[cur_size] = item\n cur_size += 1\n if cur_size >= n:\n batched = batch_window[:]\n batch_window = [None for _ in range(n)]\n cur_size = 0\n yield batched",
"def chunker( it, size ):\n \n # Variables\n it = iter( it )\n \n # Selecting a bunch of jobs\n while True:\n p = tuple( itertools.islice( it, size ) )\n if not p:\n break\n yield p",
"def chunks(lst, chunk_size=MAX_BATCH_SIZE):\n for i in range(0, len(lst), chunk_size):\n yield lst[i : i + chunk_size]",
"def chunkify(iterable, chunk_size):\n _it = iter(iterable)\n while True:\n batch = islice(_it, chunk_size)\n yield chain([batch.__next__()], batch)",
"def batch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield list(chain([batchiter.next()], batchiter))",
"def iterate_minibatches(*to_split, **kwargs):\n batchsize=1\n shuffle=False\n if 'batchsize' in kwargs:\n batchsize = kwargs['batchsize']\n \n if 'shuffle' in kwargs:\n shuffle = kwargs['shuffle']\n\n res = [np.array(x) for x in to_split]\n size = res[0].shape[0]\n for x in res:\n assert x.shape[0] == size\n if shuffle:\n indices = np.arange(size)\n np.random.shuffle(indices)\n for start_idx in range(0, size, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield [x[excerpt] for x in res] if len(res) != 1 else x[excerpt]",
"def batchify(l, n):\n n = min(len(l), n)\n n = max(1, n)\n chunksize = int(math.ceil(len(l) / n))\n\n for i in range(0, len(l), chunksize):\n # Create an index range for l of chunksize items:\n yield l[i:i + chunksize]",
"def batches(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]",
"def batches(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]",
"def batches(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]",
"def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items",
"def chunk(iterable, n):\n iterable = [e for e in iterable]\n avg_length = int(math.ceil(len(iterable) / n))\n return [iterable[i * avg_length:(i + 1) * avg_length] for i in range(n)]",
"def batch(iterable, n):\n iterable = [e for e in iterable]\n size = len(iterable)\n return [iterable[i:i + n] for i in range(0, size, n)]",
"def xbatch(size, iterable):\r\n l = len(iterable)\r\n for i in range(0, l, size):\r\n yield iterable[i:min(i + size, l)]",
"def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk",
"def batched(\n iterable: Iterable[_T],\n batch_size: int,\n container_factory: 'Callable[[Iterator[_T]], Collection[_T]]' = tuple\n) -> 'Iterator[Collection[_T]]':\n\n iterator = iter(iterable)\n while True:\n batch = container_factory(islice(iterator, batch_size))\n if len(batch) == 0:\n return\n\n yield batch",
"def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk",
"def chunks(l):\n for i in range(0, len(l), concurrent):\n yield l[i:i + concurrent]",
"def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))",
"def iter_chunks(chunksize, *iterables):\n iterables = iter(zip(*iterables))\n\n while 1:\n chunk = tuple(islice(iterables, chunksize))\n\n if not chunk:\n return\n\n yield chunk",
"def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]",
"def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)",
"def batch_by_size(iterable, max_buffer=20000):\n all_batches = []\n current_batch = []\n current_size = 0\n\n for next_item in iterable:\n # An approximated way to determine size\n next_size = len(str(next_item))\n expected_total_size = current_size + next_size\n\n if next_size > max_buffer:\n raise BufferExceedError('Buffer exceeded')\n\n elif expected_total_size > max_buffer:\n # If expected to exceed max size, then current batch is finalized\n all_batches.append(current_batch)\n current_batch = [next_item]\n current_size = next_size\n\n else:\n # Else add current set of instructions to current batch\n current_batch.append(next_item)\n current_size = expected_total_size\n\n # Group remaining instructions as a single batch\n if len(current_batch) > 0:\n all_batches.append(current_batch)\n\n return all_batches",
"def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk"
]
| [
"0.7614108",
"0.74475783",
"0.7290103",
"0.7189978",
"0.7147045",
"0.7137233",
"0.7051565",
"0.69337153",
"0.6914948",
"0.68700993",
"0.6837756",
"0.6820978",
"0.6749746",
"0.6698243",
"0.6698243",
"0.6698243",
"0.6667783",
"0.66328615",
"0.6627433",
"0.6591588",
"0.6583701",
"0.657509",
"0.65747595",
"0.65697575",
"0.65645385",
"0.65638673",
"0.6554816",
"0.6540613",
"0.65386343",
"0.65362245"
]
| 0.77173144 | 0 |
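For reference, a minimal usage sketch of the batching helper from the row above; the NUM_THREADS value and the stand-in sentence list are illustrative assumptions, and the generator definition is repeated verbatim only so the snippet runs on its own.

from math import ceil

NUM_THREADS = 4  # assumed value; the real module defines this constant elsewhere

def split_iterable_to_batches(iterable):
    iterable_length = len(iterable)
    batch_size = int(ceil(iterable_length / NUM_THREADS))
    for i in range(0, iterable_length, batch_size):
        yield iterable[i:i + batch_size]

sentences = list(range(10))                        # stand-in for a list of training sentences
print(list(split_iterable_to_batches(sentences)))  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
# At most NUM_THREADS batches are produced; the last batch absorbs any remainder.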
function to split a calculation over an iterable set across NUM_THREADS separate threads; used to split the gradient calculation and Viterbi decoding over the entire dataset into smaller batches that run in parallel threads | def split_calculation_to_threads(iterable, func, args):
args_list = []
batches = list(split_iterable_to_batches(iterable))
for batch in batches:
temp = list(args)
temp.insert(0, batch)
args_list.append(tuple(temp))
with Pool(NUM_THREADS) as p:
results = p.starmap(func, args_list)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loop_threaded():\n nonlocal index, total\n nonlocal d_tree\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n def thread_createOnFunction(path, data, str_namePrefix, fn_thread):\n \"\"\"\n Simply create a thread function and return it.\n \"\"\"\n nonlocal index\n ta = threading.Thread(\n name = '%s-%04d.%d' % (str_namePrefix, index, self.numThreads),\n target = fn_thread,\n args = (path, data, index),\n kwargs = kwargs\n )\n return ta\n\n def threadsInBatches_run(l_threadAnalysis):\n \"\"\"\n Run threads in batches of self.numThreads\n and also handle any remaining threads.\n \"\"\"\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n # Read\n if fn_inputReadCallback:\n index = 1\n for path, data in iterator:\n dret_inputSet = inputSet_read(path, data)\n # filesRead += dret_inputSet['filesRead']\n index += 1\n\n # Analyze\n if fn_analysisCallback:\n index = 1\n l_threadAnalysis = []\n for path, data in iterator:\n l_threadAnalysis.append(thread_createOnFunction(\n path, data,\n 'analysisThread',\n # t_analyze\n analysis_do\n )\n )\n index += 1\n\n # And now batch them in groups\n threadsInBatches_run(l_threadAnalysis)\n tree_removeDeadBranches()\n # Write\n if fn_outputWriteCallback:\n index = 1\n for path, data in iterator:\n dret_outputSet = outputSet_write(path, d_tree[path])\n # filesSaved += dret_outputSet['filesSaved']\n index += 1",
"def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items",
"def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results",
"def worker_train():\n #py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n \n #log.info(\"thread start!\")\n #lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n lr = self.lr \n job_words = 0\n #pre=self.build_model(len(model.vocab), model.layer1_size, lamda = 0.0, learning_rate=lr)\n x1 = []\n x2 = []\n y = []\n #cur = 0\n for edge in job:\n if edge is not None:\n #cur+=1\n #if cur % 100 == 0:\n # log.info(\"edge[0].index = {}\".format(edge[0].index))\n #x.append([edge[0].index, edge[1].index)\n edge_0_emb = np.sum([G[model.vocab_t[edge[0].index]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[edge[0].index]]], axis=0)\n #edge_0_emb = model.node_embedding[edge[0].index] \n edge_1_emb = np.sum([G[model.vocab_t[edge[1].index]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[edge[1].index]]], axis=0)\n #edge_1_emb = model.node_embedding[edge[1].index]\n x1.append(edge_0_emb)\n #print(\"edge[0].index = \", edge[0].index)\n #print(\"0 nebor g = \", [nodeid for nodeid in G[model.vocab_t[edge[0].index]]])\n #print(\"edge[1].index = \", edge[1].index)\n #print(\"1 nebor g = \", [nodeid for nodeid in G[model.vocab_t[edge[1].index]]])\n #print(\"model.vocab_t = \", model.vocab_t)\n x2.append(edge_1_emb)\n \n #print(\"1 nebor g = \", G[model.vocab_t[edge[1].index]])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n #for i in range(int(10 * (weight)) * self.negative):\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size - 1)]\n if nodeidx != edge[0].index and \\\n (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[0].index]]\n or (model.connected_path[model.vocab_t[edge[0].index]][model.vocab_t[nodeidx]][0] < 0.1)):\n x1.append(edge_0_emb)\n x2.append(np.sum([G[model.vocab_t[nodeidx]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[nodeidx]]], axis=0)\n )\n #x2.append(model.node_embedding[nodeidx]) \n y.append(0.0)\n else:\n i -= 1\n if nodeidx != edge[1].index and \\\n (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[1].index]]\n or (model.connected_path[model.vocab_t[edge[1].index]][model.vocab_t[nodeidx]][0] < 0.1)):\n x1.append(edge_1_emb)\n x2.append(np.sum([G[model.vocab_t[nodeidx]][nodeid]['weight'] *\n model.node_embedding[model.vocab[nodeid].index] \n for nodeid in G[model.vocab_t[nodeidx]]], axis=0)\n )\n #x2.append(model.node_embedding[nodeidx]) \n y.append(0.0)\n else:\n i-=1\n #log.info(\"edge end!\")\n #print(\"model.node_embedding = \", model.node_embedding)\n \n #for i in range(1):\n # feed_dict = {\n # pre.x1: x1,\n # pre.x2: x2,\n # pre.y: y,\n # pre.w2_init: model.w2\n # }\n #saver = tf.train.Saver()\n #print(\"model.w2 = \", model.w2)\n #_, loss, node_embeddings, w2 = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings, pre.w2],\n #_, loss, w2, mut, mut_ori = sess.run([pre.d_updates, pre.reg_loss, pre.w2, pre.mut, pre.mut_ori],\n # feed_dict=feed_dict)\n #log.info(\"iter = {}, loss = {}\".format(i, loss))\n #if i == loop - 1:\n # print(\"y = \", y)\n # print(\"mut = \", mut)\n # print(\"w2.T = \", w2.T)\n #print(\"mut_ori = \", mut_ori)\n #loop = 10\n #for i in 
range(loop):\n # for node in G.nodes():\n # #node_emb = tf.nn.embedding_lookup(self.node_embeddings, model.vocab[node])\n # tmp = np.zeros(model.layer1_size, dtype=np.float32)\n # for nnodeid in G[node]:\n # nodeidx = model.vocab[nnodeid].index\n # #print(\"nodeidx = \", nodeidx)\n # tmp = tmp + model.node_embedding[nodeidx]\n # #model.node_embedding[model.vocab[node].index] = np.maximum(0, np.matmul(tmp, w2.T))\n # #model.node_embedding[model.vocab[node].index] = np.exp(np.matmul(tmp, w2.T))\n # model.node_embedding[model.vocab[node].index] = np.matmul(tmp, w2.T)\n # #e_x = np.exp(np.matmul(tmp, w2.T) - np.max(np.matmul(tmp, w2.T)))\n # #model.node_embedding[model.vocab[node].index] = e_x / e_x.sum()\n # #model.node_embedding = node_embeddings\n #print(\"model.node_embedding_next = \", model.node_embedding)\n #model.w2 = w2\n #print(\"model.w2_next = \", model.w2)\n #x = []\n #y = []\n #x.append([edge[1].index, edge[0].index])\n #weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n #y.append(1.0)\n #for i in range(self.negative):\n # nodeidx = model.table[np.random.randint(model.table_size)]\n # if edge[1].index != nodeidx:\n # x.append([edge[1].index, nodeidx])\n # y.append(0.0)\n #feed_dict = {\n # pre.x: x,\n # pre.y: y,\n # pre.node_embeddings_init: model.node_embedding\n #}\n \n #saver = tf.train.Saver()\n #_, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n # feed_dict=feed_dict)\n\n #model.node_embedding = node_embeddings\n #model.node_embedding[edge[1].index] = node_embeddings[edge[1].index]\n #log.info(\"thread end!\")\n job_words += len(y)\n \n #log.info(\"train_loss: {}, node_embeddings = {}\".format(loss, model.node_embedding))\n \n #saver.restore(sess, INNER_MODEL_FILE)\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n #job_words = len(x)\n #log.info(\"train_jobs put!\")\n #log.info(\"train_jobs_full = {}\".format(train_jobs.full()))\n train_jobs.put([x1, x2, y])\n #train_jobs.put([x1, x2, y], block=False)\n #log.info(\"train_jobs put end!\")\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n #log.info(\"jobs.qsize() = {}, train_jobs.qsize() = {}\".format(jobs.qsize(), train_jobs.qsize()))\n #train_jobs.put([x1, x2, y])\n finally:\n lock.release()",
"def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)",
"def run_calculation():\n\n print(\"Creating %d-process pool\" % mp.cpu_count())\n\n pool = mp.Pool(mp.cpu_count())\n\n f = h5py.File('/testdata/mandelbrot.hdf5', 'w')\n\n print(\"Creating output dataset with shape %s x %s\" % (NX, NY))\n\n dset = f.create_dataset('mandelbrot', (NX, NY), 'i')\n dset.attrs['XSTART'] = XSTART\n dset.attrs['YSTART'] = YSTART\n dset.attrs['XEXTENT'] = XEXTENT\n dset.attrs['YEXTENT'] = YEXTENT\n\n result = pool.imap(compute_row, (x * xincr for x in range(NX)))\n\n for idx, arr in enumerate(result):\n if idx % 25 == 0: print(\"Recording row %s\" % idx)\n dset[idx] = arr\n\n print(\"Closing HDF5 file\")\n\n f.close()\n\n print(\"Shutting down process pool\")\n\n pool.close()\n pool.join()",
"def batch_split(self, batch_text, threads=8):\n pass",
"def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss",
"def worker_train():\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n #lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n lr = self.lr \n job_words = 0\n for edge in job:\n if edge is not None:\n if cluster_negtivate:\n node_set = set()\n if model.vocab_t[edge[0].index] not in nodeid2cluster:\n cls1 = -1\n else:\n cls1 = nodeid2cluster[model.vocab_t[edge[0].index]]\n node_set.add(cls1)\n if model.vocab_t[edge[1].index] not in nodeid2cluster:\n cls2 = -1\n else:\n cls2 = nodeid2cluster[model.vocab_t[edge[1].index]]\n node_set.add(cls2)\n neg_l = []\n #选择的负样本的node必须是有明确类别归属的\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if model.vocab_t[nodeidx] not in nodeid2cluster:\n i-=1\n continue\n else:\n cls_n = nodeid2cluster[model.vocab_t[nodeidx]]\n #加入不同边限制 G 里存放的是nodeid,不是idx\n if cls_n not in node_set and model.vocab_t[nodeidx] not in G[model.vocab_t[edge[0].index]] \\\n and model.vocab_t[nodeidx] not in G[model.vocab_t[edge[1].index]]:\n neg_l.append(nodeidx)\n neg_np = np.asarray(neg_l)\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n if weight > 0.0 and len(neg_np) > 0:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, int(10 * (weight)) * self.negative, neg_np,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n elif len(neg_np) == 0:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, 0, neg_np,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, 0, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n else:\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n if weight >= 0.1:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, int(10 * (weight)) * self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, model.table,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n #for i in range(int(10 * weight)))\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()",
"def worker_train():\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n #lr = self.lr \n job_words = 0\n #out_i = 0\n for edge in job:\n if edge is not None:\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n neg_l = []\n #负样本node选取和主node不连通的点\n min_node0, min_conn_tup = sorted(model.connected_path[model.vocab_t[edge[0].index]].items(), key=lambda x:x[1][0])[0]\n min_conn0 = min_conn_tup[0]\n min_node1, min_conn_tup = sorted(model.connected_path[model.vocab_t[edge[1].index]].items(), key=lambda x:x[1][0])[0]\n min_conn1 = min_conn_tup[0]\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size - 1)]\n if (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[0].index]]\n or (model.connected_path[model.vocab_t[edge[0].index]][model.vocab_t[nodeidx]][0] <= max(0.1,min_conn0))) \\\n and (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[1].index]]\n or (model.connected_path[model.vocab_t[edge[1].index]][model.vocab_t[nodeidx]][1] <= max(0.1,min_conn1))):\n neg_l.append(nodeidx)\n if len(neg_l) == 0:\n neg_l.append(model.vocab[min_node0].index)\n neg_l.append(model.vocab[min_node1].index)\n neg_np = np.asarray(neg_l)\n if weight >= 0.0:\n #job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, model.table,\n job_words += sum(train_o1(model.node_embedding, edge, lr, self.negative, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n #for i in range(int(10 * weight)))\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()",
"def preprocessing(pairs, nb=4):\n generated = Parallel(n_jobs=nb, verbose=5)(delayed(_load_brick)(*p) for p in pairs)\n return generated",
"def _parallelise_divmat_by_window(self, windows, num_threads, **kwargs):\n\n def worker(sub_windows):\n return self._ll_tree_sequence.divergence_matrix(sub_windows, **kwargs)\n\n work = self._chunk_windows(windows, num_threads)\n with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(worker, sub_windows) for sub_windows in work]\n concurrent.futures.wait(futures)\n return np.vstack([future.result() for future in futures])",
"def thread_Compute(self,X,layer):\n threads=[]\n #========get the corresponding weights length============\n #pooling n_c=input matirx channels\n #conv n_c =the weights channels\n if layer[\"l_type\"]==\"conv\":\n n_C=kernels[layer[\"kernel\"]].shape[3]\n else:\n n_C=X.shape[3]\n \n #======================\n pos=self.getPos(n_C,0,len(self.nodes))\n start,end=pos\n for node in self.nodes:\n start,end=self.getPos(n_C,end,len(self.nodes))\n a=(start,end)\n d=X[0,:,:,:]\n conv_dict = {\"data\":d,\"pos\":a,\"layer\":layer}\n threads.append(client(conv_dict,node[\"ip\"],node[\"port\"]))\n for t in threads:\n t.start()\n out= self.layerResult(layer,X,pos)\n for t in threads:\n t.join()\n out=np.concatenate((out,t.value()[\"data\"]), axis=2)\n self.count=0\n\n return out",
"def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment",
"def threadsInBatches_run(l_threadAnalysis):\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)",
"def thread_batch(l_threadFunc, outerLoop, innerLoop, offset):\n start = 0\n join = 0\n il = lambda f, i, o, l : f + i + o * l\n for t_o in range(0, outerLoop):\n for t_i in range(0, innerLoop):\n idx = il(offset, t_i, t_o, innerLoop)\n l_threadFunc[idx].start()\n start += 1\n # self.dp.qprint('Started thread %d' % start)\n\n for t_i in range(0, innerLoop):\n idx = il(offset, t_i, t_o, innerLoop)\n l_threadFunc[idx].join()\n join += 1\n # self.dp.qprint('Join set on thread %d' % join)\n\n return start",
"def __call__(self, iterable):\n\n self._reset_run_tracking()\n self._start_time = time.time()\n\n if not self._managed_backend:\n n_jobs = self._initialize_backend()\n else:\n n_jobs = self._effective_n_jobs()\n\n if n_jobs == 1:\n # If n_jobs==1, run the computation sequentially and return\n # immediatly to avoid overheads.\n output = self._get_sequential_output(iterable)\n next(output)\n return output if self.return_generator else list(output)\n\n # Let's create an ID that uniquely identifies the current call. If the\n # call is interrupted early and that the same instance is immediately\n # re-used, this id will be used to prevent workers that were\n # concurrently finalizing a task from the previous call to run the\n # callback.\n with self._lock:\n self._call_id = uuid4().hex\n\n # self._effective_n_jobs should be called in the Parallel.__call__\n # thread only -- store its value in an attribute for further queries.\n self._cached_effective_n_jobs = n_jobs\n\n if isinstance(self._backend, LokyBackend):\n # For the loky backend, we add a callback executed when reducing\n # BatchCalls, that makes the loky executor use a temporary folder\n # specific to this Parallel object when pickling temporary memmaps.\n # This callback is necessary to ensure that several Parallel\n # objects using the same resuable executor don't use the same\n # temporary resources.\n\n def _batched_calls_reducer_callback():\n # Relevant implementation detail: the following lines, called\n # when reducing BatchedCalls, are called in a thread-safe\n # situation, meaning that the context of the temporary folder\n # manager will not be changed in between the callback execution\n # and the end of the BatchedCalls pickling. The reason is that\n # pickling (the only place where set_current_context is used)\n # is done from a single thread (the queue_feeder_thread).\n self._backend._workers._temp_folder_manager.set_current_context( # noqa\n self._id\n )\n self._reducer_callback = _batched_calls_reducer_callback\n\n # self._effective_n_jobs should be called in the Parallel.__call__\n # thread only -- store its value in an attribute for further queries.\n self._cached_effective_n_jobs = n_jobs\n\n backend_name = self._backend.__class__.__name__\n if n_jobs == 0:\n raise RuntimeError(\"%s has no active worker.\" % backend_name)\n\n self._print(\n f\"Using backend {backend_name} with {n_jobs} concurrent workers.\"\n )\n if hasattr(self._backend, 'start_call'):\n self._backend.start_call()\n\n # Following flag prevents double calls to `backend.stop_call`.\n self._calling = True\n\n iterator = iter(iterable)\n pre_dispatch = self.pre_dispatch\n\n if pre_dispatch == 'all':\n # prevent further dispatch via multiprocessing callback thread\n self._original_iterator = None\n self._pre_dispatch_amount = 0\n else:\n self._original_iterator = iterator\n if hasattr(pre_dispatch, 'endswith'):\n pre_dispatch = eval_expr(\n pre_dispatch.replace(\"n_jobs\", str(n_jobs))\n )\n self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)\n\n # The main thread will consume the first pre_dispatch items and\n # the remaining items will later be lazily dispatched by async\n # callbacks upon task completions.\n\n # TODO: this iterator should be batch_size * n_jobs\n iterator = itertools.islice(iterator, self._pre_dispatch_amount)\n\n # Use a caching dict for callables that are pickled with cloudpickle to\n # improve performances. 
This cache is used only in the case of\n # functions that are defined in the __main__ module, functions that\n # are defined locally (inside another function) and lambda expressions.\n self._pickle_cache = dict()\n\n output = self._get_outputs(iterator, pre_dispatch)\n self._call_ref = weakref.ref(output)\n\n # The first item from the output is blank, but it makes the interpreter\n # progress until it enters the Try/Except block of the generator and\n # reach the first `yield` statement. This starts the aynchronous\n # dispatch of the tasks to the workers.\n next(output)\n\n return output if self.return_generator else list(output)",
"def start_multi(lines, func, p=6, suffix=0, write=False, pre_train=True):\n _info('Execute by {}th processes'.format(p))\n pool = Pool(p)\n \n if type(lines) is not zip:\n num_each_b = len(lines) // p\n else:\n # fine tune step, the lines is zip type,\n # this kind of type has no length, so just set the lines to data_b\n data_b = lines\n \n results = []\n for i in range(p):\n if i < (p-1):\n if pre_train: # the reason for adding judgement here is just identical to the above comment\n data_b = lines[i * num_each_b : i * num_each_b + num_each_b]\n else:\n if pre_train:\n data_b = lines[i * num_each_b :]\n if not write:\n if pre_train:\n results.append(pool.apply_async(func, (data_b, )))\n else:\n assert p == 1, _error('process number should equal to 1 when save the file', head='Value Error')\n pool.apply_async(func, (data_b, suffix, pre_train))\n \n if not write:\n for i in range(p):\n results[i] = results[i].get()\n \n pool.close()\n pool.join()\n \n if not write:\n return list(chain(*results))",
"def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)",
"def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):\n\tif cores == None:\n\t\tcores = multiprocessing.cpu_count()\n\tdef wrapper(f):\n\t\tdef execute(*multiargs):\n\t\t\tresults = []\n\t\t\tlen(list(zip(*multiargs)))\n\t\t\tN = len(multiargs[0])\n\t\t\tif info:\n\t\t\t\tprint(\"running %i jobs on %i cores\" % (N, cores))\n\t\t\ttaskQueue = queue.Queue(len(multiargs[0]))\n\t\t\t#for timenr in range(times):\n\t\t\t#\ttaskQueue.put(timenr)\n\t\t\tfor tasknr, _args in enumerate(zip(*multiargs)):\n\t\t\t\ttaskQueue.put((tasknr, list(_args)))\n\t\t\t#for timenr in range(times):\n\t\t\t#\tresult = f(*args, **kwargs)\n\t\t\t#\tresults.append(result)\n\t\t\texecutions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]\n\t\t\tif info:\n\t\t\t\tinfoobj = infoclass(len(multiargs[0]), executions)\n\t\t\t\tinfoobj.start()\n\t\t\tfor i, execution in enumerate(executions):\n\t\t\t\texecution.setName(\"T-%d\" % i)\n\t\t\t\texecution.start()\n\t\t\t#if 1:\n\t\t\t#\twatchdog = Watchdog(executions)\n\t\t\t#\twatchdog.start()\n\t\t\terror = False\n\t\t\tfor execution in executions:\n\t\t\t\tlog(\"joining:\",execution.getName())\n\t\t\t\ttry:\n\t\t\t\t\texecution.join()\n\t\t\t\texcept BaseException:\n\t\t\t\t\terror = True\n\t\t\t\tresults.extend(execution.results)\n\t\t\t\tif execution.error:\n\t\t\t\t\terror = True \n\t\t\tif info:\n\t\t\t\tinfoobj.join()\n\t\t\tif error:\n\t\t\t\tprint(\"error\", file=sys.stderr)\n\t\t\t\tresults = None\n\t\t\t\traise Exception(\"error in one or more of the executors\")\n\t\t\telse:\n\t\t\t\tresults.sort(cmp=lambda a, b: cmp(a[0], b[0]))\n\t\t\t\tresults = [k[1] for k in results]\n\t\t\t\t#print \"bla\", results\n\t\t\t\tif flatten:\n\t\t\t\t\tflatresults = []\n\t\t\t\t\tfor result in results:\n\t\t\t\t\t\tflatresults.extend(result)\n\t\t\t\t\tresults = flatresults\n\t\t\treturn results\n\t\treturn execute\n\treturn wrapper",
"def grid_visibilities_parallel(self, visibilities,min_attenuation = 1e-10, N = 120):\n\n #Find out the number of frequencies to process per thread\n nfreq = len(self.frequencies)\n numperthread = int(np.ceil(nfreq/self.n_obs))\n offset = 0\n nfreqstart = np.zeros(self.n_obs,dtype=int)\n nfreqend = np.zeros(self.n_obs,dtype=int)\n infreq = np.zeros(self.n_obs,dtype=int)\n for i in range(self.n_obs):\n nfreqstart[i] = offset\n nfreqend[i] = offset + numperthread\n\n if(i==self.n_obs-1):\n infreq[i] = nfreq - offset\n else:\n infreq[i] = numperthread\n\n offset+=numperthread\n\n # Set the last process to the number of frequencies\n nfreqend[-1] = nfreq\n\n processes = []\n\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv +1 ) # +1 because these are bin edges.\n \n centres = (ugrid[1:] + ugrid[:-1]) / 2\n \n visgrid = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)), dtype=np.complex128)\n\n\n if(os.path.exists(self.datafile[0][:-4]+\".kernel_weights.npy\")):\n kernel_weights = np.load(self.datafile[0][:-4]+\".kernel_weights.npy\")\n else:\n kernel_weights=None\n \n if kernel_weights is None:\n weights = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)))\n\n visgrid_buff_real = []\n visgrid_buff_imag = []\n weights_buff = []\n\n #Lets split this array up into chunks\n for i in range(self.n_obs):\n\n visgrid_buff_real.append(multiprocessing.RawArray(np.sctype2char(visgrid.real),visgrid[:,:,nfreqstart[i]:nfreqend[i]].size))\n visgrid_buff_imag.append(multiprocessing.RawArray(np.sctype2char(visgrid.imag),visgrid[:,:,nfreqstart[i]:nfreqend[i]].size))\n visgrid_tmp_real = np.frombuffer(visgrid_buff_real[i])\n visgrid_tmp_imag = np.frombuffer(visgrid_buff_imag[i])\n visgrid_tmp_real = visgrid[:,:,nfreqstart[i]:nfreqend[i]].real.flatten()\n visgrid_tmp_imag = visgrid[:,:,nfreqstart[i]:nfreqend[i]].imag.flatten()\n\n\n if(kernel_weights is None):\n weights_buff.append(multiprocessing.RawArray(np.sctype2char(weights),weights[:,:,nfreqstart[i]:nfreqend[i]].size))\n weights_tmp = np.frombuffer(weights_buff[i])\n weights_tmp = weights[:,:,nfreqstart[i]:nfreqend[i]]\n else:\n weights_buff.append(None)\n\n processes.append(multiprocessing.Process(target=self._grid_visibilities_buff,args=(self.n_uv,visgrid_buff_real[i],visgrid_buff_imag[i],weights_buff[i], visibilities[:,nfreqstart[i]:nfreqend[i]],self.frequencies[nfreqstart[i]:nfreqend[i]],self.baselines,centres,self._instr_core.sigma(self.frequencies[nfreqstart[i]:nfreqend[i]]),min_attenuation, N) ))\n\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n for i in range(self.n_obs):\n\n visgrid[:,:,nfreqstart[i]:nfreqend[i]].real = np.frombuffer(visgrid_buff_real[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n visgrid[:,:,nfreqstart[i]:nfreqend[i]].imag = np.frombuffer(visgrid_buff_imag[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n\n if(kernel_weights is None):\n weights[:,:,nfreqstart[i]:nfreqend[i]] = np.frombuffer(weights_buff[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n\n if kernel_weights is None:\n kernel_weights = weights\n \n visgrid[kernel_weights!=0] /= kernel_weights[kernel_weights!=0]\n\n return visgrid,kernel_weights",
"def parallelize_task(task_list, func_name, func_args, workers_count=10, workload=1):\n indices_list = np.arange(0, len(task_list), workload)\n results = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=workers_count) as executor:\n fetched_rows = {executor.submit(\n func_name,\n task_list[fromindex: fromindex + workload],\n **func_args\n ): fromindex for fromindex in indices_list}\n for future in concurrent.futures.as_completed(fetched_rows):\n row_data = fetched_rows[future]\n #try:\n data = future.result()\n results.extend(data)\n #except Exception as ex:\n # print(\"exception running parallel task \",func_name,\" for \", row_data, \"...\", ex)\n return results",
"def _apply_parallel(grouped_df, func, neg_compound, compound, f_cols, n_jobs,\n method):\n n_cpu = multiprocessing.cpu_count()\n output = Parallel(n_jobs=n_jobs)(delayed(func)(\n group, neg_compound, compound, f_cols, method) for _, group in grouped_df)\n return pd.concat(output)",
"def in_parallel(*args):\n \n # Execute each in a thread and return them all.\n return ThreadPool(len(args)).map(lambda x: x(), args)",
"def propagateOrbits(\n self, orbits, t1, chunk_size=100, num_jobs=1, parallel_backend=\"cf\"\n ):\n parallel, num_workers = _checkParallel(num_jobs, parallel_backend)\n if parallel:\n orbits_split = orbits.split(chunk_size)\n t1_duplicated = [copy.deepcopy(t1) for i in range(len(orbits_split))]\n backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]\n\n if parallel_backend == \"ray\":\n import ray\n\n if not ray.is_initialized():\n ray.init(address=\"auto\")\n\n propagation_worker_ray = ray.remote(propagation_worker)\n propagation_worker_ray.options(num_returns=1, num_cpus=1)\n\n p = []\n for o, t, b in zip(orbits_split, t1_duplicated, backend_duplicated):\n p.append(propagation_worker_ray.remote(o, t, b))\n propagated_dfs = ray.get(p)\n\n elif parallel_backend == \"mp\":\n p = mp.Pool(\n processes=num_workers,\n initializer=_initWorker,\n )\n\n propagated_dfs = p.starmap(\n propagation_worker,\n zip(\n orbits_split,\n t1_duplicated,\n backend_duplicated,\n ),\n )\n p.close()\n\n elif parallel_backend == \"cf\":\n with cf.ProcessPoolExecutor(\n max_workers=num_workers, initializer=_initWorker\n ) as executor:\n propagated_dfs = executor.map(\n propagation_worker,\n orbits_split,\n t1_duplicated,\n backend_duplicated,\n )\n\n else:\n raise ValueError(\"parallel_backend must be one of {'ray', 'mp', 'cf'}.\")\n\n propagated = pd.concat(propagated_dfs)\n propagated.reset_index(drop=True, inplace=True)\n else:\n propagated = self._propagateOrbits(orbits, t1)\n\n return propagated",
"def parfor(func, in_list, out_shape=None, n_jobs=-1, engine=\"joblib\",\n backend=\"threading\", func_args=[], func_kwargs={}):\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n n_jobs = n_jobs - 1\n\n if engine == \"joblib\":\n p = joblib.Parallel(n_jobs=n_jobs, backend=backend)\n d = joblib.delayed(func)\n d_l = []\n for in_element in in_list:\n d_l.append(d(in_element, *func_args, **func_kwargs))\n results = p(d_l)\n\n elif engine == \"dask\":\n def partial(func, *args, **keywords):\n def newfunc(in_arg):\n return func(in_arg, *args, **keywords)\n return newfunc\n p = partial(func, *func_args, **func_kwargs)\n d = [dask.delayed(p)(i) for i in in_list]\n if backend == \"multiprocessing\":\n results = dask.compute(*d, get=dask.multiprocessing.get,\n workers=n_jobs)\n elif backend == \"threading\":\n results = dask.compute(*d, get=dask.threaded.get,\n workers=n_jobs)\n else:\n raise ValueError(\"%s is not a backend for dask\" % backend)\n\n elif engine == \"serial\":\n results = []\n for in_element in in_list:\n results.append(func(in_element, *func_args, **func_kwargs))\n\n if out_shape is not None:\n return np.array(results).reshape(out_shape)\n else:\n return results",
"def worker_train():\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n #lr = self.lr \n job_words = 0\n #pre=self.build_model(len(model.vocab), model.layer1_size, lamda = 0.0, learning_rate=lr)\n for edge in job:\n if edge is not None:\n x = []\n y = []\n x.append([edge[0].index, edge[1].index])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n #for i in range(int(10 * (weight)) * self.negative):\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if nodeidx != edge[0].index:\n x.append([edge[0].index, nodeidx])\n y.append(0.0)\n feed_dict = {\n pre.x: x,\n pre.y: y,\n pre.node_embeddings_init: model.node_embedding\n }\n #saver = tf.train.Saver()\n _, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n feed_dict=feed_dict)\n model.node_embedding[edge[0].index] = node_embeddings[edge[0].index]\n x = []\n y = []\n x.append([edge[1].index, edge[0].index])\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n #y.append(weight)\n y.append(1.0)\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if edge[1].index != nodeidx:\n x.append([edge[1].index, nodeidx])\n y.append(0.0)\n feed_dict = {\n pre.x: x,\n pre.y: y,\n pre.node_embeddings_init: model.node_embedding\n }\n \n #saver = tf.train.Saver()\n _, loss, node_embeddings = sess.run([pre.d_updates, pre.reg_loss, pre.node_embeddings_n1],\n feed_dict=feed_dict)\n\n #model.node_embedding = node_embeddings\n model.node_embedding[edge[1].index] = node_embeddings[edge[1].index]\n job_words += len(x)\n \n #log.info(\"train_loss: {}, node_embeddings = {}\".format(loss, model.node_embedding))\n \n #saver.restore(sess, INNER_MODEL_FILE)\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n #job_words = len(x)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()",
"def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])",
"def chunks_threads(li, n):\n\tindex = int(len(li) / n + 0.5)\n\tfor i in range(n-1):\n\t\tyield li[i*index:i*index + index]\n\tyield li[n*index - index:]",
"def concurrent_training_bis(model, optimizer, loss_function, list_tuple_training, list_tuple_testing, verbose=False,\n keep_trace=True, early_stopping=True,\n **kwargs):\n err_train_2 = []\n err_test_2 = []\n n_calc = kwargs.get(\"n_calc\", 10)\n epochs = kwargs.get(\"epoch\", 200)\n copy_test_list = list_tuple_testing.copy()\n if early_stopping:\n N_test = len(copy_test_list) // 2\n set_valid = copy_test_list[-N_test:]\n copy_test_list = copy_test_list[:N_test]\n if verbose:\n sum_train = sum_model_conc(list_tuple_training, n_calc)\n sum_test = sum_model_conc(copy_test_list, n_calc)\n if early_stopping:\n sum_valid = sum_model_conc(set_valid, n_calc)\n\n old = 1000\n i=0\n count_early_stopping = 0\n while (i < epochs and count_early_stopping < 10) or i < 40:\n i += 1\n cumulated_train_loss = 0\n copy_train_list = list_tuple_training.copy()\n shuffle(copy_train_list)\n optimizer.zero_grad()\n loss = 0\n for mat_step in copy_train_list:\n if len(mat_step) != 0:\n partial_sum = torch.tensor(mat_step[0][1])\n for j in range(1, len(mat_step)):\n partial_sum += mat_step[j][1]\n model.reinitialize()\n cp_mat_step = mat_step.copy()\n sum_prediction = torch.ones(partial_sum.shape)\n for X_train, _ in cp_mat_step:\n model.reinitialize()\n sum_prediction += model(X_train)\n\n shuffle(cp_mat_step)\n for (X_train, y_train) in cp_mat_step:\n model.reinitialize()\n y_pred = partial_sum * model(X_train) / sum_prediction\n single_loss = loss_function(y_pred[-n_calc:, 0], y_train[-n_calc:, 0])\n loss += single_loss\n cumulated_train_loss += single_loss.item()\n\n loss.backward()\n optimizer.step()\n erreur_test = concurrent_evaluation_model(model, loss_function, copy_test_list, current_sum=sum_test,\n verbose=False, **kwargs)\n if early_stopping:\n erreur_valid = concurrent_evaluation_model(model, loss_function, set_valid, current_sum=sum_valid,\n verbose=False, **kwargs)\n if erreur_valid > old:\n count_early_stopping +=1\n old = erreur_valid\n else:\n count_early_stopping = 0\n old = erreur_valid\n if verbose :\n print('Epoch : %s/%s Training MAPE: %.4f Testing MAPE: %.4f' % (\n i, epochs, 100 * cumulated_train_loss / max(1, sum_train), erreur_test))\n if keep_trace:\n err_train_2.append(cumulated_train_loss /sum_train)\n err_test_2.append(erreur_test)\n return i ,err_train_2, err_test_2"
]
| [
"0.6423797",
"0.6420414",
"0.6377233",
"0.63467634",
"0.63066",
"0.62989044",
"0.6188953",
"0.6165699",
"0.61038834",
"0.6097976",
"0.6086044",
"0.60721475",
"0.6052888",
"0.60257155",
"0.6006814",
"0.59992325",
"0.59987307",
"0.5990851",
"0.5955791",
"0.5950258",
"0.5926273",
"0.5919878",
"0.5906161",
"0.58865607",
"0.588298",
"0.5881043",
"0.5869832",
"0.585554",
"0.58338696",
"0.58264726"
]
| 0.72813725 | 0 |
function to validate that the tagged competition file generated by the model is identical to the original competition file when tags are removed | def validateTaggedCompFile(comp_file, tagged_comp_file):
comp_data = SimpleDataReader(comp_file)
tagged_comp_data = SimpleDataReader(tagged_comp_file)
assert comp_data.getSentencesSize() == tagged_comp_data.getSentencesSize(), "Missing Sentences!"
mistakes = 0
for i in range(comp_data.getSentencesSize()):
comp_sentence = comp_data.getSentenceByIndex(i)
tagged_comp_sentence = tagged_comp_data.getSentenceByIndex(i)
assert len(comp_sentence) == len(tagged_comp_sentence), "Missing Words in Sentence: " + str(i)
for k in range(len(comp_sentence)):
word = comp_sentence[k]
tagged_word = tagged_comp_sentence[k].split(TAGCHAR)[0]
if word != tagged_word:
mistakes += 1
print("Sentences differ:", word, tagged_word)
if mistakes == 0:
print("Files are Identical!")
else:
print("Files are NOT Identical!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_no_tags(self):\n test_files = glob.glob(INPUT_GI_PATH + '/gi_*.mdd')\n\n mdd.procall(test_files)\n\n data = self.read_full_file('node16p1.dat')\n if not self.check_for_tags(data):\n self.fail(\"Found header tag in data file\")\n\n data = self.read_full_file('node17p1.dat')\n if not self.check_for_tags(data):\n self.fail(\"Found header tag in data file\")\n\n data = self.read_full_file('node14p1.dat')\n if not self.check_for_tags(data):\n self.fail(\"Found header tag in data file\")",
"def validate_file(self):\n print \"\\n******\"\n print \" Done creating file. Validation messages follow.\"\n missing_nodes = {'group': [], 'dataset': []}\n custom_nodes = {'group': [], 'dataset': []}\n for ns in self.id_lookups:\n for id in self.id_lookups[ns]:\n for path in self.id_lookups[ns][id]:\n qty = self.id_lookups[ns][id][path]['qty']\n type = self.id_lookups[ns][id][path]['type']\n count = len(self.id_lookups[ns][id][path]['created'])\n if qty in ('!', '+') and count == 0:\n missing_nodes[type].append(\"%s:%s/%s\" % (ns, path, id))\n for path, node_list in self.all_nodes.iteritems():\n for root_node in node_list:\n self.validate_nodes(root_node, missing_nodes, custom_nodes)\n self.report_problems(missing_nodes, \"missing\")\n self.report_problems(custom_nodes, \"custom\")\n if self.custom_attributes:\n count = len(self.custom_attributes)\n print \"%i nodes with custom attributes\" % len(self.custom_attributes)\n if count > 20:\n print \"Only first 20 shown;\"\n names = self.custom_attributes.keys()[0:min(20, count)]\n nlist = []\n for name in names:\n nlist.append(name+ \"->\" +str(self.custom_attributes[name]))\n print nlist\n else:\n print \"No custom attributes. Good.\"",
"def test_no_deletion(self):\n\t\tanalyse_text(self.filename)\n\t\tself.assertTrue(os.path.exists(self.filename))",
"def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")",
"def Validate(self, relative_file, contents):\n pass",
"def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))",
"def test_old_format_for_tags(self):\n test_files = glob.glob(INPUT_HYPM_PATH + '/unit_*.mdd')\n test_files.extend(glob.glob(INPUT_FLMB_PATH + '/unit_*.mdd'))\n\n mdd.procall(test_files)\n\n data = self.read_full_file('node58p1.dat')\n if not self.check_for_tags(data):\n self.fail(\"Found header tag in data file\")\n\n data = self.read_full_file('node59p1.dat')\n if not self.check_for_tags(data):\n self.fail(\"Found header tag in data file\")",
"def validate_data_files(self):\n if self.labeled:\n image_ids = self.loader_moving_image.get_data_ids()\n label_ids = self.loader_moving_label.get_data_ids()\n check_difference_between_two_lists(list1=image_ids, list2=label_ids)",
"def reference_segmenter_validation(self):\n for file in filter(lambda x: x.endswith('referenceSegmenter.tei.xml'), listdir(self.bs_directory)):\n print \"Processing\", file\n bs = BeautifulSoup(open(self.bs_directory + file), 'xml')\n self.__reference_segmenter_correction(bs)\n file = open(self.reference_segmenter_output + file, \"wb\")\n file.write(bs.prettify().encode('utf-8'))",
"def parseRaw(tagDict, inFileName):\r\n\r\n # '%Y/%m/%d %H:%M:%S' RAW Argos.csv format\r\n\r\n\r\n csvName = path.basename(inFileName)\r\n # Trap argos raw files that occurred within these dates\r\n # date formatted dd/mm/yy instead of yyyy/mm/dd\r\n bd = False\r\n if csvName >= util.CSV_schema.bad_dates[0][0]:\r\n if csvName <= util.CSV_schema.bad_dates[0][1]:\r\n bd = True\r\n if csvName >= util.CSV_schema.bad_dates[1][0]:\r\n if csvName <= util.CSV_schema.bad_dates[1][1]:\r\n bd = True\r\n\r\n newPasses = []\r\n d_ptt = {v[0]:k for k,v in tagDict.items()}\r\n pttDict = OrderedDict(sorted(d_ptt.items())) # Sort into {ptt: tag_id, ....}\r\n del d_ptt\r\n with open(inFileName, 'rb') as inFile:\r\n count = sum(1 for line in inFile)\r\n inFile.seek(0) # reset file\r\n reader = csv.DictReader(inFile)\r\n while reader.line_num < count:\r\n # Trap for changed fieldname\r\n gt = True if util.CSV_schema.gt_names[1] in reader.fieldnames else False\r\n featID = None\r\n ptt = 0\r\n msgType = 'NEW'\r\n str_timeval = ''\r\n passDur = None\r\n for row in reader:\r\n if row['Platform ID No.'][0] =='#': # What is this even trapping ???\r\n continue\r\n if int(row['Platform ID No.']) not in pttDict.keys(): # Orphan Tag\r\n newOrphan(row, inFileName, gt)\r\n msgType = 'NEW'\r\n updatePttList(ptt, row['Msg Date'],bd)\r\n continue\r\n elif int(row['Platform ID No.']) != ptt: # Start New PTT\r\n if ptt: # Skip ptt = 0\r\n tag_id = pttDict[ptt]\r\n dbutil.updateDeployment(conn, tag_id) # Update ptt that just finished\r\n updatePttList(ptt, last_msg, bd)\r\n updateDevice(tag_id, last_msg, bd)\r\n# HOW to update final (Valid) ptt?????\r\n msgType = 'NEW'\r\n # tag specific vars\r\n ptt = int(row['Platform ID No.']) #=integer\r\n tag_id = pttDict[ptt]\r\n pttStart = tagDict.get(tag_id)[1] #=datetimes\r\n pttStop = tagDict.get(tag_id)[2]\r\n animal_id = tagDict.get(tag_id)[4] #=integer\r\n # loop vars\r\n str_timeval = row['Loc. date'] if row['Loc. date'] else row['Msg Date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n # Trap out of range date\r\n if timevalue < pttStart:\r\n ptt = 0 # Force new ptt Variables for next row\r\n continue\r\n# ********* NOT TRAPPING stoptime ??\r\n elif timevalue > pttStop:\r\n ptt = 0\r\n continue\r\n # start parsing\r\n last_msg = format_date(row['Msg Date'],bd)\r\n if msgType == 'SAME':\r\n if row['Loc. date']:\r\n if row['Loc. date'] == str_timeval:\r\n if row['Pass'] != passDur:\r\n msgType = 'NEW'\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n if row['Sat.'] != sat:\r\n msgType = 'NEW'\r\n sat = row['Sat.']\r\n elif row['Loc. date'] != str_timeval: # Definitely New pass\r\n msgType = 'NEW'\r\n str_timeval = row['Loc. date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n else: # row['Loc. 
date'] empty\r\n if row['Pass'] == '0': # Single pass\r\n msgType = 'NEW'\r\n str_timeval = row['Msg Date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = None # OR '0'\r\n sat = row['Sat.']\r\n elif row['Pass'] != '0': # Multi-Z pass\r\n if row['Pass'] != passDur: # still in same pass\r\n msgType = 'NEW'\r\n str_timeval = getPassTime(inFileName,row['Pass'],\r\n str(ptt),\r\n row['Msg Date'][:10])\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n if msgType == 'SAME': #Append: to Transmit\r\n if featID:\r\n transmitID, last_msg = addTransmit(featID, row, bd)\r\n\r\n if msgType == 'NEW': # Append: to Argos & Transmit\r\n featID = addArgos(row, tag_id, animal_id, timevalue, gt, bd)\r\n msgType = 'SAME'\r\n if featID:\r\n print 'Pass at: [{0}] added for {1}'.format(str_timeval, ptt)\r\n newPasses.append(featID)\r\n\r\n return newPasses",
"def validate(self):\n dict_of_examples = {_.audio_file.hash(): _ for _ in self.exemplars}\n self.exemplars = [dict_of_examples[_] for _ in set(dict_of_examples)]\n return sum(_.validate() for _ in self.exemplars)",
"def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)",
"def validate_tariff(self):\n\t\treturn True",
"def valid_ogfile(listname):\r\n global original_file, directory_og\r\n listname = listname.split(\".\")[0] #get rid of adapting or notadapting\r\n try:\r\n original_file = open(directory_og+listname+\".csv\", \"r\")\r\n return True\r\n except:\r\n return False",
"def clean_tags(f: mutagen.FileType) -> mutagen.FileType:\n _awful_tags = (\n \"comment\",\n \"genre\",\n \"isrc\",\n \"upc\",\n \"barcode\",\n \"organization\",\n \"copyright\",\n \"bpm\",\n \"length\",\n \"website\",\n \"www\"\n )\n _awful_categories = (\n \"musicbrainz_.*\",\n \"replaygain_.*\",\n \"catalog.*\",\n \"beatport.*\",\n \".*label.*\",\n \"encod.*\",\n \".*key.*\",\n \"itunes.*\"\n )\n for key in f.keys():\n if key.lower() in _awful_tags or any(re.search(p, key.lower())\n is not None\n for p in _awful_categories):\n f.pop(key)\n # fix date tag\n if len(f.get(\"date\", [\"0000\"])[0]) > 4:\n date = f.pop(\"date\")\n date[0] = date[0][:4]\n f[\"date\"] = date\n # fix title\n if \"title\" in f:\n title = f.pop(\"title\")[0]\n # remove \"original mix\"\n title = re.sub(\"\\s*(-\\s*|\\()[Oo]riginal( [Mm]ix)?\\)?\\s*$\", \"\", title)\n # split out featured artist\n tmp_featured = re.split(\"\\s*\\(feat(\\.|uring)?\\s*\", title)\n if len(tmp_featured) == 2:\n title = tmp_featured[0]\n featured = re.sub(\"\\s*\\)\\s*$\", \"\", tmp_featured[1])\n if \"performer\" in f:\n f[\"performer\"].append(featured)\n else:\n f[\"performer\"] = [featured]\n f[\"title\"] = [title]\n return f",
"def validate(self, attrs):\n tag_name = attrs['tag_name']\n club = attrs['club']\n request = self.context['request']\n profile = UserProfile.objects.get(user=request.user)\n if (club not in profile.get_club_privileges() and\n club not in profile.get_workshop_privileges().values_list('club', flat=True)):\n raise serializers.ValidationError(\"You are not allowed to create tag for this club\")\n if Tag.objects.filter(tag_name=tag_name, club=club):\n raise serializers.ValidationError(\"The tag already exists for this club\")\n return attrs",
"def validate(file_in) :\n\tname = str(file_in.name)\n\tif name[-4:] != \".xml\" and name[-4:] != \".XML\" :\n\t\treturn False\n\txsd = open('wcdb/WorldCrises.xsd.xml', 'r')\n\txmlFile = open('wcdb/temp.xml', 'w')\n\txmlFile.write(file_in.read())\n\txmlFile = open('wcdb/temp.xml', 'r')\n\ttry:\n\t\tpsvi = pyxsval.parseAndValidate(\"wcdb/temp.xml\",\n\t\t\t\"wcdb/WorldCrises.xsd.xml\", xmlIfClass=pyxsval.XMLIF_ELEMENTTREE)\n\t\ttree = psvi.getTree()\n\texcept pyxsval.XsvalError, e:\n\t\treturn 'Validation aborted. ' + str(e)\n\texcept GenXmlIfError, e:\n\t\treturn 'Parsing aborted. ' + str(e)\n\texcept Exception as e:\n\t\t# catch all\n\t\treturn 'Exception. ' + str(e)\n\t#handle invalid case\n\treturn tree",
"def is_unique_file_valid_in_set(self, pack_path, ignored_errors=None):\n self.ignored_errors = ignored_errors\n is_valid = True\n error = None\n if self.is_circle:\n click.echo(f\"id set validations for: {pack_path}\")\n\n is_valid, error = self._is_pack_display_name_already_exist(\n get_pack_metadata_data(f'{pack_path}/pack_metadata.json', False))\n\n return is_valid, error",
"def validate_pot(self, attrs, source):\n value = attrs[source]\n if value:\n try:\n template_file = StringIO()\n template_file.write(value.encode('UTF8'))\n template_file.seek(0)\n # Seems the validation from read_po is too much minimalistic\n # This does not really valid if the content is a real POT content\n self.uploaded_pot_file = read_po(template_file, ignore_obsolete=True)\n except:\n raise serializers.ValidationError(\"Your file does not seem to be a valid POT file\")\n return attrs",
"def citation_validation(self):\n for file in filter(lambda x: x.startswith('citation'), listdir(bs_directory)):\n print \"Processing\", file\n bs = BeautifulSoup(open(file), 'xml')\n # find ground_truth file\n ground_truth = BeautifulSoup(open(self.ground_truth_directory + file.split(\".\")[0] + '.xml'), 'xml')\n self.__citation_correction(ground_truth, bs)\n file = open(self.citation_output + file, \"wb\")\n file.write(bs.prettify().encode('utf-8'))",
"def validate(self):\n with open(os.path.join(settings.MEDIA_ROOT, self.file.name)) as file:\n lines = file.readlines()\n validators = ['os.', 'from os', 'io.', 'from io', 'open(', 'system(']\n for line in lines:\n for validator in validators:\n if validator in line:\n return False\n return True",
"def test_warn_duplicate_label(self, caplog: pytest.LogCaptureFixture) -> None:\n with tempfile.NamedTemporaryFile(\"w\") as file:\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as existing:\n file.writelines(existing.readlines())\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as existing:\n file.writelines(existing.readlines())\n file.flush()\n _ = YAMLParser().parse(file.name)\n assert (\n \"cobib.parsers.yaml\",\n 30,\n \"An entry with label 'Cao_2019' was already encountered earlier on in the YAML file! \"\n \"Please check the file manually as this cannot be resolved automatically by coBib.\",\n ) in caplog.record_tuples",
"def validation(nameFile, fileContent):\n\n\n dayNameFile = nameFile[-5:-4]\n monthNameFile = nameFile[-8:-6]\n yearNameFile = nameFile[-13:-9]\n hourNameFile = nameFile[-19:-14]\n hourNameFile = hourNameFile.replace(\"h\", \"\")\n \n\n if nameFile[0:6] == \"drones\":\n scopeNameFile = nameFile[0:6]\n elif nameFile[0:7] == \"parcels\":\n scopeNameFile = nameFile[0:7]\n\n headerFileContent = fileContent[constants.header]\n dateFile = headerFileContent[constants.headerTime]\n dayFile = dateFile[0:1]\n monthFile = dateFile[2:4]\n yearFile = dateFile[5:9]\n hourFile = headerFileContent[1]\n hourFile = hourFile.replace(\"h\", \"\")\n scopeFile = headerFileContent[constants.scope]\n\n\n return hourNameFile == hourFile and dayNameFile == dayFile and monthNameFile == monthFile and yearNameFile == yearFile and scopeNameFile == scopeFile",
"def check_format_of_annotation_in_file(self):\n if not self.is_span_valid():\n sys.exit()",
"def test_clean_tags_with_valid_tags(self):\n Tag.objects.create(name='these')\n Tag.objects.create(name='are')\n Tag.objects.create(name='valid')\n Tag.objects.create(name='tags')\n form = forms.GroupForm(\n {\n 'tags': 'these,are, valid, tags',\n 'category': self.category.pk\n })\n self.assertTrue(form.is_valid())",
"def clean(self):\n super().clean()\n cd = self.cleaned_data\n ack = cd.get('acknowledgement_file')\n filename = ack.name\n if not (len(filename) in [18, 19] and filename[-4:].upper() == '.V21'):\n raise ValidationError('Wrong file name format.')\n self.cleaned_data['filename'] = filename\n content = ack.file.read().decode('latin1')\n match = re.match(self.RE_HDR, content)\n if not match:\n raise ValidationError('Incorrect CWR header')\n code, name, date1, date2 = match.groups()\n self.cleaned_data['society_code'] = code.strip().lstrip('0')\n self.cleaned_data['society_name'] = name.strip()\n self.cleaned_data['date'] = datetime.strptime(\n max([date1, date2]), '%Y%m%d').date()\n self.cleaned_data['acknowledgement_file'] = content",
"def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! 
flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def test_remove_file_group_regex(self):\n with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:\n mets = OcrdMets(filename=join(tempdir, 'mets.xml'))\n self.assertEqual(len(mets.file_groups), 17)\n self.assertEqual(len(mets.find_all_files()), 35)\n mets.remove_file_group('//OCR-D-GT-.*', recursive=True)\n self.assertEqual(len(mets.file_groups), 15)\n self.assertEqual(len(mets.find_all_files()), 31)"
]
| [
"0.5598545",
"0.5493406",
"0.5423309",
"0.5420384",
"0.5390964",
"0.5359352",
"0.5359352",
"0.5346071",
"0.5342391",
"0.5317892",
"0.5314947",
"0.5304241",
"0.5280131",
"0.5253318",
"0.5252958",
"0.5247392",
"0.52365345",
"0.52277553",
"0.52041465",
"0.5203192",
"0.5190546",
"0.51576823",
"0.5148625",
"0.51480937",
"0.51461256",
"0.5132415",
"0.5130352",
"0.51261026",
"0.51250046",
"0.51226974"
]
| 0.7111754 | 0 |
Determine first number that generates a hash with the given prefix. | def md5_with_prefix(input, prefix, start_with=0):
md5_input = hashlib.md5(input)
for number in itertools.count(start_with):
md5 = md5_input.copy()
md5.update(str(number).encode('ascii'))
if md5.hexdigest().startswith(prefix):
return number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fn(k):\n seen = set()\n for i in range(len(s)-k+1): \n val = (prefix[i+k] - prefix[i]*fac[k]) % MOD \n if val in seen: return True # rolling hash (ver. Monte Carlo)\n seen.add(val)\n return False",
"def find_prefixsum_index(self, prefixsum):\n assert 0 <= prefixsum <= self.sum() + 1e-5\n index = 1\n while index < self.capacity: # while non-leaf\n if self.value[2*index] > prefixsum:\n index = 2*index\n else:\n prefixsum -= self.value[2*index]\n index = 2*index + 1\n return index - self.capacity",
"def find_prefixsum_idx(self, prefixsum):\n assert 0 <= prefixsum <= self.sum() + 1e-5\n idx = 1\n while idx < self._capacity: # while non-leaf\n if self._value[2 * idx] > prefixsum:\n idx = 2 * idx\n else:\n prefixsum -= self._value[2 * idx]\n idx = 2 * idx + 1\n return idx - self._capacity",
"def find_prefixsum_idx(self, prefixsum):\n assert 0 <= prefixsum <= self.sum() + 1e-5\n idx = 1\n while idx < self._capacity: # while non-leaf\n if self._value[2 * idx] > prefixsum:\n idx = 2 * idx\n else:\n prefixsum -= self._value[2 * idx]\n idx = 2 * idx + 1\n return idx - self._capacity",
"def find_prefixsum_idx(self, prefixsum):\n assert 0 <= prefixsum <= self.sum() + 1e-5\n idx = 1\n while idx < self._capacity: # while non-leaf\n if self._value[2 * idx] > prefixsum:\n idx = 2 * idx\n else:\n prefixsum -= self._value[2 * idx]\n idx = 2 * idx + 1\n return idx - self._capacity",
"def find_prefixsum_idx(self, prefixsum):\n assert 0 <= prefixsum <= self.sum() + 1e-5\n idx = 1\n while idx < self._capacity: # while non-leaf\n if self._value[2 * idx] > prefixsum:\n idx = 2 * idx\n else:\n prefixsum -= self._value[2 * idx]\n idx = 2 * idx + 1\n return idx - self._capacity",
"def perfect_hash(num):\n return ((num+OFFSET)*(SIZE/PERIOD)) % (SIZE+1) + 1",
"def day_04_b() -> int:\n return get_min_hash(\"bgvyzdsv\", \"000000\")",
"def first_uri_matching_prefix(xia, prefix):\n\n if xia is not None:\n for uri in xia:\n if uri.startswith(prefix):\n return uri\n return None",
"def day_04_a() -> int:\n return get_min_hash(\"bgvyzdsv\", \"00000\")",
"def compute_hash(self, key: int):\n return key % 42",
"def search(self, L: int, a: int, modulus: int, n: int, nums: List[int]) -> str:\n # compute the hash of string S[:L]\n h = 0\n for i in range(L):\n h = (h * a + nums[i]) % modulus\n \n # already seen hashes of strings of length L\n seen = {h} \n # const value to be used often : a**L % modulus\n aL = pow(a, L, modulus) \n for start in range(1, n - L + 1):\n # compute rolling hash in O(1) time\n h = (h * a - nums[start - 1] * aL + nums[start + L - 1]) % modulus\n if h in seen:\n return start\n seen.add(h)\n return -1",
"def hash(x) -> int:\n pass",
"def _get_prefix(self):\r\n return _sha512('health'.encode('utf-8'))[0:6]",
"def _prefix_fun(prefix_str: str) -> List[int]:\n pi = [0] * len(prefix_str)\n i, j = 1, 0\n while i < len(prefix_str):\n if prefix_str[i] == prefix_str[j]:\n pi[i] = j + 1\n i += 1\n j += 1\n\n else:\n if j == 0:\n i += 1\n else:\n j = pi[j-1]\n return pi",
"def kmp_algo(inp_string: str, substr: str) -> Optional[int]:\n\n pi = _prefix_fun(substr)\n i, j = 0, 0\n while i <= len(inp_string)-len(substr):\n if inp_string[i] == substr[j]:\n first_occurrence = i\n while j < len(substr):\n if inp_string[i] != substr[j]:\n j = pi[j-1]\n break\n i += 1\n j += 1\n else:\n return first_occurrence\n else:\n i += 1\n return None",
"def get_hash_value(table, prime, multiplier, start, length):\n y = pow(multiplier, length, prime)\n hash_value = (table[start+length] - y*table[start]) % prime\n return hash_value",
"def version_get(self, string, prefix):\n\n regex = r\"[/_.]{}\\d+\".format(prefix)\n matches = re.findall(regex, string, re.IGNORECASE)\n\n if not len(matches):\n msg = \"No '_{}#' found in '{}'\".format(prefix, string)\n raise ValueError(msg)\n return matches[-1:][0][1], re.search(r\"\\d+\", matches[-1:][0]).group()",
"def extractPubKeyHash(script):\n # A pay-to-pubkey-hash script is of the form:\n # OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG\n if (\n len(script) == 25\n and script[0] == opcode.OP_DUP\n and script[1] == opcode.OP_HASH160\n and script[2] == opcode.OP_DATA_20\n and script[23] == opcode.OP_EQUALVERIFY\n and script[24] == opcode.OP_CHECKSIG\n ):\n\n return script[3:23]\n return None",
"def get_cache_key(prefix):\n return '%s' % (prefix)",
"def h_python(key, N):\n return hash(key) % N",
"def caller_hash(depth:int=1, prefix:str='#') -> str:\n caller = getframeinfo(stack()[depth+1][0])\n str = f\"{caller.filename}/{caller.lineno}\"\n _hash = hash(str)\n _hash += sys.maxsize + 1\n return prefix + hex(_hash)[2:]",
"def get_hash(hash_function, x: str):\n hash_function.update(x.encode())\n return int.from_bytes(hash_function.digest(), byteorder=\"big\")",
"def get_new_id(generator: str, seen_ids: set, prefix=''):\n while True:\n i = abs(hash(generator))\n id = prefix + str(i)\n if id not in seen_ids:\n break\n # while id in seen:\n # i += 1\n # id = prefix + str(i)\n return id",
"def resolve_hash(cls, name, registry, hash_prefix):\n validate_package_name(name)\n return get_package_registry(registry).resolve_top_hash(name, hash_prefix)",
"def _hash_function(self, key):\n h = 0\n a = 31\n table_size = self.size\n for i in range(len(key)):\n h = (h * a + ord(key[i])) % table_size\n return h",
"def _gethash(self, invalue) -> int:\n return hash(invalue) % self.capacity",
"def hash_function_2(key: str) -> int:\n hash, index = 0, 0\n index = 0\n for letter in key:\n hash += (index + 1) * ord(letter)\n index += 1\n return hash",
"def prehash(key):\n\n return hash(key)",
"def hash_function(self, x):\n if not x:\n return -1\n hashed_value = 0\n\n for char in x:\n hashed_value = 181 * hashed_value + ord(char)\n\n return hashed_value % self.capacity"
]
| [
"0.63537276",
"0.62036",
"0.61238146",
"0.61238146",
"0.61238146",
"0.61238146",
"0.6032708",
"0.59841865",
"0.5956391",
"0.58020586",
"0.5776798",
"0.5745924",
"0.571987",
"0.57072127",
"0.56676674",
"0.5663591",
"0.5651302",
"0.5642183",
"0.56292266",
"0.56020457",
"0.55982804",
"0.5578035",
"0.55645335",
"0.55561423",
"0.55121756",
"0.5501401",
"0.54948765",
"0.5467801",
"0.5463081",
"0.54594344"
]
| 0.6745519 | 0 |
List all platform groups | def get(self):
return self.query(PlatformGroup) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))",
"def list_groups(self):\n return self.get_admin(\"groups\")",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def list_groups(self):\n return self._get(\"cloudConnectorGroups\").list",
"def Platforms():\n return platforms",
"def list_platforms(self):\n return self.do_rpc('list_platforms')",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})",
"def list(request):\n return render_to_response('rteacher/manage_groups_list.html', request, **klist(\n request=request\n ))",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def product_group_list(obj):\n client = get_client(obj)\n\n res = client.product_group_list()\n\n print(json.dumps(res, indent=4))",
"def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)",
"def groups(self):\r\n return resources.Groups(self)",
"def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)",
"def platforms(self):\n return self.rpc.call(MsfRpcMethod.ModulePlatforms)",
"def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]",
"def hostgroup_list(self):\n return self.ezx.get_hostgroup_list()",
"def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()",
"def get(self, platform_group):\n return platform_group",
"def get_all(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving groups\", \"/sysaccount/groups/v1\")",
"def getGroups():\r\n return Group.getGroups()",
"def test_get_device_groups(self):\n pass",
"def list_groups(self, **params):\n url = 'groups'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def available_groups(cls):\n raise NotImplementedError",
"def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)",
"def api_groups(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_groups\")",
"def api_groups(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_groups\")",
"def api_groups(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_groups\")",
"def api_groups(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_groups\")",
"def get_cli_groups():\n\n return get_component(CLIPackage.COMPONENT_NAME).get_cli_groups()"
]
| [
"0.68230575",
"0.6745456",
"0.67188114",
"0.67183566",
"0.66820043",
"0.6664303",
"0.655608",
"0.65517473",
"0.6504185",
"0.64655936",
"0.6463842",
"0.6445459",
"0.6440731",
"0.6399825",
"0.63977414",
"0.6334661",
"0.63319683",
"0.63063276",
"0.6291134",
"0.62861246",
"0.6271167",
"0.62570053",
"0.62093735",
"0.6182559",
"0.61628914",
"0.6150475",
"0.6150475",
"0.6150475",
"0.6150475",
"0.6114113"
]
| 0.72675157 | 0 |
Create a new platform group | def post(self):
args = platform_group_arguments.parse_args()
platform_group = PlatformGroup(**args)
self.session.add(platform_group)
self.session.commit()
return platform_group | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response",
"def test_create_group(self):\n pass",
"def test_create_group(self):\n pass",
"def test_create_device_group(self):\n pass",
"def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))",
"def test_create_resource_group(self):\n pass",
"def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))",
"def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200",
"def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")",
"def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )",
"def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group",
"def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n models.db.session.commit()\n return response(200, creation=True)",
"def handle(self, *args, **options):\n new_group, created = Group.objects.get_or_create(name=options.get('group_name')) \n self.stdout.write(f\"Group {options.get('group_name')} created\")",
"def test_create_device_group1(self):\n pass",
"def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"",
"def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')",
"def create_group(self, group_name, group_type):\n grp_data = {\"name\": group_name, \"type\": group_type}\n return requests.post(self.groups_url, data=json.dumps(grp_data),\n headers=self.headers)",
"def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)",
"def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)",
"def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)",
"def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)",
"def _create_child_group(self, name) -> \"GroupBase\":\n pass",
"def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)",
"def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]",
"def create_group(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Create/\"))",
"def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409",
"def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group",
"def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)",
"async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')"
]
| [
"0.70539564",
"0.7010585",
"0.7010585",
"0.69507515",
"0.68371516",
"0.68168336",
"0.67621464",
"0.66609395",
"0.65966123",
"0.6564437",
"0.65399975",
"0.65343636",
"0.6531129",
"0.6464494",
"0.64521194",
"0.644507",
"0.64419746",
"0.6399733",
"0.6391645",
"0.63801545",
"0.637419",
"0.63716495",
"0.6356402",
"0.63379294",
"0.6313578",
"0.63068086",
"0.6286897",
"0.6272791",
"0.6267784",
"0.6266055"
]
| 0.7955074 | 0 |
Loads session object from options in a configuration file section. The session_kwargs will be passed directly to keystoneauth1 Session and will override the values loaded from config. Consult keystoneauth1 docs for available options. | def get_session(group, **session_kwargs):
return ks_loading.load_session_from_conf_options(
CONF, group, **session_kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize(cls, filename: str):\n if not os.path.exists(filename):\n raise FileNotFoundError(f\"Указанный файл '{filename}' конфигурации сессии не найден\")\n\n # Read the config file\n session_config = SessionConfig()\n session_config._config_filename = filename\n session_config._parser = configparser.ConfigParser()\n session_config._parser.read(filename, encoding='utf-8')\n\n if not session_config._parser.has_section(constants.CLIENT_CREDENTIALS_SECTION):\n raise configparser.Error(f\"Конфигурационный файл сессии не содержит секции \"\n f\"{constants.CLIENT_CREDENTIALS_SECTION}, пожалуйста, добавьте её\")\n\n # Session strings may be omitted. Therefore, creates its section\n if not session_config._parser.has_section(constants.SESSION_STRINGS_SECTION):\n session_config._parser.add_section(constants.SESSION_STRINGS_SECTION)\n\n return session_config",
"def __init__(self, **kwargs):\r\n self._kwargs = kwargs\r\n\r\n if 'uri' in self._kwargs:\r\n self.session = get_session(self._kwargs['uri'], mode='session')\r\n else:\r\n # open a database session\r\n self.session = get_session(uri=None, mode='session', **{k: v for k, v in self._kwargs.items() if k in ('db_name', 'data_path')})",
"def session_setup(opts: Dict[Any, Any]) -> Any: #TODO\n stype = ''\n if 'serverca' in opts and 'cert' in opts:\n stype = 'ssl'\n s = session.get(stype, **opts)\n if s is None:\n raise errors.KojiError('Unable to idenify authentication type.')\n s.login()\n if not s.is_ok():\n raise errors.AuthError('Unable to validate session')\n return s",
"def get_session(*args, **kwargs):\n settings = _get_connection_settings(*args, **kwargs)\n return Session(settings)",
"def create_session():\n with open(CONFIG_PATH) as config_file:\n config_json = json.load(config_file)\n return boto3.Session(\n aws_access_key_id=config_json['awsAccessKeyId'],\n aws_secret_access_key= config_json['awsSecretAccessKey'],\n region_name=config_json['awsRegionName']\n )",
"def __init__(self, *kwargs):\n self.session = requests.Session()\n self.config_path = os.path.join(\n os.path.dirname(__file__), 'config.json')\n self.load_config()\n if self.application_token == '':\n self.set_application_token()\n self.token = self.get_token()\n self.get_settings()",
"def __init__(self, server=None, auth=None, options=None):\n options = {} if options is None else options\n\n if server is not None:\n options['server'] = server\n\n merged = copy.deepcopy(self.DEFAULT_OPTIONS)\n merged.update(options)\n\n self._session = Session(auth, merged)",
"def get_session_factory(self, options):",
"def get_session(self):\n if self.session is None:\n # loader = loading.get_plugin_loader('password')\n # auth = loader.load_from_options(**self.auth_kwargs)\n auth = v3.Password(**self.auth_kwargs)\n self.session = session.Session(auth=auth)\n return self.session",
"def loadSession():\n metadata = BaseClass.metadata\n Session = sessionmaker(bind=engine)\n session = Session()\n return session",
"def _init_session(session):\n if session is None:\n session = requests.Session()\n return session",
"def __init__(self,sessionpath=None,sessionlifetime=None,config=None):\n if sessionlifetime is not None:\n self.sessionlifetime = sessionlifetime\n\n if sessionpath is None:\n self.sessionpath = Config.sessionpath\n else:\n self.sessionpath = sessionpath\n\n # legt Sessionverzeichnis an, wenn nicht vorhanden\n if not os.path.exists(self.sessionpath):\n os.makedirs(self.sessionpath)\n \n\n self.cookie = Cookie.SmartCookie()\n self.loadCookie()\n self.newSession()\n\n # Attribute Laden wenn Cookie 'sid' gesetzt\n if self.cookie is not None:\n self.loadAttributes()",
"def configure(self, session):\n\n raise NotImplementedError",
"def get_session(\n user_agent: Optional[str] = None,\n user_agent_config_yaml: Optional[str] = None,\n user_agent_lookup: Optional[str] = None,\n use_env: bool = True,\n fail_on_missing_file: bool = True,\n verify: bool = True,\n **kwargs: Any,\n) -> requests.Session:\n s = requests.Session()\n s.verify = verify\n ua = kwargs.get(\"full_agent\")\n if not ua:\n ua = UserAgent.get(\n user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs\n )\n s.headers[\"User-Agent\"] = ua\n\n auths_found = []\n headers = kwargs.get(\"headers\")\n if headers is not None:\n s.headers.update(headers)\n if \"Authorization\" in headers:\n auths_found.append(\"headers\")\n\n extra_params_found = False\n extra_params_dict = None\n basic_auth = None\n if use_env:\n basic_auth_env = os.getenv(\"BASIC_AUTH\")\n if basic_auth_env:\n basic_auth = basic_auth_env\n auths_found.append(\"basic_auth environment variable\")\n extra_params = os.getenv(\"EXTRA_PARAMS\")\n if extra_params:\n if \"=\" in extra_params:\n extra_params_dict = {}\n logger.info(\n \"Loading extra parameters from environment variable\"\n )\n for extra_param in extra_params.split(\",\"):\n key, value = extra_param.split(\"=\")\n extra_params_dict[key] = value\n extra_params_found = True\n if not extra_params_found:\n # only do this if extra params env vars not supplied\n extra_params_dict = kwargs.get(\"extra_params_dict\")\n if extra_params_dict:\n extra_params_found = True\n logger.info(\"Loading extra parameters from dictionary\")\n\n extra_params_json = kwargs.get(\"extra_params_json\", \"\")\n if extra_params_json:\n if extra_params_found:\n raise SessionError(\n \"More than one set of extra parameters given!\"\n )\n extra_params_found = True\n logger.info(f\"Loading extra parameters from: {extra_params_json}\")\n try:\n extra_params_dict = load_json(extra_params_json)\n except OSError:\n if fail_on_missing_file:\n raise\n extra_params_yaml = kwargs.get(\"extra_params_yaml\", \"\")\n if extra_params_yaml:\n if extra_params_found:\n raise SessionError(\n \"More than one set of extra parameters given!\"\n )\n logger.info(f\"Loading extra parameters from: {extra_params_yaml}\")\n try:\n extra_params_dict = load_yaml(extra_params_yaml)\n except OSError:\n if fail_on_missing_file:\n raise\n extra_params_lookup = kwargs.get(\"extra_params_lookup\")\n if extra_params_lookup and extra_params_dict:\n extra_params_dict = extra_params_dict.get(extra_params_lookup)\n if extra_params_dict is None:\n raise SessionError(\n f\"{extra_params_lookup} does not exist in extra_params!\"\n )\n if extra_params_dict:\n basic_auth_param = extra_params_dict.get(\"basic_auth\")\n if basic_auth_param:\n basic_auth = basic_auth_param\n auths_found.append(\"basic_auth parameter\")\n del extra_params_dict[\"basic_auth\"]\n\n s.params = extra_params_dict\n\n basic_auth_arg = kwargs.get(\"basic_auth\")\n if basic_auth_arg:\n basic_auth = basic_auth_arg\n auths_found.append(\"basic_auth argument\")\n\n auth = kwargs.get(\"auth\")\n if auth:\n auths_found.append(\"auth argument\")\n basic_auth_file = kwargs.get(\"basic_auth_file\")\n if basic_auth_file:\n logger.info(f\"Loading basic auth from: {basic_auth_file}\")\n try:\n basic_auth = load_text(basic_auth_file, strip=True)\n auths_found.append(f\"file {basic_auth_file}\")\n except OSError:\n if fail_on_missing_file:\n raise\n if len(auths_found) > 1:\n auths_found_str = \", \".join(auths_found)\n raise SessionError(\n f\"More than one authorisation given! 
({auths_found_str})\"\n )\n if \"headers\" not in auths_found:\n if basic_auth:\n auth = basicauth_decode(basic_auth)\n s.auth = auth\n\n status_forcelist = kwargs.get(\n \"status_forcelist\", (429, 500, 502, 503, 504)\n )\n allowed_methods = kwargs.get(\n \"allowed_methods\",\n (\"HEAD\", \"TRACE\", \"GET\", \"PUT\", \"OPTIONS\", \"DELETE\"),\n )\n\n retries = Retry(\n total=5,\n backoff_factor=0.4,\n status_forcelist=status_forcelist,\n allowed_methods=allowed_methods,\n raise_on_redirect=True,\n raise_on_status=True,\n )\n s.mount(\"file://\", FileAdapter())\n s.mount(\n \"http://\",\n HTTPAdapter(\n max_retries=retries, pool_connections=100, pool_maxsize=100\n ),\n )\n s.mount(\n \"https://\",\n HTTPAdapter(\n max_retries=retries, pool_connections=100, pool_maxsize=100\n ),\n )\n return s",
"def load_session(path):\n dct = {}\n with open(path, 'r', encoding='utf-8') as f:\n dct = json.load(f)\n return dict_to_session(dct)",
"def init_session(self):\n ssl_context = ssl.create_default_context(\n purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None,\n cadata=None)\n ssl_settings = {\"ssl_context\": ssl_context}\n self.session = iRODSSession(\n host=self.module.params[\"host\"],\n port=self.module.params[\"port\"],\n user=self.module.params[\"admin_user\"],\n password=self.module.params[\"admin_password\"],\n zone=self.module.params[\"zone\"],\n **ssl_settings)",
"def get_session():\n request_session = requests.Session()\n\n # Try to use what was passed in for username/password...\n username = CMD.username\n password = CMD.password\n \n # ...if there was nothing passed in then try to read it from config file\n if ((username is None or username == \"\") and (password is None or password == \"\")):\n # Try to read username and password from config file, if it exists\n # Otherwise default to DEFAULT_USERNAME/DEFAULT_PASSWORD\n try:\n with open(\"config.json\") as config_file:\n config_data = json.load(config_file)\n if (config_data):\n username = config_data[\"username\"]\n password = config_data[\"password\"]\n except:\n LOG.exception(\"Unable to open \\\"/collector/config.json\\\" file\")\n username = DEFAULT_USERNAME\n password = DEFAULT_PASSWORD\n\n request_session.auth = (username, password)\n request_session.headers = {\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"netapp-client-type\": \"grafana-\" + __version__}\n # Ignore the self-signed certificate issues for https\n request_session.verify = False\n return request_session",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session",
"def init_session(self):\n pass",
"def init_session(self):\n pass",
"def load_session(session):\n def inner():\n web.ctx.session = session\n return inner",
"def _initialize_session(self):\n session = requests.Session()\n session.auth = (self.login, self.password)\n session.verify = False\n session.headers.update({'Accept': 'application/json'})\n session.headers.update({'Content-type': 'application/json'})\n return session",
"def login_with_config(self):\n username = self.cfg.get('user', 'username')\n password = token = None\n\n try:\n password = self.cfg.get('user', 'password')\n except configparser.NoOptionError:\n pass\n try:\n token = self.cfg.get('user', 'token')\n except configparser.NoOptionError:\n pass\n\n if password is None and token is None:\n raise KattisConfigError(\n \"Your .kattisrc seems to be corrupted. Please download a new one.\")\n\n loginurl = self.get_url(self.cfg, 'loginurl', 'login')\n return self.login(loginurl, username, password, token)",
"def get_session(*args, **kwargs):\n session = requests.session(*args, **kwargs)\n\n return session",
"def get_ga_session(self, *args, **kwargs):\n if not hasattr(self, 'ga_session') or 'force' in kwargs:\n if 'user_id' in kwargs or 'force' in kwargs:\n self.get_utmb(**kwargs)\n try:\n self._utmb.split('.')\n except:\n self.ga_session = Session()\n else:\n self.ga_session = Session().extract_from_utmb(self._utmb)\n return self.ga_session",
"def get_session():\n assert config.AUTH_URL, \"Environment variable OS_AUTH_URL is not defined\"\n\n def _get_session(auth_url=None,\n username=None,\n password=None,\n project_name=None,\n user_domain_name=None,\n project_domain_name=None):\n auth_url = auth_url or config.AUTH_URL\n username = username or config.USERNAME\n password = password or config.PASSWORD\n project_name = project_name or config.PROJECT_NAME\n user_domain_name = user_domain_name or config.USER_DOMAIN_NAME\n project_domain_name = project_domain_name or config.PROJECT_DOMAIN_NAME\n\n if config.KEYSTONE_API_VERSION == 3:\n\n auth = identity.v3.Password(\n auth_url=auth_url,\n username=username,\n user_domain_name=user_domain_name,\n password=password,\n project_name=project_name,\n project_domain_name=project_domain_name)\n\n elif config.KEYSTONE_API_VERSION == 2:\n\n auth = identity.v2.Password(\n auth_url=auth_url,\n username=username,\n password=password,\n tenant_name=project_name)\n\n else:\n raise ValueError(\"Unexpected keystone API version: {}\".format(\n config.KEYSTONE_API_VERSION))\n\n return _session.Session(auth=auth)\n\n return _get_session",
"def _get_session():\n api_version = \"1.0\"\n originator = \"salt_cloud_{}_driver\".format(__virtualname__)\n url = config.get_cloud_config_value(\n \"url\", get_configured_provider(), __opts__, search_global=False\n )\n user = config.get_cloud_config_value(\n \"user\", get_configured_provider(), __opts__, search_global=False\n )\n password = config.get_cloud_config_value(\n \"password\", get_configured_provider(), __opts__, search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n \"ignore_ssl\",\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False,\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n \"url: %s user: %s password: %s, originator: %s\",\n url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = str(ex.__dict__[\"details\"][1])\n slash_parts = url.split(\"/\")\n new_url = \"/\".join(slash_parts[:2]) + \"/\" + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n \"session is -> url: %s user: %s password: %s, originator:%s\",\n new_url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n return session",
"def load_session():\n\n # try:\n filename = request.json.get('path')\n\n # try:\n data_dict = engine.io.load(filename)\n\n # Load everything into python memory\n state.proc = data_dict[\"proc\"]\n state.corpus = data_dict[\"corpus\"]\n corpus_tree = None\n if state.corpus:\n corpus_tree = state.corpus.get_patient_tree()\n\n state.classifier = data_dict[\"classifier\"]\n state.last_result = data_dict[\"last_result\"]\n\n state.test_corpus = data_dict[\"test_corpus\"]\n finished = data_dict[\"finished\"]\n config = data_dict[\"config\"]\n\n test_corpus_tree = None\n if state.test_corpus:\n test_corpus_tree = state.test_corpus.get_patient_tree()\n\n sectionBreakData = state.proc.section_splitter.get_expression()\n sectionNameData = state.proc.feature_combiner.get_all_expressions_as_dict_with_valid()\n\n keywords = state.proc.feature_extractor.get_keyword_expressions()\n expressions = state.proc.feature_extractor.get_feature_expressions()\n\n # Package everything to feed back to the app.jsx\n out = {\"keywords\": keywords, \"expressions\": expressions, \"finished\": finished, \"config\": config, \"corpus\": corpus_tree, \"lastResult\": state.last_result, \"testCorpus\": test_corpus_tree, \"sectionBreakData\": sectionBreakData, \"sectionNameData\": sectionNameData}\n\n return jsonify(out)\n # except:\n # return \"Could not load session\", 428",
"def __init__(self, config):\n self.config = config\n self.__session = None",
"def load_from_session(cls, serializer: URLSafeSerializer, session):\n return cls.load(serializer, session.get(SESSION_STORE_KEY, None))"
]
| [
"0.6370659",
"0.60339874",
"0.5991928",
"0.58116955",
"0.5775069",
"0.57470316",
"0.564764",
"0.56452173",
"0.5629444",
"0.5588062",
"0.55372334",
"0.545806",
"0.5447498",
"0.5445671",
"0.54302424",
"0.5421576",
"0.5374764",
"0.5332886",
"0.53301954",
"0.53301954",
"0.53228736",
"0.53115165",
"0.5254231",
"0.52471423",
"0.5226685",
"0.5203594",
"0.5201231",
"0.5196325",
"0.5191067",
"0.5168364"
]
| 0.6540114 | 0 |
Loads auth plugin from options in a configuration file section. The auth_kwargs will be passed directly to keystoneauth1 auth plugin and will override the values loaded from config. Note that the accepted kwargs will depend on auth plugin type as defined by [group]auth_type option. Consult keystoneauth1 docs for available auth plugins and their options. | def get_auth(group, **auth_kwargs):
try:
auth = ks_loading.load_auth_from_conf_options(CONF, group,
**auth_kwargs)
except ks_exception.MissingRequiredOptions:
LOG.error('Failed to load auth plugin from group %s', group)
raise
return auth | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_auth_opts(conf, group, service_type=None):\n ks_loading.register_session_conf_options(conf, group)\n ks_loading.register_auth_conf_options(conf, group)\n CONF.set_default('auth_type', default='password', group=group)\n ks_loading.register_adapter_conf_options(conf, group)\n conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group)\n if service_type:\n conf.set_default('service_type', service_type, group=group)\n else:\n types = os_service_types.get_service_types()\n key = 'ironic-inspector' if group == 'inspector' else group\n service_types = types.service_types_by_project.get(key)\n if service_types:\n conf.set_default('service_type', service_types[0], group=group)",
"def load_auth(config_file=None):\n if config_file is None:\n config_file = configure.get_config_path(\"auth\")\n # If finding data automatically, as a fallback, look to see if the auth data is in the main\n # config in the old style.\n if not os.path.exists(config_file):\n main_config_file = configure.get_config_path(\"general\")\n if os.path.exists(main_config_file):\n try:\n return load_auth(main_config_file)\n except RuntimeError:\n # if the main config file does not contain auth data, rewrite the error into a\n # complaint that the auth config file does not exist, since that is what should\n # be fixed\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), config_file)\n # if config.toml does not exist, continue and complain about lack of auth.toml,\n # which is the real issue\n\n if not os.path.exists(config_file):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), config_file)\n # check that the config file has suitable permissions\n config_props = os.stat(config_file)\n dangerous_perm_mask = 0o7077\n if (config_props.st_mode & dangerous_perm_mask) != 0:\n raise RuntimeError(f\"{config_file} has unsafe permissions: {config_props.st_mode:o}\\n\"\n \"Please correct it to 0600\")\n\n # load config\n with open(config_file, \"r\") as f:\n try:\n auth_data = toml.loads(f.read())[\"auth\"]\n except KeyError:\n raise RuntimeError(\"configuration file has no auth section\")\n except Exception as ex:\n raise RuntimeError(f\"configuration file is not configured correctly: {ex}\")\n\n return _interpret_auth_data(auth_data)",
"def load_auth(configfile):\n\n logging.debug('Loading habitica auth data from %s' % configfile)\n\n try:\n cf = open(configfile)\n except IOError:\n logging.error(\"Unable to find '%s'.\" % configfile)\n exit(1)\n\n config = configparser.SafeConfigParser({'checklists': False})\n config.readfp(cf)\n\n cf.close()\n\n # Config name to authentication name mapping\n mapping = {'url': 'url',\n 'login': 'x-api-user',\n 'password': 'x-api-key',\n 'checklists': 'checklists'\n }\n\n # Get data from config\n rv = {}\n try:\n rv = {'url': config.get('Habitica', 'url'),\n 'checklists': config.get('Habitica', 'checklists'),\n 'x-api-user': config.get('Habitica', 'login'),\n 'x-api-key': config.get('Habitica', 'password')}\n for item in mapping:\n rv[mapping[item]] = config.get(SECTION_HABITICA, item)\n\n except configparser.NoSectionError:\n logging.error(\"No '%s' section in '%s'\" % (SECTION_HABITICA,\n configfile))\n exit(1)\n\n except configparser.NoOptionError as e:\n logging.error(\"Missing option in auth file '%s': %s\"\n % (configfile, e.message))\n exit(1)\n\n # Do this after checking for the section.\n load_typo_check(config, mapping, SECTION_HABITICA, configfile)\n\n # Return auth data as a dictionnary\n return rv",
"def update_auth_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section(\"auth\"):\n rconfig.add_section(\"auth\")\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n tempest_roles = []\n if rconfig.has_option(\"auth\", \"tempest_roles\"):\n tempest_roles = functest_utils.convert_ini_to_list(\n rconfig.get(\"auth\", \"tempest_roles\"))\n rconfig.set(\n 'auth', 'tempest_roles',\n functest_utils.convert_list_to_ini(\n [env.get(\"NEW_USER_ROLE\")] + tempest_roles))\n if not json.loads(env.get(\"USE_DYNAMIC_CREDENTIALS\").lower()):\n rconfig.set('auth', 'use_dynamic_credentials', False)\n account_file = os.path.join(\n getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')\n assert os.path.exists(\n account_file), f\"{account_file} doesn't exist\"\n rconfig.set('auth', 'test_accounts_file', account_file)\n if env.get('NO_TENANT_NETWORK').lower() == 'true':\n rconfig.set('auth', 'create_isolated_networks', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def add_auth_opts(options, service_type=None):\n def add_options(opts, opts_to_add):\n for new_opt in opts_to_add:\n for opt in opts:\n if opt.name == new_opt.name:\n break\n else:\n opts.append(new_opt)\n\n opts = copy.deepcopy(options)\n opts.insert(0, ks_loading.get_auth_common_conf_options()[0])\n # NOTE(dims): There are a lot of auth plugins, we just generate\n # the config options for a few common ones\n plugins = ['password', 'v2password', 'v3password']\n for name in plugins:\n plugin = ks_loading.get_plugin_loader(name)\n add_options(opts, ks_loading.get_auth_plugin_conf_options(plugin))\n add_options(opts, ks_loading.get_session_conf_options())\n if service_type:\n adapter_opts = ks_loading.get_adapter_conf_options(\n include_deprecated=False)\n # adding defaults for valid interfaces\n cfg.set_defaults(adapter_opts, service_type=service_type,\n valid_interfaces=DEFAULT_VALID_INTERFACES)\n add_options(opts, adapter_opts)\n opts.sort(key=lambda x: x.name)\n return opts",
"def from_settings(cls, settings={}, prefix=\"srpauth.\", **kwds):\n # Grab out all the settings keys that start with our prefix.\n auth_settings = {}\n for name, value in settings.iteritems():\n if not name.startswith(prefix):\n continue\n auth_settings[name[len(prefix):]] = value\n # Update with any additional keyword arguments.\n auth_settings.update(kwds)\n # Now look for specific keys of interest.\n maybe_resolve = DottedNameResolver(None).maybe_resolve\n # You must specify a realm.\n if \"realm\" not in auth_settings:\n raise ValueError(\"pyramid_srpauth: you must specify the realm\")\n # NonceManager can be specified as class or instance name.\n nonce_manager = maybe_resolve(auth_settings.get(\"nonce_manager\"))\n if callable(nonce_manager):\n nonce_manager = nonce_manager()\n auth_settings[\"nonce_manager\"] = nonce_manager\n # get_password can be dotted name of a callable\n get_password = maybe_resolve(auth_settings.get(\"get_password\"))\n if get_password is not None:\n assert callable(get_password)\n auth_settings[\"get_password\"] = get_password\n # get_verifier can be dotted name of a callable\n get_verifier = maybe_resolve(auth_settings.get(\"get_verifier\"))\n if get_verifier is not None:\n assert callable(get_verifier)\n auth_settings[\"get_verifier\"] = get_verifier\n # groupfinder can be dotted name of a callable\n groupfinder = maybe_resolve(auth_settings.get(\"groupfinder\"))\n if groupfinder is not None:\n assert callable(groupfinder)\n auth_settings[\"groupfinder\"] = groupfinder\n # OK, the rest should just be keyword arguments.\n return cls(**auth_settings)",
"def load(self, config):\n try:\n groups = config.get(\"auth-impl\", \"groups\").split(\",\")\n except Exception as e:\n log.critical(\"Could not read the groups parameter {0}\".format(e))\n return False\n self.user = User(getpass.getuser(), groups)\n return True",
"def _load_config(self, args: argparse.Namespace):\n #\n # Load a config, filename may or may-not be provided...\n #\n try:\n self._config = TortugaScriptConfig.load(args.config)\n\n except ConfigException as ex:\n print(str(ex))\n sys.exit(0)\n\n #\n # Override the config with any provided argument values\n #\n if args.url:\n self._config.url = args.url\n if args.username:\n self._config.username = args.username\n if args.password:\n self._config.password = args.password\n if args.token:\n self._config.token = args.token\n self._config.verify = args.verify",
"def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)",
"def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)",
"def get_auth(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Auth\"\r\n auth_c = getattr(netius.auth, name_f)\r\n return auth_c",
"def load_from_options(self, **kwargs):\n missing_required = [o for o in self.get_options()\n if o.required and kwargs.get(o.dest) is None]\n\n if missing_required:\n raise exceptions.MissingRequiredOptions(missing_required)\n\n return self.create_plugin(**kwargs)",
"def _authenticate(self):\n cred_file = self.module.params.pop('config_file', None)\n section = self.module.params.pop('section')\n self._env_vars(cred_file=cred_file, section=section)\n\n required_vars = ['login_url', 'login_user', 'login_password']\n variables = [\n 'login_url',\n 'login_user',\n 'login_password',\n 'login_tenant_name',\n 'region',\n 'auth_version',\n 'snet'\n ]\n variables_dict = self._get_vars(variables, required=required_vars)\n\n login_url = variables_dict.pop('login_url')\n login_user = variables_dict.pop(\n 'login_user', os.getenv('OS_AUTH_URL')\n )\n login_password = variables_dict.pop(\n 'login_password', os.getenv('OS_AUTH_URL')\n )\n login_tenant_name = variables_dict.pop(\n 'login_tenant_name', os.getenv('OS_TENANT_ID')\n )\n region = variables_dict.pop('region', None)\n\n auth_version = variables_dict.pop('auth_version')\n snet = variables_dict.pop('snet', None)\n\n if snet in BOOLEANS_TRUE:\n snet = True\n else:\n snet = None\n\n if login_password is None:\n self.failure(\n error='Missing Password',\n rc=2,\n msg='A Password is required for authentication. Try adding'\n ' [ login_password ] to the task'\n )\n\n if login_tenant_name is None:\n login_tenant_name = ' '\n\n creds_dict = {\n 'user': login_user,\n 'key': login_password,\n 'authurl': login_url,\n 'tenant_name': login_tenant_name,\n 'os_options': {\n 'region': region\n },\n 'snet': snet,\n 'auth_version': auth_version\n }\n\n self.swift = client.Connection(**creds_dict)",
"def FromConfig(section, debug=None):\n try:\n return DBMySQLAuth(host = section[MYSQL_HOST],\n user = section[MYSQL_USER],\n password = section[MYSQL_PASSWORD],\n database = section[MYSQL_DATABASE],\n debug = debug)\n except KeyError as e:\n pass\n raise KeyError(f\"config file section must have {MYSQL_HOST}, {MYSQL_USER}, {MYSQL_PASSWORD} and {MYSQL_DATABASE} options in section {section}. Only options found: {list(section.keys())}\")",
"def __init__(self, config_file):\n defaults = {'kmsauth_user_key': None, 'logging_level_option': 'INFO'}\n ConfigParser.RawConfigParser.__init__(self, defaults=defaults)\n self.read(config_file)\n\n if not self.has_section(SECTION):\n raise ValueError(\n \"Missing {0} configuration section.\".format(SECTION)\n )\n\n for option in ['kmsauth_key', 'kmsauth_to_context']:\n if not self.has_option(SECTION, option):\n raise ValueError(\"{0} not set.\".format(option))",
"def LoadConfig(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"loadConfig\", payload=payload, response_object=None)",
"def get_auth():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n apikey = config.get('auth', 'apikey')\n return (\"apikey\", apikey)",
"async def get_plugin_config(self, **kwargs) -> Any:\n namespace = self._get_namespace(**kwargs)\n return await self.AD.plugins.get_plugin_meta(namespace)",
"def _load_from_conf(self, parser, section, db, conf_dir, cloud_confs, conf_file):\n\n iaas = config_get_or_none(parser, section, \"iaas\", self.iaas)\n iaas_url = config_get_or_none(parser, section, \"iaas_url\", self.iaas_url)\n\n sshkey = config_get_or_none(parser, section, \"sshkeyname\", self.keyname)\n localssh = config_get_or_none(parser, section, \"localsshkeypath\", self.localkey)\n ssh_user = config_get_or_none(parser, section, \"ssh_username\", self.username)\n scp_user = config_get_or_none(parser, section, \"scp_username\", self.scp_username)\n bootconf = config_get_or_none(parser, section, \"bootconf\", self.bootconf)\n bootpgm = config_get_or_none(parser, section, \"bootpgm\", self.bootpgm)\n bootpgm_args = config_get_or_none(parser, section, \"bootpgm_args\", self.bootpgm_args)\n hostname = config_get_or_none(parser, section, \"hostname\", self.hostname)\n readypgm = config_get_or_none(parser, section, \"readypgm\", self.readypgm)\n readypgm_args = config_get_or_none(parser, section, \"readypgm_args\", self.readypgm_args)\n iaas_key = config_get_or_none(parser, section, \"iaas_key\", self.iaas_key)\n iaas_secret = config_get_or_none(parser, section, \"iaas_secret\", self.iaas_secret)\n securitygroups = config_get_or_none(parser, section, \"securitygroups\", self.securitygroups)\n\n terminatepgm = config_get_or_none(parser, section, \"terminatepgm\", self.terminatepgm)\n terminatepgm_args = config_get_or_none(parser, section, \"terminatepgm_args\", self.terminatepgm_args)\n\n pgm_timeout = config_get_or_none(parser, section, \"pgm_timeout\", self.pgm_timeout)\n\n local_exe = config_get_or_none_bool(parser, section, \"local_exe\", self.local_exe)\n\n\n allo = config_get_or_none(parser, section, \"allocation\", self.allocation)\n image = config_get_or_none(parser, section, \"image\", self.image)\n cloudconf = config_get_or_none(parser, section, \"cloud\")\n if cloudconf:\n try:\n conf = cloud_confs[cloudconf]\n except:\n raise APIUsageException(\"%s is not a valud cloud description in this plan\" % (cloudconf))\n\n if not iaas:\n iaas = conf.iaas\n if not iaas_url:\n iaas_url = conf.iaas_url\n if not sshkey:\n sshkey = conf.sshkey\n if not localssh:\n localssh = conf.localssh\n if not ssh_user:\n ssh_user = conf.ssh_user\n if not scp_user:\n scp_user = conf.scp_user\n if not iaas_key:\n iaas_key = conf.iaas_key\n if not iaas_secret:\n iaas_secret = conf.iaas_secret\n if not securitygroups:\n securitygroups = conf.securitygroups\n\n if not iaas:\n iaas = db.default_iaas\n if not iaas_url:\n iaas_url = db.default_iaas_url\n if not allo:\n allo = db.default_allo\n if not sshkey:\n sshkey = db.default_sshkey\n if not localssh:\n localssh = db.default_localssh\n if not ssh_user:\n ssh_user = db.default_ssh_user\n if not scp_user:\n scp_user = db.default_scp_user\n if not iaas_key:\n iaas_key = db.default_iaas_key\n if not iaas_secret:\n iaas_secret = db.default_iaas_secret\n if not securitygroups:\n securitygroups = db.default_securitygroups\n if not image:\n image = db.default_image\n if not bootconf:\n bootconf = db.default_bootconf\n if not bootpgm:\n bootpgm = db.default_bootpgm\n if not bootpgm_args:\n bootpgm_args = db.default_bootpgm_args\n if not readypgm:\n readypgm = db.default_readypgm\n if not readypgm_args:\n readypgm_args = db.default_readypgm_args\n if not terminatepgm:\n terminatepgm = db.default_terminatepgm\n if not terminatepgm_args:\n terminatepgm_args = db.default_terminatepgm_args\n if not pgm_timeout:\n pgm_timeout = db.default_pgm_timeout\n\n if not 
local_exe:\n local_exe = db.default_local_exe\n\n\n self.image = image\n self.bootconf = _resolve_file_or_none(conf_dir, bootconf, conf_file)\n self.bootpgm = _resolve_file_or_none(conf_dir, bootpgm, conf_file, has_args=True)\n self.bootpgm_args = bootpgm_args\n self.terminatepgm = _resolve_file_or_none(conf_dir, terminatepgm, conf_file, has_args=True)\n self.terminatepgm_args = terminatepgm_args\n self.pgm_timeout = pgm_timeout\n self.local_exe = local_exe\n\n self.hostname = hostname\n self.readypgm = _resolve_file_or_none(conf_dir, readypgm, conf_file, has_args=True)\n self.readypgm_args = readypgm_args\n self.username = ssh_user\n self.scp_username = scp_user\n self.localkey = _resolve_file_or_none(conf_dir, localssh, conf_file)\n self.keyname = sshkey\n self.allocation = allo\n self.iaas = iaas\n self.iaas_url = iaas_url\n\n self.iaas_secret = iaas_secret\n self.iaas_key = iaas_key\n self.securitygroups = securitygroups\n\n x = config_get_or_none(parser, section, \"iaas_launch\")\n if x:\n if x.lower() == 'true':\n self.iaas_launch = True\n else:\n self.iaas_launch = False\n else:\n if self.hostname:\n self.iaas_launch = False\n else:\n self.iaas_launch = True\n\n # allow the plan to over ride the default image if they want to use a hostname\n if self.iaas_launch is False:\n self.image = None\n\n item_list = parser.items(section)\n deps_list = []\n for (ka,val) in item_list:\n ndx = ka.find(\"deps\")\n if ndx == 0:\n deps_list.append(ka)\n deps_list.sort()\n for i in deps_list:\n deps = config_get_or_none(parser, section, i)\n deps_file = _resolve_file_or_none(conf_dir, deps, conf_file)\n if deps_file:\n parser2 = ConfigParser.ConfigParser()\n parser2.read(deps_file)\n keys_val = parser2.items(\"deps\")\n for (ka,val) in keys_val:\n val2 = config_get_or_none(parser2, \"deps\", ka)\n if val2 is not None:\n bao = BagAttrsObject(ka, val2)\n self.attrs.append(bao)",
"def configureAuthenticationScript(self, enabled=True, getUserInfoTTL= \"10s\", getUsersTTL = \"1min\", userLoginTTL = \"30s\", set_timing_only_if_necessary=True):\r\n \r\n # Get the existing entity if it exists\r\n try:\r\n en = entity.getEntity(RadiusAuthRestHandler.REST_AUTH_PROVIDERS, \"radius_auth_script\", namespace=RadiusAuthRestHandler.APP_NAME, owner=\"nobody\", sessionKey = self.getSessionKey() )\r\n \r\n self.clearValue(en, 'disabled')\r\n self.clearValue(en, 'getUserInfoTTL')\r\n self.clearValue(en, 'getUsersTTL')\r\n self.clearValue(en, 'userLoginTTL')\r\n \r\n except splunk.ResourceNotFound:\r\n en = entity.getEntity(RadiusAuthRestHandler.REST_AUTH_PROVIDERS, \"_new\", namespace=RadiusAuthRestHandler.APP_NAME, owner=\"nobody\", sessionKey = self.getSessionKey() )\r\n en['name'] = \"radius_auth_script\"\r\n en.owner = \"nobody\"\r\n \r\n # Create the path to python\r\n python_path = os.path.join( \"$SPLUNK_HOME\", \"bin\", \"python\" )\r\n \r\n # Create the path to auth script\r\n radius_auth = os.path.join( \"$SPLUNK_HOME\", \"etc\", \"apps\", RadiusAuthRestHandler.APP_NAME, \"bin\", RadiusAuthRestHandler.AUTH_SCRIPT_FILE )\r\n \r\n # Set the script path should look something like:\r\n # scriptPath = $SPLUNK_HOME/bin/python $SPLUNK_HOME/bin/<scriptname.py>\r\n en['scriptPath'] = '\"' + python_path + '\"' + ' \"' + radius_auth + '\"'\r\n \r\n # Set the cache timing\r\n if enabled:\r\n en['getUserInfoTTL'] = getUserInfoTTL\r\n en['getUsersTTL'] = getUsersTTL\r\n en['userLoginTTL'] = userLoginTTL\r\n \r\n # Set the entity\r\n entity.setEntity( en, sessionKey = self.getSessionKey() )\r\n \r\n # Set the entity status\r\n self.setAuthenticationScriptStatus(enabled)\r\n \r\n # Log that the script status was updated\r\n logger.info(\"Authentication script configured, enabled=%r\" % (enabled) )",
"def sufficient_options(self):\n has_token = self.opts.get('token')\n has_project_domain_or_tenant = (self.opts.get('project_id') or\n (self.opts.get('project_name') and\n (self.opts.get('user_domain_name') or\n self.opts.get('user_domain_id'))) or\n (self.opts.get('tenant_id') or\n self.opts.get('tenant_name')))\n has_credential = (self.opts.get('username')\n and has_project_domain_or_tenant\n and self.opts.get('password')\n and self.opts.get('auth_url'))\n missing = not (has_token or has_credential)\n if missing:\n missing_opts = []\n opts = ['token', 'endpoint', 'username', 'password', 'auth_url',\n 'tenant_id', 'tenant_name']\n for opt in opts:\n if not self.opts.get(opt):\n missing_opts.append(opt)\n raise exceptions.AuthPluginOptionsMissing(missing_opts)",
"def _init_auth(\n self,\n auth: t.Optional[AuthModel] = None,\n log_level: t.Union[str, int] = LOG_LEVEL_AUTH,\n ) -> AuthModel:\n if not isinstance(auth, AuthModel):\n if self.CREDENTIALS:\n auth: AuthCredentials = AuthCredentials(\n username=self.__key,\n password=self.__secret,\n http=self.http,\n log_level=log_level,\n )\n else:\n auth: AuthApiKey = AuthApiKey(\n key=self.__key,\n secret=self.__secret,\n http=self.http,\n log_level=log_level,\n )\n return self._check_binding(auth)",
"def auth(self):\n return self._auth_config",
"def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)",
"def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')",
"def getconfig(self):\n self.cmdargs.parse_args(self.args)\n config = self._getconfig(self.sources)\n\n if self.needlogin:\n config.credentials = { \n k: getattr(config, self.credentialKey[k].name)\n for k in self.authenticatorInfo.getCredentialKeys(config.auth)\n }\n\n config._freeze_varnames()\n return (self.client, config)",
"def load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])",
"def _authenticate_from_file(self, credentials):\n self._gauth.LoadCredentialsFile(credentials)",
"def _authenticate(config):\n if hasattr(config, \"password\"):\n connect_kwargs = {\"password\": config.password}\n elif hasattr(config, \"ssh_key\"):\n connect_kwargs = {\"key_filename\": config.ssh_key}\n else:\n password = getpass(f\"Password for {config.hostname}: \")\n if password:\n connect_kwargs = {\"password\": password}\n else:\n connect_kwargs = None\n return connect_kwargs",
"def auth_config(self) -> 'outputs.AuthConfigResponse':\n return pulumi.get(self, \"auth_config\")"
]
| [
"0.57892317",
"0.56229365",
"0.5574044",
"0.54717857",
"0.5448292",
"0.53054255",
"0.5275161",
"0.51765996",
"0.51333886",
"0.5125494",
"0.50944304",
"0.5049857",
"0.50348556",
"0.5000134",
"0.49427214",
"0.49023804",
"0.4900094",
"0.4887436",
"0.48787495",
"0.4849195",
"0.48404682",
"0.47774822",
"0.47424778",
"0.47061118",
"0.47044468",
"0.4672765",
"0.46617293",
"0.46610272",
"0.46599713",
"0.46470565"
]
| 0.6809341 | 0 |
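The get_auth row above describes loading a keystoneauth1 auth plugin from a configuration file section. As an illustrative sketch only (not part of the dataset), the snippet below shows how that pattern is typically wired up with oslo.config: the group's options are registered first, then the auth plugin and a session are loaded from the same section. The group name 'service_user' and the helper name are assumptions for illustration, not taken from the row.

from keystoneauth1 import loading as ks_loading
from oslo_config import cfg

CONF = cfg.CONF
GROUP = 'service_user'  # hypothetical [service_user] section in the config file

# Register the standard session/auth options for the group before the
# config file is parsed, so the load_* helpers can find them.
ks_loading.register_session_conf_options(CONF, GROUP)
ks_loading.register_auth_conf_options(CONF, GROUP)

def load_auth_and_session(**auth_kwargs):
    # Keyword overrides take precedence over the values read from the
    # [service_user] section, mirroring the behaviour described above.
    auth = ks_loading.load_auth_from_conf_options(CONF, GROUP, **auth_kwargs)
    session = ks_loading.load_session_from_conf_options(CONF, GROUP, auth=auth)
    return auth, session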
Loads adapter from options in a configuration file section. The adapter_kwargs will be passed directly to keystoneauth1 Adapter and will override the values loaded from config. Consult keystoneauth1 docs for available adapter options. | def get_adapter(group, **adapter_kwargs):
return ks_loading.load_adapter_from_conf_options(CONF, group,
**adapter_kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_adapter_config(self):\n proxy = self.core.get_proxy('/')\n try:\n config = proxy.get('/adapters/' + self.adapter_name)\n return config\n except KeyError:\n return None",
"def set_adapter_config(config):\n if not isinstance(config, dict):\n raise TypeError(f\"The input argument of 'set_adapter_config' should be a dict, but got {config}.\")\n for key, value in config.items():\n if key == \"Tensor\":\n ms_adapter_registry.register_tensor(value)\n elif key == \"Parameter\":\n ms_adapter_registry.register_parameter(value)\n elif key == \"convert_object_map\":\n ms_adapter_registry.register_convert_map(value)\n else:\n raise ValueError(f\"Unsupported key in adapter config: {key}\")",
"def get_adapter(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Adapter\"\r\n adapter_c = getattr(netius.adapters, name_f)\r\n adapter = adapter_c(*args, **kwargs)\r\n return adapter",
"def test_adapter_opts_set(self):\n conn = self._get_conn()\n\n discovery = {\n \"versions\": {\n \"values\": [\n {\n \"status\": \"stable\",\n \"updated\": \"2019-06-01T00:00:00Z\",\n \"media-types\": [\n {\n \"base\": \"application/json\",\n \"type\": \"application/vnd.openstack.heat-v2+json\", # noqa: E501\n }\n ],\n \"id\": \"v2.0\",\n \"links\": [\n {\n \"href\": \"https://example.org:8888/heat/v2\",\n \"rel\": \"self\",\n }\n ],\n }\n ]\n }\n }\n self.register_uris(\n [\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2',\n json=discovery,\n ),\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2/foo',\n json={'foo': {}},\n ),\n ]\n )\n\n adap = conn.orchestration\n self.assertEqual('SpecialRegion', adap.region_name)\n self.assertEqual('orchestration', adap.service_type)\n self.assertEqual('internal', adap.interface)\n self.assertEqual(\n 'https://example.org:8888/heat/v2', adap.endpoint_override\n )\n\n adap.get('/foo')\n self.assert_calls()",
"def load(config_file, verify=True):\n with open(config_file or 'tern.yml') as fh:\n config = yaml.load(fh.read())\n\n Adapter = extract_adapter(import_module(config['adapter']['module']))\n del config['adapter']['module']\n adapter = Adapter(**config['adapter'])\n adapter.open()\n tern = Tern(adapter, config['directory'], verify=verify)\n return config, tern, adapter",
"def get_adapters_config(config_data):\n adapters = {k: config_data[k] for k in ('adapters',)}\n adapters = adapters['adapters']\n return adapters",
"def __init__(self, vendor, generic_config, adapter_config):\n self.set_generic_config(**generic_config)\n self.em_adapter = construct_adapter(vendor, module_type='em', **adapter_config)",
"def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]",
"def setup_config(self, cfg: [dict, str, None] = None):\n if isinstance(cfg, str):\n print(f\"Loading config from file: {cfg}\")\n cfg = json.loads(open(cfg, \"r\").read())\n self.configure_network(cfg)\n self.configure_codegen(cfg)\n self.configure_jiff(cfg)\n\n return self",
"def override_config(cfg, **kwargs):\n try:\n cfg.aad_config(**kwargs)\n\n except InvalidConfigException:\n pass\n\n finally:\n return cfg",
"def test_adapter_required(self):\n from fixtures.test_adapter import TestAdapter\n from pyperry import errors\n class Test(pyperry.Base):\n def _config(cls):\n cls.configure('read', poop='smells')\n\n self.assertRaises(errors.ConfigurationError, Test.adapter, 'read')",
"def test_default_adapter_opts(self):\n conn = self._get_conn()\n\n server_id = str(uuid.uuid4())\n server_name = self.getUniqueString('name')\n fake_server = fakes.make_fake_server(server_id, server_name)\n\n self.register_uris(\n [\n self.get_nova_discovery_mock_dict(),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute', 'public', append=['servers', 'detail']\n ),\n json={'servers': [fake_server]},\n ),\n ]\n )\n\n # Nova has empty adapter config, so these default\n adap = conn.compute\n self.assertIsNone(adap.region_name)\n self.assertEqual('compute', adap.service_type)\n self.assertEqual('public', adap.interface)\n self.assertIsNone(adap.endpoint_override)\n\n s = next(adap.servers())\n self.assertEqual(s.id, server_id)\n self.assertEqual(s.name, server_name)\n self.assert_calls()",
"def __init__(self, **kwargs):\n # Intialise superclass\n super(FileInterfaceAdapter, self).__init__(**kwargs)\n self.abs_directory = str(self.options.get('absolute_directory'))\n self.fileInterface = FileInterface(self.abs_directory)\n\n logging.debug('FileInterface Adapter loaded')",
"def load_from_conf(self):\n raise NotImplementedError",
"def from_configuration(cls, **kwargs):\n return cls(**kwargs)",
"def __init__(self, name=None, **kwargs):\n self._params = extract_constructor_params(locals(), verbose=False)\n super(Adapter, self).__init__(name=name)",
"def FromConfig(section, debug=None):\n try:\n return DBMySQLAuth(host = section[MYSQL_HOST],\n user = section[MYSQL_USER],\n password = section[MYSQL_PASSWORD],\n database = section[MYSQL_DATABASE],\n debug = debug)\n except KeyError as e:\n pass\n raise KeyError(f\"config file section must have {MYSQL_HOST}, {MYSQL_USER}, {MYSQL_PASSWORD} and {MYSQL_DATABASE} options in section {section}. Only options found: {list(section.keys())}\")",
"def load_from_conf(self):\r\n raise NotImplementedError",
"def load_gateways_config(self):\n raise NotImplementedError",
"def load_api_config():\n with open(Config.config_file, 'r', encoding=\"utf-8\") as file_config:\n conf = json.loads(file_config.read())\n\n return OAuth2(client_id=conf[\"falcon_client_id\"],\n client_secret=conf[\"falcon_client_secret\"]\n )",
"def load_config(self):\n config = dict([(key, value) for key, value in iteritems(self.options)\n if key in self.cfg.settings and value is not None])\n for key, value in iteritems(config):\n self.cfg.set(key.lower(), value)",
"def test_configuration(self):\n self.assertEqual(self.Test.adapter_config['write'],\n { 'adapter': TestAdapter, 'foo': 'bar' })",
"def loader(config_dict, engine): # (Need to match function signature) pylint: disable=unused-argument\n config = configobj.ConfigObj(config_dict)\n return MQTTSubscribeDriver(**config[DRIVER_NAME])",
"def __init__(self, config_file: str = \"config.json\"):\n path_to_config = (Path(sys.modules[self.__module__].__file__).parent\n / config_file)\n with open(path_to_config, \"r\") as f:\n self.options = json.load(f)",
"def _load_from_conf(self, parser, section, db, conf_dir, cloud_confs, conf_file):\n\n iaas = config_get_or_none(parser, section, \"iaas\", self.iaas)\n iaas_url = config_get_or_none(parser, section, \"iaas_url\", self.iaas_url)\n\n sshkey = config_get_or_none(parser, section, \"sshkeyname\", self.keyname)\n localssh = config_get_or_none(parser, section, \"localsshkeypath\", self.localkey)\n ssh_user = config_get_or_none(parser, section, \"ssh_username\", self.username)\n scp_user = config_get_or_none(parser, section, \"scp_username\", self.scp_username)\n bootconf = config_get_or_none(parser, section, \"bootconf\", self.bootconf)\n bootpgm = config_get_or_none(parser, section, \"bootpgm\", self.bootpgm)\n bootpgm_args = config_get_or_none(parser, section, \"bootpgm_args\", self.bootpgm_args)\n hostname = config_get_or_none(parser, section, \"hostname\", self.hostname)\n readypgm = config_get_or_none(parser, section, \"readypgm\", self.readypgm)\n readypgm_args = config_get_or_none(parser, section, \"readypgm_args\", self.readypgm_args)\n iaas_key = config_get_or_none(parser, section, \"iaas_key\", self.iaas_key)\n iaas_secret = config_get_or_none(parser, section, \"iaas_secret\", self.iaas_secret)\n securitygroups = config_get_or_none(parser, section, \"securitygroups\", self.securitygroups)\n\n terminatepgm = config_get_or_none(parser, section, \"terminatepgm\", self.terminatepgm)\n terminatepgm_args = config_get_or_none(parser, section, \"terminatepgm_args\", self.terminatepgm_args)\n\n pgm_timeout = config_get_or_none(parser, section, \"pgm_timeout\", self.pgm_timeout)\n\n local_exe = config_get_or_none_bool(parser, section, \"local_exe\", self.local_exe)\n\n\n allo = config_get_or_none(parser, section, \"allocation\", self.allocation)\n image = config_get_or_none(parser, section, \"image\", self.image)\n cloudconf = config_get_or_none(parser, section, \"cloud\")\n if cloudconf:\n try:\n conf = cloud_confs[cloudconf]\n except:\n raise APIUsageException(\"%s is not a valud cloud description in this plan\" % (cloudconf))\n\n if not iaas:\n iaas = conf.iaas\n if not iaas_url:\n iaas_url = conf.iaas_url\n if not sshkey:\n sshkey = conf.sshkey\n if not localssh:\n localssh = conf.localssh\n if not ssh_user:\n ssh_user = conf.ssh_user\n if not scp_user:\n scp_user = conf.scp_user\n if not iaas_key:\n iaas_key = conf.iaas_key\n if not iaas_secret:\n iaas_secret = conf.iaas_secret\n if not securitygroups:\n securitygroups = conf.securitygroups\n\n if not iaas:\n iaas = db.default_iaas\n if not iaas_url:\n iaas_url = db.default_iaas_url\n if not allo:\n allo = db.default_allo\n if not sshkey:\n sshkey = db.default_sshkey\n if not localssh:\n localssh = db.default_localssh\n if not ssh_user:\n ssh_user = db.default_ssh_user\n if not scp_user:\n scp_user = db.default_scp_user\n if not iaas_key:\n iaas_key = db.default_iaas_key\n if not iaas_secret:\n iaas_secret = db.default_iaas_secret\n if not securitygroups:\n securitygroups = db.default_securitygroups\n if not image:\n image = db.default_image\n if not bootconf:\n bootconf = db.default_bootconf\n if not bootpgm:\n bootpgm = db.default_bootpgm\n if not bootpgm_args:\n bootpgm_args = db.default_bootpgm_args\n if not readypgm:\n readypgm = db.default_readypgm\n if not readypgm_args:\n readypgm_args = db.default_readypgm_args\n if not terminatepgm:\n terminatepgm = db.default_terminatepgm\n if not terminatepgm_args:\n terminatepgm_args = db.default_terminatepgm_args\n if not pgm_timeout:\n pgm_timeout = db.default_pgm_timeout\n\n if not 
local_exe:\n local_exe = db.default_local_exe\n\n\n self.image = image\n self.bootconf = _resolve_file_or_none(conf_dir, bootconf, conf_file)\n self.bootpgm = _resolve_file_or_none(conf_dir, bootpgm, conf_file, has_args=True)\n self.bootpgm_args = bootpgm_args\n self.terminatepgm = _resolve_file_or_none(conf_dir, terminatepgm, conf_file, has_args=True)\n self.terminatepgm_args = terminatepgm_args\n self.pgm_timeout = pgm_timeout\n self.local_exe = local_exe\n\n self.hostname = hostname\n self.readypgm = _resolve_file_or_none(conf_dir, readypgm, conf_file, has_args=True)\n self.readypgm_args = readypgm_args\n self.username = ssh_user\n self.scp_username = scp_user\n self.localkey = _resolve_file_or_none(conf_dir, localssh, conf_file)\n self.keyname = sshkey\n self.allocation = allo\n self.iaas = iaas\n self.iaas_url = iaas_url\n\n self.iaas_secret = iaas_secret\n self.iaas_key = iaas_key\n self.securitygroups = securitygroups\n\n x = config_get_or_none(parser, section, \"iaas_launch\")\n if x:\n if x.lower() == 'true':\n self.iaas_launch = True\n else:\n self.iaas_launch = False\n else:\n if self.hostname:\n self.iaas_launch = False\n else:\n self.iaas_launch = True\n\n # allow the plan to over ride the default image if they want to use a hostname\n if self.iaas_launch is False:\n self.image = None\n\n item_list = parser.items(section)\n deps_list = []\n for (ka,val) in item_list:\n ndx = ka.find(\"deps\")\n if ndx == 0:\n deps_list.append(ka)\n deps_list.sort()\n for i in deps_list:\n deps = config_get_or_none(parser, section, i)\n deps_file = _resolve_file_or_none(conf_dir, deps, conf_file)\n if deps_file:\n parser2 = ConfigParser.ConfigParser()\n parser2.read(deps_file)\n keys_val = parser2.items(\"deps\")\n for (ka,val) in keys_val:\n val2 = config_get_or_none(parser2, \"deps\", ka)\n if val2 is not None:\n bao = BagAttrsObject(ka, val2)\n self.attrs.append(bao)",
"def create(self, adapter, **kargs):\n res_type = adapter.getResourceType()\n\n if res_type == ProofConstants.MYSQL:\n host = kargs.get('host', '')\n username = kargs.get('username', '')\n password = kargs.get('password', '')\n dbname = kargs.get('dbname', '')\n logger = kargs.get('logger', None)\n return MySQLDataSource.MySQLDataSource( host,\n username,\n password,\n dbname,\n logger )\n else:\n return None",
"def load_config(self):\n pass",
"def load_config(self, config_file, usage):\n config = configparser.ConfigParser()\n config.read(config_file)\n auth_id = config.get('SMARTY STREETS', 'auth_id' )\n auth_token = config.get('SMARTY STREETS', 'auth_token')\n api_credentials = StaticCredentials(auth_id, auth_token)\n client_builder = ClientBuilder(api_credentials)\n if usage == 'batch': \n client_builder.with_custom_header( {'Connection':'keep-alive'} )\n \n self.client = client_builder.build_us_street_api_client()",
"def from_config_plan(cls,\n model_cfg: dict,\n plan_arch: dict,\n plan_anchors: dict,\n log_num_anchors: str = None,\n **kwargs,\n ):\n raise NotImplementedError",
"def load(self, config_instance):\r\n pass"
]
| [
"0.59922737",
"0.55616415",
"0.5418735",
"0.53291434",
"0.52491",
"0.5221085",
"0.5214442",
"0.51421666",
"0.50582564",
"0.50537366",
"0.49490044",
"0.4938976",
"0.49134752",
"0.487292",
"0.4858089",
"0.48499215",
"0.48398662",
"0.48328328",
"0.48183888",
"0.4811639",
"0.47856453",
"0.47800457",
"0.4750625",
"0.47176284",
"0.46862176",
"0.46855056",
"0.46621415",
"0.46587133",
"0.46517923",
"0.46487728"
]
| 0.673449 | 0 |
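The get_adapter row above loads a keystoneauth1 Adapter from a config section. Below is a minimal sketch of the same idea, assuming a hypothetical [glance] section and an already-built session; adapter_kwargs such as region_name or interface override whatever the config file provides. It is not part of the dataset.

from keystoneauth1 import loading as ks_loading
from oslo_config import cfg

CONF = cfg.CONF
GROUP = 'glance'  # hypothetical config section for the image service

# Adapter options (service_type, valid_interfaces, region_name, ...) must be
# registered for the group before they can be loaded from config.
ks_loading.register_adapter_conf_options(CONF, GROUP)

def get_image_adapter(session, **adapter_kwargs):
    # Explicit keyword arguments override the [glance] section values.
    return ks_loading.load_adapter_from_conf_options(
        CONF, GROUP, session=session, **adapter_kwargs)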
Get an endpoint from an adapter. The adapter_kwargs will be passed directly to keystoneauth1 Adapter and will override the values loaded from config. Consult keystoneauth1 docs for available adapter options. | def get_endpoint(group, **adapter_kwargs):
result = get_adapter(group, **adapter_kwargs).get_endpoint()
if not result:
service_type = adapter_kwargs.get(
'service_type',
getattr(getattr(CONF, group), 'service_type', group))
endpoint_type = adapter_kwargs.get('endpoint_type', 'internal')
raise exception.CatalogNotFound(
service_type=service_type, endpoint_type=endpoint_type)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_adapter_config(self):\n proxy = self.core.get_proxy('/')\n try:\n config = proxy.get('/adapters/' + self.adapter_name)\n return config\n except KeyError:\n return None",
"def get_adapter(group, **adapter_kwargs):\n return ks_loading.load_adapter_from_conf_options(CONF, group,\n **adapter_kwargs)",
"def _get_endpoint(ks_session, **kwargs):\n # set service specific endpoint types\n endpoint_type = kwargs.get('endpoint_type') or 'publicURL'\n service_type = kwargs.get('service_type') or 'monitoring'\n\n endpoint = ks_session.get_endpoint(service_type=service_type,\n interface=endpoint_type,\n region_name=kwargs.get('region_name'))\n\n return endpoint",
"def get_endpoint(self, session, **kwargs):\n return kwargs.get('endpoint_override') or self.endpoint",
"def get_endpoint(self, session, **kwargs):\n endpoint_data = self.get_endpoint_data(\n session, discover_versions=False, **kwargs)\n if not endpoint_data:\n return None\n return endpoint_data.url",
"def get_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover",
"def get_endpoint(self, *args):\n\t\traise NotImplementedError",
"def get_endpoints(self, **kwargs):\n return self._database.lookup('endpoint', kwargs)",
"def get_adapter(self, command: str) -> BaseAdapter:\n return self.routes.get(command, self.adapter)",
"def get_endpoint(self, endpoint):\n for item in self.endpoints:\n if endpoint == item[0]:\n return item\n return None",
"def __get_endpoint(self):\n return self._endpoint",
"def get_adapter(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Adapter\"\r\n adapter_c = getattr(netius.adapters, name_f)\r\n adapter = adapter_c(*args, **kwargs)\r\n return adapter",
"def get_one(self, endpoint_ident):\n context = pecan.request.context\n endpoint = api_utils.get_resource('Endpoint', endpoint_ident)\n return Endpoint.convert_with_links(endpoint)",
"def get_url_adapter(step_key, adapter_id, back_page=None):\n result_url = '/flow/' + str(step_key) + '/' + str(adapter_id)\n if back_page is not None:\n result_url = result_url + \"?back_page=\" + str(back_page)\n return result_url",
"def get_endpoint(cls):\n raise NotImplementedError(\n \"%s must have implemented get_endpoint.\" % cls.__name__,\n )",
"def endpoint_config(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s' % endpoint_name, 'GET')\n return body",
"def get_endpoint(self):\r\n return self._endpoint",
"def get_resolver_endpoint(ResolverEndpointId=None):\n pass",
"def connect_to_api(self):\r\n try:\r\n ep = KrestExtendedEndPoint(self.host, self.username,\r\n self.password, ssl_validate=self.is_ssl,\r\n retries=self.retries)\r\n except Exception as e:\r\n raise StorageDriverAPIException('K2 API connection failure: {}'.\r\n format(e))\r\n return ep",
"def get_endpoint(self, oid):\n return self.get_object(CatalogEndpoint, ModelEndpoint, oid)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Endpoint':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EndpointArgs.__new__(EndpointArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"endpoint_forwarding_rule\"] = None\n __props__.__dict__[\"endpoint_id\"] = None\n __props__.__dict__[\"endpoint_ip\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"network\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"request_id\"] = None\n __props__.__dict__[\"severity\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"threat_exceptions\"] = None\n __props__.__dict__[\"traffic_logs\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Endpoint(resource_name, opts=opts, __props__=__props__)",
"def get_adapter(self):\n\t\timportlib.import_module('app.adapters.{0}'.format(self.builder.name))\n\n\t\tclasses = inspect.getmembers(\n\t\t\tsys.modules['app.adapters.{0}'.format(self.builder.name)],\n\t\t\tinspect.isclass\n\t\t)\n\n\t\tadapter = next(\n\t\t\tcls_ for cls_ in classes \\\n\t\t\tif hasattr(cls_[1], 'tech') \\\n\t\t\t and cls_[1].tech == self.builder.__class__.tech \\\n\t\t\t and hasattr(cls_[1], 'ctx') \\\n\t\t\t and cls_[1].ctx == self.builder.__class__.ctx\n\t\t)[1]\n\n\t\treturn adapter(self.builder())",
"def getEndpoint(self, endpoint):\n # If endpoint not recognized, you get None\n self.__lockobj.acquire()\n retval = None\n if endpoint in self.__endpoints.keys():\n retval = self.__endpoints[endpoint]\n self.__lockobj.acquire()\n return retval",
"def build_endpoint(self, **kwargs):\n\n raise NotImplementedError()",
"def get_adapter(cls):\n pass",
"def getEndpoint(self, tag):\r\n if tag in self.robots:\r\n return self.robots[tag]\r\n elif tag in self.containers:\r\n return self.containers[tag]\r\n else:\r\n raise InvalidRequest('Can not get a non existent endpoint '\r\n \"'{0}'.\".format(tag))",
"def _get_api_endpoint():\n try:\n return get_service_endpoint(\"apiext\").strip(\"/\")\n except:\n log.warn(\n \"Could not find valid apiext endpoint for links so will use policy engine endpoint instead\"\n )\n try:\n return get_service_endpoint(\"policy_engine\").strip(\"/\")\n except:\n log.warn(\n \"No policy engine endpoint found either, using default but invalid url\"\n )\n return \"http://<valid endpoint not found>\"",
"def getEndpoint(self):\n port = \"\"\n endpoint = \"\"\n keyConfig = self.getKeyConfig()\n\n if \"port\" in keyConfig:\n port = \":\" + keyConfig[\"port\"]\n elif self._data[\"port\"] != self.PORT:\n port = \":\" + self._data[\"port\"]\n\n if \"endpoint\" in keyConfig:\n endpoint = keyConfig[\"endpoint\"]\n else:\n endpoint = self._data[\"endpoint\"]\n\n return \"https://%s%s/%s/\" % (endpoint, port, self._data[\"api_version\"])",
"def GetEndPoint(self) -> Optional[str]:\n if self._end_point:\n return self._end_point\n\n cmd = util.GcloudCommand(self, 'config', 'get-value',\n 'api_endpoint_overrides/spanner')\n stdout, _, retcode = cmd.Issue(raise_on_failure=False)\n if retcode != 0:\n logging.warning('Fail to retrieve cloud spanner end point.')\n return None\n self._end_point = json.loads(stdout)\n return self._end_point",
"def get_endpoint(self, datacenter=None, network=None):\r\n if datacenter is None:\r\n datacenter = 'dal05'\r\n if network is None:\r\n network = 'public'\r\n try:\r\n host = ENDPOINTS[datacenter][network]\r\n return \"https://%s\" % host\r\n except KeyError:\r\n raise TypeError('Invalid endpoint %s/%s'\r\n % (datacenter, network))"
]
| [
"0.64198333",
"0.6112636",
"0.60666794",
"0.5826337",
"0.5776892",
"0.57080066",
"0.565453",
"0.5649414",
"0.5629215",
"0.5584997",
"0.54440343",
"0.54425573",
"0.5436947",
"0.5435537",
"0.54330575",
"0.5416111",
"0.5399212",
"0.53746885",
"0.5347526",
"0.5345667",
"0.5163407",
"0.5140414",
"0.51252973",
"0.50955653",
"0.50558615",
"0.50449765",
"0.49482784",
"0.49289668",
"0.49049392",
"0.49049154"
]
| 0.6830363 | 0 |
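The get_endpoint row above resolves a service endpoint through an Adapter and raises when the catalog has no match. A hedged sketch of the underlying keystoneauth1 call follows, constructing the Adapter directly instead of from config; the service_type and interface defaults are assumptions for illustration only.

from keystoneauth1 import adapter as ks_adapter

def resolve_endpoint(session, service_type='baremetal', interface='internal',
                     region_name=None):
    # Adapter.get_endpoint() returns the catalog URL, or None when no
    # matching entry exists -- the caller decides whether None is an error.
    adap = ks_adapter.Adapter(session=session,
                              service_type=service_type,
                              interface=interface,
                              region_name=region_name)
    return adap.get_endpoint()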
Create an auth plugin wrapping both user and service auth. When properly configured and using auth_token middleware, requests with valid service auth will not fail if the user token is expired. Ideally we would use the plugin provided by auth_token middleware; however, this plugin isn't serialized yet. | def get_service_auth(context, endpoint, service_auth):
# TODO(pas-ha) use auth plugin from context when it is available
user_auth = token_endpoint.Token(endpoint, context.auth_token)
return service_token.ServiceTokenAuthWrapper(user_auth=user_auth,
service_auth=service_auth) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use\n return authentication.SASTokenAuth.from_shared_access_key(auth_uri, username, password)",
"def auth(self):\n return AuthManager(self)",
"def register_auth(self):\n\n # pylint: disable=missing-return-doc, missing-return-type-doc\n def decorator(func):\n self.auth_func = func\n return func\n\n return decorator",
"def install(app, conf, public_routes):\n if not CONF.get('enable_authentication'):\n return app\n return auth_token.AuthTokenMiddleware(app,\n conf=dict(conf.keystone_authtoken),\n public_api_routes=public_routes)",
"def service_auth(self) -> Optional[pulumi.Input['ServiceAuthConfigurationArgs']]:\n return pulumi.get(self, \"service_auth\")",
"def auth(self, user):",
"def authenticate(self):\n #it's weird i have to do this here, but the code makes this not simple\n auth_json={'email':self.user, 'password':self.password}\n #send a post with no auth. prevents an infinite loop\n auth_response = self.post('/auth', data = json.dumps(auth_json), auth =\n None)\n\n _token = auth_response.json['token']\n\n self._token = _token\n self._wrapped.auth = SpringAuth(_token)",
"def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token",
"def make_plugin(audiences=None, token_url=None, nonce_timeout=None, **kwds):\n # You *must* specify the \"audiences\" parameter since it's integral\n # to the security of the protocol. If you want it set to None to\n # allow checking based on HTTP_HOST, set it to the empty string.\n if audiences is None:\n raise ValueError('You must specify the \"audiences\" parameter')\n if not audiences:\n audiences = None\n elif isinstance(audiences, basestring):\n audiences = audiences.split()\n # Load the token manager, possibly from a class+args.\n token_manager = _load_from_callable(\"token_manager\", kwds)\n # Load the VEP verifier, possibly from a class+args.\n # Assume \"urlopen\" is a dotted-name of a callable.\n verifier = _load_from_callable(\"verifier\", kwds, converters={\n \"urlopen\": resolveDotted\n })\n # If there are any kwd args left over, that's an error.\n for unknown_kwd in kwds:\n raise TypeError(\"unknown keyword argument: %s\" % unknown_kwd)\n plugin = VEPAuthPlugin(audiences, token_url, token_manager, verifier,\n nonce_timeout)\n return plugin",
"def authenticate():\n auth = OAuthHandler(config.TW_API_KEY, config.TW_API_SECRET)\n auth.set_access_token(config.TW_ACC_TOKEN, config.TW_ACC_SECRET)\n\n return auth",
"def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n access_token = None\n # Check HTTP basic auth, set access_token if authenticated\n auth = request.authorization\n if auth is not None and not check_authentication(auth.username, auth.password):\n return authenticate()\n # Try to get access_token token from various sources\n # Token in the headers\n try:\n k, v = request.headers.get('Authorization').split(' ')\n if k.lower() == 'bearer':\n access_token = v\n except (ValueError, AttributeError, KeyError):\n pass\n # Token was set by check_authentication\n try:\n access_token = _request_ctx_stack.top.current_user_token\n except AttributeError:\n pass\n # Plain old HTTP GET and POST\n if access_token is None and request.method == 'GET':\n access_token = request.args.get('access_token', access_token)\n if request.method == 'POST':\n try:\n access_token = request.form['access_token']\n except KeyError:\n pass\n # No valid token provided or the token is present but it is not valid\n # or other rules deny access to the requested resource\n if access_token is None:\n return authenticate()\n\n # If it's a plugin download:\n if 'plugin_name' in kwargs:\n plugin_roles = get_plugin_roles(kwargs.get('plugin_name'))\n message_log(\"Got plugin roles: %s\" % plugin_roles)\n try:\n user_roles = get_user_roles(access_token)\n message_log(\"Got user roles: %s\" % user_roles)\n except Auth0Error, e:\n message_log(\"Auth0Error: Forbidden - Returning 403: %s\" % e)\n return abort(403)\n if not authorize(user_roles, plugin_roles):\n message_log(\"Forbidden - Returning 403\")\n return abort(403)\n\n _request_ctx_stack.top.current_user_token = access_token\n message_log(\"Returning from requires_auth decorator\")\n return f(*args, **kwargs)\n return decorated",
"def auth(self):\n return self.api(self.token)",
"def auth_user(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not hasattr(current_app, \"login_manager\"):\n return f(*args, **kwargs)\n\n cr = current_app.login_manager\n current_user = LocalProxy(lambda: get_user())\n\n def get_user():\n if _request_ctx_stack.top is not None and not hasattr(_request_ctx_stack.top, \"user\"):\n cr._load_user()\n return getattr(_request_ctx_stack.top, \"user\", None)\n\n if not (current_user and current_user.is_authenticated):\n return make_response_content(ErrorResponse(MsgCode.AUTH_FAIL), http_status=HTTPStatus.UNAUTHORIZED)\n return f(*args, **kwargs)\n\n return wrapper",
"def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth",
"def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth",
"def shotauth_wrapper(secretsource, valid_duration=timedelta(days=365), name='TKT', domain=None, path='/'):\n SignedCookie, AuthService = shotauth_make(secretsource,\n valid_duration,\n name,\n domain,\n path)\n def wrap_handler(func):\n @wraps(func)\n def handle(request):\n t = request.cookie.get(name)\n tkt = None\n if t:\n tkt = SignedCookie.parse(t.value)\n request.shotauth = AuthService(tkt)\n request.header_hook(request.shotauth.header_hook())\n return func(request)\n return handle\n return wrap_handler",
"def as_auth_name(func):\n\n def auth_client(self):\n token = Token.objects.get(user__username=username)\n self.client = APIClient()\n self.client.credentials(HTTP_AUTHORIZATION=\"Token \" + token.key)\n return func(self)\n\n return auth_client",
"def auth_pipeline_factory(loader, global_conf, **local_conf):\n pipeline = local_conf[cfg.CONF['service:api'].auth_strategy]\n pipeline = pipeline.split()\n LOG.info('Getting auth pipeline: %s', pipeline[:-1])\n filters = [loader.get_filter(n) for n in pipeline[:-1]]\n app = loader.get_app(pipeline[-1])\n filters.reverse()\n for filter in filters:\n app = filter(app)\n return app",
"def authentication_hook(self):\n pass",
"def get_auth(group, **auth_kwargs):\n try:\n auth = ks_loading.load_auth_from_conf_options(CONF, group,\n **auth_kwargs)\n except ks_exception.MissingRequiredOptions:\n LOG.error('Failed to load auth plugin from group %s', group)\n raise\n return auth",
"def provider(hass):\n provider = hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )\n hass.loop.run_until_complete(provider.async_initialize())\n return provider",
"def auth(request):\n\n service = get_model_instance(request.user, MODULE_NAME)\n if service and request.method == 'POST':\n username = request.POST['username']\n\n # Delete existing token\n AccessToken.objects.filter(service=service).delete()\n # Before creating a new one\n AccessToken.objects.create(\n service=service,\n username=username,\n created=datetime.now(),\n api_token=service.app.oauth.consumer_key\n )\n\n service.setup = True\n service.public = True\n service.save()\n\n return redirect(settings_redirect(request))",
"def authenticate(self, *args, **kwargs):\n # Validate backend and arguments. Require that the Social Auth\n # response be passed in as a keyword argument, to make sure we\n # don't match the username/password calling conventions of\n # authenticate.\n if not (self.name and kwargs.get(self.name) and 'response' in kwargs):\n return None\n\n response = kwargs.get('response')\n pipeline = PIPELINE\n kwargs = kwargs.copy()\n kwargs['backend'] = self\n\n if 'pipeline_index' in kwargs:\n pipeline = pipeline[kwargs['pipeline_index']:]\n else:\n kwargs['details'] = self.get_user_details(response)\n kwargs['uid'] = self.get_user_id(kwargs['request'])\n kwargs['is_new'] = False\n \n out = self.pipeline(pipeline, *args, **kwargs)\n if not isinstance(out, dict):\n return out\n\n social_user = out.get('social_user')\n if social_user:\n # define user.social_user attribute to track current social\n # account\n user = social_user.user\n user.social_user = social_user\n user.is_new = out.get('is_new')\n return user",
"def auth_required(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n user = g.user\n if not user:\n return response(False, 401, message='Authorization required.')\n return f(types.SimpleNamespace(**user), *args, **kwargs)\n return wrapper",
"def get_authenticated_service(api_name: str, api_version: str) -> Resource:\n\n if CREDS_FILENAME.exists():\n credentials = Credentials.from_authorized_user_file(str(CREDS_FILENAME))\n # TODO make request to the access token endpoint???\n\n # FIXME verifying token\n # credentials.refresh(requests.Request())\n # print(credentials.token, credentials.expiry)\n\n # idinfo = id_token.verify_oauth2_token(\n # credentials.token, requests.Request(), credentials.client_id)\n\n # if idinfo['iss'] not in ['accounts.google.com',\n # 'https://accounts.google.com']:\n # # CREDS_FILENAME.unlink()\n # raise ValueError('Wrong issuer.')\n\n else:\n flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)\n credentials = flow.run_local_server(\n host=\"localhost\",\n port=8080,\n authorization_prompt_message=\"Please visit this URL: {url}\",\n success_message=\"The auth flow is complete; you may close this window.\",\n open_browser=True,\n )\n\n creds_data = {\n \"token\": None,\n \"refresh_token\": credentials.refresh_token,\n \"token_uri\": credentials.token_uri,\n \"client_id\": credentials.client_id,\n \"client_secret\": credentials.client_secret,\n \"scopes\": credentials.scopes,\n }\n\n with CREDS_FILENAME.open(\"w\") as outfile:\n json.dump(creds_data, outfile)\n\n return build(api_name, api_version, credentials=credentials)",
"def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)",
"def shotauth(secretsource, valid_duration=timedelta(days=365), name='TKT', domain=None, path='/'):\n SignedCookie, AuthService = shotauth_make(secretsource,\n valid_duration,\n name,\n domain,\n path)\n def middleware(app):\n def auth_wrapper(environ, start_response):\n cookie = SimpleCookie(environ.get('HTTP_COOKIE'))\n t = cookie.get(name)\n tkt = None\n if t:\n tkt = SignedCookie.parse(t.value)\n service = AuthService(tkt)\n environ['shotweb.authservice'] = service\n return app(environ, service.start_response_wrapper(start_response))\n return auth_wrapper\n return middleware",
"def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='[email protected]', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)",
"def add_auth_opts(options, service_type=None):\n def add_options(opts, opts_to_add):\n for new_opt in opts_to_add:\n for opt in opts:\n if opt.name == new_opt.name:\n break\n else:\n opts.append(new_opt)\n\n opts = copy.deepcopy(options)\n opts.insert(0, ks_loading.get_auth_common_conf_options()[0])\n # NOTE(dims): There are a lot of auth plugins, we just generate\n # the config options for a few common ones\n plugins = ['password', 'v2password', 'v3password']\n for name in plugins:\n plugin = ks_loading.get_plugin_loader(name)\n add_options(opts, ks_loading.get_auth_plugin_conf_options(plugin))\n add_options(opts, ks_loading.get_session_conf_options())\n if service_type:\n adapter_opts = ks_loading.get_adapter_conf_options(\n include_deprecated=False)\n # adding defaults for valid interfaces\n cfg.set_defaults(adapter_opts, service_type=service_type,\n valid_interfaces=DEFAULT_VALID_INTERFACES)\n add_options(opts, adapter_opts)\n opts.sort(key=lambda x: x.name)\n return opts",
"def get_auth(self, username, password):\n raise NotImplementedError()"
]
| [
"0.5491579",
"0.5472357",
"0.5434556",
"0.5322575",
"0.53010625",
"0.5250635",
"0.5228165",
"0.52093923",
"0.5183537",
"0.5166243",
"0.5149993",
"0.51365024",
"0.51117855",
"0.5093687",
"0.5093687",
"0.5087958",
"0.50836414",
"0.5082779",
"0.5018534",
"0.5008432",
"0.50054336",
"0.49944383",
"0.49932742",
"0.49839187",
"0.49745053",
"0.49693578",
"0.4958214",
"0.4944794",
"0.49342862",
"0.49300227"
]
| 0.6576886 | 0 |
Register session- and auth-related options. Registers only basic auth options shared by all auth plugins. The rest are registered at runtime depending on the auth plugin used. | def register_auth_opts(conf, group, service_type=None):
ks_loading.register_session_conf_options(conf, group)
ks_loading.register_auth_conf_options(conf, group)
CONF.set_default('auth_type', default='password', group=group)
ks_loading.register_adapter_conf_options(conf, group)
conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group)
if service_type:
conf.set_default('service_type', service_type, group=group)
else:
types = os_service_types.get_service_types()
key = 'ironic-inspector' if group == 'inspector' else group
service_types = types.service_types_by_project.get(key)
if service_types:
conf.set_default('service_type', service_types[0], group=group) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_opts():\n _register_api_opts()\n _register_db_opts()",
"def add_auth_opts(options, service_type=None):\n def add_options(opts, opts_to_add):\n for new_opt in opts_to_add:\n for opt in opts:\n if opt.name == new_opt.name:\n break\n else:\n opts.append(new_opt)\n\n opts = copy.deepcopy(options)\n opts.insert(0, ks_loading.get_auth_common_conf_options()[0])\n # NOTE(dims): There are a lot of auth plugins, we just generate\n # the config options for a few common ones\n plugins = ['password', 'v2password', 'v3password']\n for name in plugins:\n plugin = ks_loading.get_plugin_loader(name)\n add_options(opts, ks_loading.get_auth_plugin_conf_options(plugin))\n add_options(opts, ks_loading.get_session_conf_options())\n if service_type:\n adapter_opts = ks_loading.get_adapter_conf_options(\n include_deprecated=False)\n # adding defaults for valid interfaces\n cfg.set_defaults(adapter_opts, service_type=service_type,\n valid_interfaces=DEFAULT_VALID_INTERFACES)\n add_options(opts, adapter_opts)\n opts.sort(key=lambda x: x.name)\n return opts",
"def register_options(cls, register):",
"def sufficient_options(self):\n has_token = self.opts.get('token')\n has_project_domain_or_tenant = (self.opts.get('project_id') or\n (self.opts.get('project_name') and\n (self.opts.get('user_domain_name') or\n self.opts.get('user_domain_id'))) or\n (self.opts.get('tenant_id') or\n self.opts.get('tenant_name')))\n has_credential = (self.opts.get('username')\n and has_project_domain_or_tenant\n and self.opts.get('password')\n and self.opts.get('auth_url'))\n missing = not (has_token or has_credential)\n if missing:\n missing_opts = []\n opts = ['token', 'endpoint', 'username', 'password', 'auth_url',\n 'tenant_id', 'tenant_name']\n for opt in opts:\n if not self.opts.get(opt):\n missing_opts.append(opt)\n raise exceptions.AuthPluginOptionsMissing(missing_opts)",
"def register_opts(self, conf):\n config.register_opt_group(conf, project_config.service_available_group,\n project_config.ServiceAvailableGroup)\n\n config.register_opt_group(conf, project_config.placement_group,\n project_config.PlacementGroup)\n\n config.register_opt_group(conf, project_config.valet_group,\n project_config.opt_valet)",
"def register(self):\n group = cfg.OptGroup(self.group_name, title='Azure Options')\n self._config.register_group(group)\n self._config.register_opts(self._options, group=group)",
"def register_opts(self, opts):\n for opt in opts:\n self.register_opt(opt)",
"def register_opts(self, opts, group=None):\n for opt in opts:\n self.register_opt(opt, group, clear_cache=False)",
"def configure_who_defaults(config):\n settings = config.registry.settings\n BACKENDAUTH_DEFAULTS = {\n \"use\": \"mozsvc.user.whoauth:BackendAuthPlugin\"\n }\n for key, value in BACKENDAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.backend.\" + key, value)\n BASICAUTH_DEFAULTS = {\n \"use\": \"repoze.who.plugins.basicauth:make_plugin\",\n \"realm\": \"Sync\",\n }\n for key, value in BASICAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.basicauth.\" + key, value)\n MACAUTH_DEFAULTS = {\n \"use\": \"mozsvc.user.whoauth:SagradaMACAuthPlugin\",\n }\n for key, value in MACAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.macauth.\" + key, value)\n # If there is an auth backend, enable basicauth by default.\n # Enable macauth by default regardless, since it doesn't need a backend.\n if config.registry.get(\"auth\") is not None:\n settings.setdefault(\"who.authenticators.plugins\", \"backend macauth\")\n settings.setdefault(\"who.identifiers.plugins\", \"basicauth macauth\")\n settings.setdefault(\"who.challengers.plugins\", \"basicauth macauth\")\n else:\n settings.setdefault(\"who.authenticators.plugins\", \"macauth\")\n settings.setdefault(\"who.identifiers.plugins\", \"macauth\")\n settings.setdefault(\"who.challengers.plugins\", \"macauth\")",
"def register_options(options):\n return (\n options\n .register('jsonFilterFile',\n type_=str,\n default=None,\n description=\"Path to JSON file containing certified runs and luminosity blocks.\")\n .register('useHLTFilter',\n type_=bool,\n default=False,\n description=\"If True, only events triggered by one of the skimmed paths will be \"\n \"written out.\")\n .register('jetCollections',\n type_=str,\n default=[],\n multiplicity='list',\n description=\"The names of the jet collections to use (e.g. 'AK4PFCHS').\")\n .register('jecVersion',\n type_=str,\n default=None,\n description=\"Tag of JEC version to use for e.g. JEC uncertainties.\")\n .register('jecFromGlobalTag',\n type_=bool,\n default=False,\n description=\"If True, the JECs will be looked up in the conditions database \"\n \"(CondDB/Frontier) under the current global tag. If False, the \"\n \"text files for `jecVersion` will be used.\")\n .register('jerVersion',\n type_=str,\n default=None,\n description=\"Tag of JER version to use for e.g. jet smearing.\")\n .register('jerMethod',\n type_=str,\n default='stochastic',\n description=\"Method to use for JER smearing. One of: 'stochastic', 'hybrid'\")\n .register('jerGenMatchPtSigma',\n type_=float,\n default=3.0,\n description=\"Size of Gaussian core for 'hybrid' JER smearing.\")\n .register('jetIDSpec',\n type_=str,\n default=None,\n description=\"Version of Jet ID to use (e.g. '2016').\")\n .register('jetIDWorkingPoint',\n type_=str,\n default=None,\n description=\"Working point of Jet ID to use (e.g. 'TightLepVeto').\")\n .register('prefiringWeightFilePath',\n type_=str,\n default=\"\",\n description=\"Path to ROOT file containing prefiring weights.\")\n .register('prefiringWeightHistName',\n type_=str,\n default=\"\",\n description=\"Name of histogram inside prefiring weights file (e.g. 'L1prefiring_jetpt_2016BCD').\")\n .register('useObjectBasedJetID',\n type_=bool,\n default=False,\n description=\"If True, only jets passing the ID specified via 'jetIDSpec' and `jetIDWorkingPoint` will be considered valid.\")\n .register('checkForCompleteness',\n type_=bool,\n default=False,\n description=(\"(for testing) If True, will run some checks on the \"\n \"Ntuple output to ensure all branches are written out \"\n \"and no branch is omitted.\"))\n .register('stitchingWeight',\n type_=float,\n default=1.0,\n description=(\"(deprecated) The output branch 'stitchingWeight' \"\n \"will contain this value for each event. Can then be \"\n \"used when stitching together different samples.\"))\n .register('doJECUncertaintySources',\n type_=bool,\n default=False,\n description=\"Fill ntuple branch with JEC correction factors for individual JEC uncertainty sources.\")\n .register('doPrescales',\n type_=bool,\n default=False,\n description=\"Write out trigger prescales to Ntuple.\")\n .register('edmOut',\n type_=bool,\n default=False,\n description=\"(for testing only) Write out EDM file.\")\n )",
"def register_cli_opts(self, opts, group=None):\n for opt in opts:\n self.register_cli_opt(opt, group, clear_cache=False)",
"def _add_cred_variables(self):\n self.credentialKey = {}\n authInfo = None\n if self.client:\n try:\n authInfo = self.client.getAuthenticatorInfo()\n except VersionMethodError:\n pass\n authArgOpts = dict(help=\"authentication plugin\")\n if authInfo:\n self.authenticatorInfo = AuthenticatorInfo(authInfo)\n authArgOpts['choices'] = self.authenticatorInfo.getAuthNames()\n else:\n self.authenticatorInfo = LegacyAuthenticatorInfo()\n\n var = self.add_variable('auth', (\"-a\", \"--auth\"), authArgOpts,\n envvar='ICAT_AUTH')\n var.postprocess = _post_auth\n for key in self.authenticatorInfo.getCredentialKeys(hide=False):\n self._add_credential_key(key)\n hidden = self.authenticatorInfo.getCredentialKeys(hide=True)\n if hidden:\n var = self.add_variable('promptPass', (\"-P\", \"--prompt-pass\"), \n dict(help=\"prompt for the password\", \n action='store_const', const=True), \n type=boolean, default=False)\n var.postprocess = _post_promptPass\n for key in hidden:\n self._add_credential_key(key, hide=True)",
"def _add_session_custom_option(self, req, section_name, option_name):\n name = 'inieditor-custom|%s|%s' % (section_name, option_name)\n req.session[name] = True",
"def add_options(self):\n self.add_option_save()\n self.add_option_enable()",
"def post_setup(cls):\n super().post_setup()\n cls.REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"] = (\n \"magnify.apps.core.authentication.DelegatedJWTAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n )",
"def _get_session_custom_options(self, req, filter_section_name = None):\n sections = { }\n for item_name in req.session.keys():\n if not item_name.startswith('inieditor-custom|'):\n continue\n \n parts = item_name.split('|', 3)\n if len(parts) < 3:\n continue\n \n section_name = parts[1]\n option_name = parts[2]\n \n if filter_section_name is not None and section_name != filter_section_name:\n continue\n \n if section_name in sections:\n sections[section_name][option_name] = True\n else:\n sections[section_name] = { option_name: True }\n \n return sections",
"def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )",
"def configure_extra(context):\n portal_url = getToolByName(context, 'portal_url')\n pm = getToolByName(context, 'portal_membership')\n portal = portal_url.getPortalObject()\n security = ISecuritySchema(portal)\n\n if not security.enable_self_reg:\n security.enable_self_reg = True\n\n if not security.enable_user_pwd_choice:\n security.enable_user_pwd_choice = True\n\n if not security.enable_user_folders:\n security.enable_user_folders = True\n pm.memberarea_type = 'MemberFolder'",
"def register_options(options):\n return (\n options\n .register('jsonFilterFile',\n type_=str,\n default=None,\n description=\"Path to JSON file containing certified runs and luminosity blocks.\")\n .register('hltRegexes',\n type_=str,\n multiplicity='list',\n default=[],\n description=\"Trigger information will only be written out for paths that match one of these regexes.\")\n .register('useHLTFilter',\n type_=bool,\n default=False,\n description=\"If True, only events triggered by a path matching the configured regex will be written out.\")\n .register('withPATCollections',\n type_=bool,\n default=None,\n description=\"If True, collections of PAT objects (including temporary ones) will be written out.\")\n .register('withMETCorrectionLevels',\n type_=bool,\n default=None,\n description=\"If True, various pre-defined MET correction levels will be written out as edm::ValueMaps.\")\n .register('metFiltersProcess',\n type_=str,\n default=None,\n description=\"Name of the process whose TriggerResults contain the MET filters (e.g. 'RECO').\")\n )",
"def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)",
"def add_options(_config):\n settings = [\n [\"cache_worker\", bool, lambda x: x in [True, False], False, False],\n [\n \"kube_deployment\",\n str,\n lambda x: x in [\"pod\", \"container\", \"file\", \"call\"],\n False,\n \"pod\",\n ],\n [\n \"kube_version\",\n str,\n lambda _: [\"v1.27.0\", \"v1.26.0\", \"v1.25.0\", \"v1.24.0\", \"v1.23.0\"],\n False,\n \"v1.27.0\",\n ],\n ]\n return settings",
"def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )",
"def basic_authentication(self, username: str, password: str) -> None:\n self.api_session.auth = (username, password)",
"def request_extras(self):\n conf = {}\n if self.api_token:\n conf['headers'] = {\n 'Authorization': 'Token {}'.format(self.api_token),\n }\n\n if self.credentials:\n conf['auth'] = self.credentials\n\n return conf",
"def authentication_hook(self):\n pass",
"def add_base_options(self):\n for option in self.base_options:\n self.add_option(*option.get_option_names(), **option.kwargs)",
"def register(self) -> None:\n with open(self.config_file) as f:\n try:\n self.options = json.loads(f.read())\n self.global_options.register_options(self.options)\n except json.decoder.JSONDecodeError:\n print(f\"Error: Unable to decode '{self.config_file} - Exiting'\\n\")\n exit()",
"def add_options(self, options):\n self.options = merge_dicts(self.options, options)",
"async def _configure_plugins(self) -> None:\n logger.debug('Configuring plugins')\n funcs = [\n info['plugin'].configure(\n config=info['config'],\n session=self._session,\n router=self.app.router\n )\n for info in self._plugins.values()\n ]\n\n if funcs:\n await asyncio.gather(*funcs, loop=self._loop)\n logger.debug('Plugins configured')",
"def set_global_flags(self):\n\n import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log\n\n try:\n rpki.http.debug_http = self.getboolean(\"debug_http\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.want_persistent_client = self.getboolean(\"want_persistent_client\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.want_persistent_server = self.getboolean(\"want_persistent_server\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.use_adns = self.getboolean(\"use_adns\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.enable_ipv6_clients = self.getboolean(\"enable_ipv6_clients\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.enable_ipv6_servers = self.getboolean(\"enable_ipv6_servers\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.CMS_object.debug_cms_certs = self.getboolean(\"debug_cms_certs\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.sql.sql_persistent.sql_debug = self.getboolean(\"sql_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.timer.gc_debug = self.getboolean(\"gc_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.timer.run_debug = self.getboolean(\"timer_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get(\"dump_outbound_cms\"))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get(\"dump_inbound_cms\"))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.gc_summary(self.getint(\"gc_summary\"), self.getint(\"gc_summary_threshold\", 0))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.log.enable_tracebacks = self.getboolean(\"enable_tracebacks\")\n except ConfigParser.NoOptionError:\n pass"
]
| [
"0.6607029",
"0.61653936",
"0.60332906",
"0.5657959",
"0.55373776",
"0.552294",
"0.5479442",
"0.54483664",
"0.5426347",
"0.53914356",
"0.53523314",
"0.5235399",
"0.52281266",
"0.5205289",
"0.51972586",
"0.51221377",
"0.5098826",
"0.5094243",
"0.5043019",
"0.5042551",
"0.5026544",
"0.5023253",
"0.4984844",
"0.49298277",
"0.49202707",
"0.49095666",
"0.48915303",
"0.4874247",
"0.4846181",
"0.48186144"
]
| 0.6373836 | 1 |
Add auth options to sample config. As these are dynamically registered at runtime, this adds options for the most used auth_plugins when generating a sample config. | def add_auth_opts(options, service_type=None):
def add_options(opts, opts_to_add):
for new_opt in opts_to_add:
for opt in opts:
if opt.name == new_opt.name:
break
else:
opts.append(new_opt)
opts = copy.deepcopy(options)
opts.insert(0, ks_loading.get_auth_common_conf_options()[0])
# NOTE(dims): There are a lot of auth plugins, we just generate
# the config options for a few common ones
plugins = ['password', 'v2password', 'v3password']
for name in plugins:
plugin = ks_loading.get_plugin_loader(name)
add_options(opts, ks_loading.get_auth_plugin_conf_options(plugin))
add_options(opts, ks_loading.get_session_conf_options())
if service_type:
adapter_opts = ks_loading.get_adapter_conf_options(
include_deprecated=False)
# adding defaults for valid interfaces
cfg.set_defaults(adapter_opts, service_type=service_type,
valid_interfaces=DEFAULT_VALID_INTERFACES)
add_options(opts, adapter_opts)
opts.sort(key=lambda x: x.name)
return opts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_auth_opts(conf, group, service_type=None):\n ks_loading.register_session_conf_options(conf, group)\n ks_loading.register_auth_conf_options(conf, group)\n CONF.set_default('auth_type', default='password', group=group)\n ks_loading.register_adapter_conf_options(conf, group)\n conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group)\n if service_type:\n conf.set_default('service_type', service_type, group=group)\n else:\n types = os_service_types.get_service_types()\n key = 'ironic-inspector' if group == 'inspector' else group\n service_types = types.service_types_by_project.get(key)\n if service_types:\n conf.set_default('service_type', service_types[0], group=group)",
"def update_auth_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section(\"auth\"):\n rconfig.add_section(\"auth\")\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n tempest_roles = []\n if rconfig.has_option(\"auth\", \"tempest_roles\"):\n tempest_roles = functest_utils.convert_ini_to_list(\n rconfig.get(\"auth\", \"tempest_roles\"))\n rconfig.set(\n 'auth', 'tempest_roles',\n functest_utils.convert_list_to_ini(\n [env.get(\"NEW_USER_ROLE\")] + tempest_roles))\n if not json.loads(env.get(\"USE_DYNAMIC_CREDENTIALS\").lower()):\n rconfig.set('auth', 'use_dynamic_credentials', False)\n account_file = os.path.join(\n getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')\n assert os.path.exists(\n account_file), f\"{account_file} doesn't exist\"\n rconfig.set('auth', 'test_accounts_file', account_file)\n if env.get('NO_TENANT_NETWORK').lower() == 'true':\n rconfig.set('auth', 'create_isolated_networks', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')",
"def register(self):\n group = cfg.OptGroup(self.group_name, title='Azure Options')\n self._config.register_group(group)\n self._config.register_opts(self._options, group=group)",
"def configure(self, options, conf):",
"def configure(self, options, conf):\n pass",
"def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)",
"def add_options(_config):\n settings = [\n [\"cache_worker\", bool, lambda x: x in [True, False], False, False],\n [\n \"kube_deployment\",\n str,\n lambda x: x in [\"pod\", \"container\", \"file\", \"call\"],\n False,\n \"pod\",\n ],\n [\n \"kube_version\",\n str,\n lambda _: [\"v1.27.0\", \"v1.26.0\", \"v1.25.0\", \"v1.24.0\", \"v1.23.0\"],\n False,\n \"v1.27.0\",\n ],\n ]\n return settings",
"def configure_who_defaults(config):\n settings = config.registry.settings\n BACKENDAUTH_DEFAULTS = {\n \"use\": \"mozsvc.user.whoauth:BackendAuthPlugin\"\n }\n for key, value in BACKENDAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.backend.\" + key, value)\n BASICAUTH_DEFAULTS = {\n \"use\": \"repoze.who.plugins.basicauth:make_plugin\",\n \"realm\": \"Sync\",\n }\n for key, value in BASICAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.basicauth.\" + key, value)\n MACAUTH_DEFAULTS = {\n \"use\": \"mozsvc.user.whoauth:SagradaMACAuthPlugin\",\n }\n for key, value in MACAUTH_DEFAULTS.iteritems():\n settings.setdefault(\"who.plugin.macauth.\" + key, value)\n # If there is an auth backend, enable basicauth by default.\n # Enable macauth by default regardless, since it doesn't need a backend.\n if config.registry.get(\"auth\") is not None:\n settings.setdefault(\"who.authenticators.plugins\", \"backend macauth\")\n settings.setdefault(\"who.identifiers.plugins\", \"basicauth macauth\")\n settings.setdefault(\"who.challengers.plugins\", \"basicauth macauth\")\n else:\n settings.setdefault(\"who.authenticators.plugins\", \"macauth\")\n settings.setdefault(\"who.identifiers.plugins\", \"macauth\")\n settings.setdefault(\"who.challengers.plugins\", \"macauth\")",
"def add_virtual_authenticator(self, config):\n pass",
"def auth_config(self):\n\t\treturn {\n\t\t\t'login_url': self.uri_for('login'),\n\t\t\t'logout_url': self.uri_for('logout')\n\t\t}",
"def register_opts(self, conf):\n config.register_opt_group(conf, project_config.service_available_group,\n project_config.ServiceAvailableGroup)\n\n config.register_opt_group(conf, project_config.placement_group,\n project_config.PlacementGroup)\n\n config.register_opt_group(conf, project_config.valet_group,\n project_config.opt_valet)",
"def register_opts():\n _register_api_opts()\n _register_db_opts()",
"def add_options(self, options):\n self.options = merge_dicts(self.options, options)",
"def _add_cred_variables(self):\n self.credentialKey = {}\n authInfo = None\n if self.client:\n try:\n authInfo = self.client.getAuthenticatorInfo()\n except VersionMethodError:\n pass\n authArgOpts = dict(help=\"authentication plugin\")\n if authInfo:\n self.authenticatorInfo = AuthenticatorInfo(authInfo)\n authArgOpts['choices'] = self.authenticatorInfo.getAuthNames()\n else:\n self.authenticatorInfo = LegacyAuthenticatorInfo()\n\n var = self.add_variable('auth', (\"-a\", \"--auth\"), authArgOpts,\n envvar='ICAT_AUTH')\n var.postprocess = _post_auth\n for key in self.authenticatorInfo.getCredentialKeys(hide=False):\n self._add_credential_key(key)\n hidden = self.authenticatorInfo.getCredentialKeys(hide=True)\n if hidden:\n var = self.add_variable('promptPass', (\"-P\", \"--prompt-pass\"), \n dict(help=\"prompt for the password\", \n action='store_const', const=True), \n type=boolean, default=False)\n var.postprocess = _post_promptPass\n for key in hidden:\n self._add_credential_key(key, hide=True)",
"def auth_config(self):\n return {\n 'login_url': self.uri_for('login'),\n 'logout_url': self.uri_for('logout')\n }",
"def auth_config(self) -> 'outputs.AuthConfigResponse':\n return pulumi.get(self, \"auth_config\")",
"def add_base_options(self):\n for option in self.base_options:\n self.add_option(*option.get_option_names(), **option.kwargs)",
"def sufficient_options(self):\n has_token = self.opts.get('token')\n has_project_domain_or_tenant = (self.opts.get('project_id') or\n (self.opts.get('project_name') and\n (self.opts.get('user_domain_name') or\n self.opts.get('user_domain_id'))) or\n (self.opts.get('tenant_id') or\n self.opts.get('tenant_name')))\n has_credential = (self.opts.get('username')\n and has_project_domain_or_tenant\n and self.opts.get('password')\n and self.opts.get('auth_url'))\n missing = not (has_token or has_credential)\n if missing:\n missing_opts = []\n opts = ['token', 'endpoint', 'username', 'password', 'auth_url',\n 'tenant_id', 'tenant_name']\n for opt in opts:\n if not self.opts.get(opt):\n missing_opts.append(opt)\n raise exceptions.AuthPluginOptionsMissing(missing_opts)",
"def configure_extra(context):\n portal_url = getToolByName(context, 'portal_url')\n pm = getToolByName(context, 'portal_membership')\n portal = portal_url.getPortalObject()\n security = ISecuritySchema(portal)\n\n if not security.enable_self_reg:\n security.enable_self_reg = True\n\n if not security.enable_user_pwd_choice:\n security.enable_user_pwd_choice = True\n\n if not security.enable_user_folders:\n security.enable_user_folders = True\n pm.memberarea_type = 'MemberFolder'",
"def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )",
"def includeme(config):\n # Grab the pyramid-wide settings, to look for any auth config.\n settings = config.get_settings().copy()\n # Use the settings to construct an AuthenticationPolicy.\n authn_policy = SRPAuthenticationPolicy.from_settings(settings)\n config.set_authentication_policy(authn_policy)\n # Hook up a default AuthorizationPolicy.\n # You can't have one without the other, and ACLAuthorizationPolicy is\n # usually what you want. If the app configures one explicitly then this\n # will get overridden.\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n # Add forbidden view to challenge for auth credentials.\n config.add_view(authn_policy.challenge_view,\n context=\"pyramid.exceptions.Forbidden\")",
"def configure(self):\n configurations = config.Configurations()\n self.credentials = configurations.credentials\n self.config = configurations.config",
"def configure(config):\n\n\tif config.option(\"Configure key logging\", False):\n\t\tconfig.add_section(\"keylogs\")\n\t\tconfig.interactive_add(\n\t\t\t\"keylogs\", \"dir\",\n\t\t\t\"Absolure path to key log storage directory\",\n\t\t\tdefault = os.path.join(\"~\", \"keylogs\")\n\t\t)",
"def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n 'and_double_word': AndDoubleWord,\n 'or_byte': OrByte,\n 'or_word': OrWord,\n 'or_double_word': OrDoubleWord,\n 'exclusive_or_byte': ExclusiveOrByte,\n 'exclusive_or_word': ExclusiveOrWord,\n 'exclusive_or_double_word': ExclusiveOrDoubleWord\n }\n\n return config",
"def add_options(self):\n self.add_option_save()\n self.add_option_enable()",
"def get_external_opts_configs(cls):\n return [\n ExternalOptConfig(\n name=\"auth_uri\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_user\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_password\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_tenant_name\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ]",
"def inject_args_in_config(args, config):\n log = logging.getLogger(__name__)\n\n for t_opt in list(args._options.values()):\n n = t_opt.name\n first_ = n.find('_')\n if first_ > 0:\n s, o = n[:first_], n[first_ + 1:]\n v = t_opt.value()\n log.info('inject argument {} = {} in configuration section {}, option {}'.format(n, v, s, o))\n if not config.has_section(s):\n config.add_section(s)\n config.set(s, o, v)\n return config",
"def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)",
"def register_options(cls, register):"
]
| [
"0.667338",
"0.64628184",
"0.6089287",
"0.58291835",
"0.57456213",
"0.5612679",
"0.56000763",
"0.5594233",
"0.5493821",
"0.5414338",
"0.54052657",
"0.5396743",
"0.5367182",
"0.52925545",
"0.52918124",
"0.52757716",
"0.52467024",
"0.52238643",
"0.51919407",
"0.518778",
"0.51851726",
"0.5175143",
"0.5157376",
"0.5153056",
"0.5153014",
"0.51280624",
"0.5121467",
"0.51095206",
"0.5087794",
"0.50760156"
]
| 0.6891298 | 0 |
Toggles the infomail field in the Onlineuser object. | def toggle_infomail(request):
if request.is_ajax():
if request.method == 'POST':
request.user.infomail = not request.user.infomail
request.user.save()
return HttpResponse(status=200, content=json.dumps({'state': request.user.infomail}))
raise Http404 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_jobmail(request):\n if request.is_ajax():\n if request.method == 'POST':\n request.user.jobmail = not request.user.jobmail\n request.user.save()\n\n return HttpResponse(status=200, content=json.dumps({'state': request.user.jobmail}))\n raise Http404",
"def toggle(self, *_):\r\n \r\n global ac\r\n if self.author_f_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_m_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_l_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n else:\r\n self.add_a['state'] = 'disabled'",
"def toggle_active(self, user):\n user.active = not user.active\n return True",
"def toggle_active(self, user):\n user.active = not user.active\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True",
"def toggle_in_room(self,new_bool):\n self.in_room = new_bool",
"def toggle_nominee_flag(nominee, field):\n n = biv.load_obj(nominee)\n assert type(n) == pem.E15Nominee\n assert hasattr(n, field), \\\n '{}: has no attr {}'.format(n, field)\n v = getattr(n, field)\n assert type(v) == bool, \\\n '{}.{}: is not boolean {}'.format(n, field, type(n.field))\n setattr(n, field, not v)\n _add_model(n)",
"async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")",
"async def async_toggle_informationcard(self):\n await self.local_meural.send_key_up()",
"def toggle_pick_upable(self,new_bool):\n self.pick_upable = new_bool",
"def is_online(tnu):\n if ship_tnm(tnu, taunet.TauNetMessage().test()):\n taunet.users.by_name(tnu.name).is_on = True\n return True\n taunet.users.by_name(tnu.name).is_on = False\n return False",
"def send_user_invitation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_user_invitation\")",
"def send_user_invitation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_user_invitation\")",
"def activation_toggle(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n employee.active = not employee.active\n employee.date_edited = datetime.utcnow()\n employee.save()\n return jsonify({\n 'msg': 'OK'\n })",
"def set_user_info(self, userInfo: UserInfo):\n self.__isLogin = True\n self.__userInfo = userInfo",
"def get_object(self, queryset=None):\n obj = super(InvitationDetailView, self).get_object(queryset)\n if self.request.user == obj.receiver:\n obj.read = True\n obj.save()\n return obj",
"def toggle_autorun(self, event):\n self.lnp.toggle_autorun(self.proglist.item(self.proglist.identify(\n 'row', event.x, event.y), 'text'))\n self.update_autorun_list()",
"def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])",
"async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)",
"def toggle(self, **kwargs):\n self.on = False if self.on else True",
"def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)",
"def toggleai(connection):\n protocol = connection.protocol\n protocol.ai_enabled = not protocol.ai_enabled\n if not protocol.ai_enabled:\n for bot in protocol.bots:\n bot.flush_input()\n state = \"enabled\" if protocol.ai_enabled else \"disabled\"\n protocol.broadcast_chat(\"AI %s!\" % state)\n protocol.irc_say(\"* %s %s AI\" % (connection.name, state))",
"def toggle_unread_only(self):\n was_unread_only = self.show_unread_only\n self.action_show_unread_only.setChecked(not was_unread_only)\n self.action_show_all.setChecked(was_unread_only)",
"def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)",
"def action_checkbox(self):\n self.checkbox_online_var = not self.checkbox_online_var",
"def toggle(self, id):\n e = self.objectmanager.objects.get(id=id)\n e.enabled = not e.enabled\n e.save()\n return render({\"id\": id, \"status\": e.enabled})",
"def ref_user_flag(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return True\n except ObjectDoesNotExist:\n return False",
"def togglePopup(userid, args):\r\n player = players[userid]\r\n player['popup'] = 1 - player['popup']\r\n \r\n \"\"\" Tell them the message \"\"\"\r\n tokens = {}\r\n tokens['status'] = {1:\"on\", 0:\"off\"}[player['popup']]\r\n tell(userid, 'toggle popup', tokens)",
"def step_impl_the_msg_to_is_set_to_internal_specific_user(context):\n step_impl_the_msg_to_is_set_to(context, context.bdd_helper.internal_id_specific_user)",
"def send_user_invitation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"send_user_invitation\")",
"def confirm_email(self):\n self.active = True\n self.save()"
]
| [
"0.62338006",
"0.5970438",
"0.5821275",
"0.5793889",
"0.56367135",
"0.56040823",
"0.54553586",
"0.5432163",
"0.53502816",
"0.5339276",
"0.53326595",
"0.53326595",
"0.5331724",
"0.5324913",
"0.527716",
"0.5273758",
"0.52581966",
"0.5248645",
"0.5247283",
"0.5236965",
"0.52249074",
"0.52165955",
"0.5194507",
"0.51844054",
"0.518231",
"0.5180696",
"0.51547766",
"0.5140465",
"0.5130915",
"0.51306623"
]
| 0.8057394 | 0 |
Toggles the jobmail field in the Onlineuser object. | def toggle_jobmail(request):
if request.is_ajax():
if request.method == 'POST':
request.user.jobmail = not request.user.jobmail
request.user.save()
return HttpResponse(status=200, content=json.dumps({'state': request.user.jobmail}))
raise Http404 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_infomail(request):\n if request.is_ajax():\n if request.method == 'POST':\n request.user.infomail = not request.user.infomail\n request.user.save()\n\n return HttpResponse(status=200, content=json.dumps({'state': request.user.infomail}))\n raise Http404",
"def confirm_email(self):\n self.active = True\n self.save()",
"def toggle_active(self, user):\n user.active = not user.active\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True",
"def toggle_active(self, user):\n user.active = not user.active\n return True",
"def change_email(self, token):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n if data.get(\"user_id\") != self.id:\n return False\n new_email = data.get(\"new_email\")\n if new_email is None:\n return False\n # check to see if another user has this email\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = data.get(\"new_email\")\n db.session.add(self)\n return True",
"def test_mod_email(self, mapp, existing_user_id, url_of_liveserver):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n email_address = existing_user_id + '_' + str(id(self)) + \"@devpi.net\"\n mapp.modify_user(user=existing_user_id, email=email_address)\n # Verify that the email was indeed changed.\n json = mapp.getjson(url_of_liveserver)\n assert json['result'][existing_user_id]['email'] == email_address",
"def home_edituser():\n\tpass",
"def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key",
"def set_receive_mail(self):\n self.__mail = True",
"async def change_email(self, new_email, password):\n data = {\"password\": password, \"emailAddress\": new_email}\n e = await self.request.request(url='https://accountsettings.roblox.com/v1/email', method='post', data=data)\n return e",
"def change_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account - Login Changed\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n\n message = \"A request has been made to change the login information for your ECE/CIS %s account: %s.\\n\" % (domain, user.username)\n message = \"This request may have been a change to any of the following: password, login shell, or GECOS information\\n\\n\"\n message += \"If you did not make this request or feel this messag was sent in error, \\n\"\n message += \"please contact ECE/CIS Labstaff immediately at: %s\\n\\n\" % helprequest\n message += \"If you are unable to log into your account, you my post a ticket as an outsider\\n\\n\"\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)",
"def email_selected(self, request, queryset):\n pass",
"def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)",
"def action_checkbox(self):\n self.checkbox_online_var = not self.checkbox_online_var",
"def save(self, commit=False):\n mail_result = self.send_email()\n if mail_result:\n self.instance.is_admin_notified = True\n\n contact = super().save(commit=commit)\n\n return contact",
"def toggle_in_room(self,new_bool):\n self.in_room = new_bool",
"def post(self, request, *args, **kwargs):\n usuario=Usuario.objects.get(id=self.kwargs['pk'])\n if request.POST[\"esta_aprobado\"] == 'True':\n CorreoMail(\"Aprobado\",\"Usted fue apobado en el sistema, bienvenido!!\",usuario.user.email )\n return super(ActualizarUser, self).post(request, **kwargs)",
"def activation_toggle(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n employee.active = not employee.active\n employee.date_edited = datetime.utcnow()\n employee.save()\n return jsonify({\n 'msg': 'OK'\n })",
"def toggle(self, *_):\r\n \r\n global ac\r\n if self.author_f_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_m_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_l_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n else:\r\n self.add_a['state'] = 'disabled'",
"def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()",
"def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)",
"def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))",
"def reinvite_user(self, user, email):\n if self.is_moderator and self.has_perm('accounts.invite_user'):\n # Reset email, set a new token and update decision datetime\n user.email = email\n user.auth_token = generate_unique_id()\n user.decision_datetime = timezone.now()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied",
"def login_on_activation(sender, user, request, **kwargs):\n user.backend = 'storybase_user.auth.backends.EmailModelBackend'\n login(request, user)",
"def __str__(self):\n return self.user.email",
"def change_email_settings(request):\r\n user = request.user\r\n\r\n course_id = request.POST.get(\"course_id\")\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n receive_emails = request.POST.get(\"receive_emails\")\r\n if receive_emails:\r\n optout_object = Optout.objects.filter(user=user, course_id=course_key)\r\n if optout_object:\r\n optout_object.delete()\r\n log.info(u\"User {0} ({1}) opted in to receive emails from course {2}\".format(user.username, user.email, course_id))\r\n track.views.server_track(request, \"change-email-settings\", {\"receive_emails\": \"yes\", \"course\": course_id}, page='dashboard')\r\n else:\r\n Optout.objects.get_or_create(user=user, course_id=course_key)\r\n log.info(u\"User {0} ({1}) opted out of receiving emails from course {2}\".format(user.username, user.email, course_id))\r\n track.views.server_track(request, \"change-email-settings\", {\"receive_emails\": \"no\", \"course\": course_id}, page='dashboard')\r\n\r\n return JsonResponse({\"success\": True})",
"def set_as_type_user(self):\n self.type = MessageTypes.USER",
"async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")"
]
| [
"0.6270449",
"0.5669173",
"0.5585548",
"0.555343",
"0.55495137",
"0.55313456",
"0.55053437",
"0.5502035",
"0.54168755",
"0.53954124",
"0.5379221",
"0.53549236",
"0.5352222",
"0.5332456",
"0.5301001",
"0.52917707",
"0.52809656",
"0.5271293",
"0.52669114",
"0.5261871",
"0.5240435",
"0.5223935",
"0.52157617",
"0.51782227",
"0.5163422",
"0.515555",
"0.5142799",
"0.5120211",
"0.5106119",
"0.5090824"
]
| 0.81715626 | 0 |
The difference between plain_user_search and the other user search is that it exposes only id and name. | def api_plain_user_search(request):
if request.GET.get('query'):
users = search_for_plain_users(request.GET.get('query'))
return JsonResponse(users, safe=False)
return render_json(error=u'Mangler søkestreng') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_search_partial():\n username = request.args.get('search') or ''\n\n ret = []\n for user in User.query.filter(User.name.ilike(username + \"%\")):\n ret.append({\n \"id\": user.id,\n \"name\": user.name\n })\n return json.dumps(ret)",
"def search_user(request: Request) -> Response:\n if not request.query_params.get('query'):\n return Response({'type': 'error', 'data': {'message': 'Invalid username query'}})\n\n users = User.objects.filter(\n username__contains=request.query_params.get('query'))\n return Response(UserSerializer(instance=users, many=True).data)",
"def search_user(message, search):\n found = []\n search = search.lower()\n users = hf.get_users()\n for user in users:\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], user[\"id\"]))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))",
"def get(self, request, search_string=None):\n query = SearchQuery(search_string)\n\n username_vector = SearchVector('username', weight='A')\n first_name_vector = SearchVector('first_name', weight='B')\n last_name_vector = SearchVector('last_name', weight='B')\n email_vector = SearchVector('email', weight='B')\n vectors = username_vector + first_name_vector + last_name_vector + email_vector\n qs = User.objects\n qs = qs.annotate(search=vectors).filter(search=query)\n qs = qs.annotate(rank=SearchRank(vectors, query)).order_by('-rank')\n print(qs)\n return Response(UserSerializer(qs, many=True).data)",
"def search_user(user, conditions=[],fields=[], filters={}):\n return db((db.auth_user.first_name.like(user+'%')),*conditions).select(*fields,**filters)",
"def search_user_by_id(self,id, cursor):\n sql = \"SELECT * FROM users WHERE userid = %s\"\n cursor.execute(sql, (id,))\n return cursor",
"def resolve_users(self, info, **kwargs):\n user = info.context.user # I dont want to search myself\n username = kwargs.get('username', \"\")\n return CustomUser.objects.filter(username__contains=username).exclude(username=user.username)",
"def test_iodi_get_user_search(self):\n\n # IODI a new `User` record.\n user_id = self.dal.iodi_user(\n auth0_user_id=\"auth0|12345678\",\n email=\"[email protected]\",\n )\n\n # IODI a new `Search` record.\n search_id = self.dal.iodu_search(\n search_uuid=uuid.uuid4(),\n title=\"Search Title\",\n gender=GenderType.FEMALE,\n year_beg=2013,\n year_end=2023,\n age_beg=10,\n age_end=30,\n )\n\n # IODI a new `UserSearch` record.\n obj_id = self.dal.iodi_user_search(\n user_id=user_id,\n search_id=search_id,\n )\n\n self.assertEqual(obj_id, 1)\n\n # Retrieve the new record.\n obj = self.dal.get(UserSearch, obj_id) # type: UserSearch\n\n # Assert that the different fields of the record match.\n self.assertEqual(obj.user_search_id, 1)\n self.assertEqual(obj.user_id, user_id)\n self.assertEqual(obj.search_id, search_id)",
"def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):\n if context is None: \n context={}\n ids= []\n if len(name) >= 2:\n ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)\n if not ids:\n ids = self.search(cr,uid,[('name',operator,name)] + args, limit=limit, context=context)\n return self.name_get(cr,uid,ids,context=context)",
"def test_list_users_with_search(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.get(reverse('user-list') + '?search=chuck')\n self.assertEqual(json.loads(response.content)['count'], 1)\n\n # Users are ordered alphabetically by email\n first_user = json.loads(response.content)['results'][0]\n self.assertEqual(first_user['email'], self.admin.email)\n\n # Check the system doesn't return attributes not expected\n attributes = [\n 'id',\n 'url',\n 'email',\n 'first_name',\n 'last_name',\n 'is_active',\n 'phone',\n 'other_phone',\n 'is_superuser',\n 'is_staff',\n 'university',\n 'last_login',\n 'date_joined',\n 'academic_level',\n 'academic_field',\n 'gender',\n 'language',\n 'birthdate',\n 'groups',\n 'user_permissions',\n 'tickets',\n 'membership',\n 'membership_end',\n 'city',\n 'personnal_restrictions',\n 'academic_program_code',\n 'faculty',\n 'student_number',\n 'volunteer_for_workplace',\n 'hide_newsletter',\n 'is_in_newsletter',\n 'number_of_free_virtual_retreat',\n 'membership_end_notification',\n 'get_number_of_past_tomatoes',\n 'get_number_of_future_tomatoes',\n 'last_acceptation_terms_and_conditions',\n 'tomato_field_matrix',\n 'current_month_tomatoes',\n ]\n for key in first_user.keys():\n self.assertTrue(\n key in attributes,\n 'Attribute \"{0}\" is not expected but is '\n 'returned by the system.'.format(key)\n )\n attributes.remove(key)\n\n # Ensure the system returns all expected attributes\n self.assertTrue(\n len(attributes) == 0,\n 'The system failed to return some '\n 'attributes : {0}'.format(attributes)\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_12_admin_user_search(self):\r\n # Create two users\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n # Signin with admin user\r\n self.signin()\r\n data = {'user': 'juan'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Juan Jose\" in res.data, \"username should be searchable\"\r\n # Check with uppercase\r\n data = {'user': 'JUAN'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n err_msg = \"username search should be case insensitive\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Search fullname\r\n data = {'user': 'Jose'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Juan Jose\" in res.data, \"fullname should be searchable\"\r\n # Check with uppercase\r\n data = {'user': 'JOsE'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n err_msg = \"fullname search should be case insensitive\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Warning should be issued for non-found users\r\n data = {'user': 'nothingExists'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n warning = (\"We didn't find a user matching your query: <strong>%s</strong>\" %\r\n data['user'])\r\n err_msg = \"A flash message should be returned for non-found users\"\r\n assert warning in res.data, err_msg",
"def search(self, search_query: str, limit: int = 25) -> List[User]:\n if len(search_query) < 3:\n return SpeckleException(\n message=\"User search query must be at least 3 characters\"\n )\n\n query = gql(\n \"\"\"\n query UserSearch($search_query: String!, $limit: Int!) {\n userSearch(query: $search_query, limit: $limit) {\n items {\n id\n name\n bio\n company\n avatar\n verified\n }\n }\n }\n \"\"\"\n )\n params = {\"search_query\": search_query, \"limit\": limit}\n\n return self.make_request(\n query=query, params=params, return_type=[\"userSearch\", \"items\"]\n )",
"def search(cls, identifient, by=\"id\", type_retrun=\"object\"):\n if by == \"id\":\n sql_query = \"SELECT * FROM user WHERE id_user=?\"\n elif by == \"username\":\n sql_query = \"SELECT * FROM user WHERE username=?\"\n elif by == \"email\":\n sql_query = \"SELECT * FROM user WHERE email=?\"\n\n db = database.db_connection()\n cursor = db.cursor()\n cursor.execute(sql_query, (identifient, ))\n row = cursor.fetchone()\n cursor.close()\n\n # we check what return type is expected...\n if type_retrun == \"object\":\n user_dict = User.__row_to_dict(row)\n return User(**user_dict)\n elif type_retrun == \"dict\":\n return User.__row_to_dict(row)\n elif type_retrun == \"row\":\n return row\n else:\n raise ValueError(\"type_retrun parameter must be: 'object' or 'dict' or 'row'\")",
"def __searchUser(self, args = []):\n\n try:\n if len(args) == 0:\n self.__cm.send(p.T_QUERY, '')\n else:\n self.__cm.send(p.T_QUERY, args)\n\n reply = self.__cm.receive()\n\n if (reply is not None and reply.type == p.T_RESULT):\n [ self.__parseUserRecord(r) for r in reply.payload.split() ] \n self.__agent.printList(self.__userList)\n else:\n raise Exception, \"An error occured while fetching user data! The user list is outdated.\"\n \n except Exception, e:\n self.__handleError('List', e)",
"def search(user, param):\r\n if len(param) <= 2:\r\n return bad_request(error_messages['too_short'])\r\n return search_user(param.lower(), user)",
"def find_user(self, *args, **kwargs):\n raise NotImplementedError",
"def get_queryset(self):\n return filterUsersByName( self.request.query_params.get('username', None) )",
"def list_users():\n\n search = request.args.get('q')\n\n if not search:\n users = User.query.all()\n else:\n users = User.query.filter(User.username.like(f\"%{search}%\")).all()\n\n return render_template('users/index.html', users=users)",
"def search_user(message, search):\n found = []\n search = search.lower()\n for userid, user in iteritems(message._client.users):\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], userid))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))",
"def resources_for_index_query(self, search_text, session):\n query = session.query(self.User).order_by(self.User.name)\n if search_text:\n query = query.filter(self.User.name.ilike(\"%%%s%%\" % search_text))\n\n return query",
"def test_name_search(self):\n # A name in the database\n search_string = \"Umut\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the name field of the result\n self.assertEqual(search_string,search_result[0]['name'],\"It doesn't return the user with the name {}\".format(search_string))",
"def search_by_name(request):\n if 'keyword' in request.GET:\n keyword = request.GET['keyword']\n try: \n identity = Base.objects.get(user=request.user).identity\n except:\n identity = 'AnonymousUser'\n template_var = {\n \"identity\":identity,\n \"keyword\":keyword,\n }\n print identity\n return render_to_response('search/search_by_name.html', template_var,\n context_instance=RequestContext(request))\n\n else:\n if 'data' in request.GET:\n data = json.loads(request.GET['data'])\n keyword = data['keyword']\n\n searchresult = _searchresult_name(request, keyword=keyword)\n template_var = {\n \"searchresult\":searchresult,\n }\n else:\n return render_to_response('search/search_by_namae.html',template_var,\n context_instance=RequestContext(request))\n\n return JsonResponse(template_var)",
"def usersearch(q_user, page=0, splash=True, identify='forUsername'):\n\n user, _, term = (x.strip() for x in q_user.partition(\"/\"))\n if identify == 'forUsername':\n ret = channelfromname(user)\n if not ret: # Error\n return\n user, channel_id = ret\n\n else:\n channel_id = user\n\n # at this point, we know the channel id associated to a user name\n usersearch_id('/'.join([user, channel_id, term]), page, splash)",
"def queryuser(q, limit=10):\n _, idx1 = idquery.query(q)\n _, idx2 = nicknamequery.query(q)\n idx = list(set(idx1 + idx2))\n if len(idx)>999:\n idx = idx[:999]\n rst = db_session.query(User.id, User.nickname).filter(User.index.in_(idx)).\\\n order_by(User.score.desc(), User.active.asc()).limit(limit).all()\n return [{'id':itm[0], 'name':itm[1]} for itm in rst]",
"def search(self, *args, **kwargs): # real signature unknown\n pass",
"def query_users(request):\r\n if(any(param not in [\"name\",'encryption',\"uid\",\"gid\",\"comment\",\"home\",\"shell\"] for param in request.GET)):\r\n badRequest(\"Parameters incorrect\")\r\n user = User()\r\n return HttpResponse(json.dumps(user.query(request.GET)))",
"def get_user(id):\n pass",
"def search(username):\n if not UserExists(username):\n return \"False\"\n if g.user:\n return render_template('search.html',user=username)\n return redirect(url_for('login'))",
"def search(self, *args, **kwargs):",
"def get(self):\n queries = {\"wildcard_properties\": []}\n\n fullname_query = request.args.get(\"fullName\", None)\n email_query = request.args.get(\"email\", None)\n\n if fullname_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{fullname_query}')\"\n queries[\"wildcard_properties\"].append(\"fullName\")\n if email_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{email_query}')\"\n queries[\"wildcard_properties\"].append(\"email\")\n\n users = User.filter(limit=10, **queries)\n response = UserListSchema(many=True).dumps(users).data\n\n return jsonify_response(json.loads(response), 200)"
]
| [
"0.7153319",
"0.6538987",
"0.63925016",
"0.63028264",
"0.6294686",
"0.6273909",
"0.6267887",
"0.62233007",
"0.61973494",
"0.6170636",
"0.61243373",
"0.61071986",
"0.61001164",
"0.6088135",
"0.6076058",
"0.6064127",
"0.6049083",
"0.6042824",
"0.60098755",
"0.59887075",
"0.59506243",
"0.5940869",
"0.5933154",
"0.5913474",
"0.5798574",
"0.57880825",
"0.57781833",
"0.57701415",
"0.5768102",
"0.57675505"
]
| 0.6853136 | 1 |
Prints every other item in sequence. | def everyotheritem(n):
print(n[1::2])
return(n[1::2]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def every_other(seq):\n every_other = seq[::2]\n return every_other",
"def every_other(seq):\n return seq[::2]",
"def every_other(seq):\n seq = seq[::2]\n return seq",
"def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1",
"def print_evens(n):\n print(f\"Printing Evens below {n}\")\n print(\"-------------------\")\n for i in range(0, n): # n iterations\n if i % 2 == 0:\n print(i)",
"def every_other_new (list):\n return list[::2]",
"def print_even(file_name):\n with open('../test_files/' + file_name, 'r') as infile:\n #initialising 1 to 1 so that it evaluate from line 1\n i = 1\n for x in infile.readlines():\n #performing operation to find the even number entry\n if i%2 == 0:\n #actual printing of lines\n print(x)\n #increamenting\n i+=1",
"def odd_generator(limit):\n current = 1\n while current < limit:\n yield current\n current = current + 2",
"def yield2(l):\n\n l = list(l)\n\n for x in range(0,len(l),2):\n try:\n yield [l[x],l[x+1]]\n except IndexError:\n yield [l[x],None]",
"def other_lines(line):\r\n res = \"\"\r\n for j, i in enumerate(line):\r\n res += i\r\n if j != len(line) - 1:\r\n res += '|'\r\n print(res)",
"def print_board(b):\n print(\" A|B|C\")\n for i,r in enumerate(b):\n print(\"{}\".format(i+1),\"|\".join(r))\n if i<2:\n print(\"---+-+-\")",
"def print(self,n):\r\n c = 0\r\n for i in n:\r\n for j in i:\r\n if c == 9:\r\n print()\r\n c = 0\r\n c = c+1\r\n print(j, end=\" \")",
"def print_m(seq1, seq2, m):\n seq1 = '-' + seq1; seq2 = '-' + seq2\n print()\n print(' '.join(['%3s' % i for i in ' '+seq2]))\n for i, p in enumerate(seq1):\n line = [p] + [m[i][j] for j in range(len(seq2))]\n print(' '.join(['%3s' % i for i in line]))\n print()\n return",
"def number_print(items: iter) -> None:\n for number, option in enumerate(items, 1):\n print(f\"[{number}] {option}\")",
"def remove_every_other_item(seq):\n seq_copy = seq [0::2]\n return seq_copy",
"def remove_every_other(seq):\n # Make a copy of the original sequence and step by 2\n new_seq = seq[::2]\n\n return new_seq",
"def printstringtp2(xs): #Printing function\n for x in range(xs+1): #Outer loop for line iteration\n print(\"\\n\")\n for y in range(x):\n print(y,end=' ')",
"def for_loop_print_evens():\n for counter in range(2, 11, 2):\n print(counter)\n\n print(\"Goodbye!\")",
"def print(self):\n current = self.head.next\n for i in range(0,self.count):\n print(current.item)\n current = current.next",
"def sequential_print_statements():\n pass",
"def print_block():\n do_twice(do_block)\n do_twice(do_block)\n print_column()",
"def imprimir_opciones(lista):\n for i, elem in enumerate(lista):\n print(\"{}. {}\".format(i + 1, elem))",
"def odd_only(CC):\n if CC % 2 == 0 & CC > 0:\n CC -= 1\n if CC < 0:\n CC = 0\n while CC % 2 == 1:\n yield CC\n CC -= 2\n if CC == -1:\n break",
"def entiers_2(i: int, j: int) -> None:\n\n if i > j:\n raise ValueError(\"i must be less than or equal to j\")\n print('-'.join([str(i) for i in range(i, j)]))",
"def displaySides(self):\n\n for item in range(self.number_of_sides):\n print(f\"Side {item + 1} is {self.sides[item]}\")",
"def double_even_pos(self):\n after_double = [i*2 if i*2 < 9 else i * 2 - 9 for i in self.card_num[::-2]]\n\n return after_double + self.card_num[::2]",
"def print_block():\n do_twice(do_block)\n print_column()",
"def prow(x, y=2):\n for i in range(y):\n print(\"+\", \"- \" * x, end=\"\")\n print(\"+\")",
"def remove_every_other(lst):\n return [ea for ea in lst if lst.index(ea) % 2 == 0 ]",
"def print_twice(pipe, ofile, last_line):\n\n # Utility subroutine to print listing data both to stdout\n # and to the listing file, accessed via the ofile handle\n lastlineempty = False # JPG addition here as opposed to argument\n last_dat = b''\n for line in iter(pipe.readline, b''):\n dat = line.rstrip()\n # This IF statement just avoid printing a lot of blank lines\n # at the end of the run, before Python realises that the process\n # has stopped.\n if dat == b'':\n if not lastlineempty:\n print(dat.decode('utf-8'))\n if ofile != None:\n # Write to sortiefile (if requested)\n ofile.write(dat.decode('utf-8')+'\\n')\n # Set to avoid printing multiple consecutive newlines\n lastlineempty = True\n else:\n lastlineempty = False\n print(dat.decode('utf-8'))\n if ofile != None:\n # Write to sortiefile (if requested)\n ofile.write(dat.decode('utf-8')+'\\n')\n last_dat = dat\n\n last_line.append(last_dat)"
]
| [
"0.67592245",
"0.6604033",
"0.6525325",
"0.59878826",
"0.59219635",
"0.59175736",
"0.5881483",
"0.57112664",
"0.57045525",
"0.56148934",
"0.55368537",
"0.5527244",
"0.5519429",
"0.54997087",
"0.5485153",
"0.54672766",
"0.54497844",
"0.54383534",
"0.53987724",
"0.53545326",
"0.5343075",
"0.5334515",
"0.53273994",
"0.53007525",
"0.5281376",
"0.52779126",
"0.5265519",
"0.52541286",
"0.52482545",
"0.5222055"
]
| 0.73784566 | 0 |
Select the full-scale values of the gyroscope from the given values | def gyro_scale_selection(self):
GYRO_SCALE = (LSM330_GYRO_DEFAULT | LSM330_GYRO_SCALE_2000)
bus.write_byte_data(LSM330_GYRO_ADDRESS, LSM330_CTRL_REG4_G, GYRO_SCALE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gyroscope(self):\n raw = self.read_raw()\n # Compensate values depending on the resolution\n factor = 0\n if self._gyro_range == GYRO_RANGE_250DPS:\n factor = _GYRO_SENSITIVITY_250DPS\n elif self._gyro_range == GYRO_RANGE_500DPS:\n factor = _GYRO_SENSITIVITY_500DPS\n elif self._gyro_range == GYRO_RANGE_1000DPS:\n factor = _GYRO_SENSITIVITY_1000DPS\n elif self._gyro_range == GYRO_RANGE_2000DPS:\n factor = _GYRO_SENSITIVITY_2000DPS\n factor *= DEGREE_TO_RAD\n return [x * factor for x in raw]",
"def get_gyro_data(self):\n x = self.read_i2c_word(self.GYRO_XOUT0)\n y = self.read_i2c_word(self.GYRO_YOUT0)\n z = self.read_i2c_word(self.GYRO_ZOUT0)\n\n gyro_scale_modifier = None\n gyro_range = self.read_gyro_range(True)\n\n if gyro_range == self.GYRO_RANGE_250DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n elif gyro_range == self.GYRO_RANGE_500DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG\n elif gyro_range == self.GYRO_RANGE_1000DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1000DEG\n elif gyro_range == self.GYRO_RANGE_2000DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2000DEG\n else:\n print(\"Unkown range - gyro_scale_modifier set to self.GYRO_SCALE_MODIFIER_250DEG\")\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n\n x = x / gyro_scale_modifier * self.DEG2RAD\n y = y / gyro_scale_modifier * self.DEG2RAD\n z = z / gyro_scale_modifier * self.DEG2RAD\n\n return [x, y, z]",
"def read_values(self):\n temp, acc, gyro = self.read_ag_data()\n tempc = lsm9ds1.TEMPC_0 + temp * lsm9ds1.TEMP_SENSOR_SCALE\n tempf = (tempc * 9/5) + 32\n acc = [c * lsm9ds1.ACC_SENSOR_SCALE for c in acc]\n gyro = [g * lsm9ds1.DPS_SENSOR_SCALE for g in gyro]\n return tempf, acc, gyro",
"def get_gyro_data(self):\n\t\tx = self.read_i2c_word(self.GYRO_XOUT0)\n\t\ty = self.read_i2c_word(self.GYRO_YOUT0)\n\t\tz = self.read_i2c_word(self.GYRO_ZOUT0)\n\n\t\tgyro_scale_modifier = None\n\t\tgyro_range = self.read_gyro_range(True)\n\n\t\tif gyro_range == self.GYRO_RANGE_250DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n\t\telif gyro_range == self.GYRO_RANGE_500DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG\n\t\telif gyro_range == self.GYRO_RANGE_1000DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1000DEG\n\t\telif gyro_range == self.GYRO_RANGE_2000DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2000DEG\n\t\telse:\n\t\t\tprint(\"Unkown range - gyro_scale_modifier set to self.GYRO_SCALE_MODIFIER_250DEG\")\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n\n\t\tx = x / gyro_scale_modifier\n\t\ty = y / gyro_scale_modifier\n\t\tz = z / gyro_scale_modifier\n\n\t\treturn {'x': x, 'y': y, 'z': z}",
"def gyroscope_sensor(axis):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t#get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Imu', Imu, 2)\n\tangular_velocity = data.angular_velocity\n\tif axis == \"x\":\n\t\tresult = angular_velocity.x\n\telif axis == \"y\":\n\t\tresult = angular_velocity.y\n\telse:\n\t\tresult = angular_velocity.z\n\n\tdelete_sensor(sensor_name)\n\treturn result",
"def read_gyroscope(self):\n data = self.ag.read_bytes(Register.OUT_X_G, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)",
"def max_angular_acceleration():",
"def readgyro(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_X_L_G)\r\n\t\tdata1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_X_H_G)\r\n\t\t\r\n\t\txGyro = data1 * 256 + data0\r\n\t\tif xGyro > 32767 :\r\n\t\t\txGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_G(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Y_L_G)\r\n\t\tdata1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Y_H_G)\r\n\t\t\r\n\t\tyGyro = data1 * 256 + data0\r\n\t\tif yGyro > 32767 :\r\n\t\t\tyGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_G(0x2C), 2 bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Z_L_G)\r\n\t\tdata1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Z_H_G)\r\n\t\t\r\n\t\tzGyro = data1 * 256 + data0\r\n\t\tif zGyro > 32767 :\r\n\t\t\tzGyro -= 65536\r\n\t\t\r\n\t\treturn {'x' : xGyro, 'y' : yGyro, 'z' : zGyro}",
"def getGyroReadings():\n\n gyro_readings = RoboCaller().call(\"getGyroReadings\", \"int\")\n for i in range(len(gyro_readings)):\n gyro_readings[i] = (gyro_readings[i] + 2**15) % 2**16 - 2**15\n return gyro_readings",
"def _get_values(self) -> ty.List[float]:\r\n ...",
"def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray:\r\n transformed_xvalues = np.subtract(xvalues, self.centre)\r\n return self.normalization * np.multiply(\r\n self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues))\r\n )",
"def getValues(self):\n return [self.scale_min, self.scale_max]",
"def read_gyro(self):\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_X_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_X_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\txGyro = data1 * 256 + data0\r\n\t\tif xGyro > 32767 :\r\n\t\t\txGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from L3DG20_REG_OUT_Y_L(0x2A), 2 bytes, Y-Axis LSB first\"\"\"\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Y_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Y_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tyGyro = data1 * 256 + data0\r\n\t\tif yGyro > 32767 :\r\n\t\t\tyGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from L3DG20_REG_OUT_Z_L(0x2C), 2 bytes, Z-Axis LSB first\"\"\"\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Z_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Z_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tzGyro = data1 * 256 + data0\r\n\t\tif zGyro > 32767 :\r\n\t\t\tzGyro -= 65536\r\n\t\t\r\n\t\treturn {'x' : xGyro, 'y' : yGyro, 'z' : zGyro}",
"def calibrate_sensors(self):\n for j in range(0, 10):\n self.read_sensors()\n for i in range(0, self.NUM_SENSORS):\n if self.calibratedMax[i] < self.sensorValues[i]:\n self.calibratedMax[i] = self.sensorValues[i]\n if self.calibratedMin[i] > self.sensorValues[i] and self.sensorValues[i] > 30:\n self.calibratedMin[i] = self.sensorValues[i]",
"def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray):\r\n transformed_xvalues = np.subtract(xvalues, self.centre)\r\n return self.normalization * np.multiply(\r\n self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues))\r\n )",
"def get_accel_data(self):\n x = self.read_i2c_word(self.ACCEL_XOUT0)\n y = self.read_i2c_word(self.ACCEL_YOUT0)\n z = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n accel_scale_modifier = None\n accel_range = self.read_accel_range(True)\n\n if accel_range == self.ACCEL_RANGE_2G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n elif accel_range == self.ACCEL_RANGE_4G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n elif accel_range == self.ACCEL_RANGE_8G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n elif accel_range == self.ACCEL_RANGE_16G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n else:\n print(\"Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\")\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n x = x / accel_scale_modifier\n y = y / accel_scale_modifier\n z = z / accel_scale_modifier\n\n x = x * self.GRAVITIY_MS2\n y = y * self.GRAVITIY_MS2\n z = z * self.GRAVITIY_MS2\n return [x, y, z]",
"def read_gyro_range(self, raw = False):\n raw_data = self.bus.read_byte_data(self.address, self.GYRO_CONFIG)\n\n if raw is True:\n return raw_data\n elif raw is False:\n if raw_data == self.GYRO_RANGE_250DEG:\n return 250\n elif raw_data == self.GYRO_RANGE_500DEG:\n return 500\n elif raw_data == self.GYRO_RANGE_1000DEG:\n return 1000\n elif raw_data == self.GYRO_RANGE_2000DEG:\n return 2000\n else:\n return -1",
"def select_arm(self):\n\n # Exploitation\n if random.uniform(0, 1) > self.epsilon:\n return np.argmax(self.values)\n\n # Exploration\n else:\n return random.randrange(len(self.values))",
"def max_linear_acceleration():",
"def read_gyro_range(self, raw = False):\n\t\traw_data = self.bus.read_byte_data(self.address, self.GYRO_CONFIG)\n\n\t\tif raw is True:\n\t\t\treturn raw_data\n\t\telif raw is False:\n\t\t\tif raw_data == self.GYRO_RANGE_250DEG:\n\t\t\t\treturn 250\n\t\t\telif raw_data == self.GYRO_RANGE_500DEG:\n\t\t\t\treturn 500\n\t\t\telif raw_data == self.GYRO_RANGE_1000DEG:\n\t\t\t\treturn 1000\n\t\t\telif raw_data == self.GYRO_RANGE_2000DEG:\n\t\t\t\treturn 2000\n\t\t\telse:\n\t\t\t\treturn -1",
"def set_value(self, device_name, val):\n epics.caput(device_name, val)\n\n\t\t#mu = mu\n\t\t#sig = math.sqrt(abs(mu))\n\t\t#y = (float(x)-mu)/(sig)",
"def get(self) -> list[float]:",
"def gyro(self):\n self._read(False)\n return self._readings.gyro",
"def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r",
"def query(axis):\n value = (sample[axis] if axis in sample\n else float(candor[axis]) if axis == \"detectorMaskMap\"\n else candor[axis])\n #print(f\"axis {axis} = {value}\")\n return value",
"def sample_mu(self, val) -> None:\n\n # get data\n data = self.data.reshape((1, -1))\n\n # get values\n gain = val.gain\n states = val.states\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n num_data = val.num_data\n num_rois = val.num_rois\n num_states = val.num_states\n\n # initialze variables\n num_vars = num_states + num_rois\n idx = np.where(val.mu_flor_mean > 0)[0]\n # shape\n shape = np.zeros((num_vars, 1))\n shape[:num_states, 0] = mu_flor_shape[:]\n shape[num_states:, 0] = mu_back_shape\n # scale\n scale = np.zeros((num_vars, 1))\n scale[idx, 0] = mu_flor_mean[idx] / mu_flor_shape[idx]\n scale[num_states:, 0] = (mu_back_mean / mu_back_shape)[:]\n\n # initialize a mu vector containing the variables we wish to sample, mu_flor and mu_back\n q = np.zeros((num_vars, 1))\n q[:num_states, 0] = mu_flor[:]\n q[num_states:, 0] = mu_back[:]\n q_old = q.copy()\n idy = q > 0 # keep track of which states are dark (we only sample bright states)\n num_var = q.shape[0]\n\n # hmc dynamics variables\n h = np.random.exponential() / 100\n masses = (1 + np.random.pareto(1, size=q.shape))\n masses_inv = np.zeros(shape=masses.shape) # negative mass is interpretted as an unchanging variable\n masses_inv[masses > 0] = 1 / masses[masses > 0]\n num_steps = np.random.poisson(25)\n\n # create populations array\n pops = np.zeros((num_vars, num_rois * num_data))\n \"\"\"\n pops is an array such that each element i, j corresponds to the \n multiplicitive factor in front of q[i] for data point j in the \n likelihood. For example, if in ROI 1 at time level 17 there are two\n fluorophores in the bright state, then we find the element, j,\n corresponding to ROI 1 and time level 17, and we find the element,\n i, corresponding to the bright state, and we set q[i,j]=2 (because\n there are two bright fluorophores), then we would find the i\n corresponding to the background brightness of ROI 1, and we would\n set this q[i,j]=1 (the multiplicitive factor in front of the \n background brightness is 1 when it is the corresponding ROI and 0\n otherwise).\n \"\"\"\n for r in range(num_rois):\n idx = np.arange(r*num_data, (r+1)*num_data)\n pops[:num_states, idx] = states_to_pops(states[r, :, :], num_states)\n pops[num_states + r, idx] = 1\n\n # the conditional probability for the mu vector\n def probability(q_, p_):\n if np.sum(q_ < 0) > 0:\n prob = -np.inf\n else:\n prob = (\n np.sum(stats.gamma.logpdf(data, a=q_.T @ pops, scale=gain)) # likelihood\n + np.sum(stats.gamma.logpdf(q_[idy], a=shape[idy], scale=scale[idy])) # prior\n + np.sum(stats.norm.logpdf(p_[idy], loc=0, scale=np.sqrt(masses[idy]))) # momentum\n )\n return prob\n\n # the gradient of the Hamiltonian with respect to the mu_vector\n def dH_dq(q_):\n if np.any(q_ < 0):\n \"\"\"\n In the event that q_new becomes negative, fast_digamma becomes\n slow. 
Since q should never be negative anyway, there is no\n need for further computation and we can skip this step knowing\n that this value of q will be rejected anyway.\n \"\"\"\n return q_\n q_new = np.zeros(q_.shape)\n q_new[idy] = (\n (shape[idy] - 1) / q_[idy] - 1 / scale[idy]\n + (pops @ (np.log(data / gain) - fast_digamma(q_.T @ pops)).T)[idy]\n )\n return q_new\n\n # sample momentum\n p = np.random.randn(num_var, 1) * np.sqrt(masses)\n p_old = p.copy()\n\n # run the HMC\n for i in range(num_steps):\n p = p + .5 * h * dH_dq(q)\n q = q + h * p * masses_inv\n p = p + .5 * h * dH_dq(q)\n\n # find acceptance ratio\n P_new = probability(q, p)\n P_old = probability(q_old, p_old)\n if (P_new - P_old) < np.log(np.random.rand()):\n q = q_old\n\n # update the new mu values\n val.mu_flor[:] = q[:num_states, 0]\n val.mu_back[:] = q[num_states:, 0]\n\n return",
"def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)",
"def random_value(self, selected_vals):\n pass",
"def get_vector_for_ueser(df, genres, userID, mean_genres):\n mean_for_user = get_mean_for_user(df, genres, userID)\n\n dict_mean_for_user = {}\n for genres, mean in mean_for_user.items():\n if mean > 0.0:\n dict_mean_for_user[genres] = mean_genres[genres] - mean\n else:\n dict_mean_for_user[genres] = mean\n\n\n mean_array = [v for v in dict_mean_for_user.values()]\n mean_array = np.array(mean_array)\n\n return dict_mean_for_user, mean_array",
"def gyro_datarate(self):\r\n\t\tGYRO_DATARATE = (LSM330_GYRO_DR_95 | LSM330_GYRO_BW_12_5 | LSM330_GYRO_ND | LSM330_GYRO_XAXIS | LSM330_GYRO_YAXIS | LSM330_GYRO_ZAXIS)\r\n\t\tbus.write_byte_data(LSM330_GYRO_ADDRESS, LSM330_CTRL_REG1_G, GYRO_DATARATE)"
]
| [
"0.6476268",
"0.60761327",
"0.59181947",
"0.5740605",
"0.56659377",
"0.5642714",
"0.54681754",
"0.5465321",
"0.5385613",
"0.5307664",
"0.5235304",
"0.52343214",
"0.51984936",
"0.5194715",
"0.5164173",
"0.5156035",
"0.51070654",
"0.50962615",
"0.5094679",
"0.509257",
"0.50734985",
"0.5060818",
"0.50476843",
"0.50463635",
"0.50427836",
"0.50177467",
"0.50111395",
"0.50027424",
"0.49958816",
"0.49933043"
]
| 0.6345249 | 1 |
Read data back from LSM330_OUT_X_L_G(0x28), 2 bytes X-Axis Mag LSB, X-Axis Mag MSB | def readgyro(self):
data0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_X_L_G)
data1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_X_H_G)
xGyro = data1 * 256 + data0
if xGyro > 32767 :
xGyro -= 65536
"""Read data back from LSM330_OUT_Y_L_G(0x2A), 2 bytes
Y-Axis Mag LSB, Y-Axis Mag MSB"""
data0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Y_L_G)
data1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Y_H_G)
yGyro = data1 * 256 + data0
if yGyro > 32767 :
yGyro -= 65536
"""Read data back from LSM330_OUT_Z_L_G(0x2C), 2 bytes
Z-Axis Mag LSB, Z-Axis Mag MSB"""
data0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Z_L_G)
data1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Z_H_G)
zGyro = data1 * 256 + data0
if zGyro > 32767 :
zGyro -= 65536
return {'x' : xGyro, 'y' : yGyro, 'z' : zGyro} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_magnetometer(self):\n data = self.mag.read_bytes(Register.OUT_X_L_M, 6)\n return lsm9ds1.to_vector(data)",
"def read_gyro(self):\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_X_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_X_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\txGyro = data1 * 256 + data0\r\n\t\tif xGyro > 32767 :\r\n\t\t\txGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from L3DG20_REG_OUT_Y_L(0x2A), 2 bytes, Y-Axis LSB first\"\"\"\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Y_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Y_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tyGyro = data1 * 256 + data0\r\n\t\tif yGyro > 32767 :\r\n\t\t\tyGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from L3DG20_REG_OUT_Z_L(0x2C), 2 bytes, Z-Axis LSB first\"\"\"\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Z_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Z_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tzGyro = data1 * 256 + data0\r\n\t\tif zGyro > 32767 :\r\n\t\t\tzGyro -= 65536\r\n\t\t\r\n\t\treturn {'x' : xGyro, 'y' : yGyro, 'z' : zGyro}",
"def read_ag_data(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 14)\n temp = lsm9ds1.to_int16(data[0:2])\n gyro = lsm9ds1.to_vector_left_to_right_hand_rule(data[2:8])\n acc = lsm9ds1.to_vector_left_to_right_hand_rule(data[8:14])\n return temp, acc, gyro",
"def Get_RawOutX_Value(self):\r\n l = self.__readFromRegister(self.__REG_R_OUT_X_L, 0xff)\r\n h_u2 = self.__readFromRegister(self.__REG_R_OUT_X_H, 0xff)\r\n h = bitOps.TwosComplementToByte(h_u2)\r\n if (h < 0):\r\n return (h*256 - l) * self.gain\r\n elif (h >= 0):\r\n return (h*256 + l) * self.gain",
"def read_acceleration(self):\n data = self.ag.read_bytes(Register.OUT_X_XL, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)",
"def read_gyroscope(self):\n data = self.ag.read_bytes(Register.OUT_X_G, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)",
"def get_light_sensors(self):\n x=self.send_packet_check_response('\\x50')\n LS=[]\n for i in range(8):\n a=bytearray(x[i*3:(i+1)*3])\n LS.append(a[0]|(a[1]&0xf)<<8)\n LS.append(a[1]>>4|a[2]<<4)\n return LS",
"def readaccl(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_H_A)\r\n\t\t\r\n\t\txAccl = data1 * 256 + data0\r\n\t\tif xAccl > 32767 :\r\n\t\t\txAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_M(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_H_A)\r\n\t\t\r\n\t\tyAccl = data1 * 256 + data0\r\n\t\tif yAccl > 32767 :\r\n\t\t\tyAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_M(0x2C), 2 bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_H_A)\r\n\t\t\r\n\t\tzAccl = data1 * 256 + data0\r\n\t\tif zAccl > 32767 :\r\n\t\t\tzAccl -= 65536\r\n\t\t\r\n\t\treturn {'x' : xAccl, 'y' : yAccl, 'z' : zAccl}",
"def read_raw(self):\n # Read gyro data from the sensor.\n res = self._bus.read(self._address, _GYRO_REGISTER_OUT_X_MSB, 8)\n # Parse out the gyroscope data as 16-bit signed data.\n raw_x = struct.unpack_from(\">h\", res[0:2])[0]\n raw_y = struct.unpack_from(\">h\", res[2:4])[0]\n raw_z = struct.unpack_from(\">h\", res[4:6])[0]\n return (raw_x, raw_y, raw_z)",
"def lo_band(self):\n return self._read(0x13, 7, 0x80)",
"def read_data(self):\n self.data = self.i2c.readfrom_mem(accel_address, x_data, 6)\n data_xyz = []\n for i in range(3):\n value = (self.data[2*i + 1] << 8) | self.data[2*i]\n data_xyz.append(self.get_acceleration(value) - self.offset[i])\n data_xyz.append(utime.ticks_ms() - self.time)\n return data_xyz",
"def lo_vcm(self):\n return self._read(0x12, 5, 0xE0)",
"def luminance(self) -> float:\n use_option = 1\n\n if use_option == 1:\n # 1st option\n msb = 0\n msb_2nd = 1\n while msb != msb_2nd:\n msb = self.read_byte_data(Reg.luminance_msb)\n lsb = self.read_byte_data(Reg.luminance_lsb)\n msb_2nd = self.read_byte_data(Reg.luminance_msb)\n\n elif use_option == 2:\n # 2nd option, which does not work on rpi OSError: [Errno 95] Operation not supported\n wr_msb = i2c_msg.write(self.device_addr, [Reg.luminance_msb])\n rd_msb = i2c_msg.read(self.device_addr, 1)\n wr_lsb = i2c_msg.write(self.device_addr, [Reg.luminance_lsb])\n rd_lsb = i2c_msg.read(self.device_addr, 1)\n self.i2c_rdwr(wr_msb, rd_msb, wr_lsb, rd_lsb)\n msb = ord(rd_msb.data)\n lsb = ord(rd_lsb.data)\n\n # Convert the data to lux\n exponent = (msb & 0xF0) >> 4\n mantissa = ((msb & 0x0F) << 4) | (lsb & 0x0F)\n return 2.0 ** exponent * mantissa * 0.045",
"def get_calib_data(self):\n\n accel_offset_read = self.con.receive(registers.ACCEL_OFFSET_X_LSB_ADDR, 6)\n accel_offset_read_x = (accel_offset_read[1] << 8) | accel_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n accel_offset_read_y = (accel_offset_read[3] << 8) | accel_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n accel_offset_read_z = (accel_offset_read[5] << 8) | accel_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n accel_radius_read = self.con.receive(registers.ACCEL_RADIUS_LSB_ADDR, 2)\n accel_radius_read_value = (accel_radius_read[1] << 8) | accel_radius_read[0]\n\n mag_offset_read = self.con.receive(registers.MAG_OFFSET_X_LSB_ADDR, 6)\n mag_offset_read_x = (mag_offset_read[1] << 8) | mag_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n mag_offset_read_y = (mag_offset_read[3] << 8) | mag_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n mag_offset_read_z = (mag_offset_read[5] << 8) | mag_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n mag_radius_read = self.con.receive(registers.MAG_RADIUS_LSB_ADDR, 2)\n mag_radius_read_value = (mag_radius_read[1] << 8) | mag_radius_read[0]\n\n gyro_offset_read = self.con.receive(registers.GYRO_OFFSET_X_LSB_ADDR, 6)\n gyro_offset_read_x = (gyro_offset_read[1] << 8) | gyro_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n gyro_offset_read_y = (gyro_offset_read[3] << 8) | gyro_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n gyro_offset_read_z = (gyro_offset_read[5] << 8) | gyro_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n calib_data = {'accel_offset': {'x': accel_offset_read_x, 'y': accel_offset_read_y, 'z': accel_offset_read_z}, 'accel_radius': accel_radius_read_value,\n 'mag_offset': {'x': mag_offset_read_x, 'y': mag_offset_read_y, 'z': mag_offset_read_z}, 'mag_radius': mag_radius_read_value,\n 'gyro_offset': {'x': gyro_offset_read_x, 'y': gyro_offset_read_y, 'z': gyro_offset_read_z}}\n\n return calib_data",
"def get_gyro_data(self):\n x = self.read_i2c_word(self.GYRO_XOUT0)\n y = self.read_i2c_word(self.GYRO_YOUT0)\n z = self.read_i2c_word(self.GYRO_ZOUT0)\n\n gyro_scale_modifier = None\n gyro_range = self.read_gyro_range(True)\n\n if gyro_range == self.GYRO_RANGE_250DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n elif gyro_range == self.GYRO_RANGE_500DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG\n elif gyro_range == self.GYRO_RANGE_1000DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1000DEG\n elif gyro_range == self.GYRO_RANGE_2000DEG:\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2000DEG\n else:\n print(\"Unkown range - gyro_scale_modifier set to self.GYRO_SCALE_MODIFIER_250DEG\")\n gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n\n x = x / gyro_scale_modifier * self.DEG2RAD\n y = y / gyro_scale_modifier * self.DEG2RAD\n z = z / gyro_scale_modifier * self.DEG2RAD\n\n return [x, y, z]",
"def read_light_bump(self, light_bump):\n data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)\n\n if len(data) == Bump.LIGHT_DATA_BYTES:\n return struct.unpack(\">h\", data)[0]\n else:\n return 0",
"def magnetometer(self):\n self.com.reset_input_buffer()\n self.com.write(self.HEADER + self.MAG + self.END)\n header = self.com.read(1)\n if header != self.HEADER:\n print \"Got bad header from Arduino\"\n raise ArduinoError()\n data = ''\n while len(data) < 15:\n read_data = self.com.read(1)\n if len(read_data) != 1:\n print \"Error reading from Arduino\"\n raise ArduinoError()\n data += read_data\n if read_data == self.END:\n break\n print \"Arduino mag data:\", data\n mag_x = int(data[:data.index(',')])\n mag_y = int(data[data.index(',') + 1:-1])\n return mag_x, mag_y",
"def ReadFPGA(self, getAddr):\n self.xem.UpdateWireOuts()\n ## Read 18-bit integer from FPGA\n if False :\n intValLo = self.xem.GetWireOutValue(getAddr) & 0xffff # length = 16-bit\n intValHi = self.xem.GetWireOutValue(getAddr + 0x01) & 0x0003 # length = 2-bit\n intVal = ((intValHi << 16) + intValLo) & 0xFFFFFFFF\n if intVal > 0x1FFFF:\n intVal = -(0x3FFFF - intVal + 0x1)\n outVal = float(intVal) / 0xFFFF # in mV De-Scaling factor = 0xFFFF\n\n ## Read 32-bit float\n outValLo = self.xem.GetWireOutValue(getAddr) & 0xffff # length = 16-bit\n outValHi = self.xem.GetWireOutValue(getAddr + 0x01) & 0xffff\n outVal = ((outValHi << 16) + outValLo) & 0xFFFFFFFF\n outVal = ConvertType(outVal, 'I', 'f')\n\n ## if getAddr == DATA_OUT_ADDR[0]:\n ## print \"%2.4f\" % outVal, \n ## print \"%d\" % (outValLo), \n \n return outVal",
"def lo_lf(self):\n return self._read(0x13, 5, 0x60)",
"def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size",
"def readData(self):\n if (self.model == 'GDS'):\n self.write(':ACQ'+str(ch)+':MEM?\\n')\n elif (self.model == 'TDS'):\n self.write('CURVe?\\n')\n\n # Check for the initial '#'; if not present, raise error.\n if (self.read(1) != '#'):\n raise Exception, \"Expected header not present\"\n\n # Read the data length indicator\n dataSize = int(self.read(int(self.read(1))))\n\n # extra steps for GDS\n if (self.model == 'GDS'):\n # subtract the 8 bytes we will read.\n dataSize -= 8\n # Read the sampling period\n hstep = struct.unpack('>f', self.read(4))[0]\n # also, fix hoff so it corresponds with that for TDS\n # FIXME: check with the scope at some point.\n hoff = hoff - float(dataSize/4) * hstep\n # Read 4 bytes to advance to the actual data: first byte\n # contains the channel and the three are not used,\n # according to the GDS800 manual.\n self.read(4)\n \n # Read data; TDS expects a 1-byte data, GDS expects 2-byte one.\n if (self.model == 'TDS'):\n data = list(struct.unpack('>'+str(dataSize)+'b',\n self.read(dataSize)))\n # TDS has a trailing '\\n' that should be drained.\n self.read(1)\n elif (self.model == 'GDS'):\n data = list(struct.unpack('>'+str(dataSize/2)+'h',\n self.read(dataSize)))\n\n return data",
"def read_data(self):\r\n\t\tdata = bus.read_byte_data(PCA9537_DEFAULT_ADDRESS, PCA9537_REG_OUTPUT)\r\n\t\t\r\n\t\t# Convert the data to 4-bits\r\n\t\tdata = (data & 0x0F)\r\n\t\t\r\n\t\tif (data & (2 ** self.pin)) == 0 :\r\n\t\t\tprint \"I/O Pin %d State is LOW\" %self.pin\r\n\t\telse :\r\n\t\t\tprint \"I/O Pin %d State is HIGH\" %self.pin",
"def read_data(self):\r\n\t\tdata = bus.read_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT)\r\n\t\t\r\n\t\t# Convert the data to 4-bits\r\n\t\tdata = (data & 0x0F)\r\n\t\t\r\n\t\tif (data & (2 ** self.pin)) == 0 :",
"def gyro_datarate(self):\r\n\t\tGYRO_DATARATE = (LSM330_GYRO_DR_95 | LSM330_GYRO_BW_12_5 | LSM330_GYRO_ND | LSM330_GYRO_XAXIS | LSM330_GYRO_YAXIS | LSM330_GYRO_ZAXIS)\r\n\t\tbus.write_byte_data(LSM330_GYRO_ADDRESS, LSM330_CTRL_REG1_G, GYRO_DATARATE)",
"def read_state(self):\n #build a list of 0x00 bytes to send through shift registers\n #existing data will be read as zeros are shifted in\n all_zeros = []\n for i in range(self.num_registers):\n all_zeros.append(0x00)\n\n #shift in the 0x00 data in order to read current data\n shift_reg_bytes = self.e.write_SPI_bytes_to_portA(all_zeros)\n\n #write the current data back into the shift registers\n self.e.write_SPI_bytes_to_portA(shift_reg_bytes)\n\n shift_reg_bytes.reverse()\n return shift_reg_bytes",
"def get_gyro_data(self):\n\t\tx = self.read_i2c_word(self.GYRO_XOUT0)\n\t\ty = self.read_i2c_word(self.GYRO_YOUT0)\n\t\tz = self.read_i2c_word(self.GYRO_ZOUT0)\n\n\t\tgyro_scale_modifier = None\n\t\tgyro_range = self.read_gyro_range(True)\n\n\t\tif gyro_range == self.GYRO_RANGE_250DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n\t\telif gyro_range == self.GYRO_RANGE_500DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG\n\t\telif gyro_range == self.GYRO_RANGE_1000DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1000DEG\n\t\telif gyro_range == self.GYRO_RANGE_2000DEG:\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2000DEG\n\t\telse:\n\t\t\tprint(\"Unkown range - gyro_scale_modifier set to self.GYRO_SCALE_MODIFIER_250DEG\")\n\t\t\tgyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG\n\n\t\tx = x / gyro_scale_modifier\n\t\ty = y / gyro_scale_modifier\n\t\tz = z / gyro_scale_modifier\n\n\t\treturn {'x': x, 'y': y, 'z': z}",
"def get_data(self):\n self.dev.write(1, 'A0')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data0.append((digit1 + 256*digit2)*5.0/1024)",
"def get_pressurexlsb(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_PRESSUREXLSB,\n 1,\n addrsize=8\n )\n val = 0\n val = val << 8 | byte_list[0]\n return val",
"def read_values(self):\n temp, acc, gyro = self.read_ag_data()\n tempc = lsm9ds1.TEMPC_0 + temp * lsm9ds1.TEMP_SENSOR_SCALE\n tempf = (tempc * 9/5) + 32\n acc = [c * lsm9ds1.ACC_SENSOR_SCALE for c in acc]\n gyro = [g * lsm9ds1.DPS_SENSOR_SCALE for g in gyro]\n return tempf, acc, gyro",
"def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X"
]
| [
"0.65370625",
"0.6098071",
"0.6070389",
"0.60361487",
"0.5879142",
"0.5865109",
"0.58422726",
"0.5837195",
"0.58229584",
"0.5771828",
"0.57190835",
"0.5493885",
"0.5431597",
"0.5373591",
"0.53586006",
"0.5351768",
"0.5290464",
"0.5274608",
"0.5246518",
"0.5233826",
"0.5191351",
"0.5185408",
"0.51845515",
"0.51605934",
"0.51580036",
"0.514303",
"0.5142628",
"0.51219225",
"0.5119528",
"0.51160717"
]
| 0.66605854 | 0 |
Read data back from LSM330_OUT_X_L_A(0x28), 2 bytes X-Axis Mag LSB, X-Axis Mag MSB | def readaccl(self):
data0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_L_A)
data1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_H_A)
xAccl = data1 * 256 + data0
if xAccl > 32767 :
xAccl -= 65536
"""Read data back from LSM330_OUT_Y_L_M(0x2A), 2 bytes
Y-Axis Mag LSB, Y-Axis Mag MSB"""
data0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_L_A)
data1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_H_A)
yAccl = data1 * 256 + data0
if yAccl > 32767 :
yAccl -= 65536
"""Read data back from LSM330_OUT_Z_L_M(0x2C), 2 bytes
Z-Axis Mag LSB, Z-Axis Mag MSB"""
data0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_L_A)
data1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_H_A)
zAccl = data1 * 256 + data0
if zAccl > 32767 :
zAccl -= 65536
return {'x' : xAccl, 'y' : yAccl, 'z' : zAccl} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_magnetometer(self):\n data = self.mag.read_bytes(Register.OUT_X_L_M, 6)\n return lsm9ds1.to_vector(data)",
"def read_acceleration(self):\n data = self.ag.read_bytes(Register.OUT_X_XL, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)",
"def get_light_sensors(self):\n x=self.send_packet_check_response('\\x50')\n LS=[]\n for i in range(8):\n a=bytearray(x[i*3:(i+1)*3])\n LS.append(a[0]|(a[1]&0xf)<<8)\n LS.append(a[1]>>4|a[2]<<4)\n return LS",
"def read_ag_data(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 14)\n temp = lsm9ds1.to_int16(data[0:2])\n gyro = lsm9ds1.to_vector_left_to_right_hand_rule(data[2:8])\n acc = lsm9ds1.to_vector_left_to_right_hand_rule(data[8:14])\n return temp, acc, gyro",
"def read_data(self):\n self.data = self.i2c.readfrom_mem(accel_address, x_data, 6)\n data_xyz = []\n for i in range(3):\n value = (self.data[2*i + 1] << 8) | self.data[2*i]\n data_xyz.append(self.get_acceleration(value) - self.offset[i])\n data_xyz.append(utime.ticks_ms() - self.time)\n return data_xyz",
"def Get_RawOutX_Value(self):\r\n l = self.__readFromRegister(self.__REG_R_OUT_X_L, 0xff)\r\n h_u2 = self.__readFromRegister(self.__REG_R_OUT_X_H, 0xff)\r\n h = bitOps.TwosComplementToByte(h_u2)\r\n if (h < 0):\r\n return (h*256 - l) * self.gain\r\n elif (h >= 0):\r\n return (h*256 + l) * self.gain",
"def readgyro(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_X_L_G)\r\n\t\tdata1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_X_H_G)\r\n\t\t\r\n\t\txGyro = data1 * 256 + data0\r\n\t\tif xGyro > 32767 :\r\n\t\t\txGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_G(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Y_L_G)\r\n\t\tdata1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Y_H_G)\r\n\t\t\r\n\t\tyGyro = data1 * 256 + data0\r\n\t\tif yGyro > 32767 :\r\n\t\t\tyGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_G(0x2C), 2 bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Z_L_G)\r\n\t\tdata1 = bus.read_byte_data(LSM330_GYRO_ADDRESS, LSM330_OUT_Z_H_G)\r\n\t\t\r\n\t\tzGyro = data1 * 256 + data0\r\n\t\tif zGyro > 32767 :\r\n\t\t\tzGyro -= 65536\r\n\t\t\r\n\t\treturn {'x' : xGyro, 'y' : yGyro, 'z' : zGyro}",
"def lo_band(self):\n return self._read(0x13, 7, 0x80)",
"def read_data(self):\r\n\t\tdata = bus.read_byte_data(PCA9537_DEFAULT_ADDRESS, PCA9537_REG_OUTPUT)\r\n\t\t\r\n\t\t# Convert the data to 4-bits\r\n\t\tdata = (data & 0x0F)\r\n\t\t\r\n\t\tif (data & (2 ** self.pin)) == 0 :\r\n\t\t\tprint \"I/O Pin %d State is LOW\" %self.pin\r\n\t\telse :\r\n\t\t\tprint \"I/O Pin %d State is HIGH\" %self.pin",
"def get_data(self):\n self.dev.write(1, 'A0')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data0.append((digit1 + 256*digit2)*5.0/1024)",
"def lo_lf(self):\n return self._read(0x13, 5, 0x60)",
"def lo_vcm(self):\n return self._read(0x12, 5, 0xE0)",
"def read_gyro(self):\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_X_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_X_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\txGyro = data1 * 256 + data0\r\n\t\tif xGyro > 32767 :\r\n\t\t\txGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from L3DG20_REG_OUT_Y_L(0x2A), 2 bytes, Y-Axis LSB first\"\"\"\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Y_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Y_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tyGyro = data1 * 256 + data0\r\n\t\tif yGyro > 32767 :\r\n\t\t\tyGyro -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from L3DG20_REG_OUT_Z_L(0x2C), 2 bytes, Z-Axis LSB first\"\"\"\r\n\t\tdata0 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Z_L)\r\n\t\tdata1 = bus.read_byte_data(L3DG20_DEFAULT_ADDRESS, L3DG20_REG_OUT_Z_H)\r\n\t\t\r\n\t\t# Convert the data\r\n\t\tzGyro = data1 * 256 + data0\r\n\t\tif zGyro > 32767 :\r\n\t\t\tzGyro -= 65536\r\n\t\t\r\n\t\treturn {'x' : xGyro, 'y' : yGyro, 'z' : zGyro}",
"def luminance(self) -> float:\n use_option = 1\n\n if use_option == 1:\n # 1st option\n msb = 0\n msb_2nd = 1\n while msb != msb_2nd:\n msb = self.read_byte_data(Reg.luminance_msb)\n lsb = self.read_byte_data(Reg.luminance_lsb)\n msb_2nd = self.read_byte_data(Reg.luminance_msb)\n\n elif use_option == 2:\n # 2nd option, which does not work on rpi OSError: [Errno 95] Operation not supported\n wr_msb = i2c_msg.write(self.device_addr, [Reg.luminance_msb])\n rd_msb = i2c_msg.read(self.device_addr, 1)\n wr_lsb = i2c_msg.write(self.device_addr, [Reg.luminance_lsb])\n rd_lsb = i2c_msg.read(self.device_addr, 1)\n self.i2c_rdwr(wr_msb, rd_msb, wr_lsb, rd_lsb)\n msb = ord(rd_msb.data)\n lsb = ord(rd_lsb.data)\n\n # Convert the data to lux\n exponent = (msb & 0xF0) >> 4\n mantissa = ((msb & 0x0F) << 4) | (lsb & 0x0F)\n return 2.0 ** exponent * mantissa * 0.045",
"def read_data(self):\r\n\t\tdata = bus.read_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT)\r\n\t\t\r\n\t\t# Convert the data to 4-bits\r\n\t\tdata = (data & 0x0F)\r\n\t\t\r\n\t\tif (data & (2 ** self.pin)) == 0 :",
"def get_calib_data(self):\n\n accel_offset_read = self.con.receive(registers.ACCEL_OFFSET_X_LSB_ADDR, 6)\n accel_offset_read_x = (accel_offset_read[1] << 8) | accel_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n accel_offset_read_y = (accel_offset_read[3] << 8) | accel_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n accel_offset_read_z = (accel_offset_read[5] << 8) | accel_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n accel_radius_read = self.con.receive(registers.ACCEL_RADIUS_LSB_ADDR, 2)\n accel_radius_read_value = (accel_radius_read[1] << 8) | accel_radius_read[0]\n\n mag_offset_read = self.con.receive(registers.MAG_OFFSET_X_LSB_ADDR, 6)\n mag_offset_read_x = (mag_offset_read[1] << 8) | mag_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n mag_offset_read_y = (mag_offset_read[3] << 8) | mag_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n mag_offset_read_z = (mag_offset_read[5] << 8) | mag_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n mag_radius_read = self.con.receive(registers.MAG_RADIUS_LSB_ADDR, 2)\n mag_radius_read_value = (mag_radius_read[1] << 8) | mag_radius_read[0]\n\n gyro_offset_read = self.con.receive(registers.GYRO_OFFSET_X_LSB_ADDR, 6)\n gyro_offset_read_x = (gyro_offset_read[1] << 8) | gyro_offset_read[\n 0] # Combine MSB and LSB registers into one decimal\n gyro_offset_read_y = (gyro_offset_read[3] << 8) | gyro_offset_read[\n 2] # Combine MSB and LSB registers into one decimal\n gyro_offset_read_z = (gyro_offset_read[5] << 8) | gyro_offset_read[\n 4] # Combine MSB and LSB registers into one decimal\n\n calib_data = {'accel_offset': {'x': accel_offset_read_x, 'y': accel_offset_read_y, 'z': accel_offset_read_z}, 'accel_radius': accel_radius_read_value,\n 'mag_offset': {'x': mag_offset_read_x, 'y': mag_offset_read_y, 'z': mag_offset_read_z}, 'mag_radius': mag_radius_read_value,\n 'gyro_offset': {'x': gyro_offset_read_x, 'y': gyro_offset_read_y, 'z': gyro_offset_read_z}}\n\n return calib_data",
"def LSM_acquisition(add):\n # control register\n CTRL0 = 0x1f # p.34, accelerator\n CTRL1 = 0x20\n CTRL2 = 0x21\n CTRL5 = 0x24 # p.36, magnetic\n CTRL6 = 0x25\n CTRL7 = 0x26\n FIFO_CTRL = 0x2e # p.40\n # accelerater\n OUT_X_L_A = 0x28\n OUT_X_H_A = 0x29\n OUT_Y_L_A = 0x2a\n OUT_Y_H_A = 0x2b\n OUT_Z_L_A = 0x2c\n OUT_Z_H_A = 0x2d\n # magentic\n OUT_X_L_M = 0x08\n OUT_X_H_M = 0x09\n OUT_Y_L_M = 0x0a\n OUT_Y_H_M = 0x0b\n OUT_Z_L_M = 0x0c\n OUT_Z_H_M = 0x0d\n\n # follow lsm303D arduino library\n # AFS = 0, +-2g scale\n bus.write_byte_data(add, CTRL2, 0x00)\n # 50 Hz AODR, all axis enable\n bus.write_byte_data(add, CTRL1, 0x57)\n # high resolution, 6.25Hz MODR\n bus.write_byte_data(add, CTRL5, 0x64)\n # +-4 gauss scale\n bus.write_byte_data(add, CTRL6, 0x20)\n # low power mode off, continuous conversion mode\n bus.write_byte_data(add, CTRL7, 0x00)\n # # FIFO mode\n # bus.write_byte_data(add, CTRL0, 0b01000000)\n # bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n # # accelerator with 12.5Hz, all axis enable\n # bus.write_byte_data(add, CTRL1, 0b00110111)\n # # magnetic 12.5Hz, high resolutn, temp en\n # bus.write_byte_data(add, CTRL5, 0b11100000)\n # # full scale range \\pm 12 gauss\n # bus.write_byte_data(add, CTRL6, 0b01101000)\n # # enable magnetic\n # bus.write_byte_data(add, CTRL7, 0x00)\n\n # accelerator accumulate\n while True:\n uint16_ax = (bus.read_byte_data(add, OUT_X_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_A)\n uint16_ay = (bus.read_byte_data(add, OUT_Y_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_A)\n uint16_az = (bus.read_byte_data(add, OUT_Z_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_A)\n\n uint16_mx = (bus.read_byte_data(add, OUT_X_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_M)\n uint16_my = (bus.read_byte_data(add, OUT_Y_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_M)\n uint16_mz = (bus.read_byte_data(add, OUT_Z_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_M)\n # accelerometer 12 bit left aligned\n # ax = twos_comp(uint16_ax>>4, 12)\n # ay = twos_comp(uint16_ay>>4, 12)\n # az = twos_comp(uint16_az>>4, 12)\n ax = twos_comp(uint16_ax, 16)\n ay = twos_comp(uint16_ay, 16)\n az = twos_comp(uint16_az, 16)\n\n mx = twos_comp(uint16_mx, 16)\n my = twos_comp(uint16_my, 16)\n mz = twos_comp(uint16_mz, 16)\n\n yield [ax, ay, az, mx, my, mz]",
"def read_light_bump(self, light_bump):\n data = self._read_packet(light_bump, Bump.LIGHT_DATA_BYTES)\n\n if len(data) == Bump.LIGHT_DATA_BYTES:\n return struct.unpack(\">h\", data)[0]\n else:\n return 0",
"def read_state(self):\n #build a list of 0x00 bytes to send through shift registers\n #existing data will be read as zeros are shifted in\n all_zeros = []\n for i in range(self.num_registers):\n all_zeros.append(0x00)\n\n #shift in the 0x00 data in order to read current data\n shift_reg_bytes = self.e.write_SPI_bytes_to_portA(all_zeros)\n\n #write the current data back into the shift registers\n self.e.write_SPI_bytes_to_portA(shift_reg_bytes)\n\n shift_reg_bytes.reverse()\n return shift_reg_bytes",
"def read_gyroscope(self):\n data = self.ag.read_bytes(Register.OUT_X_G, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)",
"def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X",
"def read_data(self):\r\n\t\tdata0 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\tdata1 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\ttime.sleep(0.5)\r\n\t\t\r\n\t\t# Checking valid data\r\n\t\twhile (data0 == 0) and (data1 == 0) :\r\n\t\t\tdata0 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\tdata1 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\t# Convert the data to 12-bits\r\n\t\traw_adc = ((data0 & 0x0F) * 256.0) + data1\r\n\t\tangle = (raw_adc / 4096.0) * 360.0\r\n\t\t\r\n\t\treturn {'a' : angle}",
"def readOutput(self):\n\n LMOutputFile = open(self.LMOutputFName, 'r')\n\n if self.rawDataOutputFlag:\n\n self.LMOutput['rawData'] = []\n prevLine = LMOutputFile.tell()\n tempStr = LMOutputFile.readline()\n\n while not tempStr.count('\\t'):\n\n prevLine = LMOutputFile.tell()\n self.LMOutput['rawData'].append(self.str2floatTrap(tempStr))\n tempStr = LMOutputFile.readline()\n\n LMOutputFile.seek(prevLine)\n\n if self.outputFormat == 1:\n\n tempStr = LMOutputFile.readline()\n tempWords = tempStr.split('\\t')\n self.LMOutput['TotalSum'] = self.str2floatTrap(tempWords[2])\n self.LMOutput['CompartmentsConsidered'] = self.str2floatTrap(tempWords[3])\n self.LMOutput['CompartmentsDiscarded'] = self.str2floatTrap(tempWords[4])\n self.LMOutput['Minimum'] = self.str2floatTrap(tempWords[5])\n self.LMOutput['Average'] = self.str2floatTrap(tempWords[6])\n self.LMOutput['Maximum'] = self.str2floatTrap(tempWords[7])\n self.LMOutput['StdDev'] = self.str2floatTrap(tempWords[8])\n\n elif self.outputFormat == 2:\n\n tempStr = LMOutputFile.readline()\n tempWords = tempStr.split('\\t')\n tempWords = tempWords[2:len(tempWords) - 1]\n self.LMOutput['measure1BinCentres'] = [self.str2floatTrap(x) for x in tempWords]\n\n tempStr = LMOutputFile.readline()\n tempWords = tempStr.split('\\t')\n tempWords = tempWords[2:len(tempWords) - 1]\n self.LMOutput['measure1BinCounts'] = [self.str2floatTrap(x) for x in tempWords]\n\n elif self.outputFormat == 3:\n\n tempStr = LMOutputFile.readline()\n tempWords = tempStr.split('\\t')\n tempWords = tempWords[2:len(tempWords) - 1]\n self.LMOutput['measure1BinCentres'] = [self.str2floatTrap(x) for x in tempWords]\n\n tempStr = LMOutputFile.readline()\n tempWords = tempStr.split('\\t')\n tempWords = tempWords[2:len(tempWords) - 1]\n self.LMOutput['measure2BinAverages'] = [self.str2floatTrap(x) for x in tempWords]\n\n tempStr = LMOutputFile.readline()\n tempWords = tempStr.split('\\t')\n tempWords = tempWords[1:len(tempWords) - 1]\n self.LMOutput['measure2BinStdDevs'] = [self.str2floatTrap(x) for x in tempWords]\n\n LMOutputFile.close()",
"def get_dc_offset(self):\n self.dev.write(1, 'A1')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data1.append((digit1 + 256*digit2)*5.0/1024)",
"def getAmbientLightSensor(self):\n cmd = 'O'\n ambientLightSensor = [-1,-1,-1,-1,-1,-1,-1,-1]\n out = self.getData(cmd)\n out = str(out, 'utf-8')\n if self.debug:\n print(out)\n if out[0] == 'n':\n isStart = False\n j = 0\n for i in range(len(out)):\n if isStart:\n if out[i] == ',':\n ambientLightSensor[j] = int(data)\n j = j + 1\n isStart = False\n else:\n data=data+out[i]\n if out[i] == ',':\n isStart = True\n data = ''\n ambientLightSensor[j] = int(data)\n return ambientLightSensor",
"def ldax(self, addr):\n\n val = self.mem_if.read(addr, index=self.reg.idx)\n self.reg.accum = val",
"def read_raw(self):\n # Read gyro data from the sensor.\n res = self._bus.read(self._address, _GYRO_REGISTER_OUT_X_MSB, 8)\n # Parse out the gyroscope data as 16-bit signed data.\n raw_x = struct.unpack_from(\">h\", res[0:2])[0]\n raw_y = struct.unpack_from(\">h\", res[2:4])[0]\n raw_z = struct.unpack_from(\">h\", res[4:6])[0]\n return (raw_x, raw_y, raw_z)",
"def read_tsl2561(self):\n try:\n # Create the I2C bus\n i2c = busio.I2C(board.SCL, board.SDA)\n # Create the TSL2561 instance, passing in the I2C bus\n tsl = adafruit_tsl2561.TSL2561(i2c)\n # Print chip info\n print(\"Chip ID = {}\".format(tsl.chip_id))\n print(\"Enabled = {}\".format(tsl.enabled))\n print(\"Gain = {}\".format(tsl.gain))\n print(\"Integration time = {}\".format(tsl.integration_time))\n print(\"Configuring TSL2561...\")\n print(\"Configuring TSL2561...\")\n # Enable the light sensor\n tsl.enabled = True\n time.sleep(1)\n # Set gain 0=1x, 1=16x\n tsl.gain = 0\n # Set integration time (0=13.7ms, 1=101ms, 2=402ms, or 3=manual)\n tsl.integration_time = 1\n # print(\"Getting readings...\")\n print(\"Getting readings....\")\n # Get raw (luminosity) readings individually\n broadband = tsl.broadband\n infrared = tsl.infrared\n # Get raw (luminosity) readings using tuple unpacking\n # broadband, infrared = tsl.luminosity\n # Get computed lux value (tsl.lux can return None or a float)\n lux = tsl.lux\n # Print results\n # print(\"Enabled = {}\".format(tsl.enabled))\n print(\"Enabled = {}\".format(tsl.enabled))\n # print(\"Gain = {}\".format(tsl.gain))\n print(\"Gain = {}\".format(tsl.gain))\n # print(\"Integration time = {}\".format(tsl.integration_time))\n print(\"Integration time = {}\".format(tsl.integration_time))\n # print(\"Broadband = {}\".format(broadband))\n print(\"Broadband = {}\".format(broadband))\n # print(\"Infrared = {}\".format(infrared))\n print(\"Infrared = {}\".format(infrared))\n # if lux is not None:\n # print(\"Lux = {}\".format(lux))\n # else:\n # print(\"Lux value is None. Possible \\\n # sensor underrange or overrange.\")\n # Disble the light sensor (to save power)\n tsl.enabled = False\n print('read light data: ')\n print(lux)\n print(infrared)\n print(broadband)\n return lux, infrared, broadband\n except BaseException as e:\n print('An exception occurred: {}'.format(e))",
"def hp3458a_read_voltage(hp_meter):\n hp_meter.write(\"TARM SGL\")\n return float(hp_meter.read())",
"def _generateLSSData():\n d65data=matFromVec([d65Illum(x) for x in brange(10,380,730)])\n a=matNew([cie1931cmf(x) for x in brange(10,380,730)])\n aprime=matT(a)\n width=matShape(a)[0]\n wdiag=matDiag(d65data[0])\n mat=matNew([ \\\n [3.240970, -1.537383, -0.4986108],\\\n [-0.9692436, 1.875968, 0.04155506],\\\n [0.05563008, -0.2039770, 1.056972]])\n wnorm=vecDot(d65data[0],aprime[1])\n t=matScale(matMul(matMul(mat,aprime),wdiag),1.0/wnorm)\n # Compute Least Slope Squared matrix\n d=matScale(matEye(width),4)\n dSize=matShape(d)[0]\n matSet(d,0,0,2)\n matSet(d,dSize-1,dSize-1,2)\n for i in range(1,dSize):\n matSet(d,i,i-1,-2)\n matSet(d,i-1,i,-2)\n dt=matT(d)\n vt=matT(t)\n tshape=matShape(t)\n bm=matBlock(d,vt,t,matZeros((tshape[0],tshape[0])))\n bm=matI(bm)\n b11=matPart(bm,0,width,0,width)\n b12=matPart(bm,0,matShape(vt)[0],width,matShape(bm)[1])\n return [b11, b12]"
]
| [
"0.6292134",
"0.61093265",
"0.6046865",
"0.5951739",
"0.59020424",
"0.58906746",
"0.5887905",
"0.5749123",
"0.56047714",
"0.551105",
"0.54300964",
"0.54177797",
"0.5411739",
"0.53821826",
"0.5339218",
"0.5283152",
"0.52742636",
"0.52548987",
"0.52476174",
"0.52301073",
"0.52187264",
"0.5192391",
"0.5187576",
"0.51421785",
"0.5129976",
"0.51252985",
"0.5094398",
"0.5089733",
"0.5074445",
"0.50741345"
]
| 0.64557457 | 0 |
NODE generates a missing-identity message and SELF responds. | def test_incoming_missing_identity(self):
community = DebugCommunity.create_community(self._dispersy, self._my_member)
missing = DebugNode(community)
missing.init_socket()
missing.init_my_member()
node = DebugNode(community)
node.init_socket()
node.init_my_member()
# use NODE to fetch the identities for MISSING
node.drop_packets()
node.give_message(node.create_dispersy_missing_identity(missing.my_member, 10, community.my_candidate))
responses = node.receive_messages()
self.assertEqual(len(responses), 1)
for _, response in responses:
self.assertEqual(response.name, u"dispersy-identity")
self.assertEqual(response.authentication.member.public_key, missing.my_member.public_key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_outgoing_missing_identity(self):\n community = DebugCommunity.create_community(self._dispersy, self._my_member)\n node = DebugNode(community)\n node.init_socket()\n node.init_my_member(candidate=False, identity=False)\n node.drop_packets()\n\n # NODE sends a message to SELF\n node.give_message(node.create_full_sync_text(\"Hello World\", 10))\n\n # SELF must not yet process the 'Hello World' message\n self.assertEqual(community.fetch_messages(u\"full-sync-text\"), [])\n\n # SELF must send a missing-identity to NODE\n responses = node.receive_messages()\n self.assertEqual(len(responses), 1)\n for _, response in responses:\n self.assertEqual(response.name, u\"dispersy-missing-identity\")\n self.assertEqual(response.payload.mid, node.my_member.mid)\n\n # NODE sends the identity to SELF\n node.give_message(node.create_dispersy_identity(2))\n\n # SELF must now process and store the 'Hello World' message\n messages = community.fetch_messages(u\"full-sync-text\")\n self.assertEqual(len(messages), 1)\n for message in messages:\n self.assertEqual(message.payload.text, \"Hello World\")",
"def on_missing_identity(self, messages):\n meta = messages[0].community.get_meta_message(u\"dispersy-identity\")\n for message in messages:\n # we are assuming that no more than 10 members have the same sha1 digest.\n sql = u\"SELECT packet FROM sync JOIN member ON member.id = sync.member WHERE sync.community = ? AND sync.meta_message = ? AND member.mid = ? LIMIT 10\"\n packets = [str(packet) for packet, in self._database.execute(sql, (message.community.database_id, meta.database_id, buffer(message.payload.mid)))]\n if packets:\n if __debug__:\n dprint(\"responding with \", len(packets), \" identity messages\")\n self._statistics.dict_inc(self._statistics.outgoing, u\"-dispersy-identity\", len(packets))\n self._endpoint.send([message.candidate], packets)\n\n else:\n assert not message.payload.mid == message.community.my_member.mid, \"we should always have our own dispersy-identity\"\n if __debug__: dprint(\"could not find any missing members. no response is sent [\", message.payload.mid.encode(\"HEX\"), \", mid:\", message.community.my_member.mid.encode(\"HEX\"), \", cid:\", message.community.cid.encode(\"HEX\"), \"]\", level=\"warning\")",
"def test_missing(self):\n community = DebugCommunity.create_community(self._dispersy, self._my_member)\n node = DebugNode(community)\n node.init_socket()\n node.init_my_member()\n\n messages = [node.create_sequence_text(\"Sequence message #%d\" % sequence, sequence + 10, sequence)\n for sequence\n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]\n\n # NODE gives #5, hence SELF will request [#1:#4]\n node.give_message(messages[4])\n requests = node.receive_messages(message_names=[u\"dispersy-missing-sequence\"])\n self.assertEqual(len(requests), 1)\n _, request = requests[0]\n\n self.assertEqual(request.payload.member.public_key, node.my_member.public_key)\n self.assertEqual(request.payload.message.name, u\"sequence-text\")\n self.assertEqual(request.payload.missing_low, 1)\n self.assertEqual(request.payload.missing_high, 4)\n\n # NODE gives the missing packets, database should now contain [#1:#5]\n node.give_messages(messages[0:4])\n yield 0.11\n packets = community.fetch_packets(u\"sequence-text\")\n self.assertEqual(packets, [message.packet for message in messages[0:5]])\n\n #\n # Lets give the following range and test if it works when there are already (a few) messages in the database\n #\n\n # NODE gives #10, hence SELF will request [#6:#9]\n node.give_message(messages[9])\n requests = node.receive_messages(message_names=[u\"dispersy-missing-sequence\"])\n self.assertEqual(len(requests), 1)\n _, request = requests[0]\n\n self.assertEqual(request.payload.member.public_key, node.my_member.public_key)\n self.assertEqual(request.payload.message.name, u\"sequence-text\")\n self.assertEqual(request.payload.missing_low, 6)\n self.assertEqual(request.payload.missing_high, 9)\n\n # NODE gives the missing packets, database should now contain [#1:#5]\n node.give_messages(messages[5:9])\n yield 0.11\n packets = community.fetch_packets(u\"sequence-text\")\n self.assertEqual(packets, [message.packet for message in messages])",
"def test_missingId(self):\n node = Node()\n node.properties[\"datawire_nodeId\"] = \"4567\"\n self.assertEqual(node.getId(), \"4567\")",
"def create_missing_identity(self, community, candidate, dummy_member, response_func=None, response_args=(), timeout=4.5, forward=True):\n if __debug__:\n from .community import Community\n assert isinstance(community, Community)\n assert isinstance(candidate, Candidate)\n assert isinstance(dummy_member, DummyMember)\n assert not dummy_member.public_key\n assert response_func is None or callable(response_func)\n assert isinstance(response_args, tuple)\n assert isinstance(timeout, float)\n assert isinstance(forward, bool)\n\n sendRequest = False\n \n identifier = MissingMemberCache.properties_to_identifier(community, dummy_member)\n cache = self._request_cache.get(identifier, MissingMemberCache)\n if not cache:\n cache = MissingMemberCache(timeout)\n self._request_cache.set(identifier, cache)\n\n if __debug__: dprint(candidate, \" sending missing-identity \", dummy_member.mid.encode(\"HEX\"))\n meta = community.get_meta_message(u\"dispersy-missing-identity\")\n request = meta.impl(distribution=(community.global_time,), destination=(candidate,), payload=(dummy_member.mid,))\n self._forward([request])\n \n sendRequest = True\n\n cache.callbacks.append((response_func, response_args))\n return sendRequest",
"def test_missing_case_id(self):\n responses.add(responses.POST, self.endpoint, status=200)\n with self.assertRaises(QuarantinableError):\n with self.assertLogs() as cm:\n processor.process(encrypt(test_data['missing_case_id']))\n self.assertIn('Decrypted json missing metadata. Quarantining message', cm.output[0])",
"def test_missing_tx_id(self):\n responses.add(responses.POST, self.endpoint, status=200)\n with self.assertRaises(QuarantinableError):\n with self.assertLogs() as cm:\n processor.process(encrypt(test_data['missing_tx_id']))\n self.assertIn('Decrypted json missing tx_id . Quarantining message', cm.output[0])",
"def test_no_identities(dummy_command):\n # get_identities will return some options.\n dummy_command.get_identities.return_value = {}\n\n # Return option 1\n dummy_command.input.values = [\"1\"]\n\n result = dummy_command.select_identity()\n\n assert result == (\n \"-\",\n (\n \"Ad-hoc identity. The resulting package will run but cannot be \"\n \"re-distributed.\"\n ),\n )\n\n # User input was solicited\n assert dummy_command.input.prompts",
"def test_fetchUIDNonIntegerFound(self):\n d = self.client.fetchUID('1')\n self.assertEqual(self.transport.value(), b'0001 FETCH 1 (UID)\\r\\n')\n self.client.lineReceived(b'* foo FETCH (UID 22)')\n self.client.lineReceived(b'0001 OK FETCH completed')\n self.failureResultOf(d, imap4.IllegalServerResponse)",
"def create_dispersy_missing_identity(self, dummy_member, global_time, destination_candidate):\n assert isinstance(dummy_member, Member), type(dummy_member)\n assert isinstance(global_time, (int, long)), type(global_time)\n assert isinstance(destination_candidate, Candidate), type(destination_candidate)\n meta = self._community.get_meta_message(u\"dispersy-missing-identity\")\n return meta.impl(distribution=(global_time,),\n destination=(destination_candidate,),\n payload=(dummy_member.mid,))",
"def forget_identity(response, request):\n raise NotImplementedError # pragma: nocoverage",
"def confirm_identity(self):\n self.__not_implemented()",
"def send_error_missing_id(message, obj_type):\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Missing id',\n \"object_type\": obj_type,\n \"description\": message\n }}), 400)",
"def test_get_user_id_unknown_user(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n self.assertIsNone(self.connection.get_user_id(\n NON_EXIST_PATIENT_USERNAME))",
"def test_generate_xml_node(self):\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils.generate_xml_node({}, {}, \"sometag\")",
"def test_bad_oid(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n starting_signal = Signal({\n \"existing_key\": \"existing_val\",\n \"not an oid\": myOID\n })\n\n self.configure_block(block, {\n \"oids\": [{\"oid\": \"{{ $oid }}\"}]\n })\n block.start()\n\n # Send the bad signal, make sure execute was never called\n block.process_signals([starting_signal])\n self.assertFalse(block._execute_snmp_request.called)\n block.stop()",
"def test_emptyInstanceID(self) :\n\t\tself.assertRaises(ValueError, lambda: Node(\"\"))",
"def test_invalid_identity_name(dummy_command):\n # get_identities will return some options.\n dummy_command.get_identities.return_value = {\n \"38EBD6F8903EC63C238B04C1067833814CE47CA3\": \"Developer ID Application: Example Corporation Ltd (Z2K4383DLE)\",\n \"11E77FB58F13F6108B38110D5D92233C58ED38C5\": \"iPhone Developer: Jane Smith (BXAH5H869S)\",\n }\n\n # The identity will be the one the user specified as an option.\n with pytest.raises(BriefcaseCommandError):\n dummy_command.select_identity(\"not-an-identity\")\n\n # User input was not solicited\n assert dummy_command.input.prompts == []",
"def test_missing_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n node.give_message(authorize, self._mm)\n\n protected_text = node.create_protected_full_sync_text(\"Protected message\", 42)\n node.store([protected_text])\n\n # OTHER pretends to received the protected message and requests the proof\n node.give_message(other.create_missing_proof(node.my_member, 42), other)\n\n # NODE sends dispersy-authorize to OTHER\n _, authorize = other.receive_message(names=[u\"dispersy-authorize\"]).next()\n\n permission_triplet = (node.my_member.mid, u\"protected-full-sync-text\", u\"permit\")\n authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]\n self.assertIn(permission_triplet, authorize_permission_triplets)",
"def test_expiredUnknown(self):\n disco = create_disco()\n node = create_node(\"somewhere\")\n disco.onMessage(None, NodeExpired(node))\n self.assertEqual(knownNodes(disco, \"myservice\", \"sandbox\"), [])",
"def test_incompleteFetchUIDResponse(self):\n d = self.client.fetchUID('1:7')\n self.assertEqual(self.transport.value(), b'0001 FETCH 1:7 (UID)\\r\\n')\n self.client.lineReceived(b'* 2 FETCH (UID 22)')\n self.client.lineReceived(b'* 3 FETCH (UID)')\n self.client.lineReceived(b'* 4 FETCH (UID 24)')\n self.client.lineReceived(b'0001 OK FETCH completed')\n self.failureResultOf(d, imap4.IllegalServerResponse)",
"def test_requestNonexistentAvatarId(self):\n username = '%s@%s' % (self.localpart, self.domain)\n d = self._requestAvatarId(\n UsernamePassword(username, self.password))\n return self.assertFailure(d, errors.NoSuchUser)",
"def _require_node(self, instance):\n node_uuid = instance.get('node')\n if not node_uuid:\n raise exception.NovaException(_(\n \"Baremetal node id not supplied to driver for %r\")\n % instance['uuid'])\n return node_uuid",
"def create_identity(msg: CreateIdentity_request):\n \n # Check if we have received some data in the POST\n if len(msg.DID) == 0:\n log.error(\"No data received\")\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"No data received\")\n\n # Create the identity using the library\n try:\n error, didDoc = tf.create_identity_subnode(\n msg.DID, msg.domain_name, msg.website, msg.commercial_name, msg.new_privatekey, msg.parent_privatekey)\n except Exception as e:\n detail=str(e)\n log.error(detail)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)\n\n if error is not None:\n log.error(error)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=error)\n\n return {\"didDoc\": didDoc.to_dict()}",
"def test_30_app_id_anonymous_user(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n self.signout()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert \"Sample App\" in res.data, (\"Application name should be shown\"\r\n \" to users\")\r\n assert '<strong><i class=\"icon-cog\"></i> ID</strong>: 1' not in \\\r\n res.data, \"Application ID should be shown to the owner\"",
"def testInfoEmptyDefaultNode(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\">\n <identity category=\"client\" type=\"bot\" />\n <feature var=\"http://jabber.org/protocol/disco#info\" />\n </query>\n </iq>\n \"\"\")",
"def test_nonIntegerUIDNEXT(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDNEXT foo] Predicted next UID')\n self.failureResultOf(d, imap4.IllegalServerResponse)",
"def test_create_identity(self):\n pass",
"def test_nonIntegerUnseen(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UNSEEN foo] Message foo is first unseen')\n self.failureResultOf(d, imap4.IllegalServerResponse)",
"def _identify_fail(failure):\n logger.warning(failure.getErrorMessage())\n logger.warning(\"Failed to setup & obtain identity\")\n return"
]
| [
"0.74775064",
"0.6636877",
"0.63363093",
"0.6295154",
"0.59223104",
"0.58052754",
"0.5758968",
"0.56116825",
"0.5479746",
"0.547219",
"0.5457771",
"0.543826",
"0.5415727",
"0.53859234",
"0.53766704",
"0.53607786",
"0.53590137",
"0.53589034",
"0.5356511",
"0.53152335",
"0.5276069",
"0.52737",
"0.52671635",
"0.52606183",
"0.52540034",
"0.52299666",
"0.5215375",
"0.52050674",
"0.5197072",
"0.51856774"
]
| 0.78432083 | 0 |
NODE generates data and sends it to SELF, resulting in SELF asking for the missing identity. | def test_outgoing_missing_identity(self):
community = DebugCommunity.create_community(self._dispersy, self._my_member)
node = DebugNode(community)
node.init_socket()
node.init_my_member(candidate=False, identity=False)
node.drop_packets()
# NODE sends a message to SELF
node.give_message(node.create_full_sync_text("Hello World", 10))
# SELF must not yet process the 'Hello World' message
self.assertEqual(community.fetch_messages(u"full-sync-text"), [])
# SELF must send a missing-identity to NODE
responses = node.receive_messages()
self.assertEqual(len(responses), 1)
for _, response in responses:
self.assertEqual(response.name, u"dispersy-missing-identity")
self.assertEqual(response.payload.mid, node.my_member.mid)
# NODE sends the identity to SELF
node.give_message(node.create_dispersy_identity(2))
# SELF must now process and store the 'Hello World' message
messages = community.fetch_messages(u"full-sync-text")
self.assertEqual(len(messages), 1)
for message in messages:
self.assertEqual(message.payload.text, "Hello World") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, username: str, current_key: Key, \n user_data: UserData = None, server_type=constants.LOCAL_SERVER, \n port=constants.LOCAL_PORT, auth_keys : tuple[PublicKey, PrivateKey] = None):\n self.username = username\n \n #Get the __init__ of Node class.\n super().__init__(username, auth_Keys= auth_keys)\n \n \n #This is the correct_key i.e. the Key that Alice has.\n #TODO: __ or _ for best security?\n self._current_key = current_key\n \n self.server_type = server_type\n self.port = port\n\n\n # The User Data for all people on this server \n user_data_loaded = self.check_if_user_data_file_exists()\n \n if user_data:\n self.user_data = user_data\n elif user_data_loaded:\n self.user_data = user_data_loaded\n # print(self.user_data)\n else:\n self.user_data = UserData()\n \n # set the address corresponding to Local or Public Server.\n self._set_the_address_variable()\n self.user = User(username, address=self.address, auth_id=self.auth_id)\n self.user_data.update_user_data(self.user)\n # print(\"UserData (After Server Add): \", self.user_data.users)\n \n if server_type == constants.PUBLIC_SERVER:\n with open(constants.DATA_STORAGE + 'server_address.pickle', 'wb') as fh:\n pickle.dump(self.address, fh)\n \n self.threads = [];\n \n # The Server will start on this address\n # Then we can port-forward the ngrok address to this address\n self.LOCAL_ADDRESS = (constants.LOCAL_IP, constants.LOCAL_PORT)\n \n # The status of different algorithms for Error Correction.\n self.reconciliation_status = {'cascade': 'Not yet started',\n 'winnow': 'Not yet started',\n 'ldpc': 'Not yet started',\n 'polar': 'Not yet started'}\n \n # Start Listening!\n self.start_listening()\n self.server_is_active = True\n\n # Now Start accepting connections:\n self.start_receiving()",
"def setup(self):\n self.nonce = generate_nonce()\n # print('nonce: ' + str(self.nonce))\n n, ciphertext, tag = aes_encode(self.aes, self.nonce)\n print(Colors.BOLD + 'N --> S: {N_N}K' + Colors.ENDC)\n print('\\t' + Colors.BOLD + 'N_N: ' + Colors.ENDC + str(self.nonce))\n print('\\t' + Colors.BOLD + 'K: ' + Colors.ENDC + str(self.aes))\n print('\\t' + Colors.BOLD + '{N_N}K : (n, c, t)' + Colors.ENDC)\n # print('sending encrypted, (n, c, t) : (' + str(n) + ', ' + str(ciphertext) + ', ' + str(tag) + ')')\n to_send = {'dest': 'setup', 'n': n, 'c': ciphertext, 't': tag} # dictionary to send to the server\n self.nodesocket.sendall(pickle.dumps(to_send))\n data = pickle.loads(self.nodesocket.recv(MAX_SIZE))\n self.id = data['id'] # set the given id from the server\n return data",
"def test_missingId(self):\n node = Node()\n node.properties[\"datawire_nodeId\"] = \"4567\"\n self.assertEqual(node.getId(), \"4567\")",
"def invoke(self, msg, req):\n node = Node.create()\n node.acquire_lock()\n\n if msg.name == 'forward':\n try:\n with node.graph.as_default():\n if node.num_devices == 5:\n output, name = Model_5.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 6:\n output, name = Model_6.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 7:\n output, name = Model_7.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 8:\n output, name = Model_8.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n elif msg.name == 'update':\n \"\"\"update this node's task configuration,based on the received massage \"\"\"\n try:\n node.num_devices = req['num_devices']\n available_ip = req['available_ip']\n\n update_ip(get_file(node.num_devices), available_ip)\n load_ip(node)\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n\n else:\n raise schema.AvroException('unexpected message:', msg.getname())",
"def generate (self, n, ind = 0):\n\n addr = \"chirt1qcmdxwpu35mqlzxz3alc9u9ztp22edsuc5s7zzk\"\n self.generatetoaddress (self.nodes[ind], n, addr)",
"def send_node_props(self, host_info):\n se = get_se()\n version = get_version()\n name = host_info.get_hostname()\n unique_id = '%s:Pool:%s' % (se, name)\n parent_id = \"%s:SE:%s\" % (se, se)\n\n sa = StorageElement.StorageElement()\n sar = StorageElementRecord.StorageElementRecord()\n sa.UniqueID(unique_id)\n sa.Name(name)\n sa.SE(se)\n sa.SpaceType(\"Pool\")\n sa.Implementation(XRD_NAME)\n sa.Version(version)\n sa.Status(XRD_STATUS)\n sa.ParentID(parent_id)\n sa.Timestamp(timestamp)\n sar.Timestamp(timestamp)\n sar.UniqueID(unique_id)\n sar.MeasurementType(\"raw\")\n sar.StorageType(\"disk\")\n sar.TotalSpace(1024*host_info.get_total_kb())\n sar.FreeSpace(1024*host_info.get_total_free_kb())\n sar.UsedSpace(1024*host_info.get_total_used_kb())\n Gratia.Send(sa)\n Gratia.Send(sar)",
"def _send_data(self):\n pass",
"def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")",
"def send_node(self) -> str:\n node = self.current_node\n MDI_Send(node, MDI_COMMAND_LENGTH, MDI_CHAR, self.comm)\n return node",
"def run(send_to_node=False):\n\n # Signed request\n sk = read_signing_key_file(os.path.join(SIGNING_KEY_DIR, 'bank_02_nid'))\n signed_request = generate_signed_request(\n data={\n 'ip_address': '104.131.41.225',\n 'port': None,\n 'protocol': 'http'\n },\n nid_signing_key=sk\n )\n\n if send_to_node:\n send_request_to_node(signed_request, live_pv=True)\n\n write_json(\n os.path.join(SIGNED_REQUESTS_DIR, 'connection-request.json'),\n signed_request\n )",
"def _start_oef_node(self, network_node):",
"def transmit_data(self, data: str, target_node: str = None):\n raise NotImplementedError",
"def _explicit(self):\n _device_uuid = self._task.get(\"to_device_uuid\")\n _device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)\n if _device == None:\n logging.error(\"no device:%s\" % _device_uuid)\n return\n\n _user_uuid = self._task.get(\"from_uuid\")\n self._users_hash[_user_uuid] = self._task[\"_user\"]\n self._devices_hash[_device_uuid] = _device\n # not save db for explicit message\n self._push_to_socket(_user_uuid, _device_uuid)\n return",
"def test_set_self_address(self):\n print('### Testing set up address ###')\n node_id = \"001\" # node_id of the form of 3 chr string already verified in Nanomodem.py\n \n command = b'$A' + node_id.encode()\n self.serport.write(command)\n\n received_bytes = self.serport.readline()\n index = received_bytes.find(b'#A')\n #print(\"SET_ADDRESS len is \"+ str(len(received_bytes)) +\" and index is \"+str(index))\n\n if (index != -1) and (len(received_bytes) - index == 5 and received_bytes.decode()[1] == 'A'): \n # received_bytes[1] == b'A' as condition doesn't work because x = b'A' still stay b'A' and x[0] give 65 (the byte for A)\n #print(\"SET_ADDRESS A was spot on\")\n if received_bytes[1:4] == command[1:4]:\n node_id = received_bytes.decode()[2:5]\n print(\"SET_ADDRESS node is :\"+ node_id)\n print(\"set self address SUCCESS\")\n return True\n else: \n print(\"set self address FAILURE\")\n return False",
"def send(self, data):",
"def test_fast_forward_scenario1 (self) :\n\t\tnodeList = self.createNodes(3)\n\n\t\t# Adding a record to a node A\n\t\tnodeList[0].addAppData(\"record1\",\"A version 1\", Node.ALL, Node.ALL)\n\t\tnodeList[0].serialize((Node.ALL, Node.ALL))\n\t\t\n\t\t# Node A pushing data to Node B\n\t\tsess0_1 = nodeList[0].createSyncSession(nodeList[1], nodeList[1].instanceID)\n\t\tnodeList[0].pushInitiation(sess0_1, (Node.ALL, Node.ALL))\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByInstance, nodeList[0].instanceID)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1})\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].recordData, \"A version 1\")\n\n\t\tappRecord = nodeList[1].searchRecordInApp(\"record1\")\t\n\t\tself.assertEqual(appRecord.recordData, \"A version 1\")\n\t\tself.assertEqual(appRecord.dirtyBit, 0)\n\t\tself.assertEqual(appRecord.partitionFacility, Node.ALL)\n\t\tself.assertEqual(appRecord.partitionUser, Node.ALL)\n\n\t\t# Node B now modifies this data\n\t\tnodeList[1].addAppData(\"record1\",\"B version 1\", Node.ALL, Node.ALL)\n\t\tself.assertEqual(appRecord.recordData, \"B version 1\")\n\t\tself.assertEqual(appRecord.dirtyBit, 1)\n\n\t\tnodeList[1].serialize((Node.ALL, Node.ALL))\n\t\tself.assertEqual(appRecord.dirtyBit, 0)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByInstance, nodeList[1].instanceID)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1,\\\n\t\t\t nodeList[1].instanceID:1})\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].recordData, \"B version 1\")\n\n\t\t# Node B pushing data to Node C\n\t\tsess1_2 = nodeList[1].createSyncSession(nodeList[2], nodeList[2].instanceID)\n\t\tnodeList[1].pushInitiation(sess1_2, (Node.ALL, Node.ALL))\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByInstance, nodeList[1].instanceID)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1, \\\n\t\t\tnodeList[1].instanceID:1})\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].recordData, \"B version 1\")\n\n\t\t# Node A pushing data to Node C\n\t\tsess0_2 = nodeList[0].createSyncSession(nodeList[2], nodeList[2].instanceID)\n\t\tnodeList[0].pushInitiation(sess0_2, (Node.ALL, Node.ALL))\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByInstance, nodeList[1].instanceID)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1, \\\n\t\t\tnodeList[1].instanceID:1})\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionUser, 
Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].recordData, \"B version 1\")",
"def create_datasinker_node(self, base_directory, bids_atlas_label, recon_model, tracking_model):\n sinker = pe.Node(nio.DataSink(), name=\"dwi_datasinker\")\n sinker.inputs.base_directory = os.path.abspath(base_directory)\n\n # Dataname substitutions in order to comply with BIDS derivatives specifications\n # fmt:off\n sinker.inputs.substitutions = [ # ('T1', self.subject+'_T1w_head'),\n (\"brain_mask.nii.gz\", self.subject + \"_desc-brain_mask.nii.gz\"),\n (\"brain.nii.gz\", self.subject + \"_desc-brain_T1w.nii.gz\"),\n (\"T1_warped\", self.subject + \"_space-DWI_desc-head_T1w\"),\n (\"T1-TO-TARGET\", self.subject + \"_space-DWI_desc-head_T1w\"),\n (\"anat_resampled_warped\", self.subject + \"_space-DWI_desc-head_T1w\"),\n (\"brain_warped\", self.subject + \"_space-DWI_desc-brain_T1w\"),\n (\"anat_masked_resampled_warped\", self.subject + \"_space-DWI_desc-brain_T1w\"),\n (\"brain_mask_registered_temp_crop\", self.subject + \"_space-DWI_desc-brain_mask\"),\n (\"brain_mask_resampled_warped.nii.gz\", self.subject + \"_space-DWI_desc-brain_mask\"),\n (\"wm_mask_warped\", self.subject + \"_space-DWI_label-WM_dseg\"),\n (\"wm_mask_registered\", self.subject + \"_space-DWI_label-WM_dseg\"),\n (\"wm_mask_resampled_warped\", self.subject + \"_space-DWI_label-WM_dseg\"),\n (\n f'{self.subject}_atlas-Desikan_dseg_out_warped.nii.gz',\n f'{self.subject}_space-DWI_atlas-Desikan_dseg.nii.gz'\n ),\n (\"fast__pve_0_out_warped.nii.gz\", self.subject + \"_space-DWI_label-CSF_probseg.nii.gz\"),\n (\"fast__pve_1_out_warped.nii.gz\", self.subject + \"_space-DWI_label-GM_probseg.nii.gz\"),\n (\"fast__pve_2_out_warped.nii.gz\", self.subject + \"_space-DWI_label-WM_probseg.nii.gz\"),\n (\"pve_0_out_warped.nii.gz\", self.subject + \"_space-DWI_label-CSF_probseg.nii.gz\"),\n (\"pve_1_out_warped.nii.gz\", self.subject + \"_space-DWI_label-GM_probseg.nii.gz\"),\n (\"pve_2_out_warped.nii.gz\", self.subject + \"_space-DWI_label-WM_probseg.nii.gz\"),\n (\"act_5tt_resampled_warped.nii.gz\", self.subject + \"_space-DWI_label-5TT_probseg.nii.gz\"),\n (\"gmwmi_resampled_warped.nii.gz\", self.subject + \"_space-DWI_label-GMWMI_probseg.nii.gz\"),\n (\"5tt_warped.nii.gz\", self.subject + \"_space-DWI_label-5TT_probseg.nii.gz\"),\n (\"gmwmi_warped.nii.gz\", self.subject + \"_space-DWI_label-GMWMI_probseg.nii.gz\"),\n (\"connectome_freesurferaparc\", self.subject + \"_label-Desikan_conndata-network_connectivity\"),\n (\"dwi.nii.gz\", self.subject + \"_dwi.nii.gz\"),\n (\"dwi.bval\", self.subject + \"_dwi.bval\"),\n (\"eddy_corrected.nii.gz.eddy_rotated_bvecs\", self.subject + \"_desc-eddyrotated.bvec\"),\n (\"eddy_corrected.nii.gz\", self.subject + \"_desc-eddycorrected_dwi.nii.gz\"),\n (\"dwi_brain_mask_resampled.nii.gz\", self.subject + \"_desc-brain_mask.nii.gz\"),\n (\"brain_mask_resampled.nii.gz\", self.subject + \"_desc-brain_mask.nii.gz\"),\n (\"ADC\", self.subject + \"_model-DTI_MD\"),\n (\"FA\", self.subject + \"_model-DTI_FA\"),\n (\"diffusion_preproc_resampled_fa\", self.subject + \"_model-DTI_FA\"),\n (\"diffusion_preproc_resampled_ad\", self.subject + \"_model-DTI_AD\"),\n (\"diffusion_preproc_resampled_md\", self.subject + \"_model-DTI_MD\"),\n (\"diffusion_preproc_resampled_rd\", self.subject + \"_model-DTI_RD\"),\n (\"shore_gfa.nii.gz\", \"{}_model-SHORE_GFA.nii.gz\".format(self.subject)),\n (\"shore_msd.nii.gz\", \"{}_model-SHORE_MSD.nii.gz\".format(self.subject)),\n (\"shore_rtop_signal.nii.gz\", \"{}_model-SHORE_RTOP.nii.gz\".format(self.subject),),\n (\"shore_fodf.nii.gz\", 
\"{}_model-SHORE_FOD.nii.gz\".format(self.subject)),\n (\"diffusion_resampled_CSD.mif\", self.subject + \"_model-CSD_diffmodel.mif\",),\n (\"diffusion_shm_coeff.nii.gz\", \"{}_model-CSD_diffmodel.nii.gz\".format(self.subject),),\n (\"spherical_harmonics_image.nii.gz\", \"{}_model-CSD_diffmodel.nii.gz\".format(self.subject),),\n (\"shm_coeff.nii.gz\", \"{}_model-CSD_diffmodel.nii.gz\".format(self.subject),),\n (\"dwi_tensor.nii.gz\", \"{}_desc-WLS_model-DTI_diffmodel.nii.gz\".format(self.subject),),\n (\"grad.txt\", self.subject + \"_desc-grad_dwi.txt\"),\n (\"target_epicorrected\", self.subject + \"_desc-preproc_dwi\"),\n (\"diffusion_preproc_resampled.nii.gz\", self.subject + \"_desc-preproc_dwi.nii.gz\",),\n (\"streamline_final\", \"{}_model-{}_desc-{}_tractogram\".format(self.subject, recon_model, tracking_model),)\n ]\n # fmt:on\n\n if self.parcellation_scheme != \"Custom\":\n for scale in ['scale1', 'scale2', 'scale3', 'scale4', 'scale5']:\n # fmt:off\n sinker.inputs.substitutions += [\n (\n f'ROIv_HR_th_{scale}_out_warped.nii.gz',\n f'{self.subject}_space-DWI_atlas-{bids_atlas_label}_res-{scale}_dseg.nii.gz'\n ),\n (\n f'{self.subject}_atlas-{bids_atlas_label}_res-{scale}_dseg_out_warped.nii.gz',\n f'{self.subject}_space-DWI_atlas-{bids_atlas_label}_res-{scale}_dseg.nii.gz'\n ),\n (\n f'{self.subject}_atlas-{bids_atlas_label}_res-{scale}_dseg_out_flirt.nii.gz',\n f'{self.subject}_space-DWI_atlas-{bids_atlas_label}_res-{scale}_dseg.nii.gz'\n ),\n (\n f'connectome_{scale}',\n f'{self.subject}_atlas-{bids_atlas_label}_res-{scale}_conndata-network_connectivity'),\n ]\n # fmt:on\n else:\n # fmt:off\n bids_atlas_name = bids_atlas_label if \"res\" not in bids_atlas_label else bids_atlas_label.split('_')[0]\n sinker.inputs.substitutions += [\n (\n f'{self.subject}_atlas-{bids_atlas_label}_dseg_out_warped.nii.gz',\n f'{self.subject}_space-DWI_atlas-{bids_atlas_label}_dseg.nii.gz'\n ),\n (\n f'{self.subject}_atlas-{bids_atlas_label}_dseg_out_flirt.nii.gz',\n f'{self.subject}_space-DWI_atlas-{bids_atlas_label}_dseg.nii.gz'\n ),\n (\n f'connectome_{bids_atlas_name}',\n f'{self.subject}_atlas-{bids_atlas_label}_conndata-network_connectivity'),\n ]\n # fmt:on\n\n return sinker",
"def test_incoming_missing_identity(self):\n community = DebugCommunity.create_community(self._dispersy, self._my_member)\n\n missing = DebugNode(community)\n missing.init_socket()\n missing.init_my_member()\n\n node = DebugNode(community)\n node.init_socket()\n node.init_my_member()\n\n # use NODE to fetch the identities for MISSING\n node.drop_packets()\n node.give_message(node.create_dispersy_missing_identity(missing.my_member, 10, community.my_candidate))\n responses = node.receive_messages()\n\n self.assertEqual(len(responses), 1)\n for _, response in responses:\n self.assertEqual(response.name, u\"dispersy-identity\")\n self.assertEqual(response.authentication.member.public_key, missing.my_member.public_key)",
"def create_identity_subnode(\n did: str,\n domain_name: str,\n website: str,\n commercial_name: str,\n new_privatekey: PrivatekeyJWK,\n parent_privatekey: PrivatekeyJWK, \n ) -> Tuple[str, DIDDocument]:\n\n # Check that node has at least two components: subnode.parent\n s = domain_name.partition(\".\")\n if len(s[1]) == 0:\n return \"Domain name has only one component\", None\n\n this_node = s[0]\n parent_node = s[2]\n\n # Obtain subnode's private and public key and Ethereum address\n subnode_account = Account.from_key(base64url_decode(new_privatekey.d))\n subnode_publicKey = base64url_decode(new_privatekey.x) + base64url_decode(new_privatekey.y)\n pb = PublicKey(subnode_publicKey)\n subnode_address = pb.to_checksum_address()\n\n # The caller account from its private key\n Manager_account = Account.from_key(base64url_decode(parent_privatekey.d))\n\n # Initialize the DIDDocument\n didDoc = DIDDocument(\n DID=did,\n node_name=parent_node,\n label=this_node,\n address=subnode_address,\n publicKey=subnode_publicKey,\n manager_account=Manager_account\n )\n\n # Add the entity info\n service = {\n \"id\": did + \"#info\",\n \"type\": \"EntityCommercialInfo\",\n \"serviceEndpoint\": website,\n \"name\": commercial_name\n }\n didDoc.addService(service)\n\n # Add the Secure Messaging Server info\n service = {\n \"id\": did + \"#sms\",\n \"type\": \"SecureMessagingService\",\n \"serviceEndpoint\": \"https://safeisland.hesusruiz.org/api\"\n }\n didDoc.addService(service)\n\n # Store the info in the blockchain trust framework\n success, tx_receipt, tx_hash = didDoc.createIdentity(ens, resolver)\n if not success:\n return \"Failed to create identity in blockchain\", None\n\n success, tx_receipt, tx_hash = ens.setApprovalForAll(resolver.address(), True, subnode_account.key)\n if not success:\n return \"Failed in setApprovalForAll\", None\n\n return None, didDoc",
"def __init__(self):\n self.node = None\n self.data = None",
"def m_create_test_identities():\n\n # Get the ROOT account (it was created in the deployment of the Smart Contracts)\n ROOT_address, ROOT_key = wallet.account_from_name(\"ROOT\", \"ThePassword\")\n\n # Create the Alastria account for node \"ala\"\n print(f\"\\n==> Creating the Alastria account\")\n Alastria_account = wallet.new_account(\n \"Alastria\", \"ThePassword\")\n alakey = Alastria_account.key\n print(f\"Alastria key: {alakey}\")\n\n print(f\"Done\")\n\n # Set the subnode \"ala\"\n print(f\"\\n==> Creating the ala subnode in the Trust Framework\")\n success, _, _ = ens.setSubnodeOwner(\n node_name=\"root\",\n label=\"ala\",\n new_owner_address=Alastria_account.address,\n current_owner_key=ROOT_key\n )\n print(f\"ala subnode created\")\n\n # Assign the name for reverse resolution\n resolver.setName(\"ala\", \"ala\", Alastria_account.key)\n\n # And assign approval to the PublicResolver contract so it can call ENS methods on behalf of Alastria\n print(f\"Resolver address for ROOT: {resolver.address()}\")\n ens.setApprovalForAll(resolver.address(), True, Alastria_account.key)\n\n ################################\n # Heathrow airport\n print(f\"\\n==> Creating the Heathrow identity\")\n\n DID = \"did:elsi:VATGB-927365404\"\n domain_name = \"heathrow.ala\"\n website = \"www.heathrow.com\"\n commercial_name = \"Heathrow Airport Limited\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # AENA\n print(f\"\\n==> Creating the AENA identity\")\n\n DID = \"did:elsi:VATES-A86212420\"\n domain_name = \"aena.ala\"\n website = \"www.aena.es\"\n commercial_name = \"Aena\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Lanzarote airport\n # The airport belongs to AENA and does not have independent entity (shares the same VAT, for example)\n # In production, the node should be created by AENA, as a subnode controlled by them.\n # In this PoC, the node is created automatically to facilitate the tests\n print(f\"\\n==> Creating the César Manrique airport identity\")\n\n DID = \"did:elsi:VATES-A86212420-1\"\n domain_name = \"ace.ala\"\n website = \"www.aena.es/es/aeropuerto-lanzarote\"\n commercial_name = \"Aeropuerto de Lanzarote-Cesar Manrique\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Metrovacesa\n print(f\"\\n==> Creating the Metrovacesa identity\")\n\n DID = \"did:elsi:VATES-A87471264\"\n domain_name = \"metrovacesa.ala\"\n website = \"metrovacesa.com\"\n commercial_name = \"Metrovacesa\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # IN2\n print(f\"\\n==> Creating the IN2 identity\")\n\n DID = \"did:elsi:VATES-B60645900\"\n domain_name = \"in2.ala\"\n website = \"www.in2.es\"\n commercial_name = \"IN2 Innovating 2gether\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Perfect Health\n print(f\"\\n==> Creating the Perfect Health 
identity\")\n\n DID = \"did:elsi:VATES-X12345678X\"\n domain_name = \"perfecthealth.ala\"\n website = \"www.perfecthealth.org\"\n commercial_name = \"Perfect Health plc\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # BME\n print(f\"\\n==> Creating the BME identity\")\n\n DID = \"did:elsi:VATES-A83246314\"\n domain_name = \"bme.ala\"\n website = \"www.bolsasymercados.es\"\n commercial_name = \"Bolsas y Mercados Españoles\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)",
"def write_node(self, record) -> None:\n pass",
"def __init__(self, data, node):\n self.data = data\n self.node = node",
"def receive_generate_info():\n input_info_tensor = torch.empty(5, dtype=torch.float32, device=torch.cuda.current_device())\n torch.distributed.broadcast(input_info_tensor, 0)\n batch_size = int(input_info_tensor[0].item())\n seq_len = int(input_info_tensor[1].item())\n tokens_to_generate = int(input_info_tensor[2].item())\n all_probs = int(input_info_tensor[3].item())\n temperature = float(input_info_tensor[4].item())\n \n context_length_tensor = torch.empty(batch_size, dtype=torch.int64, device=torch.cuda.current_device())\n context_tokens_tensor = torch.empty(batch_size, seq_len, dtype=torch.int64, device=torch.cuda.current_device())\n \n # Send variables to all ranks \n torch.distributed.broadcast(context_length_tensor, 0)\n torch.distributed.broadcast(context_tokens_tensor, 0)\n \n return context_length_tensor, context_tokens_tensor, tokens_to_generate, all_probs, temperature",
"def seed_protocol_test():\n\tuser1 = User()\n\tuser2 = User()\n\t\n\tuser1.seed, user1.seed_hash = genSeed()\n\tuser1.data_chunk_1 = 'Some random'\n\tuser1.data_chunk_2 = ' data for you'\n\n\tuser2.seed, user2.seed_hash = genSeed()\n\tuser2.data = 'My data'\n\tuser1.data_chunk_2 = ' data for you'\n\n\t#user1 sends their hashed seed\n\tuser2.recv_hash= user1.seed_hash\n\n\t#user2 then sends their seed\n\tuser1.recv_seed = user2.seed\n\tuser1.concat_seed = user1.seed + user1.recv_seed\n\n\t#user1 then sends the first chunk of data with their seed\n\tuser2.recv_seed = user1.seed\n\tuser2.recv_data = user1.data_chunk_1\n\n\t#user2 verifys the seed before using the data as needed\n\tverifySeed(user2.recv_seed, user2.recv_hash)\n\n\t#user1 verifys the seed before using the data as needed\n\t#verifySeed(user1.recv_seed, user1.recv_hash)\n\n\tprint('Successfully verified seeds')",
"def create_node(self, topogramId, id=None, x=None, y=None, data={}):\n assert type(data) is dict\n if id : assert type(id) is str\n if x : assert type(x) is float or type(x) is int\n if y : assert type(y) is float or type(x) is int\n\n el = {\n \"id\" : id,\n \"x\" : x,\n \"y\" : y\n }\n for k in data :\n el[k] = data[k]\n\n node = { \"element\" : el, \"data\" : data }\n return self.make_request(\"POST\", \"nodes\", { \"topogramId\" : topogramId, \"nodes\" : [ node ]})",
"def send(self, data):\n pass",
"def onRegisterNetworkNode(self):\n pass",
"def send_generate_info(context_tokens_tensor, context_length_tensor, tokens_to_generate, all_probs, temperature):\n # Send the sizes of the tensors\n input_info = [context_tokens_tensor.size(0), context_tokens_tensor.size(1), tokens_to_generate, all_probs, temperature]\n input_info_tensor = torch.cuda.FloatTensor(input_info)\n torch.distributed.broadcast(input_info_tensor, 0)\n\n # Send variables to all ranks \n torch.distributed.broadcast(context_length_tensor, 0)\n torch.distributed.broadcast(context_tokens_tensor, 0)",
"def send(self, X, name, tag):\n node = Node.create()\n queue = node.ip[name]\n address = queue.get()\n\n # initializer use port 9999 to receive data\n port = 9999 if name == 'initial' else 12345\n client = ipc.HTTPTransceiver(address, port)\n requestor = ipc.Requestor(PROTOCOL, client)\n\n node.name = name\n\n data = dict()\n data['input'] = X.tostring()\n data['next'] = name\n data['tag'] = tag\n node.log('finish assembly')\n start = time.time()\n try:\n requestor.request('forward', data)\n except Exception, e:\n # node.log('Error', e.message)\n # The interrupt node's ip is the address above\n print address\n \"\"\"Remove the IP address of the interrupted node from the available ip\"\"\"\n available_ip = read_ip(get_file(node.num_devices))\n available_ip = del_ip(available_ip, address)\n\n node.num_devices = node.num_devices - 1\n \"\"\"Update new IP configuration based on available ip\"\"\"\n update_ip(get_file(node.num_devices), available_ip)\n\n \"\"\"Reload the new ip configuration file\"\"\"\n load_ip(node)\n\n end = time.time()\n node.timer(end - start)\n node.log('node gets request back')\n\n client.close()\n queue.put(address)"
]
| [
"0.59956884",
"0.58435893",
"0.5449365",
"0.5444657",
"0.54393643",
"0.5438327",
"0.54369813",
"0.53430873",
"0.53387535",
"0.53292567",
"0.53126484",
"0.5288606",
"0.52818483",
"0.52673703",
"0.525738",
"0.5243236",
"0.5233719",
"0.5204092",
"0.51981485",
"0.5169455",
"0.51604885",
"0.51576424",
"0.5153114",
"0.51458514",
"0.5135996",
"0.512917",
"0.5123659",
"0.51086843",
"0.5089669",
"0.50542307"
]
| 0.60745317 | 0 |
Table function a la R for integer values | def table(x):
c = Counter(x)
return list(c), list(c.values()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def truthtable(self):\n table = []\n for i in xrange(self.length):\n inputs = []\n binary = bin(i).lstrip('0b')\n for i in xrange(len(binary)):\n inputs.append(int(binary[i]))\n inputs.append(1)\n table.append(self.compute(inputs))\n return table",
"def tabulate(func, start = 0):\n return imap(func, count(start))",
"def create_numbers_table():\n work_tuples = parse_columns()\n print('\\n\\n\\n ----- Tableau récapitulatif -----')\n print('-----------------------')\n for ii in work_tuples:\n line = '|'\n for ij in ii:\n line += ' ij |'\n print(line)\n print('-----------------------')",
"def table(nb, max):\n\ti = 0\n\twhile i < max:\n\t\tprint(i + 1, \"*\", nb, \"=\", (i + 1) * nb)\n\t\ti += 1",
"def generate_table(start_int=0, end_int=10, table_type='Addition'):\n lines = [r'\\documentclass{article}',\n r'\\usepackage{geometry}',\n r'\\geometry{landscape,a4paper,total={170mm,257mm},left=10mm,right=10mm,top=10mm}',\n r'\\usepackage{amsmath}',\n r'\\usepackage{amsfonts}',\n r'\\usepackage{amssymb}',\n r'\\usepackage{dcolumn}',\n r'\\newcolumntype{2}{D{.}{}{2.0}}',\n r'\\begin{document}',\n r'\\begin{large}',\n r'\\begin{center}',\n r'{\\Large ' + table_type + r' Table version 0.1\\par}',\n r'\\vspace*{25px}',\n r'\\renewcommand\\arraystretch{1.3}',\n r'\\setlength\\doublerulesep{0pt}',\n r'\\pagenumbering{gobble}',\n r'\\begin{tabular}{r||*{' + str(end_int - start_int + 1) + '}{3|}}']\n\n operator = {'Addition': r'$+$',\n 'Subtraction': r'$-$',\n 'Multiplication': r'$\\times$'}\n\n lines.append(operator[table_type] + ''.join([' & {} '.format(x) for x in range(start_int, end_int + 1)]) + r'\\\\')\n lines.append('\\hline\\hline')\n for i in range(start_int, end_int + 1):\n if table_type == 'Addition':\n lines.append(str(i) + ''.join([' & {} '.format(x + i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n if table_type == 'Subtraction':\n lines.append(str(i) + ''.join([' & {} '.format(x - i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n if table_type == 'Multiplication':\n lines.append(str(i) + ''.join([' & {} '.format(x * i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n lines.append('\\hline')\n\n lines.append(r'\\end{tabular}')\n lines.append(r'\\end{center}')\n lines.append(r'\\end{large}')\n lines.append(r'\\end{document}')\n\n return '\\n'.join(lines)",
"def multi_table(a, b):\n for i in range(1, int(b) + 1):\n print(f'{a} * {i} = {a*i}')",
"def generate_table(self, rows):\n ...",
"def print_table(n):\n \n numbers = list(range(1, n + 1))\n\n #处理第一行\n s = ''\n for i in numbers:\n s = s + '\\t' + str(i)\n print(s)\n\n for i in numbers:\n s = str(i)\n for j in numbers:\n s = s + '\\t' + str(i * j)\n print(s)",
"def make_table(m, n):\n return np.array([[0] * n for _ in range(m)], dtype=float)",
"def tab(data, column) :\r\n\r\n table = {}\r\n \r\n for i in data :\r\n if i[column] not in table :\r\n # add it to tab\r\n table[i[column]] = 1 \r\n else:\r\n # add tabulation\r\n table[i[column]] += 1\r\n\r\n #place the cursor back at 0\r\n \r\n return table",
"def hash_to_table(self, hash_vals):\r\n\t\treturn int(sum([(hash_vals[i] * self._random_r[i]) \\\r\n\t\t\t\t\t\tfor i in range(self._lsh_nums)]) % self._c)",
"def make_table(m, n):\n return [[0] * n for _ in range(m)]",
"def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')",
"def init_table(var_count):\n rows_count = int(math.pow(2, var_count))\n table = [[0 for i in range(var_count)] for i in range(rows_count)]\n\n for i, row in enumerate(table):\n for k, value in enumerate(row):\n tmp = rows_count // math.pow(2, (k + 1))\n if (i // tmp) % 2:\n table[i][k] = True\n else:\n table[i][k] = False\n\n return table",
"def table(self):\n return self.t",
"def _generateTable(self, obj, **args):\n\n if self._script.utilities.isLayoutOnly(obj):\n return []\n\n try:\n table = obj.queryTable()\n except:\n return []\n\n return [messages.tableSize(table.nRows, table.nColumns)]",
"def num_54():\n frmt = \"\"\"\n :{}\n :Generate Data that conform to a uniform distribution.\n :\n :Class values: {}\n :Population size: {}\n :Results:\n : values:\n {}\n : table:\n {}\n : histogram: (class, frequency)\n {}\n :Then use NumPyArrayToTable to get your table.\n \"\"\"\n # import numpy as np\n st = 1\n end = 7\n vals = np.arange(st,end)\n reps = 10\n z = np.repeat(vals,reps)\n np.random.shuffle(z)\n ID = np.arange(len(z))\n tbl = np.array(list(zip(ID, z)), \n dtype = [('ID', 'int'), ('Class', 'int')])\n h = np.histogram(z, np.arange(st, end+1))\n h = np.array(list(zip(h[1], h[0])))\n pad = \" \"\n args =[num_54.__doc__, vals, reps*len(vals),\n indent(str(z.reshape(3,20)), pad),\n indent(str(tbl), pad), indent(str(h), pad)]\n print(dedent(frmt).format(*args))",
"def plus_table(n):\n return [[(i + j) % n for i in range(n)] for j in range(n)]",
"def print_table(table):\n for row in table:\n print(row)",
"def create_primes_table(self, n):\n\n if n == 0:\n print('You\\'ve selected 0 primes. Here is a nonexistent table.')\n return '0 primes'\n if not isinstance(n, int) or n < 0:\n raise ValueError('Sorry, that\\'s not a valid number of primes. Please try again with an integer greater than 0.')\n\n n_primes = get_n_primes(n)\n self.table.append(n_primes)\n\n for i in range(1, len(n_primes)):\n row = []\n row.append(n_primes[i])\n for j in range(1, len(n_primes)):\n row.append(n_primes[i] * n_primes[j])\n self.table.append(row)",
"def htable(nbuckets):",
"def times_table(n):\n return [[(i * j) % n for i in range(n)] for j in range(n)]",
"def throw_table(n, d=6, type='classical'):\n table = None\n roll = range(1, d+1)\n \n if type == 'classical':\n table = list(itertools.product(roll, repeat=n))\n else:\n table = list(itertools.combinations(roll, n))\n if type == 'bosonic':\n # TODO: This only works for 2 dice!!!!\n for i in roll:\n table.append((i,i))\n\n return table",
"def create_small_table(small_dict):\n keys, values = tuple(zip(*small_dict.items()))\n table = tabulate(\n [values],\n headers=keys,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n stralign=\"center\",\n numalign=\"center\",\n )\n return table",
"def create_small_table(small_dict):\n keys, values = tuple(zip(*small_dict.items()))\n table = tabulate(\n [values],\n headers=keys,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n stralign=\"center\",\n numalign=\"center\",\n )\n return table",
"def print_table(table):\n for row in table:\n print(row)",
"def print_table(table):\n for row in table:\n print(row)",
"def prepare_table(table):\n n = len(table)\n for i, row in enumerate(table):\n assert len(row) == n, f\"len(row) = {len(row)} != {n} = n\"\n for j, _ in enumerate(row):\n if i == j:\n table[i][i] = 0.0\n elif i > j:\n table[i][j] = 1 - table[j][i]\n return table",
"def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print",
"def _generate_tabular(lookup_table, interpolation='linear', points_unit=u.pix, **kwargs):\n if not isinstance(lookup_table, u.Quantity):\n raise TypeError(\"lookup_table must be a Quantity.\") # pragma: no cover\n\n ndim = lookup_table.ndim\n TabularND = tabular_model(ndim, name=f\"Tabular{ndim}D\")\n\n # The integer location is at the centre of the pixel.\n points = [(np.arange(size) - 0) * points_unit for size in lookup_table.shape]\n if len(points) == 1:\n points = points[0]\n\n kwargs = {\n 'bounds_error': False,\n 'fill_value': np.nan,\n 'method': interpolation,\n **kwargs\n }\n\n t = TabularND(points, lookup_table, **kwargs)\n\n # TODO: Remove this when there is a new gWCS release\n # Work around https://github.com/spacetelescope/gwcs/pull/331\n t.bounding_box = None\n\n return t"
]
| [
"0.6684065",
"0.63467693",
"0.6338746",
"0.62285286",
"0.6102548",
"0.6079408",
"0.6012336",
"0.6006329",
"0.5984143",
"0.5936262",
"0.5877407",
"0.58743227",
"0.5830404",
"0.5813022",
"0.5775467",
"0.5732407",
"0.5676613",
"0.56693375",
"0.5663503",
"0.56617427",
"0.5659174",
"0.5628342",
"0.56249595",
"0.5616826",
"0.5616826",
"0.5613316",
"0.5613316",
"0.5591249",
"0.5561989",
"0.5527721"
]
| 0.6623371 | 1 |
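
A minimal, self-contained usage sketch for the table() record above; the Counter import and the sample list are assumptions added so the snippet runs on its own, and the record's two-liner is restated verbatim for that purpose.

from collections import Counter

def table(x):
    # same two-liner as the record above
    c = Counter(x)
    return list(c), list(c.values())

values, counts = table([1, 2, 2, 3, 3, 3])
# values -> [1, 2, 3]   counts -> [1, 2, 3]
# like R's table(), but keyed by first appearance rather than sorted
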
Calculating local means. Poor man's loess. Fine and quick if many data points. | def local_mean(x,y, n=10):
xx, yy = (list(t) for t in zip(*sorted(zip(x, y)))) # sort x and y after x
m = int(len(x)/n) # Number of data points in each group
x_o, y_o = [], []
x_sum, y_sum, v = 0, 0, 0
j=1
for i in range(len(x)):
if v < m:
x_sum += xx[i]
y_sum += yy[i]
v += 1
else:
x_o.append(x_sum/m)
y_o.append(y_sum/m)
            x_sum, y_sum, v = xx[i], yy[i], 1  # start the next group with the current point so it is not skipped
j += 1
return x_o, y_o | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_density_mean(self):\n\n # the simulation units are msun / kpc ^3\n local = np.mean(self.dens)\n\n return local",
"def mean(points):\r\n\t\treturn sum(points)/len(points)",
"def mean_average_position():\n pass",
"def Mean(data):\n return data.mean()",
"def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def compute_means(window_means):\n return np.mean(window_means, axis=0)",
"def compute_local_statistic(self, datapoint):\n mu = np.mean(datapoint, axis=0)\n d2 = scipy.spatial.distance.mahalanobis(mu, self.mu_0,\n self.s2_0inv) ** 2\n d2 /= (1. / self.training_sample_size + 1. / self.window_size)\n return d2",
"def global_mean(data, p=None):\n\n jit_my_stuff()\n\n if p is not None:\n return global_sum(mean_helper(data, p))\n\n global globNumSamples\n\n return global_sum(data) / globNumSamples",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def get_original_means(self):\n return self.meanDataValues",
"def get_mean_coord(self):\n # load dataset in a dummy manner\n dataset = torchvision.datasets.MNIST('../../data/MNIST_data/', train=True, download=False)\n mean = (dataset.data.float().mean(0) / 255).unsqueeze(0) # [1,28,28]\n return mean",
"def mean_loss_py(florida_landfall_rate, florida_mean, florida_stddev,\n gulf_landfall_rate, gulf_mean, gulf_stddev, num_monte_carlo_samples):\n tot_loss = 0\n\n for i in range(num_monte_carlo_samples):\n fl_events = np.random.poisson(lam=florida_landfall_rate, size=1)[0]\n fl_loss = 0\n for j in range(fl_events):\n fl_loss += np.random.lognormal(florida_mean, florida_stddev)\n\n gulf_events = np.random.poisson(lam=gulf_landfall_rate, size=1)[0]\n\n gulf_loss = 0\n for k in range(gulf_events):\n gulf_loss += np.random.lognormal(gulf_mean, gulf_stddev)\n\n year_loss = fl_loss + gulf_loss\n\n tot_loss += year_loss\n\n return tot_loss / num_monte_carlo_samples",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def mean(L):\n\treturn sum(L) / len(L)",
"def global_mean(trainset, finalpredset):\n train = testset_to_sparse_matrix(trainset.build_testset())\n\n # find the non zero ratings in the train\n nonzero_train = train[train.nonzero()]\n\n # calculate the global mean\n global_mean_train = nonzero_train.mean()\n\n pred = np.full(train.shape, global_mean_train)\n\n finalpred_usr_idx, finalpred_movies_idx, _ = get_testset_indices(finalpredset)\n return pred[finalpred_usr_idx, finalpred_movies_idx]",
"def calculate_mean(weather_data):\n sum_value=0\n\n for value in weather_data:\n sum_value += float(value)\n \n mean = sum_value/len(weather_data)\n\n return (mean)",
"def calculate_mean(cls, sensor):\n try:\n if sensor == 't':\n return cls.calculate_y_pos(sum(cls.mean_t) / len(cls.mean_t), sensor)\n if sensor == 'l':\n return cls.calculate_y_pos(sum(cls.mean_l) / len(cls.mean_l), sensor)\n except ZeroDivisionError:\n return None",
"def lmean (inlist):\r\n sum = 0\r\n for item in inlist:\r\n sum = sum + item\r\n return sum/float(len(inlist))",
"def mean(data):\n n = len(data)\n return sum(data)/float(n)",
"def _get_mean(self):\n return self._get_conditional_negative_energy()",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean",
"def _predictive_mean_analytical(self, mu, sigma):\r\n #FIXME: Not correct\r\n return mu",
"def my_mean(x):\n return my_sum(x) / my_len(x)",
"def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)",
"def mpi_mean(data):\n s_local = data.sum(0)\n m = np.empty_like(s_local)\n mpi.COMM.Allreduce(s_local, m)\n num_data = mpi.COMM.allreduce(data.shape[0])\n m /= float(num_data)\n return m",
"def mann_whitney_plus_means(turnstile_weather):\n with_rain = turnstile_weather[turnstile_weather.rain == 1]\n without_rain = turnstile_weather[turnstile_weather.rain == 0]\n\n with_rain_mean = with_rain['ENTRIESn_hourly'].mean()\n without_rain_mean = without_rain['ENTRIESn_hourly'].mean()\n U, p = scipy.stats.mannwhitneyu(with_rain['ENTRIESn_hourly'], without_rain['ENTRIESn_hourly'])\n\n return with_rain_mean, without_rain_mean, U, p",
"def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical mean.')\n return self.__total_rewards / self.__total_pulls",
"def mean(series):\n return fsum(series) / len(series)"
]
| [
"0.67056066",
"0.6491562",
"0.6450197",
"0.6444586",
"0.62870014",
"0.6237517",
"0.62069595",
"0.6158274",
"0.61431855",
"0.6097487",
"0.6062705",
"0.6060495",
"0.60540843",
"0.6053039",
"0.6024757",
"0.60079587",
"0.599939",
"0.599226",
"0.59790677",
"0.5973261",
"0.5965069",
"0.5953176",
"0.5953176",
"0.59356296",
"0.59319615",
"0.59282845",
"0.5921963",
"0.5919373",
"0.59096426",
"0.5863226"
]
| 0.6994391 | 0 |
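A brief usage sketch for the local_mean document above, assuming the function is importable in the current namespace; the noisy-sine data and group count are illustrative, not from the source:

import numpy as np

# Many scattered points, so averaging them in n groups is reasonable.
rng = np.random.default_rng(0)
x = rng.uniform(0, 10, size=500)
y = np.sin(x) + rng.normal(scale=0.3, size=500)

# local_mean sorts the points by x and returns one (mean x, mean y) pair per full group.
x_smooth, y_smooth = local_mean(list(x), list(y), n=10)
print(len(x_smooth))  # 10 groups of 50 points each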
Parse a ``line`` against a list of ``patterns``. | def parse_line(line, patterns=None):
    if patterns is None:
        patterns = LINE_PATTERNS
    for line_re in patterns:
        match = line_re.match(line)
        if match:
            data = match.groupdict()
            av_pairs = data['av_pairs']
            data['av_pairs'] = cleanup_av_pairs(av_pairs)
            return data
    return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, line, allow_extra=False):\r\n if not isinstance(line, Compatibility.string):\r\n raise TypeError(\"Expected line to be a string, got %s\" % type(line))\r\n sre_match = self._re.match(line)\r\n if sre_match is None:\r\n raise ScanfParser.ParseError(\"Failed to match pattern: %s against %s\" % (\r\n self._re_pattern, line))\r\n groups = list(sre_match.groups())\r\n if len(groups) != len(self._applicators):\r\n raise ScanfParser.ParseError(\"Did not parse all groups! Missing %d\" % (\r\n len(self._applicators) - len(groups)))\r\n if sre_match.end() != len(line) and not allow_extra:\r\n raise ScanfParser.ParseError(\"Extra junk on the line! '%s'\" % (\r\n line[sre_match.end():]))\r\n so = ScanfResult()\r\n for applicator, group in zip(self._applicators, groups):\r\n applicator(so, group)\r\n return so",
"def _process_file(self, patterns):\n with open(self.logfile) as fh:\n for line in fh:\n for pat in patterns:\n pat.send(line)",
"def _do_rule_processing(self, line, events):\n\n for rule in self.rules:\n match = rule.regexp.search(line)\n if match:\n events.append(Event(self, rule.handler, LogMatch(line, match)))\n if rule.quick:\n break",
"def _MatchPatternLines(self, in_stream, re_pattern, num_lines=None):\n num_read = 0\n while True:\n line = in_stream.readline()\n if not line:\n return None\n num_read += 1\n m = re_pattern.match(line)\n if m is not None:\n return m\n if num_lines is not None and num_read >= num_lines:\n return None",
"def ProcessLine(line, rules, processing, previous_line_data):\n line_data = {'line':line, 'line_offset':processing['offset_processed']}\n \n # Update with always-included data, like glob keys, and the component\n line_data.update(processing['data'])\n \n # Test if this line is multi-line (positive test)\n is_multi_line = False\n for rule in rules:\n if rule.get('multi line regex test', False):\n if re.match(rule['regex'], line):\n is_multi_line = True\n break\n # Negative regex test\n for rule in rules:\n if rule.get('multi line regex not', False):\n if re.match(rule['regex'], line):\n is_multi_line = True\n break\n \n # If this is multi_line and we have a real previous line to embed this data in\n if is_multi_line and previous_line_data != None:\n #print 'Multiline: %s' % line\n if 'multiline' not in previous_line_data:\n previous_line_data['multiline'] = []\n \n previous_line_data['multiline'].append(line)\n\n\n # Only process rules on first lines (not multi lines), and return the line_data to be the next line's previous_line_data\n if not is_multi_line:\n #print line\n \n # Start with: We havent found a match yet\n match_found = False\n \n for item in rules:\n # Skip the multi-line regext test/not rules\n if item.get('multi line regex test', False) or item.get('multi line regex not', False):\n continue\n \n # Break out our terms for this rule item\n terms = re.findall('%\\((.*?)\\)s', item['regex'])\n #print item['regex']\n #print terms\n \n regex = item['regex']\n \n # Pre-processing step, to remove any conflicting characters with the rest of the regex which need to be escaped/sanitized\n for term in terms:\n regex = regex.replace('%%(%s)s' % term, 'MATCHMATCHMATCH')\n \n regex = SanitizeRegex(regex)\n regex = regex.replace('MATCHMATCHMATCH', '(.*?)')\n \n #print '--- %s' % item['id']\n #print regex\n #print line\n \n regex_result = re.findall(regex, line)\n #print regex_result\n if regex_result:\n \n # Python does something stupid with multiple variables, so pull them out of the embedded tuple it adds to the list\n if type(regex_result[0]) == tuple:\n regex_result = regex_result[0]\n \n for count in range(0, len(terms)):\n #print '%s: %s: %s' % (count, terms[count], regex_result[count])\n line_data[terms[count]] = regex_result[count]\n \n #print regex\n #print 'MATCHED! %s' % regex\n #print regex_result\n \n match_found = True\n \n # Save the line match ID, so we can reference it for markup/state information\n line_data['__rule_id__'] = item['id']\n \n break\n \n return line_data\n \n # Else, this is multi-line, so return it to continue to be the next line's previous_line_data\n else:\n #TODO(g): Save this multi-line data every time? Otherwise when does it get saved out?\n pass\n \n return previous_line_data",
"def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False",
"def parse_line(file,pattern,group=1,indiv_file=None):\n text = open_file(file,indiv_file).split(\"\\n\")\n for line in text:\n match = re.search(pattern, line)\n if match:\n matched_string = match.group(group)\n return matched_string",
"def parse_line(self, line):\n raise NotImplementedError",
"def parse_lines(lines, packages):\n for line in lines:\n x = line.split(' ')\n cmd = x[0].upper()\n #LOG.debug(cmd)\n if 'LIST' in cmd:\n getattr(commands, cmd)(p)\n else:\n getattr(commands, cmd)(line, p)",
"def parse_order(line, *line_parsers):\r\n for parser in line_parsers:\r\n try:\r\n return parser.parse(line)\r\n except ValueError:\r\n continue",
"def parse(cls, line):\r\n raise NotImplementedError",
"def _ProcessMatch(self, input_line, match_regex, line, output_stream):\n lastpos = 0\n for fullmatch in match_regex.finditer(line):\n # Add text before the match as regular text.\n if lastpos < fullmatch.start():\n starting_line = line[lastpos:fullmatch.start()]\n if self._ConsumeTextForPlugin():\n self._formatting_handler.HandleText(\n input_line,\n output_stream,\n starting_line)\n else:\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n starting_line)\n\n for rulename, match in fullmatch.groupdict().items():\n if match is not None:\n if self._ConsumeTextForPlugin() and rulename != \"PluginEnd\":\n self._formatting_handler.HandleText(\n input_line,\n output_stream,\n match)\n else:\n handler = getattr(self, u\"_Handle{0}\".format(rulename), None)\n handler(input_line, match, output_stream)\n\n lastpos = fullmatch.end()\n\n # Add remainder of the line as regular text.\n if lastpos < len(line):\n remaining_line = line[lastpos:]\n if self._ConsumeTextForPlugin():\n self._formatting_handler.HandleText(\n input_line,\n output_stream,\n remaining_line)\n else:\n self._formatting_handler.HandleEscapedText(\n input_line,\n output_stream,\n remaining_line)",
"def parse_file(file: CodeFile, config: dict):\n path = os.path.join(file.dir, f'{file.name}.{file.extension}')\n print(path)\n file_gen = (line for line in open(path))\n for line in file_gen:\n parse.apply_patterns(line.strip(), config['patterns'], file)",
"def _parse_line(line):\n\n number_pattern = '(\\d+(?:\\.\\d+)?)'\n line_pattern = '^\\s+%s\\s+$' % ('\\s+'.join([number_pattern for x in range(10)]))\n\n match = re.match(line_pattern, line)\n if match:\n print(match.groups())\n return match.groups()\n # if there are no matches\n return None",
"def _add_patterns(\n self, fuzzy_patterns: List[Dict[str, Any]], regex_patterns: List[Dict[str, Any]]\n ) -> None:\n for entry in fuzzy_patterns + regex_patterns:\n label = entry[\"label\"]\n if \"id\" in entry:\n ent_label = label\n label = self._create_label(label, entry[\"id\"])\n self._ent_ids[label] = (ent_label, entry[\"id\"])\n pattern = entry[\"pattern\"]\n kwargs = entry[\"kwargs\"]\n if isinstance(pattern, Doc):\n self.fuzzy_patterns[label][\"patterns\"].append(pattern)\n self.fuzzy_patterns[label][\"kwargs\"].append(kwargs)\n elif isinstance(pattern, str):\n self.regex_patterns[label][\"patterns\"].append(pattern)\n self.regex_patterns[label][\"kwargs\"].append(kwargs)\n else:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str), type (str),\",\n \"optional kwargs (Dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n for label, pattern in self.fuzzy_patterns.items():\n self.fuzzy_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])\n for label, pattern in self.regex_patterns.items():\n self.regex_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])",
"def extract_pattern(self, patterns):\n\n # if we have more patterns or\n # a single one which is not a file:\n if len(patterns) > 1 or (\n len(patterns) == 1 and not os.path.isfile(patterns[0])):\n return patterns\n\n else:\n pattern = patterns[0]\n pat_list = []\n # if PATTERN is a file, extract all patterns\n if os.path.isfile(pattern):\n try:\n with open(pattern, \"r\", encoding=\"utf-8\") as p_file:\n for line in p_file:\n pat_list.append(line.strip())\n except Exception:\n print(\"The selected PATH-file cannot be opened! \"\n \"Please choose another one.\")\n sys.exit()\n\n return pat_list",
"def line_parser(line_starts_with: str, line: str) -> str: # pure function\n\n if line is None:\n return 'empty'\n elif line.startswith(line_starts_with):\n return 'start'\n elif line[0] in [' ', '\\t', '#', '\\n']:\n return 'empty'\n else:\n print('End works: ', line)\n return 'end'",
"def validate_line_durations(\n line_durations: Optional[List[float]],\n valid_rhythmic_patterns: List[List[float]],\n n_measures: int\n) -> None:\n if line_durations is None:\n return\n total_time = 0\n current_measure_durations = []\n for duration in line_durations:\n extended_durations = current_measure_durations + [duration]\n is_valid = any(\n valid_pattern[:len(extended_durations)] == extended_durations\n for valid_pattern in valid_rhythmic_patterns\n )\n if not is_valid:\n raise ValueError(\n f\"Disallowed rhythmic pattern found: {extended_durations}.\"\n )\n total_time += duration\n current_measure_durations = update_current_measure_durations(\n current_measure_durations, duration\n )\n if total_time != n_measures:\n raise ValueError(\n f\"Line lasts {total_time} measures, \"\n f\"but {n_measures} measures are needed.\"\n )",
"def line_parser(path):\n lines = []\n with open(path, 'r') as input:\n lines = [line.rstrip().split(',') for line in input]\n lines = [\n [[float(x1), float(y1)],\n [float(x2), float(y2)]] \n for x1, y1, x2, y2 in lines]\n return lines",
"def parse_line(cls, line):\n regex = re.compile(cls.pattern)\n m = regex.search(line)\n if m:\n data = m.groupdict()\n data = cls.post_process(data)\n if cls.date_format:\n data['time'] = cls.convert_time(data['time'])\n else:\n data['time'] = datetime.now()\n return data\n else:\n return {}",
"def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n 
if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = 
locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]",
"def _parse_rule(line):\n # type: (AnyStr) -> Tuple[AnyStr, AnyStr, AnyStr]\n line = line.strip()\n if not line or line.startswith('#'):\n return None\n try:\n tag, project, regexp = line.split(':', 2)\n except ValueError:\n raise ParseError(\"syntax error\")\n regexp = _parse_regexp(regexp)\n return tag.strip(), project.strip(), regexp",
"def process_lines(self, lines):\n line_index = 0\n n_lines = len(lines)\n while line_index < n_lines:\n if lines[line_index].startswith(\"HIERARCHY\"):\n line_index = self._read_skeleton(lines, line_index, n_lines)\n if lines[line_index].startswith(\"MOTION\"):\n self._read_frametime(lines, line_index+2)\n line_index = self._read_frames(lines, line_index+3, n_lines)\n else:\n line_index += 1",
"def parse_line(self, line):\n if self.signal_eof:\n return \"\"\n\n match = re.search(\"^([\\w\\s]+from) ([^:]+):(\\d+)(:|,)$\", line)\n if match:\n return self.parse_line_from(match)\n\n match = re.search(\"^([^:]+):(?:((?:\\d+:)?\\d+):)?(?:(error|warning|note):)?(.+)$\", line)\n if match:\n return self.parse_line_err(match)\n\n return line",
"def parse( regexp, line, method=search ):\n m = method( regexp, line )\n if m:\n return m\n return None",
"def parse_lines(self, start_line=0, end_line=False):\n if end_line is False: end_line = len(self.file_ltxt)\n\n lines = self.file_ltxt\n self.E_str = \"parse_lines\"\n self.line_num = start_line\n\n # Loop over lines and parse\n while self.line_num < end_line:\n line = lines[self.line_num].strip()\n\n if line == \"echo\": print(\"\")\n\n # Parse any variables\n elif self.line_declarations['variable'](line):\n self.parse_variable_line(line)\n\n # Parse any file loading commands\n elif self.line_declarations['load'](line):\n self.parse_load_cmd(line)\n\n # Parse any file loading commands\n elif self.line_declarations['plot'](line):\n self.parse_plot_cmd(line)\n\n # Parse any file loading commands\n elif self.line_declarations['write'](line):\n self.parse_write_cmd(line)\n\n # Parse any math commands\n elif self.line_declarations['math'](line):\n self.parse_math_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['echo'](line):\n self.parse_echo_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['calc'](line):\n self.parse_calc_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['set'](line):\n self.parse_set_cmd(line)\n\n # Parse any shell commands\n elif self.line_declarations['shell'](line):\n self.parse_shell_cmd()\n\n # Parse any for loop commands\n elif self.line_declarations['for'](line):\n self.parse_for_cmd(line)\n\n # Parse any echo commands\n elif self.line_declarations['script'](line):\n self.parse_script_cmd(line)\n\n elif self.line_declarations['inline_code'](line):\n getattr(self, f\"parse_{line.split()[0]}_cmd\")(line)\n\n elif self.line_declarations['if'](line):\n self.parse_if_cmd(line)\n\n # elif self.line_declarations['splice'](line):\n # self.parse_splice_cmd(line)\n\n elif self.line_declarations['glue'](line):\n self.parse_glue_cmd(line)\n\n elif self.line_declarations['exit'](line):\n print(\"\\n\\nStopped Code -exit was called.\")\n raise SystemExit\n\n # The end of control statements\n elif '}' in line:\n pass\n\n # Print a warning about unknown line\n else:\n self.print_warning(f\"I don't understand a line: '{line}'\")\n\n self.line_num += 1",
"def parse(self, f):\n INFO = self.logger.info\n DEBUG = self.logger.debug\n\n self.dbid = None\n self.standby_dbid = None\n for line in f:\n line = line.strip()\n DEBUG('parse: %s' % line)\n\n m = re.match(DBID_RE, line)\n if m:\n self.dbid = int(m.group(1))\n INFO('match dbid: %d' % self.dbid)\n\n m = re.match(STANDBY_DBID_RE, line)\n if m:\n self.standby_dbid = int(m.group(1))\n INFO('match standby_dbid: %d' % self.standby_dbid)\n\n assert self.dbid is not None",
"def __init__(self, line, match):\n\n self.line = line\n self.match = match",
"def parse(self, f):\n \n for line in f:\n self.parse_line(line)",
"def process(patterns, text):\n\n for i, p in enumerate(patterns):\n pattern = _fix_pattern(p)\n\n found = []\n for grammar, replace in pattern:\n\n find_and_replace = create_find_and_replace(grammar, replace)\n results = parse_grammar(find_and_replace, text)\n if not results:\n break\n else:\n found.append(len(results))\n text = _transform_results(results, text)\n\n if found:\n log.info('=> pattern {} found {} time(s) in {} pass(es)'\n .format(i + 1, sum(found), len(found)))\n else:\n log.info('__ pattern {} not found'\n .format(i + 1))\n\n return text"
]
| [
"0.6369723",
"0.6281397",
"0.62425673",
"0.59930545",
"0.5857287",
"0.5748906",
"0.571239",
"0.5686701",
"0.5681457",
"0.56215805",
"0.5605779",
"0.5588075",
"0.55302525",
"0.54868627",
"0.5478415",
"0.5466466",
"0.5439876",
"0.5428541",
"0.54191476",
"0.53572595",
"0.53267634",
"0.53231585",
"0.5322475",
"0.53011394",
"0.52799785",
"0.52774304",
"0.5258956",
"0.5249317",
"0.5224804",
"0.52190363"
]
| 0.7249798 | 0 |
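A minimal sketch of driving parse_line with an explicit pattern list. The regex and the cleanup_av_pairs stub are assumptions standing in for the module-level LINE_PATTERNS and cleanup_av_pairs that the document references, and they only take effect if defined in the same namespace as parse_line:

import re

def cleanup_av_pairs(av_pairs):
    # Hypothetical stand-in: split "key=value" pairs into a dict.
    return dict(pair.split("=", 1) for pair in av_pairs.split())

LINE_PATTERNS = [
    # Named groups become the keys of the returned dict; 'av_pairs' is required.
    re.compile(r"(?P<timestamp>\S+)\s+(?P<host>\S+)\s+(?P<av_pairs>.+)"),
]

line = "2024-01-01T00:00:00 router1 user=admin cmd=show"
print(parse_line(line, patterns=LINE_PATTERNS))
# {'timestamp': '2024-01-01T00:00:00', 'host': 'router1', 'av_pairs': {'user': 'admin', 'cmd': 'show'}}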
When the user cancels a checkout, the relation to the baseline is None | def test_after_cancel_checkout(self):
    baseline = createContentInContainer(self.folder, 'stageable_type')
    working_copy = self.do_checkout(baseline)
    self.do_cancel(working_copy)
    relation = IWCAnnotator(baseline).get_relation()
    self.assertIsNone(relation)
    self.assertIsNone(get_checkout_relation(baseline)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cancel_pending_payment(self):\n pass",
"def setNoCheckout(self) -> None:\n ...",
"def test_cancel_shipment_old(self):\n pass",
"def _order_cancel(self, bo):\n log.info(\"bo_blotter: order_cancel bracket order bo#%s\" % bo.ticket) \n cancelled = bo.cancel()\n return(cancelled)",
"def cancel_contract(self) -> float:\n self.start = None\n if self.balance > 0.0:\n return self.balance\n return 0.0",
"def cancel(self): #$NON-NLS-1$\r",
"def cancel_contract(self) -> float:\n self.start = None\n if self._carried_term is True:\n return self.bill.get_cost() - TERM_DEPOSIT\n else:\n return self.bill.get_cost()",
"def cancel_contract(self) -> float:\n self.start = None\n return self.bill.get_cost()",
"def onDealCanceled(argsList):\r\n\tCyInterface().setDirty(InterfaceDirtyBits.Score_DIRTY_BIT, True)",
"def cancel(self):",
"def cancel(self):",
"def cancel(self):",
"def test_cancel_shipment(self):\n pass",
"def cancel():",
"def cancel_dummy(self):\n if self.state != 'authorized':\n self.raise_user_error('cancel_only_authorized')\n else:\n self.state = 'cancel'\n self.save()",
"def cancel(self):\n if self.is_market:\n log.info(\"bo#%s: can't cancel order (market)\" % self.ticket)\n return(False)\n else:\n log.info(\"bo#%s: cancel master order, limit and stop order\" % self.ticket)\n if self.is_cancellable:\n cancel_order(self.order_master)\n cancel_order(self.order_limit)\n cancel_order(self.order_stop)\n self.cancelled.emit(bo=self)\n self.bo_blotter._move_cancelled_order(self)\n return(True)\n else:\n log.info(\"bo#%s: can't cancel order (not cancellable)\" % self.ticket)\n return(False)",
"def action_cancel(self):\n self.state = 'canceled'",
"def OnCancel(self, event):\n pass",
"def OnCancel(self, event):\n pass",
"def CallTipCancel(self):\n if self.CallTipActive():\n super(EditraBaseStc, self).CallTipCancel()",
"def on_cancel(self) -> None:\n pass",
"def on_cancel(self) -> None:\n pass",
"def do_cancel(self):\r\n self.write({'cancelled': True})",
"def test_dont_cancel_if_advance_payment_not_required(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n # set payment_due_date to None, otherwise advance_payment_required is\n # automatically set to True\n self.ticketed_event.payment_due_date = None\n self.ticketed_event.advance_payment_required = False\n self.ticketed_event.save()\n self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once for all\n # cancelled bookings\n self.unpaid.refresh_from_db()\n self.paid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)",
"def cancel(self):\n pass",
"def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True",
"def test_request_cancel_active_subscription(self):\n self.braintree_customer.subscription_id = \"1234\"\n self.braintree_customer.pending_cancel = False\n self.braintree_customer.save()\n self.assertTrue(SubscriptionManager.request_cancel(self.braintree_customer))\n self.assertTrue(self.braintree_customer.pending_cancel)",
"def do_cancel(order):\r\n self.gox.cancel(order.oid)",
"def onCancel(self, fetcher): #$NON-NLS-1$\r",
"def test_dont_cancel_for_already_cancelled(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n self.unpaid.cancelled = True\n self.unpaid.save()\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once\n # for all cancelled bookings\n self.unpaid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertTrue(self.unpaid.cancelled)"
]
| [
"0.6682023",
"0.64539814",
"0.6367978",
"0.625337",
"0.6229602",
"0.61449844",
"0.6132463",
"0.6123441",
"0.611586",
"0.6084435",
"0.6084435",
"0.6084435",
"0.6039756",
"0.6036369",
"0.60193",
"0.60182756",
"0.59655523",
"0.5933016",
"0.5933016",
"0.59231687",
"0.59224665",
"0.59224665",
"0.5919484",
"0.5918455",
"0.59031975",
"0.58891",
"0.5885468",
"0.5877561",
"0.58756435",
"0.58571315"
]
| 0.7254451 | 0 |
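The test above relies on a staging add-on's helpers (createContentInContainer, IWCAnnotator, get_checkout_relation) that are not shown here. The following is a purely hypothetical, self-contained illustration of the invariant being asserted, namely that cancelling a checkout removes the stored baseline relation:

class Baseline:
    def __init__(self):
        self.relation = None  # points at the working copy while checked out

def do_checkout(baseline):
    working_copy = object()
    baseline.relation = working_copy  # record the baseline -> working-copy link
    return working_copy

def do_cancel(baseline, working_copy):
    if baseline.relation is working_copy:
        baseline.relation = None  # cancelling the checkout clears the relation

baseline = Baseline()
wc = do_checkout(baseline)
do_cancel(baseline, wc)
assert baseline.relation is None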
Checks the status of a panel, i.e. whether it is live or has uncommitted changes | def check_panel_status(s, id):
    panels = check_panel_status_query(s, id)
    status = True
    for i in panels:
        if i.intro > i.current_version:
            status = False
            break
        if i.last is not None:
            if i.last == i.current_version:
                status = False
                break
    return status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_virtualpanel_status(s, id):\n panels = check_virtualpanel_status_query(s, id)\n status = True\n for i in panels:\n if i.intro > i.current_version:\n status = False\n break\n if i.last is not None:\n if i.last == i.current_version:\n status = False\n break\n\n return status",
"def getPanelStatus(self) -> dict:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelStatus()\r\n return {}",
"def check_status(self):",
"def checkStatus(self):\n return None",
"def status() -> None:\n wit = WitStatus()\n\n print(f'Commit ID: {wit.last_commit_id}')\n\n if wit.last_commit_id:\n full_changes = wit.get_changes_to_be_committed()\n print(f'Changes to be committed: {\", \".join(full_changes)}')\n\n changed, untracked = wit.compare_two_list_files(\n wit.original_files, wit.stage_files,\n wit.parent_wit_dir, wit.stage_dir\n )\n print(f'Changes not staged for commit: {\", \".join(changed)}')\n print(f'Untracked files: {\", \".join(untracked)}')",
"def test_0030_check_workflow_repository(self):\n repository = self.test_db_util.get_repository_by_name_and_owner(workflow_repository_name, common.test_user_1_name)\n strings_displayed = ['Workflows', 'New workflow for 0060_filter', '0.1']\n strings_not_displayed = ['Valid tools', 'Invalid tools']\n self.display_manage_repository_page(repository, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed)",
"def get_status(self):\n\n # update status\n # TODO: this needs to consider \"partial\" status based on the testcodes that are defined\n # in the panel.\n # get the condition OK aliquot condition instance\n result_item_cls = models.get_model(self._meta.app_label, 'resultitem')\n aliquot_condition_ok = AliquotCondition.objects.get_ok()\n if not self.aliquot.aliquot_condition:\n # how can this be ??\n status = 'ERROR'\n elif result_item_cls.objects.filter(result__order=self) or self.panel.panel_type == 'STORAGE':\n # test aliquot condition and set the order status\n if self.aliquot.aliquot_condition == aliquot_condition_ok:\n status = 'COMPLETE'\n else:\n # has results or is stored but condition is not 10\n # was this meant to be a storage panel?\n status = 'ERROR'\n elif self.aliquot.aliquot_condition != aliquot_condition_ok:\n status = 'REDRAW'\n else:\n status = 'PENDING'\n # regardless of status, check that order was not deleted on DMIS\n dmis_tools = DmisTools()\n if dmis_tools.is_withdrawn_order(self):\n # other aspects of result visibility must consider this value\n status = 'WITHDRAWN'\n return status",
"def getComponentStatus(self, opts):\n\n # We'll check screen even if we found PID so that we can get screen information\n screen = self.findScreen(opts.verbose)\n\n if screen == None:\n print(\"Did not find screen named %s\" % self._screenName)\n else:\n print(\"Found screen %s\" % screen)\n\n if opts.verbose:\n print(\"OpenSimulator path: %s\" % self._binaryPath)\n\n if screen != None:\n print(\"Status: ### Active ###\")\n return True\n else:\n print(\"Status: ### Inactive ###\")\n return False",
"def check_status(board, player_mark, turn_counter):",
"def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True",
"def check_status(self, id):\n raise NotImplementedError()",
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def check_unstaged_changes(self):\n pass",
"def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Something'} is being tracked.\")\n\n return status",
"def check_state(self):\n pass",
"def check(self):\n if self.widget:\n self.widget.update()\n\n self.check_virtual_display()\n\n return self.runner.check()",
"def status():\n if not check_for_wit():\n raise NoWitError(f'No .wit folder exists in {os.getcwd()}')\n if not os.path.exists(refs_path):\n print('No files have been committed yet')\n return False\n print(f'Current commit ID: {get_current_commit_id()}')\n print('Changes to be committed:')\n print('-' * 20)\n for num, file in enumerate(get_files_to_be_committed()):\n print(f'{num + 1}: {file}')\n print('\\n')\n print('Changes not staged for commit')\n print('-' * 20)\n for num, file in enumerate(get_files_not_staged()):\n print(f'{num + 1}: {file}')\n for file in deleted_files:\n print(f'{file} - deleted from main folder')\n print('\\n')\n print('Untracked files')\n print('-' * 20)\n for num, file in enumerate(get_untracked_files()):\n print(f'{num + 1}: {file}')",
"def is_statement_status_changed_successfully(self):\n pop_up_not_present = None\n try:\n self.wait(10).until(EC.presence_of_element_located(self.info_pop_up_locator))\n pop_up_not_present = False\n self.click_element(self.ok_button_locator)\n except:\n pop_up_not_present = True\n finally:\n return pop_up_not_present",
"def test_repo_updated():\n\n status = \"The following updates were applied\"\n report_status = BehavioralUtils.check_repo_updated('drupal', 'builds')\n assert report_status == status",
"def state_preview_validate(cfg, app, win, events):",
"def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False",
"def check_event_status(self):\n pass",
"def is_up(self):\n data = self.vxprint()\n return self.name in data and data[self.name].STATE == \"ACTIVE\"",
"def has_state_changed(self) -> bool:\r\n ...",
"def status(self):",
"def check_status(self):\n return self.status",
"def check_status(self):\n return self.status",
"def getPanelStatusCode(self) -> int:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelStatusCode()\r\n return -1",
"def _check_status(self, ests):\n if self.status != ests:\n raise CpoException(\"Unexpected solver status. Should be '{}' instead of '{}'\".format(ests, self.status))",
"def status_pf(module):\n rc, out, err = module.run_command(['service', 'pf', 'status'])\n\n # Obtain current status of pf\n if 'Enabled' in out:\n return True\n else:\n return False"
]
| [
"0.67008185",
"0.63950515",
"0.6303801",
"0.5993147",
"0.5920224",
"0.58405703",
"0.5785136",
"0.5742182",
"0.56543016",
"0.5647434",
"0.5636928",
"0.56352186",
"0.5626811",
"0.5620609",
"0.56139094",
"0.5611207",
"0.5607989",
"0.5591626",
"0.55884117",
"0.5582546",
"0.55730665",
"0.5558817",
"0.5554581",
"0.5530815",
"0.55155283",
"0.55122787",
"0.55122787",
"0.550801",
"0.550631",
"0.5493091"
]
| 0.7214635 | 0 |
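check_panel_status only needs rows that expose intro, last, and current_version. Below is a sketch that exercises it with stubbed rows; the namedtuple and the replacement check_panel_status_query are illustrative assumptions and must live in the same namespace as check_panel_status, which normally queries the database through the session s:

from collections import namedtuple

Row = namedtuple("Row", ["intro", "last", "current_version"])

def check_panel_status_query(s, id):  # stand-in for the real DB query
    return [Row(intro=1, last=None, current_version=3),
            Row(intro=2, last=2, current_version=3)]

# True: every region was introduced at or before the current live version
# and none is retired exactly at the current version, so nothing is pending.
print(check_panel_status(None, 42))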
Checks the status of a virtual panel, i.e. whether it is live or has uncommitted changes | def check_virtualpanel_status(s, id):
    panels = check_virtualpanel_status_query(s, id)
    status = True
    for i in panels:
        if i.intro > i.current_version:
            status = False
            break
        if i.last is not None:
            if i.last == i.current_version:
                status = False
                break
    return status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_panel_status(s, id):\n panels = check_panel_status_query(s, id)\n status = True\n for i in panels:\n if i.intro > i.current_version:\n status = False\n break\n if i.last is not None:\n if i.last == i.current_version:\n status = False\n break\n\n return status",
"def check_status(self):",
"def getPanelStatus(self) -> dict:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelStatus()\r\n return {}",
"def view_vpanel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_virtualpanel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_vpanel_details_by_id(s, id)\n for i in panel_details:\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n project_id = panel_details.project_id\n panel = get_regions_by_vpanelid(s, id, version)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live yet\"\n bed = 'disabled'\n current_version = version\n print(type(version))\n current_version = round(current_version, 1)\n version = round(float(version), 1)\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = get_prev_versions_vp(s, id)\n choices = []\n for i in v_list:\n choices.append((i, i))\n\n if (current_version, current_version) not in choices:\n choices.append((current_version, current_version))\n\n form.versions.choices = choices\n form.versions.default = current_version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n\n return render_template('panel_view.html', table=json.dumps(table), panel=table, panel_name=panel_name,\n edit=edit, bed=bed,\n version=version, panel_id=id, message=message, url=url_for('panels.view_vpanel'),\n scope='Virtual', form=form)\n\n else:\n return redirect(url_for('panels.view_virtual_panels'))",
"def make_virtualpanel_live():\n vpanelid = request.args.get('id')\n panelid = get_panel_by_vp_id(s, vpanelid)\n locked = check_if_locked(s, panelid)\n if locked:\n if current_user.id == get_locked_user(s, panelid):\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))\n else:\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))",
"def is_up(self):\n data = self.vxprint()\n return self.name in data and data[self.name].STATE == \"ACTIVE\"",
"def test_verification_status_visible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_on('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_on('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_on('audit', 'You\\'re auditing this course')",
"def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False",
"def checkStatus(self):\n return None",
"def getComponentStatus(self, opts):\n\n # We'll check screen even if we found PID so that we can get screen information\n screen = self.findScreen(opts.verbose)\n\n if screen == None:\n print(\"Did not find screen named %s\" % self._screenName)\n else:\n print(\"Found screen %s\" % screen)\n\n if opts.verbose:\n print(\"OpenSimulator path: %s\" % self._binaryPath)\n\n if screen != None:\n print(\"Status: ### Active ###\")\n return True\n else:\n print(\"Status: ### Inactive ###\")\n return False",
"def check(self):\n if self.widget:\n self.widget.update()\n\n self.check_virtual_display()\n\n return self.runner.check()",
"def status_pf(module):\n rc, out, err = module.run_command(['service', 'pf', 'status'])\n\n # Obtain current status of pf\n if 'Enabled' in out:\n return True\n else:\n return False",
"def test_verification_status_invisible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_off('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_off('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_off('audit', 'You\\'re auditing this course')",
"def status(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n\n box_name = self.box_name\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(wait=False, quiet=True, lookup=lookup)\n state = vmrun.checkToolsState(quiet=True)\n\n print(\"Current machine states:\" + os.linesep)\n if ip is None:\n ip = \"poweroff\"\n elif not ip:\n ip = \"unknown\"\n print(\"%s\\t%s\\t(VMware Tools %s)\" % (box_name, ip, state))\n\n if ip == \"poweroff\":\n print(os.linesep + \"The VM is powered off. To restart the VM, simply run `mech up`\")\n elif ip == \"unknown\":\n print(os.linesep + \"The VM is on. but it has no IP to connect to, VMware Tools must be installed\")\n elif state in (\"installed\", \"running\"):\n print(os.linesep + \"The VM is ready. Connect to it using `mech ssh`\")",
"def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data",
"def state_preview_validate(cfg, app, win, events):",
"def VMStatus(self):\n try:\n status = self.vmInstance.get_status()\n LOGGER.info('Current status of virtual machine \"{}\": {}'.format(VM_NAME, status))\n\n except Exception as e:\n status = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting status of virtual machine \"{}\"!'.format(VM_NAME))\n\n return status",
"def status() -> None:\n wit = WitStatus()\n\n print(f'Commit ID: {wit.last_commit_id}')\n\n if wit.last_commit_id:\n full_changes = wit.get_changes_to_be_committed()\n print(f'Changes to be committed: {\", \".join(full_changes)}')\n\n changed, untracked = wit.compare_two_list_files(\n wit.original_files, wit.stage_files,\n wit.parent_wit_dir, wit.stage_dir\n )\n print(f'Changes not staged for commit: {\", \".join(changed)}')\n print(f'Untracked files: {\", \".join(untracked)}')",
"def verify_server_status(server_hardware):\n\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n else:\n logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Server Hardware : \" + server_hardware + \" is not present in the ServerList of the Server Profile page\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Server Hardware : \" + server_hardware + \" is present in the ServerList and Hence verifying for the status..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK'\")\n elif ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n else:\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n return True",
"def status(self):",
"def status(self, cmd):\n\n cmd.inform('text=\"Present!\"')\n cmd.finish()",
"def check_unstaged_changes(self):\n pass",
"def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def check_status(self):\n return self.status",
"def check_status(self):\n return self.status",
"def view_virtual_panels(id=None):\n if not id:\n id = request.args.get('id')\n if id:\n panels = get_virtual_panels_by_panel_id(s, id)\n else:\n panels = get_virtual_panels_simple(s)\n result = []\n panel_name = \"Virtual\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n\n row[\"current_version\"] = round(row[\"current_version\"], 1)\n\n status = check_virtualpanel_status(s, row[\"id\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked_by_user_vpanel(s, current_user.id, row[\"panelid\"])\n\n row['permission'] = permission\n row['locked'] = locked\n\n status = check_virtualpanel_status(s, row[\"id\"])\n row[\"status\"] = status\n\n if id:\n panel_name = row['panelname'] + ' Virtual'\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTableVPanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=panel_name,\n message='Virtual Panels are locked if their parent panel is being edited')",
"def get_status(self):\n\n # update status\n # TODO: this needs to consider \"partial\" status based on the testcodes that are defined\n # in the panel.\n # get the condition OK aliquot condition instance\n result_item_cls = models.get_model(self._meta.app_label, 'resultitem')\n aliquot_condition_ok = AliquotCondition.objects.get_ok()\n if not self.aliquot.aliquot_condition:\n # how can this be ??\n status = 'ERROR'\n elif result_item_cls.objects.filter(result__order=self) or self.panel.panel_type == 'STORAGE':\n # test aliquot condition and set the order status\n if self.aliquot.aliquot_condition == aliquot_condition_ok:\n status = 'COMPLETE'\n else:\n # has results or is stored but condition is not 10\n # was this meant to be a storage panel?\n status = 'ERROR'\n elif self.aliquot.aliquot_condition != aliquot_condition_ok:\n status = 'REDRAW'\n else:\n status = 'PENDING'\n # regardless of status, check that order was not deleted on DMIS\n dmis_tools = DmisTools()\n if dmis_tools.is_withdrawn_order(self):\n # other aspects of result visibility must consider this value\n status = 'WITHDRAWN'\n return status",
"def check_state(self):\n pass",
"def create_virtual_panel_process():\n form = CreateVirtualPanelProcess()\n\n if request.method == \"POST\":\n make_live = request.form['make_live']\n vp_id = request.args.get('id')\n if make_live == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n panel_id = get_panel_by_vp_id(s, vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n form.panel.choices = get_panel_choices(s, current_user.id)\n url = url_for('panels.create_virtual_panel_process')\n return render_template('virtualpanels_createprocess.html', form=form, url=url, vp_id=\"main\")"
]
| [
"0.6554641",
"0.6370629",
"0.6291984",
"0.60570294",
"0.60495496",
"0.598873",
"0.59630346",
"0.59411",
"0.59268373",
"0.5703046",
"0.5640611",
"0.56372267",
"0.5632552",
"0.5621469",
"0.5608949",
"0.559546",
"0.5583101",
"0.5578039",
"0.5565517",
"0.55514395",
"0.5544561",
"0.5540127",
"0.55283135",
"0.55204695",
"0.5512394",
"0.5512394",
"0.55013376",
"0.54983455",
"0.54954374",
"0.5492326"
]
| 0.75358933 | 0 |
this is the method for gene autocompletion: it gets the gene list from the db and makes it into a json so that javascript can read it | def autocomplete():
value = str(request.args.get('q'))
result = s.query(Genes).filter(Genes.name.like("%" + value + "%")).all()
data = [i.name for i in result]
return jsonify(matching_results=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_genes(request, genome, chrom, start, end):\n logger.debug(\"annotation_server.get_genes called for genome: %s chromosome: %s\" % (genome, chrom)) \n \n if genome in SUPPORTED_GENOMES:\n current_table = eval(genome+ \"_EnsGene\")\n curr_vals = current_table.objects.filter(\n Q(chrom__iexact=chrom),\n Q(cdsStart__range=(start, end)) | Q(cdsEnd__range=(start, end))\n ).values('name', 'chrom', 'strand', 'txStart', 'txEnd', 'cdsStart', 'cdsEnd', 'exonCount', 'exonStarts', 'exonEnds')\n data = ValuesQuerySetToDict(curr_vals)\n return HttpResponse(data, 'application/json')\n else:\n return HttpResponse(status=400)\n \n \n # Postbio query\n #cursor = connection.cursor() \n #query = \"\"\"SELECT x.symbol, r.name, #<r.region as start, #>r.region as end, case when r.same_orient then '+' else '-' end as strand, #<r.cds as cds_start, #>r.cds as cds_end from dm3.flybase r join dm3.flyBase2004Xref x on r.name = x.name JOIN (select id, name from dm3.sequence where name = '%s') n ON n.id = r.seq_id and region && int_interval '(%s,%s)' order by region\"\"\" % (chrom, start, end)\n #cursor.execute(query) \n #return HttpResponse(cursor_to_json(cursor), 'application/javascript')",
"def gene_search(\n self,\n genes:list=[\"MYL2\"], \n ):\n try: \n assert isinstance(genes, list)\n except AssertionError as e:\n e.args += (\"[genes] argument needs to be type(list)\", )\n raise\n \n\n self.genes = genes\n\n self.requestURL = f\"https://www.ebi.ac.uk/proteins/api/proteins?offset=0&size=100&gene={'%2C%20'.join(genes)}&organism=human\"\n \n r = requests.get(self.requestURL, headers={ \"Accept\" : \"application/json\"})\n \n if not r.ok:\n r.raise_for_status()\n sys.exit()\n\n self.responseBody = r.text\n self.data = json.loads(self.responseBody)\n\n return self.responseBody",
"def _parse_genes(chrom: str, db: FeatureDB) -> List[Dict]:\n parsed_genes = []\n for gene in db.region(\n seqid=chrom, featuretype=[GFF3GeneFeatureTypes.GENE.value, GFF3GeneFeatureTypes.PSEUDOGENE.value]\n ):\n gene_id = gene.attributes.get(\"gene_id\", [None])[0]\n locus_tag = gene.attributes.get(\"locus_tag\", [None])[0]\n gene_symbol = gene.attributes.get(\"gene_name\", [gene.attributes.get(\"gene_symbol\", None)])[0]\n gene_biotype = gene.attributes.get(\"gene_biotype\", [gene.attributes.get(\"gene_type\", None)])[0]\n gene_qualifiers = {x: y for x, y in gene.attributes.items() if not BioCantorGFF3ReservedQualifiers.has_value(x)}\n\n if Biotype.has_name(gene_biotype):\n gene_biotype = Biotype[gene_biotype]\n elif gene_biotype:\n gene_qualifiers[\"provided_biotype\"] = [gene_biotype]\n gene_biotype = None\n\n transcripts = []\n for i, transcript in enumerate(db.children(gene, level=1)):\n\n transcript_id = transcript.attributes.get(\"transcript_id\", [None])[0]\n transcript_symbol = transcript.attributes.get(\n \"transcript_name\", [gene.attributes.get(\"transcript_name\", None)]\n )[0]\n transcript_qualifiers = {\n x: y for x, y in transcript.attributes.items() if not BioCantorGFF3ReservedQualifiers.has_value(x)\n }\n provided_transcript_biotype = gene.attributes.get(\n \"transcript_biotype\", [gene.attributes.get(\"transcript_type\", None)]\n )[0]\n\n if Biotype.has_name(provided_transcript_biotype):\n transcript_biotype = Biotype[provided_transcript_biotype]\n else:\n # keep track of what they gave us, that did not match the enum\n if provided_transcript_biotype:\n transcript_qualifiers[\"provided_transcript_biotype\"] = provided_transcript_biotype\n # use the gene biotype\n transcript_biotype = gene_biotype\n\n if locus_tag is not None:\n if transcript_id is None:\n transcript_id = locus_tag\n if transcript_symbol is None:\n transcript_symbol = locus_tag\n\n exons = []\n cds = []\n for feature in db.children(transcript, level=1):\n if feature.featuretype == GFF3GeneFeatureTypes.EXON.value:\n exons.append(feature)\n elif feature.featuretype == GFF3GeneFeatureTypes.CDS.value:\n cds.append(feature)\n else:\n logger.warning(f\"Found non CDS/exon child of transcript in feature: {feature}\")\n\n # This gene has only a CDS/exon feature as its direct child\n # therefore, we really have one interval here\n if len(exons) == 0:\n if transcript.featuretype not in [\n GFF3GeneFeatureTypes.CDS.value,\n GFF3GeneFeatureTypes.EXON.value,\n ]:\n logger.warning(f\"Gene child feature has type {transcript.featuretype}; skipping\")\n continue\n logger.info(f\"gene {gene_id} had no transcript feature\")\n if transcript.featuretype == GFF3GeneFeatureTypes.CDS.value:\n exons = cds = [transcript]\n else:\n exons = [transcript]\n\n exons = sorted(exons, key=lambda e: e.start)\n exon_starts = [x.start - 1 for x in exons]\n exon_ends = [x.end for x in exons]\n start = exon_starts[0]\n end = exon_ends[-1]\n assert start <= end\n strand = Strand.from_symbol(transcript.strand)\n\n if len(cds) == 0:\n cds_starts = cds_ends = cds_frames = None\n protein_id = product = None\n else:\n # sort by start and end in case two blocks start at the same position\n cds = sorted(cds, key=lambda c: (c.start, c.end))\n cds_starts = [x.start - 1 for x in cds]\n cds_ends = [x.end for x in cds]\n cds_frames = [CDSPhase.from_int(int(f.frame)).to_frame().name for f in cds]\n # NCBI encodes protein IDs and products on the CDS feature\n protein_id = cds[0].attributes.get(\"protein_id\", [None])[0]\n product = 
cds[0].attributes.get(\"product\", [None])[0]\n\n tx = dict(\n exon_starts=exon_starts,\n exon_ends=exon_ends,\n strand=strand.name,\n cds_starts=cds_starts,\n cds_ends=cds_ends,\n cds_frames=cds_frames,\n qualifiers=filter_and_sort_qualifiers(transcript_qualifiers),\n is_primary_tx=False,\n transcript_id=transcript_id,\n transcript_type=transcript_biotype.name if transcript_biotype else transcript_biotype,\n transcript_symbol=transcript_symbol,\n sequence_name=chrom,\n protein_id=protein_id,\n product=product,\n )\n transcripts.append(tx)\n\n if len(transcripts) == 0:\n # infer a transcript for a gene\n logger.info(f\"Inferring a transcript for gene {gene_symbol}\")\n tx = dict(\n exon_starts=[gene.start],\n exon_ends=[gene.end],\n strand=Strand.from_symbol(gene.strand).name,\n qualifiers=gene_qualifiers,\n transcript_type=gene_biotype.name if gene_biotype else gene_biotype,\n transcript_id=gene_id,\n sequence_name=gene.seqid,\n )\n transcripts.append(tx)\n\n gene = dict(\n transcripts=transcripts,\n gene_id=gene_id,\n gene_symbol=gene_symbol,\n locus_tag=locus_tag,\n gene_type=gene_biotype.name if gene_biotype else gene_biotype,\n qualifiers=filter_and_sort_qualifiers(gene_qualifiers),\n sequence_name=chrom,\n )\n\n parsed_genes.append(gene)\n return parsed_genes",
"def getGeneList( self ):\n return self.geneList",
"def autocomplete_schools():\n schools = School.query.all()\n return jsonify(json_list=[school.name for school in schools])",
"def autocomplete_languages():\n languages = Language.query.all()\n return jsonify(json_list=[language.name for language in languages])",
"def genus_list(request, format='csv'):\n\n\n genera = ( Genus.objects.all()\n .order_by('genus_name')\n .values_list('genus_name', flat=True) )\n \n # if the user supplied a subfamily, get genuses with that subfamily\n if request.GET.get('subfamily'):\n genera = genera.filter(subfamily_name=request.GET.get('subfamily').capitalize())\n \n \n \n if format == 'csv':\n return CSVResponse(\n [{'genus': g} for g in genera], \n fields=('genus',) )\n \n else:\n # serialize to JSON\n json_objects = [{'key': g, 'display':g} for g in genera]\n return JSONResponse({'genera': json_objects})",
"def bentity_autocomplete(request, format='csv'):\n \n if request.GET.get('q'):\n q = request.GET.get('q')\n \n bentities = Bentity.objects.all().order_by('bentity')\n \n # split tokens by period or white space\n q_tokens = split(r'[.\\s]+', q)\n \n # prefix match for each token in the search string against genus name or species name\n for token in q_tokens:\n bentities = bentities.filter(bentity__icontains=token)\n \n \n else:\n bentities = []\n \n \n if format == 'csv':\n # Serislize CSV for API\n return CSVResponse(\n [{'bentity_id': b.gid, 'bentity_name': b.bentity} for b in bentities],\n ('bentity_id', 'bentity_name') )\n \n else:\n # Serialize JSON for bentity-list widget\n json_objects = [{\n 'bentity_id': b.gid,\n 'bentity_name': b.bentity,\n } for b in bentities]\n return JSONResponse({'bentities' : json_objects})",
"def get_auto_anno_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n languages = ['English','english']\n batch = Report.objects.filter(name=use_obj,language__in = languages).exclude(institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)",
"def return_artistnames(): \n\n names = [] #list for artist names\n rows = db.session.query(Artist.name).all()\n for row in rows: \n names.append(row[0])\n\n return jsonify(names)",
"def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details",
"def convert_list(ens):\n\n mg = get_client('gene')\n ginfo = mg.querymany(ens, scopes='ensembl.gene')\n\n out = {\n \"query\": [],\n \"symbol\": [],\n \"name\": []\n }\n keys = out.keys()\n print(keys)\n\n for g in ginfo:\n # print(\"symbol:\", g[\"symbol\"], \"name:\", g[\"name\"])\n for key in keys:\n\n try:\n out[key].append(g[key])\n except:\n out[key].append(\"void\")\n\n df_all = pd.DataFrame(out)\n return df_all",
"def get_genes(variant):\n genes = {}\n transcripts = []\n mongo_genes = []\n \n # Conversion from ensembl to refseq\n # ensembl_to_refseq is a dictionary with ensembl transcript id as keys and\n # a list of refseq ids as values\n ensembl_to_refseq = {}\n for gene_info in variant['info_dict'].get(\n 'Ensembl_transcript_to_refseq_transcript', []):\n splitted_gene = gene_info.split(':')\n transcript_info = splitted_gene[1]\n for transcript in transcript_info.split('|'):\n splitted_transcript = transcript.split('>')\n if len(splitted_transcript) > 1:\n ensembl_id = splitted_transcript[0]\n refseq_ids = splitted_transcript[1].split('/')\n ensembl_to_refseq[ensembl_id] = refseq_ids\n \n # A dictionary with clinical gene descriptions\n gene_descriptions = {}\n for gene_info in variant['info_dict'].get('Gene_description', []):\n splitted_gene = gene_info.split(':')\n hgnc_symbol = splitted_gene[0]\n description = splitted_gene[1]\n gene_descriptions[hgnc_symbol] = description\n \n # First we get all vep entrys that we find and put them under their \n # corresponding gene symbol in 'genes'\n for vep_entry in variant['vep_info'].get(variant['ALT'], []):\n transcript = get_transcript(vep_entry, ensembl_to_refseq)\n hgnc_symbol = transcript.hgnc_symbol\n if hgnc_symbol:\n if hgnc_symbol in genes:\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n else:\n genes[hgnc_symbol] = {}\n genes[hgnc_symbol]['transcripts'] = {}\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['omim_gene_id'] = None\n genes[hgnc_symbol]['phenotypic_terms'] = []\n genes[hgnc_symbol]['best_rank'] = 40\n genes[hgnc_symbol]['ensembl_id'] = transcript.ensembl_id\n \n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n \n ######################################################################\n ## There are two types of OMIM terms, one is the OMIM gene entry ##\n ## and one is for the phenotypic terms. ##\n ## Each key in the 'omim_terms' dictionary reprecents a gene id. 
##\n ## Values are a dictionary with 'omim_gene_id' = omim_gene_id and ##\n ## 'phenotypic_terms' = [list of OmimPhenotypeObjects] ##\n ######################################################################\n\n # Fill the omim gene id:s:\n for annotation in variant['info_dict'].get('OMIM_morbid', []):\n if annotation:\n splitted_record = annotation.split(':')\n try:\n hgnc_symbol = splitted_record[0]\n omim_term = splitted_record[1]\n genes[hgnc_symbol]['omim_gene_id'] = omim_term\n except (ValueError, KeyError):\n pass\n\n # Fill the omim phenotype terms:\n for gene_annotation in variant['info_dict'].get('Phenotypic_disease_model', []):\n if gene_annotation:\n splitted_gene = gene_annotation.split(':')\n hgnc_symbol = splitted_gene[0]\n for omim_entry in splitted_gene[1].split('|'):\n splitted_record = omim_entry.split('>')\n \n phenotype_id = splitted_record[0]\n inheritance_patterns = []\n if len(splitted_record) > 1:\n inheritance_patterns = splitted_record[1].split('/')\n \n disease_model = PhenotypeTerm(\n phenotype_id=phenotype_id,\n disease_models=inheritance_patterns\n )\n \n genes[hgnc_symbol]['phenotypic_terms'].append(disease_model)\n \n for hgnc_symbol in genes:\n gene_info = genes[hgnc_symbol]\n most_severe = gene_info['most_severe_transcript']\n # Create a mongo engine gene object for each gene found in the variant\n mongo_gene = Gene(hgnc_symbol=hgnc_symbol)\n mongo_gene.description = gene_descriptions.get(hgnc_symbol)\n mongo_gene.ensembl_gene_id = gene_info.get('ensembl_id', None)\n mongo_gene.omim_gene_entry = gene_info.get(\n 'omim_gene_id', \n None\n )\n\n mongo_gene.omim_phenotypes = gene_info.get(\n 'phenotypic_terms', \n []\n )\n\n # Add a list with the transcripts:\n mongo_gene.transcripts = []\n for transcript_id in gene_info['transcripts']:\n mongo_gene.transcripts.append(gene_info['transcripts'][transcript_id])\n\n try:\n mongo_gene.functional_annotation = gene_info['most_severe_function']\n except AttributeError:\n pass\n try:\n mongo_gene.region_annotation = SO_TERMS[mongo_gene.functional_annotation]['region']\n except AttributeError:\n pass\n try:\n mongo_gene.sift_prediction = most_severe.sift_prediction\n except AttributeError:\n pass\n try:\n mongo_gene.polyphen_prediction = most_severe.polyphen_prediction\n except AttributeError:\n pass\n # Add the mongo engine gene to the dictionary\n mongo_genes.append(mongo_gene)\n\n return mongo_genes",
"def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict",
"def users_json(self, rows=None, sidx=None, _search=None, searchField=None,\n searchOper=None, searchString=None, page=None, sord=None, nd=None): # 1 line # 2 lines\n t1 = time.clock()\n header = [\"value\", \"flags\", \"source\", \"evidence_type\", \"creation_time\", \"time\", \"useby\", \"owner\", \"comment\"] # 3 lines\n reslist = []\n genshi_tmpl = LoadGenshiTemplate(cherrypy.session.get('cur_session'), cherrypy.session.get('username'))\n cur_component = cherrypy.session.get('cur_component')\n cur_context = cherrypy.session.get('cur_context') \n if cur_component != 'None':\n #print \"getting new\"\n context = cur_context.split()\n um = cherrypy.session.get('um')\n reslist = um.get_evidence_new(context, cur_component)\n cherrypy.session['cur_component'] = 'None'\n else:\n #print \"getting default\"\n cherrypy.session['cur_component'] = 'firstname'\n reslist = um.get_evidence_new()\n\n #users_list = test_data_to_list(test_data) # 4 lines\n evdlist = []\n i = 0\n #{'comment': None, 'evidence_type': 'explicit', 'creation_time': 1322914468.889158, 'value': 'Bob',\n #'source': 'Jane', 'flags': [], 'time': None, 'owner': 'Jane', 'objectType': 'Evidence', 'useby': None}\n myEvd = []\n\n if type(reslist) is ListType:\n for res in reslist:\n print \"Inside user_json \"\n myEvd = [0]*10\n myEvd[0] = i\n for key, value in res.__dict__.items():\n #print \"%s:%s\"%(key, value)\n for item in header:\n if item == key:\n #print \"key: %s %s--\"%(item,key)\n if key == 'creation_time' or key == 'time' or key == 'useby':\n if value:\n import datetime\n value = datetime.datetime.fromtimestamp(int(value)).strftime('%d/%m/%Y %H:%M:%S')\n elif key == 'flags':\n if value:\n value = ''.join(value)\n else:\n value=\"None\"\n __index = header.index(item)\n #print \"%s in %d\" %(value,__index+1)\n myEvd[__index+1]=value\n evdlist.append(myEvd)\n i = i+1\n #print \"Evidence: %d\" %i\n #for val in myEvd:\n # print val\n\n import my_jqGrid\n result_page = my_jqGrid.jqgrid_json(self, evdlist, header, rows=rows, sidx=sidx, _search=_search,\n searchField=searchField, searchOper=searchOper, searchString=searchString, page=page, sord=sord)\n\n t2 = time.clock()\n print 'user-json took %0.3fms' % ((t2-t1)*1000.0)\n write_log('notice','Show evidence list operation successful')\n\n return result_page\n\n else:\n #print reslist\n e = reslist\n write_log('error','Show evidence list Operation Failed; Error:'+str(e))\n modeltree = cherrypy.session.get('modeltree')\n return genshi_tmpl.greeting_template(e, \"Evidencelist upload\", modeltree)",
"def get_auto_anno_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n languages = ['English', 'english']\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)",
"def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t 
gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models",
"def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []",
"def geneSymbols(self, returnType=\"list\"):\n\t\treturn self._dataframe['GeneSymbol'].to_dict() if returnType==\"dict\" else self._dataframe['GeneSymbol'].tolist()",
"def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes",
"def get_autocomplete(cursor, query):\n cursor.execute(\"SELECT * FROM entities WHERE name LIKE %s ORDER BY total_occurences DESC LIMIT 9;\", [query + \"%\"])\n return_obj = {'entities':[]}\n\n for entity in cursor.fetchall():\n return_obj['entities'].append({\n 'name': entity[1],\n 'score': entity[2]\n })\n return return_obj",
"def buildCompleter(jsonfile):\n strlist = []\n jsondict = {}\n with open(jsonfile, 'r') as f:\n jsondict = json.load(f)\n\n for x in jsondict.keys():\n for item in jsondict[x]:\n strlist.append(item)\n\n comp = QtWidgets.QCompleter(strlist)\n comp.popup().setStyleSheet(hou.qt.styleSheet())\n comp.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)\n return comp",
"def otuList():\n # Query all passengers\n results = session.query(Otu.otu_id,Otu.lowest_taxonomic_unit_found).all()\n\n # Create a dictionary from the row data and append to a list of all_passengers\n otus = []\n for aotu in results:\n # print(aotu.otu_id)\n otu_dict = {}\n # otu_dict[\"ID\"] = aotu.otu_id\n otu_dict[\"Lowest\"] = aotu.lowest_taxonomic_unit_found\n otus.append(otu_dict)\n return jsonify(otus)",
"def fetchRefSeq(genome = 'hg18',lookupval = 'name'):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']={}\n output[chr]['-']={}\n for row in rows:\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']][row[lookupval]]=row\n return output",
"def genre_choices(request):\n choices = GENRES\n diction = {}\n li = []\n for data in choices:\n li.append(data[0])\n diction['GENRE_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)#, safe=False)",
"def autocomplete_geoname(str_name):\n\n DB_NAME = global_settings.DB_NAME_GEONAMES\n db_user = global_settings.POSTGRESQL_USERNAME\n db_password = global_settings.POSTGRESQL_PASSWORD\n db_host = global_settings.POSTGRESQL_HOST\n db_port = global_settings.POSTGRESQL_PORT\n\n sql = \"SELECT distinct name FROM {} WHERE name ilike '{}%'\".format(global_settings.TABLE_NAME_GEONAMES, str_name)\n\n resp = sqlExecute(DB_NAME, db_user, db_password, db_host, db_port, sql, True)\n\n if not resp['success']:\n return []\n\n geonames = []\n\n for data in resp['data']:\n geonames.append(data[0])\n\n return geonames",
"def fetch_gene_info(gene_list, batch_size=100):\n print(\"Looking up additional information about the genes identified by BLAST...\")\n post_handle = Entrez.epost(db=\"nucleotide\", id=\",\".join(gene_list))\n result = Entrez.read(post_handle)\n post_handle.close()\n webenv = result[\"WebEnv\"]\n query_key = result[\"QueryKey\"]\n count = len(gene_list)\n OUT = open(\"Log_Directory/fetch_results.txt\", \"w\")\n for start in range(0, count, batch_size):\n end = min(count, start + batch_size)\n print(\"Fetching records %i through %i\" % (start + 1, end))\n attempt = 0\n while attempt < 3:\n attempt += 1\n try:\n fetch_handle = Entrez.efetch(db=\"nucleotide\", rettype=\"gb\", retmode=\"text\", retstart=start, retmax=batch_size,\n webenv=webenv, query_key=query_key)\n except HTTPError as err:\n if 500 <= err.code <= 599:\n print(\"Received error from server %s\" % err)\n print(\"Attempt %i of 3\" % attempt)\n time.sleep(15)\n else:\n raise\n OUT.write(fetch_handle.read())\n fetch_handle.close()\n OUT.close()",
"def search_autocomplete(request):\n response = HttpResponse(content_type='application/json')\n query = request.GET.get('query', None)\n if query:\n try:\n suggestions = []\n for node in nc.get_indexed_node(nc.graphdb.manager, 'name', query):\n suggestions.append(node['name'])\n d = {'query': query, 'suggestions': suggestions, 'data': []}\n json.dump(d, response)\n except Exception:\n pass\n return response\n return False",
"def species_autocomplete(request, format='csv'):\n \n if request.GET.get('q'):\n q = request.GET.get('q')\n \n species = Species.objects.all().order_by('taxon_code')\n \n # split tokens by period or white space\n q_tokens = split(r'[.\\s]+', q)\n \n # prefix match for each token in the search string against genus name or species name\n for token in q_tokens:\n species = species.filter(Q(species_name__istartswith=token) | Q(genus_name__genus_name__istartswith=token))\n \n \n \n \n # empty species list if no query provided by the user\n else:\n species = []\n \n\n \n \n if format == 'csv':\n # serialize results as CSV\n return CSVResponse(\n [{'species': s.taxon_code} for s in species], \n fields=('species',) )\n \n \n else:\n # serialize results as JSON\n JSON_objects = [{'label': (s.genus_name_id + ' ' + s.species_name), 'value': s.taxon_code} for s in species]\n return JSONResponse({'species': JSON_objects})",
"def gene(self):\n\t\tif self._record is None:\n\t\t\treturn []\n\t\tgene_list =[i for i in self._record.features if i.type == 'gene']\n\t\treturn gene_list"
]
| [
"0.5893482",
"0.5857896",
"0.57166195",
"0.5713266",
"0.5686897",
"0.56468934",
"0.5633033",
"0.56239164",
"0.56137943",
"0.54916984",
"0.549004",
"0.5441414",
"0.54217285",
"0.5417656",
"0.54035157",
"0.538486",
"0.53812414",
"0.53533083",
"0.5330496",
"0.53243965",
"0.5300726",
"0.53005075",
"0.5299467",
"0.5283632",
"0.52784324",
"0.5275433",
"0.5271948",
"0.526935",
"0.5261345",
"0.5253969"
]
| 0.5941018 | 0 |
Method to translate regions to BED file format. | def create_bed(regions):
result = []
for i in regions:
line = []
line.append(i.chrom)
line.append(str(i.start))
line.append(str(i.end))
line.append(i.name.replace(";", ","))
result.append(line)
bed = '\n'.join(['\t'.join(l) for l in result])
return bed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bed_to_quest(in_bed, out_regions):\n with open(in_bed) as infile:\n with open(out_regions, 'w') as outfile:\n for line in infile:\n fields = line.strip().split('\\t')\n outfile.write(' '.join(fields[:2] + [fields[5]]) + '\\n')",
"def to_bed(self, file_name, subset=None, **kwargs):\n write_bed(file_name, self.regions(subset, lazy=True), **kwargs)",
"def bed_file_to_regions(in_file: Union[str, os.PathLike]\n ) -> Generator[BedRegion, None, None]:\n with open(in_file, \"rt\") as in_file_h:\n for line in in_file_h:\n fields = line.strip().split()\n # Skip browser and track fields and other invalid lines.\n if fields[0] in [\"browser\", \"track\"] or len(fields) < 3:\n continue\n # Take the first 3 columns of each line to create a new BedRegion\n yield BedRegion(fields[0], int(fields[1]), int(fields[2]))",
"def merge_regions(bed_files, out_bed):\n merge_all = (\"zcat {0} | \"\n \"sort -k1,1 -k2,2n | \"\n \"bedtools merge -i stdin | \"\n \"gzip -c \"\n \"> {1}\").format(' '.join(bed_files), out_bed)\n print merge_all\n os.system(merge_all)\n\n return None",
"def _process_region(self, region, writer):",
"def stampaBedGraph(dictReadsEsIn, dictGeneChr, fileOut, geneNames):\n\n\tkeyF \t\t\t= '%s\\t%s'\t\t\t\t\t\t\t\t\t\t\t\t\t# Formato key del dizionario dictGeneChr\n\tcoordinateF \t= '%s:%s-%s'\t\t\t\t\t\t\t\t\t\t\t\t# Formato coordinate del gene all'interno del cromosoma\n\trigaF\t\t\t= '%s\\t%s\\t%s\\t%s\\n'\t\t\t\t\t\t\t\t\t\t# Formato della riga del file BedGraph\n\tbedGraphF\t\t= '%s%s.bedGraph'\t\t\t\t\t\t\t\t\t\t\t# Formato nome del file BedGraph\n\n\tidx_start\t= 1\n\tidx_end\t\t= 2\n\tidx_reads\t= 3\n\n\tfor geneName in dictReadsEsIn:\n\t\tif geneName not in geneNames:\t\t\t\t\t\t\t\t\t\t\t# Se il gene non presenta regioni introniche..\n\t\t\tcontinue\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..non se ne stampa il bedGraph\n\n\t\tcod = geneNames[geneName]\n\t\tfor chrom in dictReadsEsIn[geneName]:\n\t\t\tstartG, endG = dictGeneChr[keyF % (geneName, chrom)]\n\t\t\tpos_browser = coordinateF %(chrom, startG, endG)\n\n\t\t\tnuovoFile = open(cartella % cod + bedGraphF % (chrom, fileOut), 'w') # Apertura del file nella cartella relativa al gene\n\t\t\tnuovoFile.write(intestazione % pos_browser)\t\t\t\t\t\t\t # Stampa della header all'inizio del file\n\n\t\t\tfor i in range(0, len(dictReadsEsIn[geneName][chrom][idx_start])):\n\t\t\t\tnuovoFile.write(rigaF % (chrom,\t\t\t\t\t\t\t\t\t\t\t\\\n\t\t\t\t\t\t\t\t\t\t dictReadsEsIn[geneName][chrom][idx_start][i],\t\\\n\t\t\t\t\t\t\t\t\t\t dictReadsEsIn[geneName][chrom][idx_end][i],\t\\\n\t\t\t\t\t\t\t\t\t\t dictReadsEsIn[geneName][chrom][idx_reads][i]))\n\n\t\t\tnuovoFile.close()",
"def dict_file_to_regions(in_file: Union[str, os.PathLike]\n ) -> Generator[BedRegion, None, None]:\n with open(in_file, \"rt\") as in_file_h:\n for line in in_file_h:\n fields = line.strip().split()\n if fields[0] != \"@SQ\":\n continue\n\n contig: Optional[str] = None\n length: Optional[int] = None\n for field in fields:\n if field.startswith(\"LN\"):\n length = int(field[3:])\n elif field.startswith(\"SN\"):\n contig = field[3:]\n if contig and length:\n yield BedRegion(contig, 0, length)",
"def loadBED(line, fp, fname, labelColumn, labels, regions, defaultGroup):\n\n # This is largely parseBED from deeptoolsintervals\n labelIdx = None\n localRegions = {}\n\n cols = line.strip().split(\"\\t\")\n if labelColumn is not None:\n label = cols.pop(labelColumn)\n if label not in labels:\n labels[label] = len(labels)\n labelIdx = labels[label]\n if labelIdx >= len(regions):\n regions.append(localRegions)\n else:\n localRegions = regions[labelIdx]\n\n if len(cols) >= 6:\n name = cols[3]\n else:\n name = \"{0}:{1}-{2}\".format(cols[0], cols[1], cols[2])\n localRegions[name] = len(localRegions)\n\n for line in fp:\n if line.startswith(\"#\") and labelColumn is None:\n if len(localRegions) > 0:\n label = line[1:].strip()\n if len(label):\n labels[dti.findRandomLabel(labels, label)] = len(labels)\n else:\n labels[dti.findRandomLabel(labels, os.path.basename(fname))] = len(labels)\n regions.append(localRegions)\n localRegions = dict()\n continue\n elif line.startswith(\"#\") and labelColumn is not None:\n continue\n\n cols = line.strip().split(\"\\t\")\n if len(cols) < 3:\n continue\n if labelColumn is not None:\n label = cols.pop(labelColumn)\n if label not in labels:\n labels[label] = len(labels)\n labelIdx = labels[label]\n if labelIdx >= len(regions):\n regions.append({})\n localRegions = regions[labelIdx]\n\n if len(cols) >= 6:\n name = cols[3]\n else:\n name = \"{0}:{1}-{2}\".format(cols[0], cols[1], cols[2])\n name = dti.findRandomLabel(localRegions, name)\n localRegions[name] = len(localRegions)\n\n # Handle the last group if there is no label\n if labelIdx is None and len(localRegions) > 0:\n if defaultGroup is not None:\n labels[dti.findRandomLabel(labels, defaultGroup)] = len(labels)\n else:\n labels[dti.findRandomLabel(labels, os.path.basename(fname))] = len(labels)\n regions.append(localRegions)",
"def save_regions_bmp(self, robot, output_directory):\n # Make ouput directory if it doesn't already exist\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n regions_path = os.path.join(output_directory, \"regions.bmp\")\n if os.path.exists(regions_path):\n return\n image = Image.new(\"L\", OUTPUT_BMP_DIMENSIONS)\n draw = ImageDraw.Draw(image)\n pixels = image.load()\n output_width, output_height = image.size\n output_bounds = (0, output_height, output_width, 0)\n # Set default colour\n for i in range(output_width):\n for j in range(output_height):\n pixels[i, j] = OUTPUT_DEFAULT_COLOUR\n # Add regions\n for region in self.regions:\n translated_bounds = get_translated_bounds(region.bounds, self.bounds, output_bounds)\n left, top, right, bottom = list(map(int, translated_bounds))\n if robot.can_hold(region.permeability):\n colour = OUTPUT_VALID_COLOUR\n else:\n colour = OUTPUT_INVALID_COLOUR\n draw.rectangle((left, bottom, right, top), fill=colour)\n image.save(regions_path)\n LOGGER.debug(\"Saved regions!\")",
"def bed2bedpe(fin, fout, ext=150):\n if fin.endswith(\".gz\"):\n fino = gzip.open(fin, \"rt\")\n else:\n fino = open(fin)\n if fout.endswith(\".gz\"):\n fo = gzip.open(fout, \"wt\")\n else:\n fo = open(fout, \"w\")\n for i, line in enumerate(fino):\n if i % 10000 == 0:\n cFlush(\"%s read from %s\" % (i,fin))\n line = line.split(\"\\n\")[0].split('\\t')\n if len(line) < 6: #no strand information\n nline = [\n line[0], line[1], line[2], line[0],\n int(line[1]) + ext,\n int(line[2]) + ext, \".\", \"44\", \"+\", \"-\"\n ]\n elif line[5] == \"+\":\n nline = [\n line[0], line[1], line[2], line[0],\n int(line[1]) + ext,\n int(line[2]) + ext, \".\", \"44\", \"+\", \"-\"\n ]\n else:\n nline = [\n line[0],\n max(0, int(line[1])),\n max(0,\n int(line[2]) - ext), line[0], line[1], line[2], \".\", \"44\",\n \"+\", \"-\"\n ]\n nline = \"\\t\".join(list(map(str, nline))) + \"\\n\"\n fo.write(nline)\n fino.close()\n fo.close()",
"def SaveRegions(self, fname=\"js9.reg\"):\n self.fname = fname\n temp = open('temp','r').read()\n regions = re.sub(r'(circle)', r'\\n\\1', temp)\n regions = \"# Region file format: JS9 version 1.0\\nICRS\"+regions\n file = open(fname, \"w\")\n file.write(regions)\n file.close()\n os.remove(\"temp\")",
"def vcf2bed_annotateTE(file_path_d:dict, window:int = 20) -> None:\n blast_d = None\n intersect_d = None\n\n for i in range(len(file_path_d[\"key\"])):\n # for each experiment/condition, read blast and bedtools output\n if file_path_d[\"blast\"][i] is not None:\n blast_d = te_info2map(file_path_d[\"blast\"][i],\"blast\")\n else:\n print(f'file_path_d[\"key\"][i]: no blast.out available, skip.')\n if file_path_d[\"intersect\"][i] is not None:\n intersect_d = te_info2map(file_path_d[\"intersect\"][i],\"intersect\")\n else:\n print(f'file_path_d[\"key\"][i]: no bedtools intersect.bed available, skip.')\n\n out_name = f'te_annotated_{file_path_d[\"key\"][i]}_INSpad{window}.bed'\n if os.path.exists(out_name):\n q = input(f\"te_annotated_{out_name}.vcf already exist, rewrite it? (Y/N)\")\n if q.capitalize() == \"N\":\n sys.exit(0)\n\n print(f'Open fh on {out_name}. Convert VCF to BED (read comments in script for details), subset of INFO parse to NAME (col 4) field.',file=sys.stderr)\n\n with open(file_path_d[\"vcf\"][i],\"r\") as f, open(f'{out_dir}/{out_name}', \"w\") as o:\n line_count = 1\n for line in f:\n line_count += 1\n if line.startswith(\"##\") or line.startswith(\"#\"):\n continue\n col = line.strip().split(\"\\t\")\n try:\n infos = parse_info(col[7])\n except Exception as e:\n print(f\"{line_count}: Cannot parse info field.\\n{line}\\n{e}\")\n \n sv_chr = col[0]\n sv_start = int(col[1]) \n sv_end = int(sv_start) + 1 if \"END\" not in infos else int(infos[\"END\"]) # if missing END (i.e. BND) use start + 1\n sv_id = col[2]\n\n name = f'ID={sv_id};SVTYPE={infos[\"SVTYPE\"]};SUPPORT={infos[\"SUPPORT\"]}'\n if \"SVLEN\" in infos:\n name += f';SVLEN={infos[\"SVLEN\"]}'\n\n # chr start end name{ID;SVTYPE;SUPPORT;SVLEN;BLAST_TE (sep=,);INTERSECT_TE(sep=,)}\n if infos[\"SVTYPE\"] == \"INS\":\n sv_start = sv_start - 10 if (sv_start - 10) > 0 else 0\n sv_end = sv_end + 10 # there is chance that sv_end larger than chr length, but should be rare and we can filter this later\n if blast_d is not None:\n if sv_id in blast_d:\n name += f';BLAST_TE={blast_d[sv_id]}'\n if intersect_d is not None:\n if sv_id in intersect_d:\n name += f';INTERSECT_TE={intersect_d[sv_id]}'\n \n # write to out_file\n # if missing END (i.e. BND) use start + 1\n o.write(f'{sv_chr}\\t{sv_start}\\t{sv_end}\\t{name}\\n')\n print(f'Finish writing {out_name}. Close fh.',file=sys.stderr)",
"def write_bg_regions(bg_region, outfile):\n jdict = ltu.jsonify(bg_region)\n # Write\n ltu.savejson(outfile, jdict, easy_to_read=True, overwrite=True)\n print(\"Wrote Background Regions to {:s}\",outfile)",
"def simplify_bed(fbed, has_header):\n line_gen = reader(fbed, header=False)\n header = line_gen.next() if has_header else None\n fh = open(BedTool._tmp(), \"w\")\n for toks in line_gen:\n new_toks = toks[:3] + [\"Z_Z\".join(toks), \".\",\n toks[5] if len(toks) > 5 else \".\"]\n fh.write(\"\\t\".join(new_toks) + \"\\n\")\n fh.close()\n return BedTool(fh.name), header",
"def get_region(self, region):\n\n return self.adapter.get_region(region) \n\n\n\n\n #file_compression = \"\"\n # magic_dict = {\n # b\"\\x1f\\x8b\\x08\": \"gz\",\n # b\"\\x42\\x5a\\x68\": \"bz2\",\n # b\"\\x50\\x4b\\x03\\x04\": \"zip\"\n # }\n # \n\n # max_len = max(len(x) for x in magic_dict)\n # with open(file_path, \"rb\") as f:\n # file_start = f.read(max_len)\n # for magic, filetype in magic_dict.items():\n # if file_start.startswith(magic):\n # file_compression = filetype\n # split_ext = file_path.split(\".\")\n # extension = split_ext[len(split_ext) -1]\n # if(file_compression == \"zip\"):\n # if extension != \"zip\":\n # subprocess.call(\"mv {} {}.zip\".format(file_path, file_path).split())\n # subprocess.call(\"unzip {} -d .\".format(file_path).split())\n # if(file_compression == \"bz2\"):\n # if extension != \"bz2\":\n # subprocess.call(\"mv {} {}.bz2\".format(file_path,file_path).split())\n # subprocess.call(\"bzip2 -df {}\".format(file_path).split())\n # if(file_compression == \"gz\"):\n # if extension != \"gz\":\n # subprocess.call(\"mv {} {}.gz\".format(file_path,file_path).split())\n # subprocess.call(\"gzip -df {}\".format(file_path).split())",
"def save(self, f):\n self.f = f\n for region in self.regions:\n ext = region.address & 0xFFFF0000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address = region.address - ext\n for chunk in chunks(region.data):\n if address >= 0x10000:\n ext += 0x10000\n self.write_hex_line(\n HexLine(0, EXTLINADR, struct.pack(\">H\", ext >> 16))\n )\n address -= 0x10000\n self.write_hex_line(HexLine(address, DATA, chunk))\n address += len(chunk)\n self.write_hex_line(HexLine(0, EOF))",
"def write_bgf(self, filename):\n body = [\"BIOGRF{0:>5s}\\n\".format(self.biogrf)]\n if self.descrp:\n body.append(\"DESCRP {0}\\n\".format(self.descrp))\n else:\n body.append(\"DESCRP {0}\\n\".format(filename))\n body.append(\"FORCEFIELD {0}\\n\".format(self.ff))\n body.append(\"FORMAT ATOM (a6,1x,i5,1x,a5,1x,a3,1x,a1,1x,a5,3f10.5\"\n \",1x,a5,i3,i2,1x,f8.5,i2,i4,f10.5)\\n\")\n atoms = []\n hetatms = []\n conect = []\n for atom in self.atoms:\n a, c = atom.writeline()\n if atom.record == 'ATOM':\n atoms.append(a)\n elif atom.record == 'HETATM':\n hetatms.append(a)\n conect.append(c)\n body.extend(atoms)\n body.extend(hetatms)\n body.append(\"FORMAT CONECT (a6,14i6)\\nFORMAT ORDER (a6,i6,13f6.3)\\n\")\n body.extend(conect)\n body.append(\"END\\n\")\n with open(filename, 'w') as f:\n f.writelines(body)",
"def transform_region_ascii(infile, outfile, wcs_in, wcs_out):\n\n with open(infile, 'r') as fh:\n regions = fh.readlines()\n\n with open(outfile, 'w') as ofh:\n for region in regions:\n if region.startswith('#'):\n ofh.write(region + '\\n')\n continue\n\n region = region.rstrip()\n post0 = 0\n post1 = region.find(\"(\")\n reg_type = region[post0:post1]\n\n if reg_type in ['polygon', 'Polygon']:\n # convert from a 1D array into a 2D one\n coords_in = [float(f)\n for f in region[post1 + 1:-1].split(',')]\n\n assert coords_in.size % 2 == 0\n # Use integer division here\n coords_in.resize(2, coords_in.size // 2)\n\n # The conversion can be applied to all the\n # pairs at once, but it requires the data be\n # in the \"right\" shape.\n #\n coords_cel = wcs_in.apply(coords_in.T)\n coords_out = wcs_out.invert(coords_cel)\n\n # The coords_out array is not transposed (to\n # match the input) since it makes it easier\n # to convert back to a string.\n coords_str = \",\".join([\"{:7.2f}\".format(c)\n for c in coords_out])\n\n out = reg_type + '(' + coords_str + ')'\n\n elif reg_type == 'rotbox':\n\n # Just need to convert the center of the box, since\n # the assumption is that the pixel scale is the\n # same in both the input and output systems.\n #\n toks = region[post1 + 1:].split(\",\")\n assert len(toks) > 2\n\n xphys_in = float(toks[0])\n yphys_in = float(toks[1])\n\n # The handling of nD arrays by the apply and invert\n # methods of transform objects is, at best, strange\n # to describe.\n #\n coords_cel = wcs_in.apply([[xphys_in, yphys_in]])\n coords_out = wcs_out.invert(coords_cel)\n\n xphys_out = coords_out[0][0]\n yphys_out = coords_out[0][1]\n coords_str = '{:7.2f},{:7.2f},'.format(xphys_out,\n yphys_out)\n\n # Hopefully this re-creates the remainded of the\n # string (i.e. after the center of the box).\n #\n out = reg_type + '(' + coords_str + \",\".join(toks[2:])\n\n else:\n # copy over the line\n out = region\n\n ofh.write(out + '\\n')",
"def get_state_blockgroups_file(state=48, district=7, leg_body='US-REP', year='2015'):\r\n\r\n blockgroups_file = get_state_blockgroups_geojson_filename(state=state)\r\n \r\n state = \"{0:0>2}\".format(state)\r\n district = \"{0:0>2}\".format(district)\r\n \r\n print( blockgroups_file )\r\n\r\n if not os.path.isfile(blockgroups_file):\r\n print( \"Downloading blockgroups\" )\r\n bgs_url = 'ftp://ftp2.census.gov/geo/tiger/TIGER{year}/BG/tl_{year}_{state}_bg.zip'.format(year=year, state=state)\r\n bgs_dl_file = geojson_path + 'bgs.zip'\r\n download_file(bgs_url, bgs_dl_file)\r\n extract_all(bgs_dl_file, geojson_path)\r\n bgs_shapefile = glob(geojson_path + '*shp')[0]\r\n\r\n print( \"Converting blockgroups file to GEOJSON\")\r\n bgs = gpd.read_file(bgs_shapefile)\r\n bgs = bgs.to_crs({'init': u'epsg:4326'})\r\n bgs.to_file(blockgroups_file, driver='GeoJSON')\r\n\r\n # cleanup geojson dir\r\n shapefile_prefix = glob(geojson_path + '*shp')[0].split(\r\n geojson_path)[1].split('.')[0]\r\n shapefiles = glob(geojson_path + shapefile_prefix + '*')\r\n for f in shapefiles:\r\n os.remove(f)\r\n os.remove(bgs_dl_file)",
"def prepare_bed_file(bed_file, output, ouf=False, save_rejected=None, only_chrom=None):\n new_lines = [] # keep updated lines\n rejected = [] # keep IDs of skipped transcripts + the reason why\n names = Counter() # we need to make sure that all names are unique\n allowed_re = re.compile(ALLOWED_CHARSET_RE).search\n broken_names = []\n\n f = open(bed_file, \"r\")\n for num, line in enumerate(f, 1):\n # parse bed file according to specification\n line_data = line.rstrip().split(\"\\t\")\n\n if len(line_data) != 12:\n f.close() # this is for sure an error\n # it is possible only if something except a bed12 was provided\n die(\n \"Error! Bed 12 file is required! Got a file with {len(line_data)} fields instead\"\n )\n\n chrom = line_data[0]\n if only_chrom and chrom != only_chrom:\n # TOGA allows to perform the analysis on a specific chromosome only\n # is so, we can skip all transcripts that located on other chromosomes\n continue\n chromStart = int(line_data[1])\n chromEnd = int(line_data[2])\n name = line_data[3] # gene_name usually\n corr_name = not bool(allowed_re(name))\n if corr_name is False:\n broken_names.append(name)\n # TODO: check weird characters in the transcript name\n # bed_score = int(line_data[4]) # never used\n # strand = line_data[5] # otherwise:\n # strand = True if line_data[5] == '+' else False\n thickStart = int(line_data[6])\n thickEnd = int(line_data[7])\n # itemRgb = line_data[8] # never used\n blockCount = int(line_data[9])\n blockSizes = [int(x) for x in line_data[10].split(\",\") if x != \"\"]\n blockStarts = [int(x) for x in line_data[11].split(\",\") if x != \"\"]\n blockEnds = [blockStarts[i] + blockSizes[i] for i in range(blockCount)]\n blockAbsStarts = [blockStarts[i] + chromStart for i in range(blockCount)]\n blockAbsEnds = [blockEnds[i] + chromStart for i in range(blockCount)]\n blockNewStarts, blockNewEnds = [], []\n names[name] += 1\n\n if thickStart > thickEnd:\n f.close() # according to bed12 specification this should never happen\n sys.stderr.write(f\"Problem occurred at line {num}, gene {name}\\n\")\n die(\"Error! Bed file is corrupted, thickEnd MUST be >= thickStart\")\n elif thickStart == thickEnd:\n # this means that this is a non-coding transcript\n # TOGA cannot process them: we can skip it\n rejected.append((name, \"No CDS\"))\n continue\n\n if thickStart < chromStart or thickEnd > chromEnd:\n # a very strange (but still possible) case\n f.close() # for sure an error with input data\n sys.stderr.write(f\"Problem occurred at line {num}, gene {name}\\n\")\n die(\"Error! 
Bed file is corrupted, thickRange is outside chromRange!\")\n\n # now select CDS only\n # we keep UTRs in the filtered file\n # however, we need CDS to check whether it's correct (% 3 == 0)\n for block_num in range(blockCount):\n blockStart = blockAbsStarts[block_num]\n blockEnd = blockAbsEnds[block_num]\n\n # skip the block if it is entirely UTR\n if blockEnd <= thickStart:\n continue\n elif blockStart >= thickEnd:\n continue\n\n # if we are here: this is not an entirely UTR exon\n # it might intersect the CDS border or to be in the CDS entirely\n # remove UTRs: block start must be >= CDS_start (thickStart)\n # block end must be <= CDS_end (thickEnd)\n blockNewStart = blockStart if blockStart >= thickStart else thickStart\n blockNewEnd = blockEnd if blockEnd <= thickEnd else thickEnd\n blockNewStarts.append(blockNewStart - thickStart)\n blockNewEnds.append(blockNewEnd - thickStart)\n\n if len(blockNewStarts) == 0:\n # even it thickStart != thickEnd this transcript still can be non-coding\n # but if there are no blocks in the CDS -> we can catch this\n rejected.append((name, \"No CDS\"))\n continue\n\n block_new_count = len(blockNewStarts)\n blockNewSizes = [\n blockNewEnds[i] - blockNewStarts[i] for i in range(block_new_count)\n ]\n\n if sum(blockNewSizes) % 3 != 0 and not ouf:\n # this is an out-of-frame (or incomplete transcript)\n # ideally CDS length should be divisible by 3\n # not ouf means that we like to keep such transcripts for some reason\n rejected.append((name, \"Out-of-frame gene\"))\n continue\n\n # we keep this transcript: add in to the list\n new_line = \"\\t\".join([str(x) for x in line_data])\n new_lines.append(new_line)\n f.close()\n\n # if not allowed characters in transcript names: list them\n if len(broken_names) > 0:\n eprint(\"Error! Some transcript names contain not allowed characters\")\n for t in broken_names:\n eprint(t)\n die(f\"Allowed characters are: {ALLOWED_CHARSET}\")\n # if there are non-unique transcript IDs: die\n # I kill it there, not earlier to show them altogether\n if any(v > 1 for v in names.values()):\n eprint(\"Error! There are non-uniq transcript IDs:\")\n duplicates = [k for k, v in names.items() if v > 1]\n for d in duplicates:\n eprint(d)\n die(\"Abort\")\n\n if len(new_lines) == 0:\n # no transcripts pass the filter: probably an input data mistake\n sys.exit(\n f\"Error! No reference annotation tracks left after filtering procedure! Abort\"\n )\n\n # write transcripts that passed the filter to the output file\n f = open(output, \"w\") if output != \"stdout\" else sys.stdout\n f.write(\"\\n\".join(new_lines) + \"\\n\")\n f.close() if output != \"stdout\" else None\n\n if save_rejected:\n # save transcripts that didn't pass the filter + reason why\n f = open(save_rejected, \"w\")\n for elem in rejected:\n f.write(f\"{elem[0]}\\t{elem[1]}\\n\")\n f.close()",
"def _cmd_export_bed(args):\n bed_tables = []\n for segfname in args.segments:\n segments = read_cna(segfname)\n # ENH: args.sample_sex as a comma-separated list\n is_sample_female = verify_sample_sex(\n segments, args.sample_sex, args.male_reference, args.diploid_parx_genome\n )\n if args.sample_id:\n label = args.sample_id\n elif args.label_genes:\n label = None\n else:\n label = segments.sample_id\n tbl = export.export_bed(\n segments,\n args.ploidy,\n args.male_reference,\n args.diploid_parx_genome,\n is_sample_female,\n label,\n args.show,\n )\n bed_tables.append(tbl)\n table = pd.concat(bed_tables)\n write_dataframe(args.output, table, header=False)",
"def reverse_convert_textfile(self, input_textfile, output_textfile, block_number):\n input_textfile = './src/data/qvalue_files/' + input_textfile + '.txt'\n\n output_textfile = './src/data/qvalue_files/' + output_textfile + '.txt'\n \"\"\"\n block1\n \"\"\"\n box_conversion_map_1 = {'A1': 'D1', 'A3': 'D3', 'A2': 'D2', 'A5': 'D5', 'A4': 'D4', 'A7': 'D7', 'A6': 'D6',\n 'A9': 'D9', 'A8': 'D8'}\n\n \"\"\"\n block2\n \"\"\"\n box_conversion_map_2 = {'A15': 'D9', 'A14': 'D8', 'A11': 'D5', 'A10': 'D4', 'A13': 'D7', 'A12': 'D6',\n 'A7': 'D1', 'A9': 'D3', 'A8': 'D2'}\n\n \"\"\"\n block3\n \"\"\"\n box_conversion_map_3 = {'B7': 'U1', 'B14': 'U8', 'B15': 'U9', 'B12': 'U6', 'B13': 'U7', 'B10': 'U4',\n 'B11': 'U5', 'B8': 'U2', 'B9': 'U3'}\n\n \"\"\"\n block0\n \"\"\"\n box_conversion_map_0 = {'B4': 'U4', 'B5': 'U5', 'B6': 'U6', 'B7': 'U7', 'B1': 'U1', 'B2': 'U2', 'B3': 'U3',\n 'B8': 'U8', 'B9': 'U9'}\n\n box_maps_dict = {0: box_conversion_map_0, 1: box_conversion_map_1, 2: box_conversion_map_2,\n 3: box_conversion_map_3}\n box_conversion_map = box_maps_dict[block_number]\n\n f_read = open(input_textfile, 'r')\n f_write = open(output_textfile, 'w+')\n\n for i in f_read.read().split('\\n'):\n print(i, \"before\")\n for key in box_conversion_map.keys():\n i = i.replace(key + 'x', box_conversion_map[key] + 'x')\n i = i.replace(key + 'z', box_conversion_map[key] + 'z')\n i = i.replace(key + 'N', box_conversion_map[key] + 'N')\n i = i.replace(key + 'E', box_conversion_map[key] + 'E')\n i = i.replace(key + 'W', box_conversion_map[key] + 'W')\n i = i.replace(key + 'S', box_conversion_map[key] + 'S')\n i = i.replace(key + '|', box_conversion_map[key] + '|')\n print(i, \"after\")\n f_write.write(i + '\\n')\n f_read.close()\n f_write.close()",
"def add_sr_ebi_brc4_names(self,\n seq_region_file: str,\n seq_region_map: dict,\n attrib_type_map: dict,\n work_dir: str,\n unversion: bool = False):\n os.makedirs(work_dir, exist_ok=True)\n\n # return if there's nothing to add\n if not seq_region_file: return\n\n # technical / optimization. get atttib_type_id(s) for \"(EBI|BRC4)_seq_region_name\"\n tagged_sr_name_attrib_id = {\n tag : self.id_from_map_or_die(f\"{tag}_seq_region_name\", attrib_type_map, \"attrib_type_map\") for tag in [\"EBI\", \"BRC4\"]\n }\n\n # load BRC4/EBI name from seq_region file\n brc4_ebi_name_attrib_trios = [] # [ (seq_region_id, attrib_id, value)... ] list of trios for inserting into db \n with open(seq_region_file) as in_file:\n seq_regions = list(json.load(in_file))\n for seq_region in seq_regions:\n # get seq_region_id (perhaps, by using unversioned name)\n seq_region_name, seq_region_id, unversioned_name = \\\n self.name_and_id_from_seq_region_item(seq_region, seq_region_map, try_unversion = unversion)\n # append attribs to the brc4_ebi_name_attrib_trios list\n for tag in [\"BRC4\", \"EBI\"]:\n attrib_name = f\"{tag}_seq_region_name\"\n attrib_id = tagged_sr_name_attrib_id[tag]\n value = seq_region.get(attrib_name, seq_region_name)\n brc4_ebi_name_attrib_trios.append( (seq_region_id, attrib_id, self.quote_or_null(value)) )\n\n # run insertion SQL\n self.insert_to_db(\n brc4_ebi_name_attrib_trios,\n \"seq_region_attrib\",\n [\"seq_region_id\", \"attrib_type_id\", \"value\"],\n self.pjc(work_dir, \"brc4_ebi_seq_region_synonyms\"),\n ignore = True\n )",
"def convert_dict_to_bed(dicti, chr_list,file_name):\n filename= file_name+'.bed'\n myFile = open(filename,'w')\n myFile.write('track name=Inter'+sys.argv[1]+'-'+sys.argv[2]+'\\n')\n #print ' I just made a file to put your results in called'+ filename\n for chr in chr_list: \n #print 'the chr is', chr\n #print \"chr no.\",chr,\" : \",len(dicti[chr]),\" put enhancers\"\n for item in dicti[chr]:\n #print 'the item is ', item\n line=item.split('-')\n line.insert(0,chr)\n #print line\n myFile.write('\\t'.join(line)+'\\n')\n myFile.close()",
"def create_design(regions):\n print(type(regions))\n result = []\n for i in regions:\n line = []\n line.append(i.chrom)\n\n #ensure minimum region size\n if (i.end - i.start) > 150:\n line.append(str(i.start))\n line.append(str(i.end))\n else:\n region_size = i.end - i.start\n extra = math.ceil(150.0 -region_size / 2) #round up to whole number\n line.append(str(i.start - extra))\n line.append(str(i.end + extra))\n\n line.append(i.name.replace(\";\", \",\"))\n result.append(line)\n\n design = '\\n'.join([line[0] + ':' + str(line[1]) + '-' + str(line[2]) + ' ' + line[3] for line in result])\n print(design)\n return design",
"def write_dftb_in(self, outfile):\n\n outfile.write('Geometry = GenFormat { \\n')\n outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth > 0): # E.g. 'Hamiltonian_Max... = {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n previous_key = key\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n outfile.write('ParserOptions { \\n')\n outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n outfile.write('} \\n')",
"def hMMsplc_jDict2edgeBED(jDict,outPath):\n oFile = open(outPath, 'w')\n # --- get list of all coverages ---\n for chrm in jDict:\n for jnc in jDict[chrm]:\n origLine = jDict[chrm][jnc][0].split('\\t')\n oFile.write('%s\\t%s\\t%s\\t%s\\t%s\\n' % (chrm, jnc[0], jnc[1], origLine[3], origLine[4]))",
"def write_regions(srclist, impath, ext='.reg'):\n fname = impath[:-5] + ext\n with open(fname, 'w') as f:\n f.write('global color=cyan font=\"helvetica 10 normal\" '\n 'select=1 highlite=1 edit=1 move=1 delete=1 '\n 'include=1 fixed=0 source\\n')\n f.write('fk5\\n')\n for src in srclist:\n f.write('ellipse(%f,%f,%.2f\",%.2f\",%.1f) # text={%s}\\n' % (\n src.ra, src.dec, src.maj, src.min, src.pa + 90.0, src.name))",
"def toBed(self,value = 'score',rgb='0,0,0'):\n return \"%s\\t%d\\t%d\\t%s\\t%.2f\\t%s\\t%d\\t%d\\t%s\\t%d\\t%s\\t%s\" %(self.chr,self.start,self.end,self.name,self.__dict__[value],self.strand,self.start,self.end,rgb,len(self.exonStarts),\",\".join([str(x) for x in self.exonLengths]),\",\".join([str(x) for x in self.exonOffsets]))",
"def make_csb_region(regfile, center, r1, r2):\n regions = [\n \"pie(%.2f,%.2f,0,%.2f,0,360)\" % (center[0], center[1], r1),\n \"pie(%.2f,%.2f,0,%.2f,0,360)\" % (center[0], center[1], r2),\n ]\n open(regfile, \"w\").write(\"\\n\".join(regions) + \"\\n\")"
]
| [
"0.6066997",
"0.5997694",
"0.57949936",
"0.5722753",
"0.56932145",
"0.55274326",
"0.5516248",
"0.55012625",
"0.533869",
"0.53141445",
"0.5282663",
"0.5208408",
"0.52067125",
"0.51832414",
"0.5090945",
"0.507771",
"0.5054706",
"0.5033555",
"0.502927",
"0.4997322",
"0.49905843",
"0.49766603",
"0.49746135",
"0.4969505",
"0.49384946",
"0.49276227",
"0.49141863",
"0.49021056",
"0.4894062",
"0.4886852"
]
| 0.6242754 | 0 |
Allows a panel or virtual panel to be downloaded as a text file in the correct BED format, named after the panel. The scope is defined as either panel or virtual to determine which query is executed. The request can also specify the version of the panel to be downloaded. | def download():
scope = request.args.get('scope')
type = request.args.get('type')
id = request.args.get('id')
version = request.args.get('version')
panel_name = request.args.get('name')
if type == 'default':
extension = 0
else:
extension = 25
if scope == 'Panel':
panel = get_regions_by_panelid(s, id, version, extension)
elif scope == 'Virtual':
panel = get_regions_by_vpanelid(s, id, version, extension)
result = []
for i in panel:
line = []
line.append(i.chrom)
line.append(str(i.region_start))
line.append(str(i.region_end))
if not i.gene_name == '' and not i.gene_name == 'N/A':
line.append(i.gene_name + ':' + i.name)
else:
line.append(i.name)
result.append(line)
bed = '\n'.join(['\t'.join(l) for l in result])
bed_tool = BedTool(bed, from_string=True)
bed_sorted = bed_tool.sort()
bed_sorted_merged = bed_sorted.merge(c=4, o='collapse')
if type == 'design':
bed = create_design(bed_sorted_merged)
filename = "attachment; filename=" + panel_name + "_25bp_v" + version + "_" + current_user.id + "_" + time.strftime(
"%d-%m-%Y") + ".txt"
else:
bed = create_bed(bed_sorted_merged)
if type == "extension":
filename = "attachment; filename=" + panel_name + "_25bp_v" + version + "_" + current_user.id + "_" + time.strftime(
"%d-%m-%Y") + ".bed"
else:
filename = "attachment; filename=" + panel_name + "_v" + version + "_" + current_user.id + "_" + time.strftime(
"%d-%m-%Y") + ".bed"
return Response(
bed,
        mimetype='text/plain',
headers={"Content-disposition": filename
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def panel(context, panel, version):\n LOG.info(\"Running scout export panel\")\n adapter = context.obj['adapter']\n \n if not panel:\n LOG.warning(\"Please provide at least one gene panel\")\n context.abort()\n\n LOG.info(\"Exporting panels: {}\".format(', '.join(panel)))\n for line in export_gene_panels(adapter, panel, version):\n click.echo(line)",
"def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)",
"def generic_download(self, data_set, scene, output_dir, chunk_size=1024):\n\n ### LANDSAT DOWNLOAD ###\n if is_product_id(scene['displayId']):\n filename = self.download(scene['displayId'], output_dir)\n\n ### NON-LANDSAT ###\n else:\n filename = self.download(scene['displayId'], output_dir, data_set=data_set)\n\n return filename",
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def download():\n\treturn response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)"
]
| [
"0.55245066",
"0.5285476",
"0.52011573",
"0.5189003",
"0.5167064",
"0.5078086",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122",
"0.5049122"
]
| 0.76141894 | 0 |
Method to view panels; if a project ID is given, only panels from that project are returned. The method also checks whether each panel is locked (i.e. being worked on by another user) and whether the user has permission to edit each panel; if not, the edit action is not available to them. | def view_panels(id=None):
if not id:
id = request.args.get('id')
if id:
panels = get_panels_by_project_id(s, id)
else:
panels = get_panels(s)
result = []
project_name = "All"
for i in panels:
row = dict(zip(i.keys(), i))
status = check_panel_status(s, row["panelid"])
row["status"] = status
permission = check_user_has_permission(s, current_user.id, row["projectid"])
locked = check_if_locked(s, row["panelid"])
row['permission'] = permission
row['locked'] = locked
if id:
project_name = row['projectname']
# if check_user_has_permission(s, current_user.id, row["projectid"]):
# result.append(row)
result.append(row)
table = ItemTablePanels(result, classes=['table', 'table-striped'])
return render_template('panels.html', panels=table, project_name=project_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def toggle_locked():\n panel_id = request.args.get('id')\n json = False\n if not panel_id:\n json = True\n panel_id = request.json['id']\n project_id = get_project_id_by_panel_id(s, panel_id)\n if current_user.id == get_locked_user(s, panel_id) and json:\n unlock_panel_query(s, panel_id)\n return jsonify(\"complete\")\n elif check_user_has_permission(s, current_user.id, project_id):\n unlock_panel_query(s, panel_id)\n return manage_locked(message=\"Panel Unlocked\")\n else:\n return manage_locked(message=\"Hmmmm you don't have permission to do that\")",
"def view_virtual_panels(id=None):\n if not id:\n id = request.args.get('id')\n if id:\n panels = get_virtual_panels_by_panel_id(s, id)\n else:\n panels = get_virtual_panels_simple(s)\n result = []\n panel_name = \"Virtual\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n\n row[\"current_version\"] = round(row[\"current_version\"], 1)\n\n status = check_virtualpanel_status(s, row[\"id\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked_by_user_vpanel(s, current_user.id, row[\"panelid\"])\n\n row['permission'] = permission\n row['locked'] = locked\n\n status = check_virtualpanel_status(s, row[\"id\"])\n row[\"status\"] = status\n\n if id:\n panel_name = row['panelname'] + ' Virtual'\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTableVPanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=panel_name,\n message='Virtual Panels are locked if their parent panel is being edited')",
"def panels(self, request, panel_list, group):\n return panel_list",
"def view_vpanel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_virtualpanel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_vpanel_details_by_id(s, id)\n for i in panel_details:\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n project_id = panel_details.project_id\n panel = get_regions_by_vpanelid(s, id, version)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live yet\"\n bed = 'disabled'\n current_version = version\n print(type(version))\n current_version = round(current_version, 1)\n version = round(float(version), 1)\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = get_prev_versions_vp(s, id)\n choices = []\n for i in v_list:\n choices.append((i, i))\n\n if (current_version, current_version) not in choices:\n choices.append((current_version, current_version))\n\n form.versions.choices = choices\n form.versions.default = current_version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n\n return render_template('panel_view.html', table=json.dumps(table), panel=table, panel_name=panel_name,\n edit=edit, bed=bed,\n version=version, panel_id=id, message=message, url=url_for('panels.view_vpanel'),\n scope='Virtual', form=form)\n\n else:\n return redirect(url_for('panels.view_virtual_panels'))",
"def get_panel(self, panel_id):\n return self.panels.get(panel_id, None)",
"def get_panels(config):\n\n task = TaskPanels(config)\n task.execute()\n\n task = TaskPanelsMenu(config)\n task.execute()\n\n logging.info(\"Panels creation finished!\")",
"def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')",
"def getPanels(self, LibraryID):\n response = self.request(\"getPanels\", LibraryID=LibraryID)\n if not response:\n return None\n return response[\"Result\"][\"Panels\"]",
"def unlock_panel():\n panelid = request.args.get('panelid')\n unlock_panel_query(s, panelid)\n\n return redirect(url_for('panels.view_panels'))",
"def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())",
"def _panelapp_panel_ids():\n json_lines = fetch_resource(PANELAPP_BASE_URL.format(\"list_panels\"), json=True)\n return [panel_info[\"Panel_Id\"] for panel_info in json_lines.get(\"result\", [])]",
"def load_panelapp_panel(adapter, panel_id=None, institute=\"cust000\", confidence=\"green\"):\n panel_ids = [panel_id]\n\n if not panel_id:\n LOG.info(\"Fetching all panel app panels\")\n panel_ids = _panelapp_panel_ids()\n\n for _ in panel_ids:\n parsed_panel = _parse_panelapp_panel(adapter, _, institute, confidence)\n\n if len(parsed_panel[\"genes\"]) == 0:\n LOG.warning(\"Panel %s is missing genes. Skipping.\", parsed_panel[\"display_name\"])\n continue\n\n try:\n adapter.load_panel(parsed_panel=parsed_panel, replace=True)\n except Exception as err:\n raise err",
"def get_projects_user_can_view(user):\n if hasattr(user, 'worker'):\n # Workers need to be able to view all data\n projects = Project.objects.all()\n else:\n projects = get_objects_for_user(\n user,\n 'view_project_data',\n klass=Project)\n sites = get_objects_for_user(user, 'view_site_data', klass=Site)\n site_projects = Project.objects\\\n .filter(id__in=[i.project_id for i in sites])\\\n .exclude(id__in=[p.id for p in projects])\n\n return projects | site_projects",
"def edit_panel_process():\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n tx_version = get_current_preftx_version(s, preftx_id)\n panel_version = get_current_version(s, panel_id)\n if not tx_version:\n tx_version = 0\n if make_live == \"on\":\n print('make_live')\n make_preftx_live(s, preftx_id, tx_version + 1, current_user.id)\n make_panel_live(s, panel_id, panel_version + 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n panel_id = request.args.get('id')\n form = EditPanelProcess()\n panel_info = get_panel_info(s, panel_id)\n project_id = panel_info.project_id\n form.project.choices = [(project_id, panel_info.project_name), ]\n form.panelname.data = panel_info.name\n\n lock_panel(s, current_user.id, panel_id)\n\n genes = get_genes_by_panelid_edit(s, panel_id, panel_info.current_version)\n html = \"\"\n buttonlist = \"\"\n print('hello')\n for gene in genes:\n gene_id = gene.id\n gene_name = gene.name\n preftx_id = get_preftx_by_gene_id\n upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)\n all_tx = get_tx_by_gene_id(s, gene_id)\n\n buttonlist += render_template(\"gene_button.html\", gene_name=gene_name, gene_id=gene_id, added=True)\n tx_html = render_template(\"tx_list.html\", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,\n upcoming=upcoming_preftx, disabled=True)\n html += tx_html\n\n return render_template('panel_createprocess.html', form=form, genes=html, genelist=buttonlist,\n panel_id=panel_id,\n url=url_for('panels.edit_panel_process') + \"?id=\" + panel_id)",
"def get_active_panel(cls):\n active_panel = None\n panel_list = pm.getPanel(type='modelPanel')\n for panel in panel_list:\n if pm.modelEditor(panel, q=1, av=1):\n active_panel = panel\n break\n\n return active_panel",
"def make_virtualpanel_live():\n vpanelid = request.args.get('id')\n panelid = get_panel_by_vp_id(s, vpanelid)\n locked = check_if_locked(s, panelid)\n if locked:\n if current_user.id == get_locked_user(s, panelid):\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))\n else:\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))",
"def can_view_cohorts(user):\n return user.is_authenticated and user.has_perm('release.view_releasecohort')",
"def create_panel_process():\n form = CreatePanelProcess()\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n version = get_current_preftx_version(s, preftx_id)\n if not version:\n version = 0\n if make_live == \"on\":\n make_preftx_live(s, preftx_id, version + 1, current_user.id)\n make_panel_live(s, panel_id, 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n form.project.choices = get_project_choices(s, current_user.id)\n return render_template('panel_createprocess.html', form=form, panel_id=\"main\",\n url=url_for('panels.create_panel_process'))",
"def getPanel(self, LibraryID, PanelID, EmbeddedData=None, LastRecipientID=None, NumberOfRecords=None,\n ExportLanguage=None, Unsubscribed=None, Subscribed=None, **kwargs):\n return self.request(\n \"getPanel\",\n LibraryID=LibraryID,\n PanelID=PanelID,\n EmbeddedData=EmbeddedData,\n LastRecipientID=LastRecipientID,\n NumberOfRecords=NumberOfRecords,\n ExportLanguage=ExportLanguage,\n Unsubscribed=Unsubscribed,\n Subscribed=Subscribed,\n **kwargs\n )",
"def editPanels(self, panels, **properties):\n\n if type(panels) is not list:\n panels = [panels]\n\n panels.reverse()\n\n panelsXML = []\n for panel in panels:\n panelXML = ET.fromstring(panel)\n\n multiTrackXML = panelXML.find(\"MultiTrackElements\")\n if multiTrackXML is not None:\n self.__saveSetupData(multiTrackDataXMLS=ET.tostring(multiTrackXML), properties=panelXML.attrib)\n\n panelsXML.append({\"properties\":panelXML.attrib, \"multiTrackXML\":multiTrackXML})\n\n if panelsXML:\n if (OSUtils.type == OSUtils.LINUX):\n paths = []\n for panel in panelsXML:\n properties = panel['properties'].copy()\n properties['frame'] = '#'\n mode = Mode(properties.get('show', None), properties.get('sequence', None))\n path = mode.get('[recipeCompedFile]', properties)\n paths.append(path)\n if not self.fileServiceLocal.exists(path):\n raise utils.FlixException(msg=\"Missing File: %s\"%path)\n command = Mode().get(\"[editImageCommand]\")\n log('Edit command %s' % command)\n os.system(command + \" \" + ' '.join(paths))\n else:\n Photoshop().createPhotoshopFileForPanels(panelsXML)\n\n return \"Done\"",
"def can_modify_project(user, project_id):\n if user.id:\n project = models.Project.objects.get(pk=project_id)\n\n # check to see if a superuser or projects_admin -- both are allow to modify projects\n if \"projects_admin\" in [g.name for g in user.groups.all()]:\n return True\n\n # check to see if they are a section head, div. manager or RDS\n if is_section_head(user, project) or is_division_manager(user, project) or is_rds(user, project):\n return True\n\n # if the project is unsubmitted, the project lead is also able to edit the project... obviously\n # check to see if they are a project lead\n if not project.submitted and is_project_lead(user, project.id):\n return True",
"def list(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n return super().list(request, args, kwargs)",
"def user_project_view(cls, user, project):\r\n pass",
"def make_live():\n panelid = request.args.get('id')\n locked = check_if_locked(s, panelid)\n if locked:\n unlock_panel_query(s, panelid)\n current_version = get_current_version(s, panelid)\n if not current_version:\n current_version = 0\n new_version = current_version + 1\n make_panel_live(s, panelid, new_version, current_user.id)\n return redirect(url_for('panels.view_panels'))",
"def get_editable_explorations(user_id):\n return [e for e in get_viewable_explorations(user_id)\n if e.is_editable_by(user_id)]",
"def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)",
"def retrieve_web_panel(panel_id: int, confidences: str = '01234'):\n import pandas as pd\n confidences = ''.join(sorted(confidences))\n reply = requests.get(f'https://panelapp.genomicsengland.co.uk/panels/{panel_id}/download/{confidences}/')\n table_handle = io.StringIO(reply.text)\n return pd.read_csv(table_handle, sep='\\t')",
"def project_view(request, project_id):\n\n # Retrieve the project to to be displayed. Raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n if request.method == 'GET':\n\n filters = Q()\n list_of_key = []\n query_string = request.META['QUERY_STRING']\n query_tab = query_string.split('&')\n filter_id_tab = []\n filter_dic = {}\n\n print(query_tab)\n\n if (query_tab != ['']):\n for query in query_tab:\n query_arg = query.split('=')\n id = query_arg[0]\n\n if not (id in filter_id_tab):\n filter_id_tab.append(id)\n try:\n filter_dic[id].append(query_arg[1])\n except KeyError:\n filter_dic[id] = [query_arg[1]]\n\n for key in request.GET:\n list_of_key.append(key)\n\n print(list_of_key)\n filters = creat_filters_rec(project, filter_dic, filter_id_tab)\n else:\n filters = Q()\n\n #\n # for key in filter_id_tab:\n #\n #\n # entry = filter_dic[key]\n #\n # if (len(entry) != 3):\n # continue\n #\n # filters = add_filter(filters, entry)\n\n tasks = project.task_set.filter(filters).order_by('-priority')\n else:\n # Retrieve all the task of the project and order them\n tasks = project.task_set.all().order_by('-priority')\n\n # Check if the logged in user is allowed to see this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n status = Status.objects.all()\n users = project.members.all()\n return render(request, 'project.html', locals())\n else:\n return redirect(\"projects\")",
"def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)"
]
| [
"0.743536",
"0.65683925",
"0.64532185",
"0.6285346",
"0.6196534",
"0.61308366",
"0.6059618",
"0.5872456",
"0.5869206",
"0.57798433",
"0.5656728",
"0.55517346",
"0.5526247",
"0.546346",
"0.54273313",
"0.5419527",
"0.5378603",
"0.53371745",
"0.5283594",
"0.52555084",
"0.523886",
"0.5222209",
"0.5215169",
"0.5214818",
"0.52002025",
"0.5188209",
"0.51692873",
"0.51428646",
"0.5135348",
"0.51297987"
]
| 0.7989837 | 0 |
Method to view the regions in a panel. If no panel ID is given, the method executes view_panels(). The method checks whether the user has permission to edit the panel to determine whether this feature is available to them. It also checks whether the panel is locked, as this also restricts access to the edit option. | def view_panel():
id = request.args.get('id')
try:
version = request.form["versions"]
except KeyError:
version = None
if id:
status = check_panel_status(s, id)
if not status:
message = "This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel"
else:
message = None
panel_details = get_panel_details_by_id(s, id)
if not version:
version = panel_details.current_version
panel_name = panel_details.name
panel = get_regions_by_panelid(s, id, version)
project_id = get_project_id_by_panel_id(s, id)
result = []
rows = list(panel)
if len(rows) != 0:
bed = ''
for i in rows:
row = dict(zip(i.keys(), i))
result.append(row)
# panel_name = i.panel_name
current_version = i.current_version
else:
message = "This Panel has no regions yet & may also have changes that have not been made live"
bed = 'disabled'
current_version = version
if check_user_has_permission(s, current_user.id, project_id):
edit = ''
else:
edit = 'disabled'
form = ViewPanel()
v_list = range(1, current_version + 1)
choices = []
for i in v_list:
choices.append((i, i))
form.versions.choices = choices
form.versions.default = version
form.process()
table = []
for i in result:
line = []
line.append(i['chrom'])
line.append(str(i['region_start']))
line.append(str(i['region_end']))
line.append(i['gene_name'])
line.append(i['name'].replace(',', ' '))
table.append(line)
return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,
panel_name=panel_name, edit=edit, bed=bed,
version=version, panel_id=id, project_id=project_id, message=message,
url=url_for('panels.view_panel'),
form=form)
else:
return redirect(url_for('panels.view_panels')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_vpanel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_virtualpanel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_vpanel_details_by_id(s, id)\n for i in panel_details:\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n project_id = panel_details.project_id\n panel = get_regions_by_vpanelid(s, id, version)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live yet\"\n bed = 'disabled'\n current_version = version\n print(type(version))\n current_version = round(current_version, 1)\n version = round(float(version), 1)\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = get_prev_versions_vp(s, id)\n choices = []\n for i in v_list:\n choices.append((i, i))\n\n if (current_version, current_version) not in choices:\n choices.append((current_version, current_version))\n\n form.versions.choices = choices\n form.versions.default = current_version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n\n return render_template('panel_view.html', table=json.dumps(table), panel=table, panel_name=panel_name,\n edit=edit, bed=bed,\n version=version, panel_id=id, message=message, url=url_for('panels.view_vpanel'),\n scope='Virtual', form=form)\n\n else:\n return redirect(url_for('panels.view_virtual_panels'))",
"def view_panels(id=None):\n if not id:\n id = request.args.get('id')\n\n if id:\n panels = get_panels_by_project_id(s, id)\n else:\n panels = get_panels(s)\n result = []\n project_name = \"All\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n status = check_panel_status(s, row[\"panelid\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked(s, row[\"panelid\"])\n row['permission'] = permission\n row['locked'] = locked\n\n if id:\n project_name = row['projectname']\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTablePanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=project_name)",
"def add_panel_regions():\n version_ids = request.json['id_ext']\n panel_id = request.json['panel_id']\n project_id = request.json['project_id']\n gene_name = request.json['gene_name']\n\n try:\n tx_id = request.json['pref_tx_id']\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n except KeyError:\n pass\n\n for i in version_ids:\n if i[\"ext_5\"] == 0:\n ext_5 = None\n else:\n ext_5 = i[\"ext_5\"]\n\n if i[\"ext_3\"] == 0:\n ext_3 = None\n else:\n ext_3 = i[\"ext_3\"]\n add_region_to_panel(s, i[\"id\"], panel_id, ext_3=ext_3, ext_5=ext_5)\n s.commit()\n return jsonify(\"complete\")",
"def view_virtual_panels(id=None):\n if not id:\n id = request.args.get('id')\n if id:\n panels = get_virtual_panels_by_panel_id(s, id)\n else:\n panels = get_virtual_panels_simple(s)\n result = []\n panel_name = \"Virtual\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n\n row[\"current_version\"] = round(row[\"current_version\"], 1)\n\n status = check_virtualpanel_status(s, row[\"id\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked_by_user_vpanel(s, current_user.id, row[\"panelid\"])\n\n row['permission'] = permission\n row['locked'] = locked\n\n status = check_virtualpanel_status(s, row[\"id\"])\n row[\"status\"] = status\n\n if id:\n panel_name = row['panelname'] + ' Virtual'\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTableVPanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=panel_name,\n message='Virtual Panels are locked if their parent panel is being edited')",
"def show(self):\n self.window.run_command(\"show_panel\", {\"panel\": self.full_name})",
"def can_view_repo(session, repo):\n return repo.id in viewable_repos(session)",
"def load_panelapp_panel(adapter, panel_id=None, institute=\"cust000\", confidence=\"green\"):\n panel_ids = [panel_id]\n\n if not panel_id:\n LOG.info(\"Fetching all panel app panels\")\n panel_ids = _panelapp_panel_ids()\n\n for _ in panel_ids:\n parsed_panel = _parse_panelapp_panel(adapter, _, institute, confidence)\n\n if len(parsed_panel[\"genes\"]) == 0:\n LOG.warning(\"Panel %s is missing genes. Skipping.\", parsed_panel[\"display_name\"])\n continue\n\n try:\n adapter.load_panel(parsed_panel=parsed_panel, replace=True)\n except Exception as err:\n raise err",
"def can_view_cohorts(user):\n return user.is_authenticated and user.has_perm('release.view_releasecohort')",
"def panel_genes(context, panel):\n LOG.info(\"Running scout export panel\")\n adapter = context.obj['adapter']\n \n if not panel:\n LOG.warning(\"Please provide at least one gene panel\")\n context.abort()\n\n LOG.info(\"Exporting panels: {}\".format(', '.join(panel)))\n for line in export_panels(adapter, panel):\n click.echo(line)",
"def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')",
"def create_panel_custom_regions():\n panel_id = request.json[\"panel_id\"]\n chrom = request.json[\"chrom\"]\n start = request.json[\"start\"]\n end = request.json[\"end\"]\n name = request.json[\"name\"]\n regions = select_region_by_location(s, chrom, start, end) # if region already exists, return current entry\n if regions:\n for i in regions:\n add_region_to_panel(s, i.id, panel_id)\n s.commit()\n continue\n else:\n create_custom_region(s, panel_id, chrom, start, end, name)\n\n return jsonify(\"complete\")",
"def remove_panel_regions():\n if type(request.json['ids']) is list:\n version_ids = request.json['ids']\n else:\n version_ids = request.json['ids'].replace('[', '').replace(']', '').split(',')\n # TODO does this happen?\n if type(version_ids) is str:\n version_ids = version_ids.split(',')\n panel_id = request.json['panel_id']\n\n for i in version_ids:\n remove_version_from_panel(s, int(panel_id), int(i))\n\n panel = get_panel_by_id(s, panel_id) # check if there are still regions in the panel\n length = len(list(panel))\n return jsonify(length)",
"def get_panel(self, panel_id):\n return self.panels.get(panel_id, None)",
"def edit_virtual_panel_process():\n form = EditVirtualPanelProcess()\n\n vp_id = request.args.get('id')\n panel_id = get_panel_by_vp_id(s, vp_id)\n if request.method == \"POST\":\n if request.form['make_live'] == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n lock_panel(s, current_user.id, panel_id)\n panel_info = get_panel_details_by_id(s, panel_id)\n panel_name = panel_info.name\n form.panel.choices = [(panel_id, panel_name), ]\n\n panel_version = get_current_version(s, panel_id)\n panel_genes = get_genes_by_panelid(s, panel_id, panel_version)\n vp_info = get_vpanel_details_by_id(s, vp_id)\n vp_version = vp_info.current_version\n vp_name = vp_info.name\n form.vpanelname.data = vp_name\n vp_genes = get_genes_by_vpanelid_edit(s, vp_id, vp_version)\n genelist = \"\"\n vp_list = []\n for i in vp_genes:\n vp_list.append(i.id)\n\n genes = []\n print('new method')\n for i in panel_genes:\n if i.id in vp_list:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": True})\n button = render_template(\"gene_button.html\", gene_name=i.name, gene_id=i.id, added=True)\n genelist += button\n\n else:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": False})\n\n gene_html = render_template(\"panel_genes.html\", panel_genes=genes)\n\n url = url_for('panels.edit_virtual_panel_process') + '?id=' + str(vp_id)\n return render_template('virtualpanels_createprocess.html', form=form, genes=gene_html, genelist=genelist,\n vp_id=vp_id, panel_name=vp_name, current_version=vp_version, url=url)",
"def panel(context, panel, version):\n LOG.info(\"Running scout export panel\")\n adapter = context.obj['adapter']\n \n if not panel:\n LOG.warning(\"Please provide at least one gene panel\")\n context.abort()\n\n LOG.info(\"Exporting panels: {}\".format(', '.join(panel)))\n for line in export_gene_panels(adapter, panel, version):\n click.echo(line)",
"def run_view(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"\", perms = \"View\")",
"def enableEditorDrop():\n\n perspPanel = cmds.getPanel( withLabel='Persp View')\n panelControl = cmds.panel( perspPanel, query=True, control=True)\n cmds.control(panelControl, edit=True, dropCallback=panelDropLoad)\n global viewportHeight\n viewportHeight = cmds.control(panelControl, query=True, h=True)",
"def panels(self, request, panel_list, group):\n return panel_list",
"def toggle_locked():\n panel_id = request.args.get('id')\n json = False\n if not panel_id:\n json = True\n panel_id = request.json['id']\n project_id = get_project_id_by_panel_id(s, panel_id)\n if current_user.id == get_locked_user(s, panel_id) and json:\n unlock_panel_query(s, panel_id)\n return jsonify(\"complete\")\n elif check_user_has_permission(s, current_user.id, project_id):\n unlock_panel_query(s, panel_id)\n return manage_locked(message=\"Panel Unlocked\")\n else:\n return manage_locked(message=\"Hmmmm you don't have permission to do that\")",
"def render_regions(view=None):\r\n # Get current active view\r\n if view is None:\r\n view = sublime.active_window().active_view()\r\n # Unable to set regions when no view available\r\n if view is None:\r\n return\r\n\r\n # Do no set regions if view is empty or still loading\r\n if view.size() == 0 or view.is_loading():\r\n return\r\n\r\n # Remove all markers to avoid marker conflict\r\n view.erase_regions(S.REGION_KEY_BREAKPOINT)\r\n view.erase_regions(S.REGION_KEY_CURRENT)\r\n view.erase_regions(S.REGION_KEY_DISABLED)\r\n\r\n # Get filename of current view and check if is a valid filename\r\n filename = view.file_name()\r\n if not filename:\r\n return\r\n\r\n # Determine icon for regions\r\n icon_current = get_region_icon(S.KEY_CURRENT_LINE)\r\n icon_disabled = get_region_icon(S.KEY_BREAKPOINT_DISABLED)\r\n icon_enabled = get_region_icon(S.KEY_BREAKPOINT_ENABLED)\r\n\r\n # Get all (disabled) breakpoint rows (line numbers) for file\r\n breakpoint_rows = []\r\n disabled_rows = []\r\n if filename in S.BREAKPOINT and isinstance(S.BREAKPOINT[filename], dict):\r\n for lineno, bp in S.BREAKPOINT[filename].items():\r\n # Do not show temporary breakpoint\r\n if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] == filename and S.BREAKPOINT_RUN['lineno'] == lineno:\r\n continue\r\n # Determine if breakpoint is enabled or disabled\r\n if bp['enabled']:\r\n breakpoint_rows.append(lineno)\r\n else:\r\n disabled_rows.append(lineno)\r\n\r\n # Get current line from breakpoint hit\r\n if S.BREAKPOINT_ROW is not None:\r\n # Make sure current breakpoint is in this file\r\n if filename == S.BREAKPOINT_ROW['filename']:\r\n # Remove current line number from breakpoint rows to avoid marker conflict\r\n if S.BREAKPOINT_ROW['lineno'] in breakpoint_rows:\r\n breakpoint_rows.remove(S.BREAKPOINT_ROW['lineno'])\r\n # Set icon for current breakpoint\r\n icon_breakpoint_current = get_region_icon(S.KEY_BREAKPOINT_CURRENT)\r\n if icon_breakpoint_current:\r\n icon_current = icon_breakpoint_current\r\n if S.BREAKPOINT_ROW['lineno'] in disabled_rows:\r\n disabled_rows.remove(S.BREAKPOINT_ROW['lineno'])\r\n # Set current line marker\r\n if icon_current:\r\n view.add_regions(S.REGION_KEY_CURRENT, rows_to_region(S.BREAKPOINT_ROW['lineno']), S.REGION_SCOPE_CURRENT, icon_current, sublime.HIDDEN)\r\n\r\n # Set breakpoint marker(s)\r\n if breakpoint_rows and icon_enabled:\r\n view.add_regions(S.REGION_KEY_BREAKPOINT, rows_to_region(breakpoint_rows), S.REGION_SCOPE_BREAKPOINT, icon_enabled, sublime.HIDDEN)\r\n if disabled_rows and icon_disabled:\r\n view.add_regions(S.REGION_KEY_DISABLED, rows_to_region(disabled_rows), S.REGION_SCOPE_BREAKPOINT, icon_disabled, sublime.HIDDEN)",
"def _getBrailleRegionsForPanel(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForPanel\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n\n # If there was no label for the panel, but it has a name, we'll\n # use the name.\n #\n if len(text) == 0:\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n\n text = self._script.appendString(text, self._getTextForRole(obj))\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]",
"def get_panels(config):\n\n task = TaskPanels(config)\n task.execute()\n\n task = TaskPanelsMenu(config)\n task.execute()\n\n logging.info(\"Panels creation finished!\")",
"def run(self):\n\n self.window.run_command(\"show_panel\", {\"panel\": \"output.reg_replace\"})",
"def editPanels(self, panels, **properties):\n\n if type(panels) is not list:\n panels = [panels]\n\n panels.reverse()\n\n panelsXML = []\n for panel in panels:\n panelXML = ET.fromstring(panel)\n\n multiTrackXML = panelXML.find(\"MultiTrackElements\")\n if multiTrackXML is not None:\n self.__saveSetupData(multiTrackDataXMLS=ET.tostring(multiTrackXML), properties=panelXML.attrib)\n\n panelsXML.append({\"properties\":panelXML.attrib, \"multiTrackXML\":multiTrackXML})\n\n if panelsXML:\n if (OSUtils.type == OSUtils.LINUX):\n paths = []\n for panel in panelsXML:\n properties = panel['properties'].copy()\n properties['frame'] = '#'\n mode = Mode(properties.get('show', None), properties.get('sequence', None))\n path = mode.get('[recipeCompedFile]', properties)\n paths.append(path)\n if not self.fileServiceLocal.exists(path):\n raise utils.FlixException(msg=\"Missing File: %s\"%path)\n command = Mode().get(\"[editImageCommand]\")\n log('Edit command %s' % command)\n os.system(command + \" \" + ' '.join(paths))\n else:\n Photoshop().createPhotoshopFileForPanels(panelsXML)\n\n return \"Done\"",
"def _handler_control_view(self,event):\n self._mgr.LoadPerspective(\n\t\t\tself._perspectives['control_view'])",
"def checkRegionControl(self, iPlayer, regionID, bVassal = False):\n\t\t\n\t\tbFound = False\n\t\tplotList = self.getRegionPlotList([regionID])\n\t\tfor tPlot in plotList:\n\t\t\t\tpCurrent = gc.getMap().plot(tPlot[0], tPlot[1])\n\t\t\t\tif pCurrent.isCity():\n\t\t\t\t\tiOwner = pCurrent.getPlotCity().getOwner()\n\t\t\t\t\tif iOwner != iPlayer:\n\t\t\t\t\t\tif bVassal:\n\t\t\t\t\t\t\tif gc.getTeam(gc.getPlayer(iOwner).getTeam()).isVassal(iPlayer):\n\t\t\t\t\t\t\t\tbFound = True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\treturn False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\tbFound = True\n\t\tif bFound:\n\t\t\treturn True\n\t\telse:\n\t\t\tfor tPlot in plotList:\n\t\t\t\tpCurrent = gc.getMap().plot(tPlot[0], tPlot[1])\n\t\t\t\tiOwner = pCurrent.getOwner()\n\t\t\t\tif iOwner != iPlayer:\n\t\t\t\t\tbFound = False\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tbFound = True\n\t\t\tif bFound:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False",
"def get_active_panel(cls):\n active_panel = None\n panel_list = pm.getPanel(type='modelPanel')\n for panel in panel_list:\n if pm.modelEditor(panel, q=1, av=1):\n active_panel = panel\n break\n\n return active_panel",
"def unlock_panel():\n panelid = request.args.get('panelid')\n unlock_panel_query(s, panelid)\n\n return redirect(url_for('panels.view_panels'))",
"def edit_panel_process():\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n tx_version = get_current_preftx_version(s, preftx_id)\n panel_version = get_current_version(s, panel_id)\n if not tx_version:\n tx_version = 0\n if make_live == \"on\":\n print('make_live')\n make_preftx_live(s, preftx_id, tx_version + 1, current_user.id)\n make_panel_live(s, panel_id, panel_version + 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n panel_id = request.args.get('id')\n form = EditPanelProcess()\n panel_info = get_panel_info(s, panel_id)\n project_id = panel_info.project_id\n form.project.choices = [(project_id, panel_info.project_name), ]\n form.panelname.data = panel_info.name\n\n lock_panel(s, current_user.id, panel_id)\n\n genes = get_genes_by_panelid_edit(s, panel_id, panel_info.current_version)\n html = \"\"\n buttonlist = \"\"\n print('hello')\n for gene in genes:\n gene_id = gene.id\n gene_name = gene.name\n preftx_id = get_preftx_by_gene_id\n upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)\n all_tx = get_tx_by_gene_id(s, gene_id)\n\n buttonlist += render_template(\"gene_button.html\", gene_name=gene_name, gene_id=gene_id, added=True)\n tx_html = render_template(\"tx_list.html\", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,\n upcoming=upcoming_preftx, disabled=True)\n html += tx_html\n\n return render_template('panel_createprocess.html', form=form, genes=html, genelist=buttonlist,\n panel_id=panel_id,\n url=url_for('panels.edit_panel_process') + \"?id=\" + panel_id)",
"def test_portlet_available_to_correct_roles(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n context = self.portal\n request = self.layer['request']\n # Simulate being on the default view.\n request.set('ACTUAL_URL', aq_inner(context).absolute_url() + '/view')\n view = self.portal.restrictedTraverse('@@plone')\n manager = getUtility(IPortletManager, name='plone.leftcolumn', context=self.portal)\n assignment = analyticsportlet.Assignment()\n renderer = getMultiAdapter((context, request, view, manager, assignment), IPortletRenderer)\n\n self.assertEquals(renderer.available, True)"
]
| [
"0.63788956",
"0.5876005",
"0.56357265",
"0.5570096",
"0.5334055",
"0.51983225",
"0.51884156",
"0.5155888",
"0.5124921",
"0.51096785",
"0.50766367",
"0.5070279",
"0.50308144",
"0.50113946",
"0.4993579",
"0.49817935",
"0.4955134",
"0.49180374",
"0.48635954",
"0.48109576",
"0.4808198",
"0.4797087",
"0.47293457",
"0.47047177",
"0.46668684",
"0.4640415",
"0.46336776",
"0.4622239",
"0.46155033",
"0.4608429"
]
| 0.73863775 | 0 |
Create panel wizard method. If the request method is "GET", the wizard template is rendered. If the request method is "POST", the panel is made live (if selected) and the user is redirected to the view panel page. | def create_panel_process():
form = CreatePanelProcess()
if request.method == "POST":
make_live = request.form['make_live']
panel_id = request.args.get('id')
project_id = get_project_id_by_panel_id(s, panel_id)
preftx_id = get_preftx_id_by_project_id(s, project_id)
version = get_current_preftx_version(s, preftx_id)
if not version:
version = 0
if make_live == "on":
make_preftx_live(s, preftx_id, version + 1, current_user.id)
make_panel_live(s, panel_id, 1, current_user.id)
unlock_panel_query(s, panel_id)
return redirect(url_for('panels.view_panel') + "?id=" + panel_id)
elif request.method == "GET":
form.project.choices = get_project_choices(s, current_user.id)
return render_template('panel_createprocess.html', form=form, panel_id="main",
url=url_for('panels.create_panel_process')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_virtual_panel_process():\n form = CreateVirtualPanelProcess()\n\n if request.method == \"POST\":\n make_live = request.form['make_live']\n vp_id = request.args.get('id')\n if make_live == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n panel_id = get_panel_by_vp_id(s, vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n form.panel.choices = get_panel_choices(s, current_user.id)\n url = url_for('panels.create_virtual_panel_process')\n return render_template('virtualpanels_createprocess.html', form=form, url=url, vp_id=\"main\")",
"def edit_panel_process():\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n tx_version = get_current_preftx_version(s, preftx_id)\n panel_version = get_current_version(s, panel_id)\n if not tx_version:\n tx_version = 0\n if make_live == \"on\":\n print('make_live')\n make_preftx_live(s, preftx_id, tx_version + 1, current_user.id)\n make_panel_live(s, panel_id, panel_version + 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n panel_id = request.args.get('id')\n form = EditPanelProcess()\n panel_info = get_panel_info(s, panel_id)\n project_id = panel_info.project_id\n form.project.choices = [(project_id, panel_info.project_name), ]\n form.panelname.data = panel_info.name\n\n lock_panel(s, current_user.id, panel_id)\n\n genes = get_genes_by_panelid_edit(s, panel_id, panel_info.current_version)\n html = \"\"\n buttonlist = \"\"\n print('hello')\n for gene in genes:\n gene_id = gene.id\n gene_name = gene.name\n preftx_id = get_preftx_by_gene_id\n upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)\n all_tx = get_tx_by_gene_id(s, gene_id)\n\n buttonlist += render_template(\"gene_button.html\", gene_name=gene_name, gene_id=gene_id, added=True)\n tx_html = render_template(\"tx_list.html\", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,\n upcoming=upcoming_preftx, disabled=True)\n html += tx_html\n\n return render_template('panel_createprocess.html', form=form, genes=html, genelist=buttonlist,\n panel_id=panel_id,\n url=url_for('panels.edit_panel_process') + \"?id=\" + panel_id)",
"def edit_virtual_panel_process():\n form = EditVirtualPanelProcess()\n\n vp_id = request.args.get('id')\n panel_id = get_panel_by_vp_id(s, vp_id)\n if request.method == \"POST\":\n if request.form['make_live'] == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n lock_panel(s, current_user.id, panel_id)\n panel_info = get_panel_details_by_id(s, panel_id)\n panel_name = panel_info.name\n form.panel.choices = [(panel_id, panel_name), ]\n\n panel_version = get_current_version(s, panel_id)\n panel_genes = get_genes_by_panelid(s, panel_id, panel_version)\n vp_info = get_vpanel_details_by_id(s, vp_id)\n vp_version = vp_info.current_version\n vp_name = vp_info.name\n form.vpanelname.data = vp_name\n vp_genes = get_genes_by_vpanelid_edit(s, vp_id, vp_version)\n genelist = \"\"\n vp_list = []\n for i in vp_genes:\n vp_list.append(i.id)\n\n genes = []\n print('new method')\n for i in panel_genes:\n if i.id in vp_list:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": True})\n button = render_template(\"gene_button.html\", gene_name=i.name, gene_id=i.id, added=True)\n genelist += button\n\n else:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": False})\n\n gene_html = render_template(\"panel_genes.html\", panel_genes=genes)\n\n url = url_for('panels.edit_virtual_panel_process') + '?id=' + str(vp_id)\n return render_template('virtualpanels_createprocess.html', form=form, genes=gene_html, genelist=genelist,\n vp_id=vp_id, panel_name=vp_name, current_version=vp_version, url=url)",
"def get_template(self, request, step, form):\n return 'forms/wizard.html'",
"def newPage(request):\n newForm = newWikiPageForm()\n newFormTitle = newForm[\"newFormTitle\"]\n newFormBody = newForm[\"newFormBody\"]\n if request.method == \"POST\":\n form = newWikiPageForm(request.POST)\n # check that all fields are filled\n if form.is_valid():\n title = form.cleaned_data[\"newFormTitle\"]\n content = form.cleaned_data[\"newFormBody\"]\n\n # add new wiki page if page doesn't already exist\n if util.get_entry(title) is None:\n\n util.save_entry(title, content)\n\n # take user to their newly created page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # render template again with the inputted data along with a error message \n else:\n return render(request, \"encyclopedia/newPage.html\", {\n \"formTitle\": title,\n \"formBody\": content,\n \"exists\": True\n }) \n # render template again with error message \n else: \n return render(request, \"encyclopedia/newPage.html\", {\n \"formTitle\": title,\n \"formBody\": content,\n \"exists\": False\n }) \n #when 'create new page' button is clicked user is taken to newPage.html\n else: \n return render(request, \"encyclopedia/newPage.html\", {\n \"formTitle\": newFormTitle,\n \"formBody\": newFormBody,\n \"exists\": False\n })",
"def createPanel(self, LibraryID, Name, **kwargs):\n if self.request(\"createPanel\", LibraryID=LibraryID, Name=Name, **kwargs) is None:\n return None\n return self.json_response[\"Result\"][\"PanelID\"]",
"def make_live():\n panelid = request.args.get('id')\n locked = check_if_locked(s, panelid)\n if locked:\n unlock_panel_query(s, panelid)\n current_version = get_current_version(s, panelid)\n if not current_version:\n current_version = 0\n new_version = current_version + 1\n make_panel_live(s, panelid, new_version, current_user.id)\n return redirect(url_for('panels.view_panels'))",
"def add_panel():\n panel_name = request.json['panel_name']\n project_id = request.json['project_id']\n panel_id = create_panel_query(s, project_id, panel_name, current_user.id)\n return jsonify(panel_id)",
"def new_dynamic(request):\n\n if request.method == 'POST':\n dynamic_form = DynamicForm(request.POST)\n\n if dynamic_form.is_valid():\n new_dynamic = dynamic_form.save(commit=False)\n new_dynamic.user = request.user\n new_dynamic.save()\n request.session['dynamic_id'] = new_dynamic.id\n return HttpResponseRedirect('/dynamics/attach-molecules')\n else:\n context = {\n 'dynamic_form': dynamic_form,\n }\n return render(request, 'dynamics/new_dynamic.html', context)\n # if dynamic_id is None:\n context = {\n 'dynamic_form': DynamicForm(),\n }\n # else:\n # dynamic = Dynamic.objects.get(pk=dynamic_id)\n # dynamic_form = DynamicForm(instance=dynamic)\n # dynamic_form.fields['box_size'].disabled = True\n # dynamic_form.fields['number_of_molecules'].label = 'Number of molecules to be added'\n # dynamic_form.fields['number_of_atoms_for_alignment'].disabled = True\n # context = {\n # 'dynamic_form': dynamic_form,\n # }\n return render(request, 'dynamics/new_dynamic.html', context)",
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def KLP_Institution_Management_Create(request):\n\tbuttonType = request.POST.get('form-buttonType')\n #before Institution_Mangement.objects.all()\n\tKLP_Institution_Management_Create = KLP_Institution_Management(queryset = Institution_Management.objects.filter(pk=0), permitted_methods = ('GET', 'POST'), responder = TemplateResponder(template_dir = 'viewtemplates', template_object_name = 'InstitutionManagement',extra_context={'buttonType':buttonType}), receiver = XMLReceiver(),)\n\tresponse = KLP_Institution_Management_Create.responder.create_form(request,form_class=Institution_Management_Form)\n\t\n\treturn HttpResponse(response)",
"def process_view(self, request, view_func, view_args, view_kwargs): # pylint: disable=W0613,R0201\n # If no user or they aren't authenticated, return to hit the default login redirect\n if not hasattr(request, \"user\") or not request.user.is_authenticated:\n return\n if request.path == \"/\":\n merlin_msg = f\"<a href={reverse('plugins:welcome_wizard:dashboard')}>The Nautobot Welcome Wizard can help you get started with Nautobot!</a>\"\n messages.success(request, mark_safe(merlin_msg)) # nosec\n elif request.path.endswith(\"/add/\"):\n # model = view_func.view_class.model_form.Meta.model\n base_fields = view_func.view_class.model_form.base_fields\n for field in base_fields:\n if base_fields[field].required:\n if hasattr(base_fields[field], \"queryset\") and not base_fields[field].queryset.exists():\n name = base_fields[field].label if base_fields[field].label else field.replace(\"_\", \" \").title()\n reverse_name = field.replace(\"_\", \"\")\n try:\n reverse_link = reverse(f\"{request.resolver_match.app_names[0]}:{reverse_name}_add\")\n except NoReverseMatch as error:\n logger.warning(\"No Reverse Match was found for %s. %s\", reverse_name, error)\n reverse_link = \"\"\n msg = (\n f\"You need to configure a <a href='{reverse_link}'>{name}</a> before you create this item.\"\n )\n messages.error(request, mark_safe(msg)) # nosec",
"def workorderwizard_submit(request):\n # Manually checking if user is authenticated rather than using @login_required\n # in order to return a 401 status that the workorder wizard understands so it can display a specific error message\n # instead of returning a 302 redirect to the login page, which wouldn't work because this view is called via AJAX\n if not request.user.is_authenticated:\n return HttpResponse('Unauthorized', status=401)\n\n # load JSON\n data = json.loads(request.body.decode('utf-8'))\n\n # check that all required fields are present\n mandatory_fields = ('org', 'event_name', 'location', 'start', 'end', 'setup_complete', 'services')\n if not all(key in data for key in mandatory_fields):\n return HttpResponse('Unprocessable Entity', status=422)\n\n reversion.set_comment('Event submitted using work order wizard')\n\n # create event object and populate fields\n event = events_models.Event2019()\n event.submitted_by = request.user\n event.submitted_ip = request.META.get('REMOTE_ADDR')\n event.contact = request.user\n event.event_name = data['event_name']\n if 'description' in data:\n event.description = data['description']\n try:\n event.location = events_models.Location.objects.filter(show_in_wo_form=True).get(pk=data['location'])\n except events_models.Location.DoesNotExist:\n return HttpResponse('Unprocessable Entity', status=422)\n event.datetime_setup_complete = parse_datetime(data['setup_complete'])\n event.datetime_start = parse_datetime(data['start'])\n event.datetime_end = parse_datetime(data['end'])\n try:\n org = events_models.Organization.objects.get(pk=data['org'])\n except events_models.Organization.DoesNotExist:\n return HttpResponse('Unprocessable Entity', status=422)\n event.billing_org = org\n\n # populate many-to-many fields\n event.save()\n event.org.add(org)\n \n # add services\n for service_data in data['services']:\n if 'id' not in service_data:\n return HttpResponse('Unprocessable Entity', status=422)\n try:\n service = events_models.Service.objects.filter(enabled_event2019=True).get(shortname=service_data['id'])\n except events_models.Service.DoesNotExist:\n return HttpResponse('Unprocessable Entity', status=422)\n service_instance = events_models.ServiceInstance()\n service_instance.service = service\n service_instance.event = event\n if 'detail' in service_data:\n service_instance.detail = service_data['detail']\n service_instance.save()\n\n # add extras\n for extra_data in data['extras']:\n if not all(key in extra_data for key in ('id', 'quantity')):\n return HttpResponse('Unprocessable Entity', status=422)\n try:\n extra = events_models.Extra.objects \\\n .filter(disappear=False, services__in=event.serviceinstance_set.values_list('service', flat=True)) \\\n .distinct().get(name=extra_data['id'])\n except events_models.Extra.DoesNotExist:\n return HttpResponse('Unprocessable Entity', status=422)\n extra_instance = events_models.ExtraInstance()\n extra_instance.extra = extra\n extra_instance.event = event\n extra_instance.quant = extra_data['quantity']\n extra_instance.save()\n\n # send confirmation email\n email_body = 'You have successfully submitted the following event.'\n bcc = [settings.EMAIL_TARGET_VP, settings.EMAIL_TARGET_HP] if event.has_projection else [settings.EMAIL_TARGET_VP]\n email = EventEmailGenerator(event=event, subject='New Event Submitted', to_emails=[request.user.email],\n body=email_body, bcc=bcc)\n email.send()\n\n # If the user does not have permission to submit events on behalf of the selected organization,\n # send an email 
to the organization to alert them that the event was submitted\n # if not request.user.has_perm('events.create_org_event', org):\n # email_body = ('The following event was submitted. You are receiving this email because the user who submitted '\n # 'this event is not expressly authorized to submit events on behalf of {}. The organization owner '\n # 'can update authorized users at {}.'.format(org.name,\n # request.scheme + '://' + request.get_host() + reverse('my:org-edit', args=(org.pk,))))\n # email = EventEmailGenerator(event=event, subject='Event Submitted on behalf of {}'.format(org.name),\n # to_emails=[org.exec_email], body=email_body, bcc=[settings.EMAIL_TARGET_W])\n # email.send()\n\n # return response with the URL to the event detail page\n return HttpResponse(json.dumps({'event_url': reverse('events:detail', args=[event.pk])}))",
"def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n self.use_template(self.render_template())\n return self.form_valid(form)\n else:\n return self.form_invalid(form)",
"def new(): \n pages_object = Pages()\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Creation new page\n if request.method == 'POST':\n if pages_object.new():\n return redirect(url_for('pages.overview'))\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/new.html'.format(MODULE_DIR), **locals())",
"def custom_actions(self, form_wizard_entry, request=None):",
"def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')",
"def test_form_pass(self):\n resp = self.post_step(\"basics\")\n self.assertWizardResponse(resp, \"config\")\n resp = self.post_step(\"config\", session=list(resp._request.session.items()))\n self.assertIsInstance(resp, HttpResponseRedirect)\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp[\"location\"], \"/projects/foobar/\")\n\n proj = Project.objects.get(name=\"foobar\")\n self.assertIsNotNone(proj)",
"def make_virtualpanel_live():\n vpanelid = request.args.get('id')\n panelid = get_panel_by_vp_id(s, vpanelid)\n locked = check_if_locked(s, panelid)\n if locked:\n if current_user.id == get_locked_user(s, panelid):\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))\n else:\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))",
"def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing",
"def serve(self, request, *args, **kwargs):\n\n template = self.get_template(request)\n\n if request.method == 'POST':\n\n form = self.get_form(request.POST, page=self, user=request.user)\n\n if form.is_valid():\n self.process_form_submission(form)\n return HttpResponseRedirect(self.url + '?thank=you')\n\n else:\n\n thanks = request.GET.get('thank', False)\n if thanks:\n form = None\n template = self.get_landing_page_template(request)\n if self.thanks_page_title:\n self.title = self.thanks_page_title\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context['form'] = form\n if form:\n context['conditional_rules'] = json.dumps(form.conditional_rules)\n\n return render(\n request,\n template,\n context\n )",
"def post(self):\n return CreateSavingPlan(request, current_user.id)",
"def dashboard_post(request):\n template = \"pages/dashboard.html\"\n form = CreateLeaderboardForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n add_owner = data[\"add_owner\"]\n del data[\"add_owner\"] # Drop the fields not needed by leaderboard model\n ldb = Leaderboard.objects.create(**data, owner=request.user)\n if add_owner:\n ldb.participants.add(request.user) # Add the creator as participant\n return render(request, template, context=dashboard_context(request))\n else:\n ctx = dashboard_context(request)\n ctx[\"creation_form\"] = form # Send it back with error messages\n return render(request, template, context=ctx)",
"def new_job(request):\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n terms, _current_term_id = get_term_data_for_school(sis_account_id)\n school_id = sis_account_id.split(\":\")[1]\n canvas_site_templates = get_canvas_site_templates_for_school(school_id)\n potential_course_sites_query = None\n departments = []\n course_groups = []\n selected_term_id = None\n selected_course_group_id = None\n selected_department_id = None\n\n # Only display the Course Groups dropdown if the tool is launched in the COLGSAS sub-account\n if school_id == 'colgsas':\n try:\n course_groups = get_course_group_data_for_school(sis_account_id, include_ile_sb=False)\n except Exception:\n logger.exception(f\"Failed to get course groups with sis_account_id {sis_account_id}\")\n # For all other schools, display just the Departments dropdown\n else:\n try:\n departments = get_department_data_for_school(sis_account_id, include_ile_sb=False)\n except Exception:\n logger.exception(f\"Failed to get departments with sis_account_id {sis_account_id}\")\n\n logging_dept_cg_text = ' and no selected department or course group'\n if request.method == \"POST\":\n selected_term_id = request.POST.get(\"courseTerm\", None)\n selected_course_group_id = request.POST.get(\"courseCourseGroup\", None)\n selected_department_id = request.POST.get(\"courseDepartment\", None)\n\n logging_dept_cg_text = f' and course group ID {selected_course_group_id}' if selected_course_group_id \\\n else f' and department ID {selected_department_id}' if selected_department_id \\\n else ' and no selected department or course group.'\n logger.debug(f'Retrieving potential course sites for term ID '\n f'{selected_term_id}{logging_dept_cg_text}', extra={\"sis_account_id\": sis_account_id,\n \"school_id\": school_id,\n })\n\n # Retrieve all course instances for the given term_id and account that do not have Canvas course sites\n # nor are set to be fed into Canvas via the automated feed\n potential_course_sites_query = get_course_instance_query_set(\n selected_term_id, sis_account_id\n ).filter(canvas_course_id__isnull=True,\n sync_to_canvas=0,\n bulk_processing=0,\n term__term_id=selected_term_id)\n\n # Filter potential_course_sites_query by course group.\n if selected_course_group_id and selected_course_group_id != '0':\n potential_course_sites_query = potential_course_sites_query.filter(course__course_group=selected_course_group_id)\n # Filter potential_course_sites_query by department.\n elif selected_department_id and selected_department_id != '0':\n potential_course_sites_query = potential_course_sites_query.filter(course__department=selected_department_id)\n\n # TODO maybe better to use template tag unless used elsewhere?\n # TODO cont. 
this may be included in a summary generation to be displayed in page (see wireframe and Jira ticket)\n potential_course_site_count = (\n potential_course_sites_query.count() if potential_course_sites_query else 0\n )\n\n logger.debug(f'Retrieved {potential_course_site_count} potential course sites for term '\n f'{selected_term_id}{logging_dept_cg_text}', extra={\"sis_account_id\": sis_account_id,\n \"school_id\": school_id,\n })\n\n context = {\n \"terms\": terms,\n \"potential_course_sites\": potential_course_sites_query,\n \"potential_site_count\": potential_course_site_count,\n \"canvas_site_templates\": canvas_site_templates,\n \"departments\": departments,\n \"course_groups\": course_groups,\n 'selected_term_id': selected_term_id,\n 'selected_course_group_id': selected_course_group_id,\n 'selected_department_id': selected_department_id,\n 'canvas_url': settings.CANVAS_URL,\n }\n return render(request, \"bulk_site_creator/new_job.html\", context=context)",
"def post_create():\n req_data = request.get_json()\n\n print('This is the request itself \\n', req_data)\n name = req_data['name']\n chapter = req_data['chapter']\n site = req_data['site']\n print('\\nThe function that is selected: {0} {1} {2}\\n'.format(name, chapter, site))\n flask_wms.write_new_data(name, chapter, site, \"False\")\n return 'Request recieved, create method'",
"def create_report():\n\n LocalCreateReportForm = CreateReportForm.get_instance()\n for department in Department.query.all():\n if len(department.fields) > 0:\n LocalCreateReportForm.add_department(department)\n\n form = LocalCreateReportForm()\n form.user_id.data = current_user.id\n if form.validate_on_submit():\n # Add the new report to the database\n db.session.add(form.report)\n db.session.commit()\n\n return redirect(url_for('reports.my_reports'))\n else:\n flash_form_errors(form)\n return render_template('reports/create.html', form=form)",
"def customer_add_view(h):\n global html\n html = h\n \n common_elements = customer_common_elements()\n \n css_list = common_elements[\"css_list\"]\n\n javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Add Customers\", \"customer_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div id=\"grid_view_div\">\n <div class=\"yo-tabs\">\n <ul>\n <li>\n <a class=\"active\" href=\"#content_1\" id=\"active_host_tab\">Customer Application Form</a>\n </li>\n <li>\n <a href=\"#content_2\" id=\"disable_host_tab\">Bulk Upload</a>\n </li>\n <li>\n <a href=\"#content_3\" id=\"discovered_host_tab\">CRM Import</a>\n </li>\n </ul>\n <div id=\"content_1\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form action=\"customer_post_ajax.py\" method=\"get\" id=\"add_customer_form\" name=\"add_customer_form\" autocomplete=\"on\" >\n <div class=\"form-div\" style=\"top:30px;\">\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"username\">Customer User Name</label>\n <input type=\"text\" id=\"user_name\" name=\"user_name\" \n title=\"Choose Unique User Name. <br/>Must be at least 5 characters.\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"password\">Password</label>\n <input type=\"password\" id=\"password\" name=\"password\" title=\"Must be at least 8 characters. \"/>\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"groups\">Customer Organisation(Role)</label>\n \"\"\"\n customer_string += (customer_group_customer_widget())\n customer_string += \"\"\"\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"first_name\">First Name</label>\n <input type=\"text\" id=\"first_name\" name=\"first_name\" title=\"Please Enter First name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"last_name\">Last Name</label>\n <input type=\"text\" id=\"last_name\" name=\"last_name\" title=\"Please Enter Last name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_name\">Company</label>\n <input type=\"text\" id=\"company_name\" name=\"company_name\" title=\"Please Enter Company Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"designation\">Designation</label>\n <input type=\"text\" id=\"designation\" name=\"designation\" title=\"Please Enter Designation.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"mobile_no\">Mobile Number</label>\n <input type=\"text\" id=\"mobile_no\" name=\"mobile_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"telephone_no\">Telephone Number</label>\n <input type=\"text\" id=\"telephone_no\" name=\"telephone_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"fax\">Fax</label>\n <input type=\"text\" id=\"fax\" name=\"fax\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"email_id\">E-Mail ID</label>\n <input type=\"text\" id=\"email_id\" name=\"email_id\" title=\"Please Enter E-Mail ID.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"city_id\">City</label>\n <input 
type=\"text\" id=\"city_id\" name=\"city_id\" title=\"Please Enter City Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"state_id\">State</label>\n <input type=\"text\" id=\"state_id\" name=\"state_id\" title=\"Please Enter State.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"country_id\">Country</label>\n <input type=\"text\" id=\"country_id\" name=\"country_id\" title=\"Please Enter Country.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"usage\">Usage</label>\n <select id='usage' name='usage'>\n <option value=0>Personal</option>\n <option value=1>Commercial</option>\n </select>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"purpose\">Purpose</label>\n <input type=\"text\" id=\"purpose\" name=\"purpose\" title=\"Please Enter Purpose.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"address\">Address</label>\n <textarea id=\"address\" name=\"address\" title=\"Please Enter own Address.\"></textarea>\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Save</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"cancel\">Cancel</span></button>\n </div>\n </form>\n </div>\n <!-- container tab 2 -->\n <div id=\"content_2\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form action=\"#\" method=\"post\">\n <label for=\"bulk\">Upload Customers</label>\n <input name=\"bulk\" id=\"bulk\" type=\"file\" />\n </form>\n </div>\n <!-- container tab 3 -->\n <div id=\"content_3\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form class=\"form-body\" id=\"crm_conn\" action=\"#\" method=\"get\">\n <div class=\"form-div\" style=\"top:30px;\">\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_link\">CRM Link Location</label>\n <input type=\"text\" name=\"crm_link\" id=\"crm_link\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_user\">CRM Username</label>\n <input type=\"text\" name=\"crm_user\" id=\"crm_user\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_pass\">CRM Password</label>\n <input type=\"password\" name=\"crm_pass\" id=\"crm_pass\" />\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Test Connection</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"add\">Start Import</span></button>\n </div>\n </form>\n </div>\n </div>\n </div>\n \"\"\" \n customer_string += \"\"\"\n <script>\n post_customers();\n </script>\n \"\"\"\n html.write(customer_string)\n html.new_footer()",
"def process_request(self, req):\r\n add_stylesheet(req, 'common/css/wiki.css')\r\n name = req.args.get('page', '')\r\n node = self._get_node(req)\r\n if ('create' in req.args):\r\n parent=req.args.get('parent', '')\r\n if not WikiPage(self.env, parent).exists:\r\n parent = ''\r\n req.redirect(req.href.wiki(name, action='edit', parent=parent, title=req.args.get('title', '')))\r\n if (req.args.get('action', '') == 'new'):\r\n data = dict({'node' : node, '_' : _ })\r\n return 'wiki_create.html', data, None",
"def staff_form(request, pk, step=0):\n ts = get_timeslot()\n if not hasattr(ts, 'resultoptions'):\n raise PermissionDenied(\"Results menu is not yet visible.\")\n else:\n if not get_timeslot().resultoptions.Visible:\n raise PermissionDenied(\"Results menu is not yet visible.\")\n dstr = get_object_or_404(Distribution, pk=pk)\n if not hasattr(dstr, 'presentationtimeslot'):\n raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')\n if not request.user.is_superuser and \\\n request.user != dstr.Proposal.Track.Head and \\\n request.user != dstr.Proposal.ResponsibleStaff and \\\n (get_grouptype('1') not in request.user.groups.all() or request.user not in dstr.Proposal.Assistants.all()) and \\\n get_grouptype('3') not in request.user.groups.all() and \\\n request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():\n raise PermissionDenied(\"You are not the correct owner of this distribution. \"\n \"Only track heads and responsible staff can edit grades.\")\n\n cats = GradeCategory.objects.filter(TimeSlot=get_timeslot()).distinct()\n numcategories = len(cats)\n step = int(step)\n if step == 0:\n return render(request, \"results/wizard.html\", {\n \"step\": 0,\n \"pk\": pk,\n \"categories\": cats,\n \"dstr\": dstr,\n \"final\": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False, # fix for all([])=True\n # \"files\": files,\n })\n elif step <= numcategories:\n saved = False\n cat = cats[step - 1]\n try: # existing category result\n cat_result = CategoryResult.objects.get(Distribution=dstr, Category=cat)\n initial = None\n except CategoryResult.DoesNotExist: # new result\n cat_result = CategoryResult(Distribution=dstr, Category=cat)\n # initial = {'Files': list(StudentFile.objects.filter(Type=cat_result.Category.File, Distribution=cat_result.Distribution).distinct())}\n if request.method == \"POST\": # submitted form\n if cat_result.Final:\n return render(request, \"base.html\", context={\n \"Message\": \"Category Result has already been finalized! Editing is not allowed anymore. 
\"\n \"If this has to be changed, contact support staff\"\n })\n # if files:\n # category_form = CategoryResultFormFile(request.POST, instance=cat_result, prefix='catform')\n # else:\n category_form = CategoryResultForm(request.POST, instance=cat_result, prefix='catform')\n aspect_forms = []\n for i, aspect in enumerate(cat.aspects.all()):\n try: # try find existing form\n aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect)\n except CategoryAspectResult.DoesNotExist: # new clean form\n aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect)\n aspect_forms.append({\n \"form\": AspectResultForm(request.POST, instance=aspect_result, prefix=\"aspect\" + str(i)),\n \"aspect\": aspect,\n })\n if category_form.is_valid() and all([form['form'].is_valid() for form in aspect_forms]):\n cat_result = category_form.save()\n # return the form with the cleaned grade, not the one with the (uncleaned) post data:\n # if files:\n # category_form = CategoryResultFormFile(instance=cat_result, prefix='catform')\n # else:\n category_form = CategoryResultForm(instance=cat_result, prefix='catform')\n for form in aspect_forms: # these forms do not need to be updated as aspect data is not cleaned.\n aspect_result = form['form'].instance\n aspect_result.CategoryResult = cat_result\n aspect_result.save()\n saved = True\n else:\n # if files:\n # category_form = CategoryResultFormFile(instance=cat_result, initial=initial, prefix='catform', disabled=cat_result.Final)\n # else:\n category_form = CategoryResultForm(instance=cat_result, prefix='catform', disabled=cat_result.Final)\n aspect_forms = []\n for i, aspect in enumerate(cat.aspects.all()):\n try:\n aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect)\n except CategoryAspectResult.DoesNotExist:\n aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect)\n aspect_forms.append({\n \"form\": AspectResultForm(instance=aspect_result, prefix=\"aspect\" + str(i), disabled=cat_result.Final),\n \"aspect\": aspect,\n })\n return render(request, \"results/wizard.html\", {\n \"step\": step,\n \"categories\": cats,\n \"category\": cat,\n \"categoryform\": category_form,\n \"aspectsforms\": aspect_forms,\n \"dstr\": dstr,\n \"pk\": pk,\n \"saved\": saved,\n \"final\": cat_result.Final,\n \"aspectlabels\": CategoryAspectResult.ResultOptions,\n # \"files\": files,\n 'rounding': settings.CATEGORY_GRADE_QUANTIZATION\n })\n else:\n raise PermissionDenied(\"This category does not exist.\")",
"def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))"
]
| [
"0.7150321",
"0.6497616",
"0.62563187",
"0.6069959",
"0.5927046",
"0.58684856",
"0.58421654",
"0.5759699",
"0.5713984",
"0.5710167",
"0.567427",
"0.5629274",
"0.55678403",
"0.5563093",
"0.5559728",
"0.55200875",
"0.55147177",
"0.550369",
"0.5481536",
"0.5462065",
"0.54507065",
"0.54418737",
"0.54413867",
"0.5435511",
"0.54103404",
"0.54057527",
"0.53442633",
"0.5321019",
"0.5315218",
"0.5305141"
]
| 0.76065373 | 0 |
Edit panel wizard method. If the request method is "GET", the information for the panel is retrieved and the relevant HTML is created (e.g. gene buttons and tx drop-downs). The template for the wizard is rendered and returned. If the request method is "POST", the panel and preferred tx are made live (if selected) and the method redirects to view_panel(). | def edit_panel_process():
if request.method == "POST":
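        # POST: the wizard was submitted; optionally promote the edits to a new live version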
make_live = request.form['make_live']
panel_id = request.args.get('id')
project_id = get_project_id_by_panel_id(s, panel_id)
preftx_id = get_preftx_id_by_project_id(s, project_id)
tx_version = get_current_preftx_version(s, preftx_id)
panel_version = get_current_version(s, panel_id)
if not tx_version:
tx_version = 0
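        # "Make live" bumps both the preferred-transcript list and the panel to their next versions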
if make_live == "on":
print('make_live')
make_preftx_live(s, preftx_id, tx_version + 1, current_user.id)
make_panel_live(s, panel_id, panel_version + 1, current_user.id)
unlock_panel_query(s, panel_id)
return redirect(url_for('panels.view_panel') + "?id=" + panel_id)
elif request.method == "GET":
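        # GET: pre-populate the wizard form with the panel's existing project and name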
panel_id = request.args.get('id')
form = EditPanelProcess()
panel_info = get_panel_info(s, panel_id)
project_id = panel_info.project_id
form.project.choices = [(project_id, panel_info.project_name), ]
form.panelname.data = panel_info.name
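        # lock the panel for the current user so nobody else can edit it while the wizard is open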
lock_panel(s, current_user.id, panel_id)
genes = get_genes_by_panelid_edit(s, panel_id, panel_info.current_version)
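        # build a button for every gene plus a transcript drop-down (tx_list) per gene for the wizard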
html = ""
buttonlist = ""
print('hello')
for gene in genes:
gene_id = gene.id
gene_name = gene.name
            preftx_id = get_preftx_by_gene_id(s, project_id, gene_id)  # assumed signature, mirroring get_upcoming_preftx_by_gene_id
upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)
all_tx = get_tx_by_gene_id(s, gene_id)
buttonlist += render_template("gene_button.html", gene_name=gene_name, gene_id=gene_id, added=True)
tx_html = render_template("tx_list.html", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,
upcoming=upcoming_preftx, disabled=True)
html += tx_html
return render_template('panel_createprocess.html', form=form, genes=html, genelist=buttonlist,
panel_id=panel_id,
url=url_for('panels.edit_panel_process') + "?id=" + panel_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_virtual_panel_process():\n form = EditVirtualPanelProcess()\n\n vp_id = request.args.get('id')\n panel_id = get_panel_by_vp_id(s, vp_id)\n if request.method == \"POST\":\n if request.form['make_live'] == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n lock_panel(s, current_user.id, panel_id)\n panel_info = get_panel_details_by_id(s, panel_id)\n panel_name = panel_info.name\n form.panel.choices = [(panel_id, panel_name), ]\n\n panel_version = get_current_version(s, panel_id)\n panel_genes = get_genes_by_panelid(s, panel_id, panel_version)\n vp_info = get_vpanel_details_by_id(s, vp_id)\n vp_version = vp_info.current_version\n vp_name = vp_info.name\n form.vpanelname.data = vp_name\n vp_genes = get_genes_by_vpanelid_edit(s, vp_id, vp_version)\n genelist = \"\"\n vp_list = []\n for i in vp_genes:\n vp_list.append(i.id)\n\n genes = []\n print('new method')\n for i in panel_genes:\n if i.id in vp_list:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": True})\n button = render_template(\"gene_button.html\", gene_name=i.name, gene_id=i.id, added=True)\n genelist += button\n\n else:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": False})\n\n gene_html = render_template(\"panel_genes.html\", panel_genes=genes)\n\n url = url_for('panels.edit_virtual_panel_process') + '?id=' + str(vp_id)\n return render_template('virtualpanels_createprocess.html', form=form, genes=gene_html, genelist=genelist,\n vp_id=vp_id, panel_name=vp_name, current_version=vp_version, url=url)",
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def create_panel_process():\n form = CreatePanelProcess()\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n version = get_current_preftx_version(s, preftx_id)\n if not version:\n version = 0\n if make_live == \"on\":\n make_preftx_live(s, preftx_id, version + 1, current_user.id)\n make_panel_live(s, panel_id, 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n form.project.choices = get_project_choices(s, current_user.id)\n return render_template('panel_createprocess.html', form=form, panel_id=\"main\",\n url=url_for('panels.create_panel_process'))",
"def create_virtual_panel_process():\n form = CreateVirtualPanelProcess()\n\n if request.method == \"POST\":\n make_live = request.form['make_live']\n vp_id = request.args.get('id')\n if make_live == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n panel_id = get_panel_by_vp_id(s, vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n form.panel.choices = get_panel_choices(s, current_user.id)\n url = url_for('panels.create_virtual_panel_process')\n return render_template('virtualpanels_createprocess.html', form=form, url=url, vp_id=\"main\")",
"def get_template(self, request, step, form):\n return 'forms/wizard.html'",
"def edit_form():\n return template (\"edit\")",
"def getModifyForm(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\tcontext = {}\n\t\n\ttry:\n\t\t# Get a complete list of sensors.\n\t\tcontext['allsensors'] = Sensor.objects.all()\n\t\n\texcept Sensor.DoesNotExist:\n\t\tlogger.warning(\"No sensors found.\")\n\t\traise Http404\n\t\n\t# Send to template.\n\treturn render(request, 'tuning/modifyForm.tpl', context)",
"def get_edit_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv",
"def customer_edit_view(h):\n global html\n html = h\n \n user_id = html.var(\"customer_id\"); #get the user id from the url query\n\n common_elements = customer_common_elements()\n \n css_list = common_elements[\"css_list\"]\n\n javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Edit Customers\", \"customer_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div>\n <form action=\"customer_put_ajax.py\" method=\"get\" id=\"add_customer_form\" name=\"add_customer_form\" autocomplete=\"on\" >\n <div class=\"form-div\">\n <table class=\"tt-table\" cellspacing=\"0\" cellpadding=\"0\" width=\"100%\">\n <tr>\n <th class=\"cell-title\">Edit Customer</th>\n </tr>\n </table>\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <input type=\"hidden\" id=\"user_id\" name=\"user_id\" disabled='disabled' />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"username\">Customer User Name</label>\n <input type=\"text\" id=\"user_name\" name=\"user_name\" disabled='disabled'\n title=\"Choose Unique User Name. <br/>Must be at least 5 characters.\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"password\">Password</label>\n <input type=\"password\" id=\"password\" name=\"password\" title=\"Must be at least 8 characters. \"/>\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"groups\">Select Group</label>\n \"\"\"\n customer_string += (customer_group_customer_widget())\n customer_string += \"\"\"\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"first_name\">First Name</label>\n <input type=\"text\" id=\"first_name\" name=\"first_name\" title=\"Please Enter First name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"last_name\">Last Name</label>\n <input type=\"text\" id=\"last_name\" name=\"last_name\" title=\"Please Enter Last name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_name\">Company</label>\n <input type=\"text\" id=\"company_name\" name=\"company_name\" title=\"Please Enter Company Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"designation\">Designation</label>\n <input type=\"text\" id=\"designation\" name=\"designation\" title=\"Please Enter Designation.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"mobile_no\">Mobile Number</label>\n <input type=\"text\" id=\"mobile_no\" name=\"mobile_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"telephone_no\">Telephone Number</label>\n <input type=\"text\" id=\"telephone_no\" name=\"telephone_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"fax\">Fax</label>\n <input type=\"text\" id=\"fax\" name=\"fax\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"email_id\">E-Mail ID</label>\n <input type=\"text\" id=\"email_id\" name=\"email_id\" title=\"Please Enter E-Mail ID.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"city_id\">City</label>\n <input type=\"text\" id=\"city_id\" name=\"city_id\" title=\"Please Enter City Name.\"/>\n 
</div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"state_id\">State</label>\n <input type=\"text\" id=\"state_id\" name=\"state_id\" title=\"Please Enter State.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"country_id\">Country</label>\n <input type=\"text\" id=\"country_id\" name=\"country_id\" title=\"Please Enter Country.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"usage\">Usage</label>\n <select id='usage' name='usage'>\n <option value=0>Personal</option>\n <option value=1>Commercial</option>\n </select>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"purpose\">Purpose</label>\n <input type=\"text\" id=\"purpose\" name=\"purpose\" title=\"Please Enter Purpose.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"address\">Address</label>\n <textarea id=\"address\" name=\"address\" title=\"Please Enter own Address.\"></textarea>\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Save</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"cancel\">Cancel</span></button>\n </div>\n </form>\n </div>\n \"\"\" \n customer_string += \"\"\"\n <script>\n put_customer_values(\"%s\");\n post_customers(action=\"put\", user_id = \"%s\");\n </script>\n \"\"\" %(user_id,user_id)\n html.write(customer_string)\n html.new_footer()",
"def preview_handler(self, _, __):\r\n template = self.system.render_template('lti_form.html', self.get_context())\r\n return Response(template, content_type='text/html')",
"def get_edit_handler(cls): # noqa\n panels = [\n ObjectList(\n cls.content_panels\n + cls.body_content_panels\n + cls.bottom_content_panels,\n heading=_(\"Content\"),\n ),\n ObjectList(cls.classify_panels, heading=_(\"Classify\")),\n ObjectList(cls.promote_panels, heading=_(\"SEO\"), classname=\"seo\"),\n ObjectList(\n cls.settings_panels, heading=_(\"Settings\"), classname=\"settings\"\n ),\n ]\n\n if cls.integration_panels:\n panels.append(\n ObjectList(\n cls.integration_panels,\n heading=\"Integrations\",\n classname=\"integrations\",\n )\n )\n\n return TabbedInterface(panels).bind_to(model=cls)",
"def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing",
"def get_edit_lesson(self):\n self._render_edit_form_for(\n LessonRESTHandler, 'Lessons and Activities',\n annotations_dict=LessonRESTHandler.get_schema_annotations_dict(\n courses.Course(self).get_units()),\n delete_xsrf_token='delete-lesson')",
"def studio_view(self, context=None):\n log.info(\"Studio view called\")\n log.info(self)\n cls = type(self)\n\n def none_to_empty(data):\n return data if data is not None else ''\n edit_fields = (\n (field, none_to_empty(getattr(self, field.name)), validator)\n for field, validator in (\n (cls.title, 'string'),\n (cls.question,'string'),\n (cls.points, 'number'),\n (cls.weight, 'number'),\n (cls.max_attempts, 'number')\n )\n )\n context = {\n \"studio_state\": json.dumps(self.studio_state()),\n\n \"id\": self.location.name.replace('.', '_'),\n \"max_file_size\": getattr(\n settings, \"STUDENT_FILEUPLOAD_MAX_SIZE\",\n self.STUDENT_FILEUPLOAD_MAX_SIZE,\n ),\n \n 'fields': edit_fields \n \n \n }\n fragment = Fragment()\n fragment.add_content(\n render_template(\n 'templates/assignment/edit.html',\n context\n )\n )\n fragment.add_css(_resource(\"static/css/agea.css\"))\n fragment.add_javascript(_resource(\"static/js/src/studio.js\"))\n fragment.initialize_js('ExcelSheetAssessmentXBlock')\n return fragment",
"def render_step(self,step):\n data = self.storage.get_step_data(step)\n files = self.storage.get_step_files(step)\n form = self.get_form(step,data,files)\n self.clean_for_commit()\n return self.render(form)",
"def custom_actions(self, form_wizard_entry, request=None):",
"def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })",
"def view_vpanel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_virtualpanel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_vpanel_details_by_id(s, id)\n for i in panel_details:\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n project_id = panel_details.project_id\n panel = get_regions_by_vpanelid(s, id, version)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live yet\"\n bed = 'disabled'\n current_version = version\n print(type(version))\n current_version = round(current_version, 1)\n version = round(float(version), 1)\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = get_prev_versions_vp(s, id)\n choices = []\n for i in v_list:\n choices.append((i, i))\n\n if (current_version, current_version) not in choices:\n choices.append((current_version, current_version))\n\n form.versions.choices = choices\n form.versions.default = current_version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n\n return render_template('panel_view.html', table=json.dumps(table), panel=table, panel_name=panel_name,\n edit=edit, bed=bed,\n version=version, panel_id=id, message=message, url=url_for('panels.view_vpanel'),\n scope='Virtual', form=form)\n\n else:\n return redirect(url_for('panels.view_virtual_panels'))",
"def change_view(self, request, object_id, form_url='', extra_context=None):\n section = models.Section.objects.filter(pk=object_id)\\\n .prefetch_related(\"facility__experiment\",\n \"participants\")\\\n .first()\n exp_id = section.facility.experiment.id\n # create bulk forms\n bulk_add_change_frm = create_bulk_add_change_form(request, exp_id)\n bulk_del_frm = create_bulk_delete_form(request)\n # attach site id and bulk forms to 'extra_context'\n extra_context = extra_context or {}\n extra_context['section_id'] = object_id\n extra_context[\"bulk_add_change_form\"] = bulk_add_change_frm\n extra_context['bulk_delete_form'] = bulk_del_frm\n # print extra_context\n return super(SectionAdmin, self).change_view(\n request, object_id, form_url, extra_context=extra_context)",
"def get_edit_unit_lesson(self):\n\n key = self.request.get('key')\n\n exit_url = self.canonicalize_url('/dashboard')\n rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI)\n form_html = oeditor.ObjectEditor.get_html_for(\n self,\n UnitLessonTitleRESTHandler.SCHEMA_JSON,\n UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT,\n key, rest_url, exit_url,\n required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES)\n\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Course Outline')\n template_values[\n 'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION\n template_values['main_content'] = form_html\n self.render_page(template_values)",
"def editPanels(self, panels, **properties):\n\n if type(panels) is not list:\n panels = [panels]\n\n panels.reverse()\n\n panelsXML = []\n for panel in panels:\n panelXML = ET.fromstring(panel)\n\n multiTrackXML = panelXML.find(\"MultiTrackElements\")\n if multiTrackXML is not None:\n self.__saveSetupData(multiTrackDataXMLS=ET.tostring(multiTrackXML), properties=panelXML.attrib)\n\n panelsXML.append({\"properties\":panelXML.attrib, \"multiTrackXML\":multiTrackXML})\n\n if panelsXML:\n if (OSUtils.type == OSUtils.LINUX):\n paths = []\n for panel in panelsXML:\n properties = panel['properties'].copy()\n properties['frame'] = '#'\n mode = Mode(properties.get('show', None), properties.get('sequence', None))\n path = mode.get('[recipeCompedFile]', properties)\n paths.append(path)\n if not self.fileServiceLocal.exists(path):\n raise utils.FlixException(msg=\"Missing File: %s\"%path)\n command = Mode().get(\"[editImageCommand]\")\n log('Edit command %s' % command)\n os.system(command + \" \" + ' '.join(paths))\n else:\n Photoshop().createPhotoshopFileForPanels(panelsXML)\n\n return \"Done\"",
"def pincode_tab(request, req_type):\n\n #based on the request type fetches the template to be rendered,\n #the form to be displayed and the method object to be called\n # form =PincodeEditForm\n # template_name =\n\n template_name = \"edit_pin.html\"\n form = PincodeEditForm\n method_call = edit_pincodes\n if req_type == \"edit_pin\":\n template_name = \"edit_pin.html\"\n form = PincodeEditForm\n method_call = edit_pincodes\n if req_type == \"add_pin\":\n template_name = \"add_pin.html\"\n form = PincodeAddForm\n method_call = add_pin\n elif req_type == \"postalcat_act\":\n template_name = \"pincode_postalcat_act.html\"\n form = PincodeClientActForm\n method_call = postalcatg_act\n elif req_type == \"return_pin\":\n template_name = \"return_pin.html\"\n form = PincodeReturnForm\n method_call = return_pincodes\n elif req_type == \"firstmile_pin\":\n template_name = \"firstmile_pin.html\"\n form = PincodeFirstmileForm\n method_call = firstmile_pincodes\n #intialise the dict to be rendered on html\n return_dict = dict()\n if request.method == \"POST\":\n form = form(request.POST, user=request.user)\n if form.is_valid():\n clean_data = form.cleaned_data\n \"\"\"\n request type specific method called with form cleaned_data\n the response having the edited/added/activated pin-codes updated\n in the return_dict for displaying on success on the html\n \"\"\"\n return_dict.update(method_call(clean_data, request.user.username))\n else:\n form = form(user = request.user)\n #return dict updated with the form and the req_type\n return_dict.update({'form': form, 'tab_type' : req_type})\n return render_to_response(template_name, return_dict,\n context_instance=RequestContext(request))",
"def render(self):\n _ = self.request.getText\n form = self.request.form\n \n if form.has_key('cancel'):\n # User canceled\n return self.page.send_page(self.request)\n\n try:\n if not self.allowed():\n raise ActionError(_('You are not allowed to edit this page.'))\n elif not self.page.exists():\n raise ActionError(_('This page is already deleted or was never created!'))\n \n self.package()\n except ActionError, e:\n return self.page.send_page(self.request, msg=e.args[0])",
"def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')",
"def customer_group_edit_view(h):\n global html\n html = h\n \n group_id = html.var(\"group_id\"); #get the user id from the url query\n company_name = html.var(\"company_name\");\n\n common_elements = customer_common_elements(group=True)\n \n css_list = common_elements[\"css_list\"]\n\n javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Edit Customer Group\", \"customer_group_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div>\n <form action=\"customer_group_put_ajax.py\" method=\"get\" id=\"add_customer_group_form\" \n name=\"add_customer_group_form\" autocomplete=\"on\" >\n <div class=\"form-div\">\n <table class=\"tt-table\" cellspacing=\"0\" cellpadding=\"0\" width=\"100%\">\n <tr>\n <th class=\"cell-title\">Add Customer Group</th>\n </tr>\n </table>\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"group_name\">Customer Company Name</label>\n <input type=\"text\" id=\"group_name\" name=\"group_name\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_telephone\">Company Telephone</label>\n <input type=\"text\" id=\"company_telephone\" name=\"company_telephone\" title=\"Please Enter Company Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_fax\">Company Fax</label>\n <input type=\"text\" id=\"company_fax\" name=\"company_fax\" title=\"Please Enter Fax information.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_website\">Company Website</label>\n <input type=\"text\" id=\"company_website\" name=\"company_website\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_sales_contact\">Company Sales Contact</label>\n <input type=\"text\" id=\"company_sales_contact\" name=\"company_sales_contact\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_purchase_contact\">Company Purchase Contact</label>\n <input type=\"text\" id=\"company_purchase_contact\" name=\"company_purchase_contact\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_business\">Company Business</label>\n <select id='company_business' name='company_business'>\n <option value=\"OEM\">OEM</option>\n <option value=\"VAR\">VAR</option>\n <option value=\"Distributor\">Distributor</option>\n <option value=\"Retailer\">Retailer</option>\n <option value=\"Reseller\">Reseller</option>\n <option value=\"Other\">Other</option>\n </select>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_business_type\">Company Business Type</label>\n <select id='company_business_type' name='company_business_type'>\n <option value=\"Private\">Personal</option>\n <option value=\"Public\">Public</option>\n </select>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_sales_email\">Company Sales Email</label>\n <input type=\"text\" id=\"company_sales_email\" name=\"company_sales_email\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_purchase_email\">Company Purchase Email</label>\n <input type=\"text\" id=\"company_purchase_email\" name=\"company_purchase_email\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_reg_number\">Company Registration Number</label>\n <input type=\"text\" id=\"company_reg_number\" name=\"company_reg_number\"/>\n </div>\n <div class=\"row-elem\">\n 
<label class=\"lbl lbl-big\" for=\"company_vat_number\">VAT Information</label>\n <input type=\"text\" id=\"company_vat_number\" name=\"company_vat_number\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_address\">Company Address</label>\n <textarea type=\"text\" id=\"company_address\" name=\"company_address\" /> </textarea>\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Save</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"cancel\">Cancel</span></button>\n </div>\n </form>\n </div>\n \"\"\" \n customer_string += \"\"\"\n <script>\n put_customer_group_values(\"%s\", \"%s\");\n post_customer_groups(action=\"put\", group_id = \"%s\");\n </script>\n \"\"\" %(group_id, company_name, group_id)\n html.write(customer_string)\n html.new_footer()",
"def admin_panel(request):\n if not request.user.is_superuser: # security to redirect user that aren't admin\n return redirect(reverse('accueil'))\n context = context_general()\n return render(request, 'administration/admin_panel.html', context)",
"def create_wizard(mod):\n wiz = WC.Wizard(u\"Isotropic linear elastic study\", mod)\n WC.add_model_page(wiz, [\n WC.Mode3D,\n WC.PlaneStress,\n WC.PlaneStrain,\n WC.AxisSymmetric,\n ])\n\n exp_store = WC.ExpStore()\n exp_store.register(WC.ExpStore.smesh, SMeshExp())\n exp_store.register(WC.ExpStore.geom, GeomExp())\n WC.add_mesh_page(wiz, mod, exp_store)\n title = u\"Young's modulus and Poisson ratio definitions\"\n WC.add_material_page(wiz, title, [\n WC.YoungModulus(),\n WC.PoissonRatio(),\n ])\n WC.add_boundaries_page(wiz)\n add_pressure_page(wiz)\n WC.add_command_file_page(wiz, FinalPage(mod))\n return wiz",
"def post(self, request, page_pk=None):\n if page_pk is not None:\n try:\n page = request.website.pages.select_related()\\\n .get(pk=page_pk)\n app_page = page.app_page_object\n except Page.DoesNotExist:\n raise ErrorResponse(status.HTTP_400_BAD_REQUEST,\n {'msg': MESSAGES.get('default_error', \"\")})\n else:\n app_page = request.page.app_page_object\n page = request.page\n\n # Page App Admin Form\n PageAppForm = app_page.get_admin_form()\n form = PageAppForm(request.POST, instance=app_page)\n \n if form.is_valid():\n new_app_page = form.save()\n # If page is the current page,\n # refresh the layout section\n if request.page == page:\n # Get layout slug\n placeholder_slug_items = check_placeholder_html_id(\n page.placeholder_slug)\n layout_section_slug = placeholder_slug_items[0]\n # Rendering layout section\n rendering_context = RenderingContext(request)\n html_rendering = rendering_context.get_html_layout(\n layout_section_slug)\n # Send response\n data_context = {'msg': MESSAGES.get('app_edit_success', \"\"),\n 'html': html_rendering,\n 'layout_section_slug': layout_section_slug}\n # Check if the page manager have to be displayed\n if page_pk:\n data_context['refresh_pages_list'] = True\n \n response = Response(status.HTTP_200_OK,\n data_context)\n else:\n data_context = {'msg': MESSAGES.get('app_edit_success', \"\")}\n # Check if the page manager have to be displayed\n if page_pk:\n data_context['refresh_pages_list'] = True\n response = Response(status.HTTP_200_OK,\n data_context)\n return self.render(response)\n # render_page = page.render_page(request)\n\n # if render_page.status_code == 200:\n # response = Response(status.HTTP_200_OK,\n # {\"msg\": MESSAGES.get('app_edit_success', \"\"),\n # 'html': render_page.content,\n # 'medias': render_page.medias})\n # elif render_page.status_code in [301, 302]:\n # response = Response(status.HTTP_202_ACCEPTED,\n # {\"msg\": MESSAGES.get('redirection', \"\"),\n # 'location': render_page['location']})\n\n # If form not valid => reload the edit form with messages\n else:\n data_context = {'form': form,\n 'object': app_page}\n if page_pk:\n data_context['page'] = page\n\n html = render_to_string('administration/app/app-edit.html',\n data_context,\n context_instance=RequestContext(request))\n raise ErrorResponse(status.HTTP_400_BAD_REQUEST,\n {'msg': MESSAGES.get('invalid_data', \"\"),\n 'html': html})",
"def customer_add_view(h):\n global html\n html = h\n \n common_elements = customer_common_elements()\n \n css_list = common_elements[\"css_list\"]\n\n javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Add Customers\", \"customer_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div id=\"grid_view_div\">\n <div class=\"yo-tabs\">\n <ul>\n <li>\n <a class=\"active\" href=\"#content_1\" id=\"active_host_tab\">Customer Application Form</a>\n </li>\n <li>\n <a href=\"#content_2\" id=\"disable_host_tab\">Bulk Upload</a>\n </li>\n <li>\n <a href=\"#content_3\" id=\"discovered_host_tab\">CRM Import</a>\n </li>\n </ul>\n <div id=\"content_1\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form action=\"customer_post_ajax.py\" method=\"get\" id=\"add_customer_form\" name=\"add_customer_form\" autocomplete=\"on\" >\n <div class=\"form-div\" style=\"top:30px;\">\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"username\">Customer User Name</label>\n <input type=\"text\" id=\"user_name\" name=\"user_name\" \n title=\"Choose Unique User Name. <br/>Must be at least 5 characters.\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"password\">Password</label>\n <input type=\"password\" id=\"password\" name=\"password\" title=\"Must be at least 8 characters. \"/>\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"groups\">Customer Organisation(Role)</label>\n \"\"\"\n customer_string += (customer_group_customer_widget())\n customer_string += \"\"\"\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"first_name\">First Name</label>\n <input type=\"text\" id=\"first_name\" name=\"first_name\" title=\"Please Enter First name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"last_name\">Last Name</label>\n <input type=\"text\" id=\"last_name\" name=\"last_name\" title=\"Please Enter Last name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_name\">Company</label>\n <input type=\"text\" id=\"company_name\" name=\"company_name\" title=\"Please Enter Company Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"designation\">Designation</label>\n <input type=\"text\" id=\"designation\" name=\"designation\" title=\"Please Enter Designation.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"mobile_no\">Mobile Number</label>\n <input type=\"text\" id=\"mobile_no\" name=\"mobile_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"telephone_no\">Telephone Number</label>\n <input type=\"text\" id=\"telephone_no\" name=\"telephone_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"fax\">Fax</label>\n <input type=\"text\" id=\"fax\" name=\"fax\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"email_id\">E-Mail ID</label>\n <input type=\"text\" id=\"email_id\" name=\"email_id\" title=\"Please Enter E-Mail ID.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"city_id\">City</label>\n <input 
type=\"text\" id=\"city_id\" name=\"city_id\" title=\"Please Enter City Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"state_id\">State</label>\n <input type=\"text\" id=\"state_id\" name=\"state_id\" title=\"Please Enter State.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"country_id\">Country</label>\n <input type=\"text\" id=\"country_id\" name=\"country_id\" title=\"Please Enter Country.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"usage\">Usage</label>\n <select id='usage' name='usage'>\n <option value=0>Personal</option>\n <option value=1>Commercial</option>\n </select>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"purpose\">Purpose</label>\n <input type=\"text\" id=\"purpose\" name=\"purpose\" title=\"Please Enter Purpose.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"address\">Address</label>\n <textarea id=\"address\" name=\"address\" title=\"Please Enter own Address.\"></textarea>\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Save</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"cancel\">Cancel</span></button>\n </div>\n </form>\n </div>\n <!-- container tab 2 -->\n <div id=\"content_2\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form action=\"#\" method=\"post\">\n <label for=\"bulk\">Upload Customers</label>\n <input name=\"bulk\" id=\"bulk\" type=\"file\" />\n </form>\n </div>\n <!-- container tab 3 -->\n <div id=\"content_3\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form class=\"form-body\" id=\"crm_conn\" action=\"#\" method=\"get\">\n <div class=\"form-div\" style=\"top:30px;\">\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_link\">CRM Link Location</label>\n <input type=\"text\" name=\"crm_link\" id=\"crm_link\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_user\">CRM Username</label>\n <input type=\"text\" name=\"crm_user\" id=\"crm_user\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_pass\">CRM Password</label>\n <input type=\"password\" name=\"crm_pass\" id=\"crm_pass\" />\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Test Connection</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"add\">Start Import</span></button>\n </div>\n </form>\n </div>\n </div>\n </div>\n \"\"\" \n customer_string += \"\"\"\n <script>\n post_customers();\n </script>\n \"\"\"\n html.write(customer_string)\n html.new_footer()",
"def edit_template(self):\n return '{}/{}.html'.format(self.object_name, self.edit_endpoint)"
]
| [
"0.6511815",
"0.62808156",
"0.5995787",
"0.5682653",
"0.5631195",
"0.55523235",
"0.55235845",
"0.5493029",
"0.5487577",
"0.5467368",
"0.5455909",
"0.5455815",
"0.54286665",
"0.5413771",
"0.5399437",
"0.5357053",
"0.5355894",
"0.5342035",
"0.5334771",
"0.53329116",
"0.5325167",
"0.53232145",
"0.52174866",
"0.5177624",
"0.51659846",
"0.5150086",
"0.51398456",
"0.51344025",
"0.5117835",
"0.5113058"
]
| 0.69339293 | 0 |
Method to add panel to db. Uses panel name and project ID to create a panel in the db and returns the unique ID for the panel | def add_panel():
panel_name = request.json['panel_name']
project_id = request.json['project_id']
panel_id = create_panel_query(s, project_id, panel_name, current_user.id)
return jsonify(panel_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createPanel(self, LibraryID, Name, **kwargs):\n if self.request(\"createPanel\", LibraryID=LibraryID, Name=Name, **kwargs) is None:\n return None\n return self.json_response[\"Result\"][\"PanelID\"]",
"def add_panel(self, panel):\n assert panel.PANEL_ID not in self.panels\n assert not self.tools, \"tools must be added after panels\"\n self.panels[panel.PANEL_ID] = panel\n panel.register_panel(self)",
"def create_panel_process():\n form = CreatePanelProcess()\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n version = get_current_preftx_version(s, preftx_id)\n if not version:\n version = 0\n if make_live == \"on\":\n make_preftx_live(s, preftx_id, version + 1, current_user.id)\n make_panel_live(s, panel_id, 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n form.project.choices = get_project_choices(s, current_user.id)\n return render_template('panel_createprocess.html', form=form, panel_id=\"main\",\n url=url_for('panels.create_panel_process'))",
"def add_vp():\n vp_name = request.json['vp_name']\n panel_id = request.json['panel_id']\n vp_id = create_virtualpanel_query(s, vp_name, panel_id)\n if vp_id != -1:\n lock_panel(s, current_user.id, panel_id)\n return jsonify(vp_id)",
"def add_to_starlims(vpanelid):\n details = get_vpanel_details_by_id(s, vpanelid)\n print(details)\n version = round(details.current_version,1)\n panel_name = 'Analysis: ' + details.name + ' v' + str(version) + ' (' + details.panel_name + ')'\n print(len(panel_name))\n if len(panel_name) > 50:\n #todo do something with the name here!\n pass\n gene_result = get_genes_by_vpanelid(s, vpanelid, version)\n gene_list = list()\n for g in gene_result:\n gene_list.append(g.name)\n starlims = StarLims.StarLimsApi(test=True)\n testcode = starlims.add_new_test(panel_name, 'NGS Analysis', details.project_name, gene_list)\n if testcode > 0:\n add_testcode(s, vpanelid, details.current_version, testcode)\n\n return testcode",
"def add_plant(db_path: str, plant: Plant) -> None:\n query = f'INSERT INTO plants (name, family_name, metadata) VALUES (\"{str(plant.name)}\", \"{str(plant.family_name)}\", \"{str(plant.metadata)}\")'\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'company_data.db'))\n curr: Cursor = conn.cursor()\n try:\n curr.execute(query)\n except sqlite3.IntegrityError:\n raise ValueError(\"Error, plant already exists in database.\")\n\n conn.commit()\n curr.close()\n conn.close()",
"def new_project(self, project_name: str) -> str:\n if project_name in [NO_PROJECT_NAME, \"\"]:\n raise MephistoDBException(f'Invalid project name \"{project_name}')\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"INSERT INTO projects(project_name) VALUES (?);\", (project_name,)\n )\n project_id = str(c.lastrowid)\n return project_id\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException()\n elif is_unique_failure(e):\n raise EntryAlreadyExistsException(\n f\"Project {project_name} already exists\"\n )\n raise MephistoDBException(e)",
"def add_project(self, project):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO projects VALUES (null, ?, ?, ?, ?)\", (project['owner'],\n project['title'],\n datetime.now(), datetime.now(),))\n\n self.conn.commit()\n project_id = cursor.lastrowid\n\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?,?)\", (project['owner'], project_id),)\n self.conn.commit()\n return self.get_project(project_id)",
"def addPlayer(self, steamid, name):\r\n self.execute(\"INSERT INTO Player (steamid, popup, credits, name, lastconnected) VALUES (?,?,?,?,?)\", steamid, int(popupStatus), int(startCredits), name, int(time.time()))\r\n return self.cursor.lastrowid",
"def add(self, name):\n self.rpc.call(MsfRpcMethod.DbAddWorkspace, [name])",
"def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return",
"def add_task():\n found = False\n project_id = None\n task = request.form['task']\n project = request.form['project']\n \n if not task:\n return redirect('/')\n\n if not project:\n project = 'Tasks'\n\n projects = Projects.query.all()\n\n for proj in projects:\n if proj.project_name == project:\n found = True\n\n # add the project if not in database already\n if not found:\n add_project = Projects(project, True)\n db.session.add(add_project)\n db.session.commit()\n projects = Projects.query.all()\n\n # set the active tab\n for proj in projects:\n if proj.project_name == project:\n project_id = proj.project_id\n proj.active = True\n else:\n proj.active = False\n\n status = bool(int(request.form['status']))\n\n # add the new task\n new_task = Tasks(project_id, task, status)\n db.session.add(new_task)\n db.session.commit()\n return redirect('/')",
"def panel(self):\n data_to_track = {}\n for possession in self.possessions_to_track_panel:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_panel:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"panel\",\n data_to_track,\n str(self.id),\n self.group,\n str(self.round)])",
"def add_project(self, project):\n project_id = storage_utils.get_next_id(self._storage_location)\n self.save_project(project_id, project)\n return project_id",
"def register(self, panel):\n new_instance = panel()\n new_event_type = new_instance._meta.event_type\n if new_event_type in self.__class__._panels:\n raise Exception(\"Two panels with the same event type: %s\" % \\\n new_event_type)\n self.__class__._panels[new_event_type] = new_instance\n self.storage.register_event(new_event_type, new_instance._meta.dimensions.keys())",
"def add_department():\n form = AddDepartment()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_department = Department(name=form.name.data)\n db.session.add(new_department)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Department already exists!', 'warning')\n return redirect(url_for('add_department'))\n\n flash(f'Department {form.name.data} created!', 'success')\n return redirect(url_for('home'))\n\n flash('Name not defined.', 'warning')\n return render_template('department/department_add.html', form=form)",
"def add_piece(project_id):\n \n form=PieceForm()\n if 'username' in session:\n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n \n \n if request.method == 'POST':\n user = mongo.db.user.find_one({'username': session['username']})\n username = user['username']\n \n assignee = request.form.get('username')\n task = request.form.get('task')\n status = request.form.get('status')\n description = request.form.get('description')\n comment = request.form.get('comment')\n \n projects = mongo.db.projects\n project = projects.find_one_and_update({'_id': ObjectId(project_id) },\n {'$push':\n {'pieces':\n {'date': datetime.utcnow(),\n 'username': assignee,\n 'status': status,\n 'task': task,\n 'submitted': False\n }\n }\n })\n \n pieces = mongo.db.project_pieces\n pieces.insert_one({'user_id': user['_id'],\n 'project_id': project['_id'],\n 'project_title': project['title'],\n 'owner': username,\n 'task': task,\n 'description': description,\n 'status': status,\n 'date': datetime.utcnow(),\n 'due_date': datetime.strptime(form.due_date.data, \"%d/%m/%Y\"),\n 'submit_date': datetime.utcnow(),\n 'assignee': assignee,\n 'comment': comment\n \n }) \n \n flash(f'{username}, your project has been updated and the piece, \"{task}\" has been sent to {assignee}.', 'success'\n )\n return redirect(url_for('dashboard'))\n \n return render_template('pages/addpiece.html', form=form, project=project) \n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))",
"def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")",
"def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))",
"def load_panel(panel_path, adapter, **kwargs):\n panel_lines = get_file_handle(panel_path)\n version = kwargs.get(\"version\")\n\n try:\n # This will parse panel metadata if includeed in panel file\n panel_info = get_panel_info(\n panel_lines=panel_lines,\n panel_id=kwargs.get(\"panel_id\"),\n institute=kwargs.get(\"institute\"),\n version=version,\n date=kwargs.get(\"date\"),\n maintatiner=kwargs.get(\"maintainer\"),\n display_name=kwargs.get(\"display_name\"),\n )\n except Exception as err:\n raise err\n\n if panel_info.get(\"version\"):\n version = float(panel_info[\"version\"])\n\n panel_id = panel_info[\"panel_id\"]\n display_name = panel_info[\"display_name\"] or panel_id\n institute = panel_info[\"institute\"]\n date = panel_info[\"date\"]\n\n if not institute:\n raise SyntaxError(\"A Panel has to belong to a institute\")\n\n # Check if institute exists in database\n if not adapter.institute(institute):\n raise SyntaxError(\"Institute {0} does not exist in database\".format(institute))\n\n if not panel_id:\n raise SyntaxError(\"A Panel has to have a panel id\")\n\n if version:\n existing_panel = adapter.gene_panel(panel_id, version)\n else:\n # Assuming version 1.0\n existing_panel = adapter.gene_panel(panel_id)\n version = 1.0\n LOG.info(\"Set version to %s\", version)\n\n if existing_panel:\n LOG.info(\"found existing panel\")\n if version == existing_panel[\"version\"]:\n LOG.warning(\"Panel with same version exists in database\")\n LOG.info(\"Reload with updated version\")\n raise SyntaxError()\n display_name = display_name or existing_panel[\"display_name\"]\n institute = institute or existing_panel[\"institute\"]\n\n # Check if maintainers exist in the user database\n maintainer = kwargs.get(\"maintainer\")\n if maintainer is not None:\n if adapter.user(user_id=maintainer) is None:\n LOG.warning(\"Maintainer %s does not exist in user database\", maintainer)\n raise Abort()\n\n try:\n parsed_panel = parse_gene_panel(\n path=panel_path,\n institute=institute,\n panel_type=kwargs.get(\"panel_type\"),\n date=date,\n version=version,\n panel_id=panel_id,\n maintainer=maintainer,\n display_name=display_name,\n )\n adapter.load_panel(parsed_panel=parsed_panel)\n except Exception as err:\n raise err",
"def add_user(self, username, password, name, department):\n db = sqlite3.connect(self.name)\n cur = db.cursor()\n cur.execute('SELECT MAX(ID) FROM users')\n maxid = cur.fetchone()[0]\n usid = maxid + 1 if maxid is not None else 0\n date = time.strftime('%Y.%m.%d')\n cur.execute(\n 'INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?)',\n (usid, username, password, \"user\", name, department, 28)\n )\n db.commit()\n db.close()",
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def createNewDashboard(userId, name):\n if Dashboard.objects(analystId=userId,name=name):\n return\n newDash = Dashboard()\n newDash.name = name\n newDash.analystId = userId\n newDash.save()\n return newDash",
"def view_panels(id=None):\n if not id:\n id = request.args.get('id')\n\n if id:\n panels = get_panels_by_project_id(s, id)\n else:\n panels = get_panels(s)\n result = []\n project_name = \"All\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n status = check_panel_status(s, row[\"panelid\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked(s, row[\"panelid\"])\n row['permission'] = permission\n row['locked'] = locked\n\n if id:\n project_name = row['projectname']\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTablePanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=project_name)",
"def add_Plante(id):\n f = PlanteForm()\n return render_template(\n \"create-plante.html\",\n form = f,\n title = \"Nouvelle Plante\",\n param = \"create\",\n parterre = id)",
"def create_panel(self):\n return\n # return Panel(self)",
"def add_department():\r\n check_admin()\r\n\r\n add_department = True\r\n\r\n form = DepartmentForm()\r\n if form.validate_on_submit():\r\n department = Department(name=form.name.data,\r\n description=form.description.data)\r\n try:\r\n # add department to the database\r\n db.session.add(department)\r\n db.session.commit()\r\n flash('You have successfully added a new department.')\r\n except:\r\n # in case department name already exists\r\n flash('Error: department name already exists.',category='error')\r\n\r\n # redirect to departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n # load department template\r\n return render_template('admin/departments/department.html', action=\"Add\",\r\n add_department=add_department, form=form,\r\n title=\"Add Department\")",
"def add_new_project(title, description, max_grade):\n QUERY = \"\"\"INSERT into Projects (title, description, max_grade) VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (title, description, max_grade))\n db_connection.commit()\n print \"Success! Add %s project, and here is the description: %s, and max grade: %s\"\\\n %(title, description, max_grade)",
"def on_add(self, project, name, **kwargs):\n pass",
"def add_department():\n\tcheck_admin()\n\n\tadd_department = True\n\n\tform = DepartmentForm()\n\tif form.validate_on_submit():\n\t\tdepartment = Department(name=form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add department to the database\n\t\t\tdb.session.add(department)\n\t\t\tdb.session.commit()\n\t\t\tflash(\"You have successsfully added a new department.\")\n\t\texcept:\n\t\t\t#incase the department already exists\n\t\t\tflash(\"Error: department already exists.\")\n\t#once the admin creates a new department,they will be redirected to the departments page\n\treturn render_template('admin/departments/department.html',action=\"Add\", add_department= add_department,form=form,title = \"Add Department\")"
]
| [
"0.71067053",
"0.6671714",
"0.6179229",
"0.6139341",
"0.59658146",
"0.57764566",
"0.56813574",
"0.5659387",
"0.55523556",
"0.5535822",
"0.5534156",
"0.55321145",
"0.54948974",
"0.5462264",
"0.5452474",
"0.5366363",
"0.53566235",
"0.5349205",
"0.5336535",
"0.53030396",
"0.5300113",
"0.5285679",
"0.52780503",
"0.5271611",
"0.5270988",
"0.5244799",
"0.52429336",
"0.52299047",
"0.5204782",
"0.51947874"
]
| 0.81717074 | 0 |
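The add_panel record above only shows the Flask route; the helper `create_panel_query(s, project_id, panel_name, current_user.id)` it delegates to is not part of the record. Below is a minimal, hedged sketch of what such a helper might look like with SQLAlchemy — the `Panel` model, its columns, and the session setup are assumptions made purely for illustration and are not taken from the source.

```python
# Hedged sketch only: the Panel model, its columns, and the in-memory engine
# are illustrative assumptions; the record above shows only the Flask route.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Panel(Base):
    __tablename__ = "panels"
    id = Column(Integer, primary_key=True)        # unique ID returned to the caller
    name = Column(String, nullable=False)
    project_id = Column(Integer, nullable=False)
    created_by = Column(Integer, nullable=False)  # id of the user creating the panel


def create_panel_query(session, project_id, panel_name, user_id):
    """Insert a panel row and return its primary key."""
    panel = Panel(name=panel_name, project_id=project_id, created_by=user_id)
    session.add(panel)
    session.commit()  # flushes the insert and assigns panel.id
    return panel.id


if __name__ == "__main__":
    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    print(create_panel_query(session, project_id=1, panel_name="demo panel", user_id=42))
```

In this reading, the route's `jsonify(panel_id)` simply returns whatever primary key the helper hands back.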
Method to delete panel from db. If the panel has not been made live, it can be removed from the db. | def remove_panel():
panel_name = request.json['panel_name']
remove_panel_query(s, panel_name)
return jsonify('complete') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def onDelToolClicked(self, event):\n i_selected = self.db_listBox.GetSelection()\n if i_selected >= 0:\n manager = self.getManager()\n if manager:\n try:\n data = manager.getData()\n db_record = data['records'][i_selected]\n manager.deleteDB(parent=self, db_guid=db_record['guid'])\n\n self.refreshDBList()\n except:\n log_func.fatal(u'Error delete DB engine')\n event.Skip()",
"def remove():\n\n db_remove()",
"def _on_delete_plot_panel(self, event):\n # name = event.name\n caption = event.caption\n if self.cb_plotpanel is not None:\n pos = self.cb_plotpanel.FindString(str(caption))\n if pos != wx.NOT_FOUND:\n self.cb_plotpanel.Delete(pos)\n self.enable_append()",
"def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()",
"def deletePlayers():\n DB = dbc()\n DB.cursor().execute('DELETE FROM players')\n DB.commit()\n DB.close()",
"def deletePlayers():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from players;\")\n db_conn.commit()\n db_conn.close()",
"def deletePlayers():\n #deletes the contents of table players\n DB().execute(\"DELETE FROM players\", True)",
"def delete_plante(id):\n plante = get_plante(id)\n nom=plante.get_name()\n db.session.delete(plante)\n get_parterre(plante.get_parterre()).delete_plante(plante)\n p = Actions(\n contenu = \"Suppression de la plante \"+nom + \" au parterre \"+ get_parterre(plante.get_parterre()).get_name(),\n liste = 1\n )\n db.session.add(p)\n db.session.commit()\n return redirect(url_for(\"parterre\"))",
"def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()",
"def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()",
"def deletePlayers():\n db = connect()\n db_cursor = db.cursor()\n query = \"DELETE FROM players\"\n db_cursor.execute(query)\n db.commit()\n db.close()",
"def delete_from_db(self):\n self.db.newsdb.delete_one({'_id': ObjectId(self.id)})",
"def deletePlayers():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM players\")\n dbconnection.commit()\n dbconnection.close()",
"def deletePlayers():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM player\")\n dbConn.commit()\n dbConn.close()",
"def delete_players():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM players\")\n DB.commit()\n DB.close()",
"def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()",
"def deletePlayers():\n db = connect()\n c = db.cursor()\n query = (\"DELETE FROM players;\")\n c.execute(query)\n db.commit()\n db.close()",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def deletePlayers():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"DELETE FROM players;\"\"\")\n conn.commit()",
"def deleteModFrame(self,name):\n del self.data.activeMod[name]",
"def deletePlayers():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM players\") \n db.commit() \n db.close()",
"def remove_data(self):\n db.session.delete(self)\n db.session.commit( )",
"def deletePlayers():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM tournament\")\n DB.commit()\n DB.close()",
"def deletePlayers():\n conn, cur = connect()\n cur.execute(\"DELETE FROM PLAYERS;\")\n conn.commit()\n conn.close()",
"def __del__(self):\n print(f\"{self.fullname()} deleted from database.\")",
"def deletePlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM players\")\n conn.commit()\n conn.close()",
"def delete(self, name):\n instance = self.get_one_instance('name', name)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n res = delete_in_db(instance)\n\n if res != 'deleted':\n set_session_var('errors', str(res))\n else:\n set_session_var('success', res)\n\n return True",
"def delete(self):\n\n cursor = self._conn.cursor()\n cursor.execute(\"DELETE FROM saves\")\n self._conn.commit()",
"def delete(self)->None:\n database.cursor.execute(\n \"DELETE FROM {} WHERE id={}\".format(self.table_name, self.id))\n database.connection.commit()",
"def delete(self)->None:\n database.cursor.execute(\n \"DELETE FROM {} WHERE id = %s\".format(self.table_name), (self.id))\n database.connection.commit()"
]
| [
"0.6200058",
"0.60520643",
"0.60312414",
"0.5943299",
"0.5910845",
"0.5905584",
"0.58856493",
"0.5877893",
"0.58686376",
"0.58686376",
"0.5861024",
"0.5856986",
"0.5811765",
"0.5786195",
"0.57745904",
"0.57610625",
"0.5759628",
"0.57585543",
"0.57567686",
"0.5725289",
"0.5689384",
"0.56802005",
"0.567237",
"0.5667507",
"0.56622136",
"0.56519043",
"0.56479144",
"0.5637685",
"0.563235",
"0.56286603"
]
| 0.7019545 | 0 |
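The remove_panel record above likewise shows only the route; the query described (delete only panels that were never made live) lives in the unseen `remove_panel_query(s, panel_name)` helper. A hedged sketch of that behaviour follows — the table name, column names, and the `live` flag are assumptions for illustration, not details from the source.

```python
# Hedged sketch only: panel table layout and the "live" flag are assumed;
# the record above shows only the Flask route calling remove_panel_query.
import sqlite3


def remove_panel_query(conn, panel_name):
    """Delete a panel by name, but only if it was never made live."""
    cur = conn.execute(
        "DELETE FROM panels WHERE name = ? AND live = 0", (panel_name,)
    )
    conn.commit()
    return cur.rowcount  # 0 means the panel was live (or absent) and left untouched


if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE panels (id INTEGER PRIMARY KEY, name TEXT, live INTEGER)")
    conn.execute("INSERT INTO panels (name, live) VALUES ('draft panel', 0)")
    print(remove_panel_query(conn, "draft panel"))  # -> 1 row removed
```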
Method to allow a file of gene names to be uploaded for a panel. The file is read in JavaScript on the client side and a list of gene names is sent to the method within the AJAX request. The create_panel_get_tx() method is applied to each gene in the list and the HTML is combined before being returned to the client side for display. | def upload_multiple():
gene_list = request.json['gene_list']
project_id = request.json['project_id']
all_message = ''
html = ''
added_list = []
button_list = ''
for gene in sorted(gene_list):
if gene == "" or gene in added_list:
continue
dct = create_panel_get_tx(gene, project_id)
if dct["message"] == "added":
added_list.append(gene)
else:
all_message += dct["message"]
try:
html += dct["html"]
except KeyError:
pass
try:
button_list += dct["button_list"]
except KeyError:
pass
if len(added_list) > 0:
added_message = render_template("added_list.html", added_list=enumerate(added_list), length=len(added_list))
all_message += added_message
return jsonify({'message': all_message, 'html': html, 'button_list': button_list}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uploader():\n if request.method == 'POST':\n # from the upload page, the clients input is called from the text-area of the form. \n clientinput_unic = request.form['list_genes']\n #print \"raw input client:\\n\", clientinput_unic\n clientinput_unic = re.sub('[^a-zA-Z0-9 \\n\\r]', '', clientinput_unic) # all symbols are removed from unicode\n clientinput_genelist = clientinput_unic.replace('\\r','').split('\\n') # a list is created\n clientinput_genelist = [x.encode('UTF8') for x in clientinput_genelist] # removing the Unicode from the list\n # Input send to tool of the latest version. Current = version 1.1\n HPOdatabase, output_path = config_yaml()\n phen_df, genescores_df, numbers, outfile_phen, outfile_genes, outfile_phenpergenes, accepted_df, dropped_df, Q, missing, dupli = tool11(clientinput_genelist, HPOdatabase, output_path)\n print \"missing:\", missing, len(missing), \"dupli:\", len(dupli), \"dropped:\", len(dropped_df), \"accepted:\", len(accepted_df)\n # information about the variables are also send to the html pages, as the result page has different sections shown dependent on the clients input.\n number_dropped = len(dropped_df)\n number_accepted = len(accepted_df)\n # if dataframes are not empty, an html table is made from them.\n try:\n \taccepted = accepted_df.to_html()\n except:\n \taccepted = accepted_df\n try:\n \tdropped = dropped_df.to_html()\n except:\n \tdropped = dropped_df\n\n return render_template('result.html', genes_in_HPO=countgenes(),output_phen=outfile_phen,output_genes=outfile_genes, output_phenpergenes=outfile_phenpergenes, accepted=accepted, dropped=dropped, Q=Q, missing=missing, dupli=dupli, number_accepted=number_accepted, number_dropped=number_dropped)",
"def create_panel_get_tx(gene_name=None, project_id=None):\n json = False\n if not gene_name:\n gene_name = request.json['gene_name']\n project_id = request.json['project_id']\n json = True\n exists = isgene(s, gene_name)\n if exists:\n if exists != gene_name:\n message = render_template(\"wrong_gene_name_message.html\", gene_name=gene_name, match_name=exists)\n if json:\n return jsonify({'html': '', 'button_list': '', 'message': message})\n else:\n return {'html': '', 'button_list': '', 'message': message} #alternate gene name is not automatically added\n else:\n message = render_template(\"success_message.html\", gene_name=gene_name)\n gene_name = exists\n gene_id = get_gene_id_from_name(s, gene_name)\n preftx_id = get_preftx_by_gene_id(s, project_id, gene_id)\n upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)\n all_tx = get_tx_by_gene_id(s, gene_id)\n\n gene_button = render_template(\"gene_button.html\", gene_name=gene_name, gene_id=gene_id, added=False)\n html = render_template(\"tx_list.html\", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,\n upcoming=upcoming_preftx, disabled=False)\n\n if json:\n return jsonify({'html': html, 'button_list': gene_button, 'message': message})\n else:\n return {'html': html, 'button_list': gene_button, 'message': \"added\"}\n else:\n fail_message = render_template(\"fail_message.html\", gene_name=gene_name)\n if json:\n return jsonify({'message': fail_message})\n else:\n return {'message': fail_message}",
"def upload_file():\n global gui\n print(request.data)\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n print(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n gui.controller.main('openFile %s' % os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect('/data')\n return render_template('upload_file.html')",
"def html(self, filebase):\n fieldfile = filebase + '.json'\n fp = open(fieldfile, \"w\")\n fp.write(self.json())\n fp.close()\n\n # _htmlPath = os.path.join(\n # os.path.dirname(__file__),\n # \"..\",\n # \"..\",\n # \"etc\",\n # \"robotGrid.html\"\n # )\n\n # fp = open(os.path.join(os.getenv('KAIJU_DIR'), 'etc',\n # 'robotGrid.html'), \"r\")\n\n fp = open(os.path.join(KAIJU_ETC_DIR, \"robotGrid.html\"), \"r\")\n\n html_str = ''\n for l in fp.readlines():\n l = l.replace(\"fieldfile\", \"'\" + os.path.basename(fieldfile) + \"'\")\n html_str = html_str + l\n fp.close()\n\n fp = open(filebase + '.html', \"w\")\n fp.write(html_str)\n fp.close()\n\n fp = open(os.path.join(KAIJU_ETC_DIR, 'robotGrid.js'), \"r\")\n js_str = ''\n for l in fp.readlines():\n js_str = js_str + l\n fp.close()\n\n fp = open(os.path.join(os.path.dirname(filebase), 'robotGrid.js'), \"w\")\n fp.write(js_str)\n fp.close()\n return",
"def form_valid(self, form):\n file_in_memory = form.cleaned_data \n xml_text = forms.handle_upload(file_in_memory)\n data = parse_txt(xml_text)\n return render(self.request, 'esfviewer/output.html', {'data': data})",
"def send_file_name():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n elif len(entries) != 0:\n messagebox.showinfo(\"Warning!\", \"You must first close the current file!\", parent=app_frame)\n return\n\n events = get_file(value.get())\n # Call display_lr_assignments() and send events file to be displayed in the application window\n display_lr_assignments(events)",
"def render_analysis(page,analys,fileName,listF,listOrder) -> 'html':\n return render_template(page,\n title='Analysis',\n n=analys + ' '+ fileName,\n fileList=listF,\n prev = listOrder)",
"def create(self):\n fileserver = servers.get_file_server()\n self.AddLabelTool(\r\n filecmds.ID_New, \r\n \"new\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/new.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"New File\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n filecmds.ID_Open, \r\n \"open\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/open.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"Open File\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n filecmds.ID_Save, \r\n \"save\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/save.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"Save File\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n levelcmds.ID_Save, \r\n \"save level\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/save_level.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"Save Level\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n filecmds.ID_SaveAll, \r\n \"saveall\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/saveall.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"Save All Files\", \r\n \"\"\r\n )\n self.AddSeparator()\n self.AddLabelTool(\r\n editcmds.ID_Undo, \r\n \"undo\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/undo.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL, \r\n \"Undo\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n editcmds.ID_Redo, \r\n \"redo\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/redo.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ),\r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL, \r\n \"Redo\", \r\n \"\"\r\n )\n self.AddSeparator()\n self.AddLabelTool(\r\n toolscmds.ID_NullTool, \r\n \"null tool\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/null.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Do Nothing Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_SelectObjTool, \r\n \"select object\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/select.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Select Object Tool\", \r\n \"\"\r\n )\r\n self.AddLabelTool(\r\n toolscmds.ID_TranslateObjTool, \r\n \"translate object\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/translate.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Translate Object Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_RotateObjTool, \r\n \"rotate object\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/rotate.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Rotate Object Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_ScaleObjTool, \r\n \"scale object\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/scale.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Scale Object Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_PlaceObjTool, \r\n \"place object\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/place.bmp\"),\n wx.BITMAP_TYPE_ANY), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Place Object Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_MultiplaceObjTool, \r\n \"multiplace object\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/multiplace.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n 
wx.ITEM_RADIO,\n \"Multiplace Object Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_TrnEditTool, \r\n \"terrain edit\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/terrain_edit.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Terrain Edit Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_WaypointsTool, \r\n \"create waypoints\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/waypoints.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Waypoints Creation Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_CircleTriggerTool, \r\n \"circle triggers\", \r\n wx.Bitmap(\n fileserver.manglepath(\"outgui:images/tools/circle_trigger.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Circle Trigger Creation Tool\", \r\n \"\"\r\n )\n self.AddLabelTool(\r\n toolscmds.ID_PolygonTriggerTool, \r\n \"polygon triggers\", \r\n wx.Bitmap(\n fileserver.manglepath(\r\n \"outgui:images/tools/polygon_trigger.bmp\"\r\n ),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\n \"Polygon Trigger Creation Tool\", \r\n \"\"\r\n )\r\n self.AddLabelTool(\r\n toolscmds.ID_CircleSoundSourceTool, \r\n \"circle sound sources\", \r\n wx.Bitmap(\r\n fileserver.manglepath(\r\n \"outgui:images/tools/circle_sound_source.bmp\"\r\n ),\r\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\r\n \"Circle Sound Source Creation Tool\",\r\n \"\"\r\n )\r\n self.AddLabelTool(\r\n toolscmds.ID_PolygonSoundSourceTool, \r\n \"polygon sound sources\", \r\n wx.Bitmap(\r\n fileserver.manglepath(\r\n \"outgui:images/tools/polygon_sound_source.bmp\"\r\n ),\r\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_RADIO,\r\n \"Polygon Sound Source Creation Tool\",\r\n \"\"\r\n )\r\n self.AddLabelTool(\r\n toolscmds.ID_MeasureTool, \r\n \"measure\", \r\n wx.Bitmap(\n fileserver.manglepath(\r\n \"outgui:images/tools/measure.bmp\"\r\n ),\n wx.BITMAP_TYPE_ANY), \r\n wx.NullBitmap, wx.ITEM_RADIO,\n \"Terrain Distance Measure Tool\", \r\n \"\"\r\n )\r\n self.AddSeparator()\n self.AddLabelTool(\r\n viewcmds.ID_LocalTransform, \r\n \"local/world transform\",\n wx.Bitmap(\r\n fileserver.manglepath(\n \"outgui:images/tools/world_axis.bmp\"\r\n ), \r\n wx.BITMAP_TYPE_ANY\r\n ),\n wx.Bitmap(\r\n fileserver.manglepath(\n \"outgui:images/tools/local_axis.bmp\"\r\n ), \r\n wx.BITMAP_TYPE_ANY\r\n ),\n wx.ITEM_CHECK,\r\n \"Switch between local and world transform\",\r\n \"\"\r\n )\n self.choice_move_mode = wx.Choice(\r\n self, \r\n -1, \r\n choices = [\r\n 'No terrain collision', \r\n 'Terrain collision', \r\n 'Snap to terrain'\r\n ]\r\n )\n self.AddControl( self.choice_move_mode )\n self.choice_selection_mode = wx.Choice(\r\n self, \r\n -1, \r\n choices = [\r\n 'Objects', \r\n 'Subentities', \r\n 'Terrain cells'\r\n ]\r\n )\n self.AddControl( self.choice_selection_mode )\n self.AddLabelTool(\r\n viewcmds.ID_LockSelection, \r\n \"lock selection\",\n wx.Bitmap(\r\n fileserver.manglepath(\r\n \"outgui:images/tools/lock2.bmp\"\r\n ),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_CHECK,\n \"Lock/Unlock current selection\",\r\n \"\"\r\n )\n self.AddSeparator()\n self.AddLabelTool(\r\n debugcmds.ID_ToggleAI, \r\n \"toggle AI\", \r\n wx.Bitmap(\n fileserver.manglepath(\r\n \"outgui:images/tools/ai.bmp\"\r\n ),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_CHECK,\n \"Turn on/off triggers and agents\",\r\n \"\"\r\n )\n self.AddLabelTool(\r\n 
debugcmds.ID_ToggleSound,\r\n \"toggle sound\",\n wx.Bitmap(\r\n fileserver.manglepath(\r\n \"outgui:images/tools/sound.bmp\"\r\n ),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_CHECK,\n \"Turn on/off the sound sources\",\r\n \"\"\r\n )\n self.AddSeparator()\n self.AddLabelTool(\r\n viewcmds.ID_ObjBrowser, \r\n \"object browser\",\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/browser.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"Open an Object Browser\", \r\n \"\"\r\n )\n self.AddSeparator()\n self.AddLabelTool(\r\n toolscmds.ID_WaypointsPathTool, \r\n \"create waypoint paths\", \r\n wx.Bitmap(\r\n fileserver.manglepath(\"outgui:images/tools/waypoints.bmp\"),\n wx.BITMAP_TYPE_ANY\r\n ), \r\n wx.NullBitmap, \r\n wx.ITEM_NORMAL,\n \"Waypoint Paths Creation Tool\", \r\n \"\"\r\n )\n\n # initial state\n self.toggle_axis_transform(True)\n self.choice_move_mode.SetSelection(0)\n self.choice_selection_mode.SetSelection(0)\n\n # bindings\n self.Bind(\r\n wx.EVT_CHOICE, \r\n self.__on_move_mode, \r\n self.choice_move_mode\r\n )\n self.Bind(\r\n wx.EVT_CHOICE, \r\n self.__on_selection_mode, \r\n self.choice_selection_mode\r\n )",
"def file_upload(self, req, folder_path):\n\t\tresult, filename = self.handle_upload(req, folder_path)\n\t\tfile_url = self.selected_root['url_callback'](req, folder_path, filename)\n\t\t\n\t\tself.content_type = 'text/html'\n\t\tself.content = [str(tags.script(type=\"text/javascript\")[\n\t\t\t\t\t\t\"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename)\n\t\t\t\t\t\t])]",
"def xmodule_handler(self, request, suffix=None):\r\n class FileObjForWebobFiles(object):\r\n \"\"\"\r\n Turn Webob cgi.FieldStorage uploaded files into pure file objects.\r\n\r\n Webob represents uploaded files as cgi.FieldStorage objects, which\r\n have a .file attribute. We wrap the FieldStorage object, delegating\r\n attribute access to the .file attribute. But the files have no\r\n name, so we carry the FieldStorage .filename attribute as the .name.\r\n\r\n \"\"\"\r\n def __init__(self, webob_file):\r\n self.file = webob_file.file\r\n self.name = webob_file.filename\r\n\r\n def __getattr__(self, name):\r\n return getattr(self.file, name)\r\n\r\n # WebOb requests have multiple entries for uploaded files. handle_ajax\r\n # expects a single entry as a list.\r\n request_post = MultiDict(request.POST)\r\n for key in set(request.POST.iterkeys()):\r\n if hasattr(request.POST[key], \"file\"):\r\n request_post[key] = map(FileObjForWebobFiles, request.POST.getall(key))\r\n\r\n response_data = self.handle_ajax(suffix, request_post)\r\n return Response(response_data, content_type='application/json')",
"def uploader():\n\tif request.method == 'POST':\n\t\t\n\t\tif \"file\" not in request.files:\n\t\t\treturn \"No data in file.\"\n\n\t\tFile = request.files['file']\n\t\t\n\t\tif File.filename == \"\":\n\t\t\treturn \"No file selected.\"\n\t\t\n\t\tfilename, ext = secure_filename(File.filename).split('.')\n\t\t#Check if file stream exists and file tpye correct.\n\t\tif File and ext == \"hepmc\":\n\t\t\t#The file is a byte stream by default which is not compatible with the current version of hepmcio.\n\t\t\tstring_stream = io.StringIO(File.read().decode('utf-8'))\n\n\t\t\t#Get all events from file and jsonify them.\n\t\t\tevents = hepmcio.HepMCReader(string_stream).all_events()\n\t\t\thepMCEncoder = hepmcio_json.HepMCJSONEncoder()\n\t\t\tjsonified = [hepMCEncoder.encode(event) for event in events]\n\n\t\t\t#Each collection contains all the data in a file.\n\t\t\tif filename not in mongo.db.collection_names():\n\t\t\t\tcollection = mongo.db[filename]\n\t\t\t\tjsonDecoder = json.JSONDecoder()\n\n\t\t\t\t#MongoDB takes in Python objects and not JSON strings, so have to decode before adding documents.\n\t\t\t\tfor jsonObject in jsonified:\n\t\t\t\t\tjsonEvent = jsonDecoder.decode(jsonObject.evt)\n\t\t\t\t\tjsonParticles = [jsonDecoder.decode(p) for p in jsonObject.particles]\n\t\t\t\t\tjsonVertices = [jsonDecoder.decode(v) for v in jsonObject.vertices]\n\n\t\t\t\t\tcollection.insert_one(jsonEvent)\n\t\t\t\t\tcollection.insert_many(jsonParticles)\n\t\t\t\t\tcollection.insert_many(jsonVertices)\n\t\t\n\t\t\t\treturn \"Succesfully uploaded file.\"\n\t\t\t\n\t\t\treturn \"File already in database.\"\n\n\t\treturn \"Incorrect file type.\"",
"def customized_pondlets(driver, file_name):\n file_path = get_file_path(file_name)\n data = []\n\n with open(file_path) as file:\n data = file.readlines()\n\n driver.find_element_by_xpath(\n addlink.format(model_path='customized_pondlet/customizedpondlet')\n ).click()\n\n for row in data:\n items = row[:-1].split('\\t')\n\n input_value = {\n 'title': items[0],\n 'level': items[1],\n 'lang': items[2],\n 'author': items[3],\n 'date': items[4],\n }\n\n wait_xpath(driver, addanother)\n\n driver.find_element_by_id('id_title').send_keys(input_value['title'])\n\n driver.find_element_by_id('id_level').clear()\n driver.find_element_by_id('id_level').send_keys(input_value['level'])\n\n Select(driver.find_element_by_id('id_lang')\n ).select_by_value(input_value['lang'])\n\n driver.find_element_by_id('id_author').send_keys(input_value['author'])\n\n if input_value['date'].lower() == 'today':\n driver.find_element_by_link_text('Today').click()\n else:\n driver.find_element_by_id(\n 'id_finalized_date').send_keys(input_value['date'])\n\n driver.find_element_by_xpath(addanother).click()\n wait_xpath(driver, success)\n\n driver.find_element_by_id('site-name').click()\n wait_url(driver, dashboard)",
"def upload_groups(self, files: [str]):\n self.driver.get(\"https://reporting.smarterbalanced.org/admin-groups/import\")\n\n try:\n WebDriverWait(self.driver, 20).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"maincontent\"]/admin/div[1]/div[3]'))\n )\n except:\n print(\"Error\")\n\n self.driver.execute_script(\n f\"\"\"document.evaluate('//*[@id=\"maincontent\"]/admin/div[1]/div[3]', document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.removeAttribute(\"hidden\")\"\"\")\n\n file_upload = self.driver.find_element_by_xpath('//*[@id=\"maincontent\"]/admin/div[1]/div[3]/input')\n [file_upload.send_keys(file) for file in files]",
"def csv_bootstrap_table():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n #filename = secure_filename(file.filename)\n html_text = csv_html_converter(file)\n html = Markup(html_text)\n return render_template('bootstrap_table.html', html=html, html_code=html_text)\n return render_template('form.html')",
"def staff_view(self):\n spinner_url = self.runtime.local_resource_url(self, 'public/static/images/spinner.gif')\n frag = Fragment(loader.render_template(\"templates/edx_gea/staff.html\",\n {'upload_assessment_file_form' : UploadAssessmentFileForm(auto_id=True, initial={'csv_delimiter' : get_default_delimiter()}),\n 'spinner_url' : spinner_url,\n 'max_assessment_file_lines' : self.max_assessment_file_lines}))\n frag.add_css(loader.load_unicode(\"static/css/gea.css\"))\n frag.add_javascript(loader.load_unicode(\"static/js/src/gea.js\"))\n frag.initialize_js('GeaXBlock')\n return frag",
"def text_file_upload(request):\n if request.method == \"POST\":\n txt_files = request.FILES.getlist('text_file')\n fs = [i for i in os.listdir('temp/text_files') if 'txt' in i]\n #delete old files\n for f in fs:\n os.remove('temp/text_files/{}'.format(f))\n for i, f in enumerate(txt_files):\n handle_uploaded_file(f, 'temp/text_files/text_file_{}.txt'.format(i+1))\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )",
"def prepare_quick_upload(self, req):\n\t\tresult, filename = self.handle_upload(req, self.get_selected_root(req))\n\t\tfile_url = self.selected_root['url_callback'](req, filename)\n\t\t\n\t\tself.content = [str(tags.script(type=\"text/javascript\")[\n\t\t\t\t\t\t\"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename)\n\t\t\t\t\t\t])]",
"def NewFile(self, e=0):\n\n self.m_Filter.SetValue('')\n # clear all\n self.clear_controls()\n self.d = dict()\n # Raname Title of window\n self.file = \"\"\n self.SetTitle(\"Chainer\")\n self.n = 1\n self.set_arrows()\n #self.mnemo_hide()",
"def upload_transcripts(request):\r\n response = {\r\n 'status': 'Unknown server error',\r\n 'subs': '',\r\n }\r\n\r\n locator = request.POST.get('locator')\r\n if not locator:\r\n return error_response(response, 'POST data without \"locator\" form data.')\r\n\r\n try:\r\n item = _get_item(request, request.POST)\r\n except (InvalidKeyError, ItemNotFoundError):\r\n return error_response(response, \"Can't find item by locator.\")\r\n\r\n if 'transcript-file' not in request.FILES:\r\n return error_response(response, 'POST data without \"file\" form data.')\r\n\r\n video_list = request.POST.get('video_list')\r\n if not video_list:\r\n return error_response(response, 'POST data without video names.')\r\n\r\n try:\r\n video_list = json.loads(video_list)\r\n except ValueError:\r\n return error_response(response, 'Invalid video_list JSON.')\r\n\r\n source_subs_filedata = request.FILES['transcript-file'].read().decode('utf8')\r\n source_subs_filename = request.FILES['transcript-file'].name\r\n\r\n if '.' not in source_subs_filename:\r\n return error_response(response, \"Undefined file extension.\")\r\n\r\n basename = os.path.basename(source_subs_filename)\r\n source_subs_name = os.path.splitext(basename)[0]\r\n source_subs_ext = os.path.splitext(basename)[1][1:]\r\n\r\n if item.category != 'video':\r\n return error_response(response, 'Transcripts are supported only for \"video\" modules.')\r\n\r\n # Allow upload only if any video link is presented\r\n if video_list:\r\n sub_attr = source_subs_name\r\n try:\r\n # Generate and save for 1.0 speed, will create subs_sub_attr.srt.sjson subtitles file in storage.\r\n generate_subs_from_source({1: sub_attr}, source_subs_ext, source_subs_filedata, item)\r\n\r\n for video_dict in video_list:\r\n video_name = video_dict['video']\r\n # We are creating transcripts for every video source, if in future some of video sources would be deleted.\r\n # Updates item.sub with `video_name` on success.\r\n copy_or_rename_transcript(video_name, sub_attr, item, user=request.user)\r\n\r\n response['subs'] = item.sub\r\n response['status'] = 'Success'\r\n except Exception as ex:\r\n return error_response(response, ex.message)\r\n else:\r\n return error_response(response, 'Empty video sources.')\r\n\r\n return JsonResponse(response)",
"def serve_upload_page():\n image_names = get_uploaded_image_names()\n image_pages = [ {\"name\":filename, \"url\":url_for('compare', image=filename) } \\\n for filename in image_names]\n return render_template('upload.html', image_pages=image_pages)",
"def upload(request):\n gi = GalaxyInstance(url=request.session.get('server'), email=request.session.get('galaxyemail'), password=request.session.get(\"galaxypass\"))\n selected = request.POST.get('selected')\n selectedmeta = request.POST.get('meta')\n filetype = request.POST.get('filetype')\n dbkey = request.POST.get('dbkey')\n workflowid = request.POST.get('workflowid')\n pid = request.POST.get('data_id')\n onlydata = request.POST.get('onlydata')\n makecol = request.POST.get('col')\n data_ids = []\n control = request.POST.get('samples')\n test = request.POST.get('samplesb')\n new_hist = request.POST.get('historyname')\n group = request.POST.get('group')\n investigation = request.POST.get('investigation')\n date = strftime(\"%d_%b_%Y_%H:%M:%S\", gmtime())\n select = selected.split(',')\n mselect = selectedmeta.split(',')\n gselect = group.split(',')\n iselect = investigation.split(',')\n files = get_selection(iselect, gselect, select, mselect)[0]\n mfiles = get_selection(iselect, gselect, select, mselect)[1]\n groups = get_selection(iselect, gselect, select, mselect)[2]\n investigations = get_selection(iselect, gselect, select, mselect)[3]\n history_id = create_new_hist(gi, request.session.get('galaxyemail'), request.session.get(\"galaxypass\"),\n request.session.get('server'), workflowid, files, new_hist)\n inputs = {}\n if len(filter(None, files)) <= 0:\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n if onlydata == \"true\":\n make_data_files(gi, files, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id, filetype, dbkey)\n else:\n make_data_files(gi, files, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id, filetype, dbkey)\n make_meta_files(gi, mfiles, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id)\n if workflowid != \"0\":\n in_count = 0\n resultid = uuid.uuid1()\n datamap = dict()\n mydict = {}\n jsonwf = gi.workflows.export_workflow_json(workflowid)\n for i in range(len(jsonwf[\"steps\"])):\n if jsonwf[\"steps\"][str(i)][\"name\"] == \"Input dataset\":\n try:\n label = jsonwf[\"steps\"][str(i)][\"inputs\"][0][\"name\"]\n except IndexError:\n label = jsonwf[\"steps\"][str(i)][\"label\"]\n mydict[\"in%s\" % (str(i + 1))] = gi.workflows.get_workflow_inputs(workflowid, label=label)[0]\n for k, v in mydict.items():\n datamap[v] = {'src': \"hda\", 'id': get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count]}\n data_ids.append(get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count])\n in_count += 1\n if makecol == \"true\":\n gi.histories.create_dataset_collection(history_id, make_collection(data_ids))\n gi.workflows.invoke_workflow(workflowid, datamap, history_id=history_id)\n gi.workflows.export_workflow_to_local_path(workflowid, request.session.get('username'), True)\n datafiles = get_output(request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'))\n store_results(1, datafiles, request.session.get('server'), request.session.get('username'),\n request.session.get('password'), request.session.get('storage'),\n groups, resultid, 
investigations, date)\n store_results(3, datafiles, request.session.get('server'), request.session.get('username'),\n request.session.get('password'), request.session.get('storage'),\n groups, resultid, investigations, date)\n ga_store_results(request.session.get('username'), request.session.get('password'), workflowid,\n request.session.get('storage'), resultid, groups, investigations)\n call([\"rm\", request.session.get('username') + \"/input_test\"])\n return render_to_response('results.html', context={'workflowid': workflowid, 'inputs': inputs, 'pid': pid,\n 'server': request.session.get('server')})\n else:\n if makecol == \"true\":\n history_data = gi.histories.show_history(history_id, contents=True)\n for c in range(0, len(history_data)):\n data_ids.append(history_data[c]['id'])\n gi.histories.create_dataset_collection(history_id, make_collection(data_ids))\n ug_store_results(\n request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'), workflowid,\n request.session.get('username'), request.session.get('password'), request.session.get('storage'), groups, investigations, date)\n return HttpResponseRedirect(reverse(\"index\"))",
"def html_files_upload(request):\n if request.method == \"POST\":\n \n html_files = request.FILES.getlist('html_file')\n fs = [i for i in os.listdir('temp/html_files') if 'html' in i]\n #delete old files\n for f in fs:\n os.remove('temp/html_files/{}'.format(f))\n for i, f in enumerate(html_files):\n handle_uploaded_file(f, 'temp/html_files/html_file_{}.html'.format(i+1))\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )",
"def upload():\n\treturn render_template(\"upload.html\", title=\"Upload a file\")",
"def upload_panel(store, institute_id, case_name, stream):\n institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n raw_symbols = [line.strip().split('\\t')[0] for line in stream if\n line and not line.startswith('#')]\n # check if supplied gene symbols exist\n hgnc_symbols = []\n for raw_symbol in raw_symbols:\n if store.hgnc_genes(raw_symbol).count() == 0:\n flash(\"HGNC symbol not found: {}\".format(raw_symbol), 'warning')\n else:\n hgnc_symbols.append(raw_symbol)\n return hgnc_symbols",
"def render_successful_upload(request):\n torrent_file = request.files['file']\n filename = secure_filename(torrent_file.filename)\n username = request.form['username']\n save_torrent_file(filename, torrent_file, request.form['username'])\n dl_success = start_torrent_download(filename)\n update_download_info()\n return render_template('successful_upload.html', \n username=username, \n filename=filename, \n download_success=dl_success, \n downloads=downloads)",
"def OnOpen(self, e):\n\t\tsuccess = False\n\t\tdlg = wx.FileDialog(self, \"Choose a file\", self.dirname, \"\", \"*.*\", wx.FD_OPEN)\n\t\tif dlg.ShowModal() == wx.ID_OK:\n\t\t\tsuccess = True\n\t\t\tself.dirname, self.filename = os.path.split(dlg.GetPath())\n\t\tdlg.Destroy()\n\t\tif success:\n\t\t\tself.FileText.SetLabel(\"File: \"+self.filename)\n\t\t\tself.raw_file = data.load_data(os.path.join(self.dirname, self.filename))\n\t\t\tself.combine_data()\n\t\t\tself.plot_data()",
"def addLoadPaths(self):\n #create a static boxsizer\n load_box = wx.StaticBox(self, label=\"Step 1: Input Data\")\n box_sizer = wx.StaticBoxSizer(load_box, wx.VERTICAL)\n fgs = wx.FlexGridSizer(rows=2, cols=2, vgap=10, hgap=10)\n box_sizer.Add(fgs, proportion=1, flag=wx.EXPAND)\n\n #actual data handled by a FGS\n self.data_btn = wx.Button(self, label=\"Load Data\")\n self.data_btn.Bind(wx.EVT_BUTTON, self.onLoadData)\n\n self.assign_btn = wx.Button(self, label=\"Add Assignments\")\n self.assign_btn.Bind(wx.EVT_BUTTON, self.onAddAssignment)\n self.assign_btn.Disable()\n\n pub.subscribe(self.dataLoaded, \"data_loaded\")\n \n fgs.Add(self.data_btn, proportion=1, flag = wx.EXPAND)\n fgs.Add(wx.StaticText(self), proportion=1, flag = wx.EXPAND)\n\n fgs.Add(self.assign_btn)\n btn_label = wx.StaticText(self, label=\"(optional)\")\n new_font = btn_label.GetFont()\n new_font.SetStyle(wx.FONTSTYLE_ITALIC)\n btn_label.SetFont(new_font)\n fgs.Add(btn_label)\n \n \n fgs.Add(wx.StaticText(self), proportion=1, flag = wx.EXPAND)\n\n self.SetSizerAndFit(box_sizer)",
"def _render(self, parsed_args):\n self.handle_file_upload(parsed_args, passed_vals=self.config)\n setattr(self, 'document', self.json_file_contents)\n if self.display:\n self.messages.append(('render', self.document))\n return True",
"def process_reg_file(filename, tracking_id, args):\n file_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id), filename)\n processor = Processor(file_path, args)\n response = processor.process('registration')\n return response",
"def js(self, file):\n\t\tfor f in file:\n\t\t\tself.to_head('<script type=\"text/javascript\" src=\"' + f + '\"></script>\\n')"
]
| [
"0.57950294",
"0.5733016",
"0.53495437",
"0.5318565",
"0.512028",
"0.51008075",
"0.5074191",
"0.5051867",
"0.50033104",
"0.49809676",
"0.49671775",
"0.49513146",
"0.4897819",
"0.48895323",
"0.4885491",
"0.483393",
"0.48064065",
"0.47825357",
"0.47541657",
"0.47469318",
"0.47346625",
"0.46984974",
"0.46797943",
"0.4677193",
"0.46718052",
"0.46443802",
"0.46061328",
"0.46044296",
"0.460328",
"0.45997328"
]
| 0.61844766 | 0 |
Method to produce HTML for the create-panel wizard when a new gene is added. Each gene is checked against the database before the HTML is generated for the transcript list and gene button list within the process workflow. The method also checks for changes to the preferred transcript that have yet to be applied, so these pending changes are displayed correctly. The method can be accessed either through an AJAX query from the client side or directly within the views (e.g. from the multiple-gene upload). If JSON output is requested, the method returns the complete HTML sections using jsonify. | def create_panel_get_tx(gene_name=None, project_id=None):
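    """Build the HTML fragments (transcript list and gene button) for one gene in the create-panel wizard.

    Called either via an AJAX request (no arguments; values are read from request.json and the
    response is wrapped with jsonify) or directly from another view such as the multiple-gene
    upload, in which case a plain dict is returned instead.
    """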
json = False
if not gene_name:
gene_name = request.json['gene_name']
project_id = request.json['project_id']
json = True
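    # isgene looks the supplied symbol up in the database and returns the matching
    # gene name (which may differ from the name the user typed) or a falsy value.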
exists = isgene(s, gene_name)
if exists:
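        # The symbol matched the database under a different (canonical) name:
        # warn the user rather than silently adding the alternate gene name.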
if exists != gene_name:
message = render_template("wrong_gene_name_message.html", gene_name=gene_name, match_name=exists)
if json:
return jsonify({'html': '', 'button_list': '', 'message': message})
else:
return {'html': '', 'button_list': '', 'message': message} #alternate gene name is not automatically added
else:
message = render_template("success_message.html", gene_name=gene_name)
gene_name = exists
gene_id = get_gene_id_from_name(s, gene_name)
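        # Look up the live preferred transcript and any pending change that has not
        # been made live yet, so the transcript list can display both.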
preftx_id = get_preftx_by_gene_id(s, project_id, gene_id)
upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)
all_tx = get_tx_by_gene_id(s, gene_id)
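        # Render the gene button and transcript list fragments that the wizard adds to the page.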
gene_button = render_template("gene_button.html", gene_name=gene_name, gene_id=gene_id, added=False)
html = render_template("tx_list.html", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,
upcoming=upcoming_preftx, disabled=False)
if json:
return jsonify({'html': html, 'button_list': gene_button, 'message': message})
else:
return {'html': html, 'button_list': gene_button, 'message': "added"}
else:
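        # Gene symbol was not found in the database at all.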
fail_message = render_template("fail_message.html", gene_name=gene_name)
if json:
return jsonify({'message': fail_message})
else:
return {'message': fail_message} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uploader():\n if request.method == 'POST':\n # from the upload page, the clients input is called from the text-area of the form. \n clientinput_unic = request.form['list_genes']\n #print \"raw input client:\\n\", clientinput_unic\n clientinput_unic = re.sub('[^a-zA-Z0-9 \\n\\r]', '', clientinput_unic) # all symbols are removed from unicode\n clientinput_genelist = clientinput_unic.replace('\\r','').split('\\n') # a list is created\n clientinput_genelist = [x.encode('UTF8') for x in clientinput_genelist] # removing the Unicode from the list\n # Input send to tool of the latest version. Current = version 1.1\n HPOdatabase, output_path = config_yaml()\n phen_df, genescores_df, numbers, outfile_phen, outfile_genes, outfile_phenpergenes, accepted_df, dropped_df, Q, missing, dupli = tool11(clientinput_genelist, HPOdatabase, output_path)\n print \"missing:\", missing, len(missing), \"dupli:\", len(dupli), \"dropped:\", len(dropped_df), \"accepted:\", len(accepted_df)\n # information about the variables are also send to the html pages, as the result page has different sections shown dependent on the clients input.\n number_dropped = len(dropped_df)\n number_accepted = len(accepted_df)\n # if dataframes are not empty, an html table is made from them.\n try:\n \taccepted = accepted_df.to_html()\n except:\n \taccepted = accepted_df\n try:\n \tdropped = dropped_df.to_html()\n except:\n \tdropped = dropped_df\n\n return render_template('result.html', genes_in_HPO=countgenes(),output_phen=outfile_phen,output_genes=outfile_genes, output_phenpergenes=outfile_phenpergenes, accepted=accepted, dropped=dropped, Q=Q, missing=missing, dupli=dupli, number_accepted=number_accepted, number_dropped=number_dropped)",
"def Prokka_Check():\n global PROKKA_PROCESS\n if session.query(Genome).filter_by(name=PROKKA_PROCESS).first(): #check if it exists\n g_id = session.query(Genome).filter_by(name=PROKKA_PROCESS).order_by(Genome.id.desc()).first().id #most recent genome of that name entered\n if session.query(Prokka).filter_by(genome_id=g_id).count() > 0: \n form = TableForm()\n PROKKA_PROCESS = \"\" #if the prokka name has been added to the database, make this global vaiable empty so it will alert nothing\n \n data = pd.read_excel('PATH/TO/ROOT/Database_Inputs/metadata_table.xlsx')\n data.set_index(['Check boxes'], inplace=True) #sets which column will be all blue (checkbox column)\n data.index.name=None\n pd.set_option('display.max_colwidth', -1) #The integer sets how many characters can be shown in a single cell on the table. -1 means as many as you would like\n #this is to allow the \"<\"s to show up as they should when they are rendered by this table maker. Otherwise they are their speceial character equivolents which do not work. Also the other replaces are to get desired formatting so that is something\n out = data.to_html(classes='male').replace(\"<\", \"<\").replace(\">\", \">\").replace(\"<table border=\\\"1\\\" class=\\\"dataframe male\\\">\", \"<table border=\\\"1\\\" class=\\\"sortable\\\">\").replace(\"<tr style=\\\"text-align: right;\\\">\", \"<tr style=\\\"text-align: center;\\\">\").replace(\"{\",\"{\").replace(\"}\",\"}\")\n HTML_format(out)\n flash('Finished processing '+PROKKA_PROCESS+'\\'s PROKKA information.') \n return render_template('genome_table.html', form=form, title=\"Genome Table\") #create a new table and refresh the page\n else:\n flash('Currently processing '+PROKKA_PROCESS+'\\'s PROKKA information. Do NOT upload any additional genomes until this is done. Please check again later')",
"def upload_multiple():\n gene_list = request.json['gene_list']\n project_id = request.json['project_id']\n all_message = ''\n html = ''\n added_list = []\n button_list = ''\n\n for gene in sorted(gene_list):\n if gene == \"\" or gene in added_list:\n continue\n dct = create_panel_get_tx(gene, project_id)\n if dct[\"message\"] == \"added\":\n added_list.append(gene)\n else:\n all_message += dct[\"message\"]\n try:\n html += dct[\"html\"]\n except KeyError:\n pass\n try:\n button_list += dct[\"button_list\"]\n except KeyError:\n pass\n\n if len(added_list) > 0:\n added_message = render_template(\"added_list.html\", added_list=enumerate(added_list), length=len(added_list))\n all_message += added_message\n\n return jsonify({'message': all_message, 'html': html, 'button_list': button_list})",
"def show_new_tag_form():\n\n return render_template('create_tag.html')",
"def start_GUI(self):\n experiment_names = list(GUI.api.get_experiment_names())\n #selected_exp = None #value picked in the list\n\n branch_log_dict = GUI.api.get_branching_indep_to_dep()\n #Separate columns for a new trial and a new experiment\n\n col_new_trial = [[sg.Radio('New Trial', \"RADIO1\", default=True, enable_events = True, key=\"new_trial_radio\", metadata='not_disable')],\n [sg.Text(text = \"Please pick your experiment from the list below:\")], \n [sg.Listbox(values=experiment_names, size=(30, 6), key=\"list\", select_mode = sg.LISTBOX_SELECT_MODE_SINGLE, enable_events= True)]]\n \n\n #metadata ahs true if we need to input filed\n col_new_experiment = [[sg.Radio('New experiment', \"RADIO1\", enable_events=True, key=\"new_exp_radio\", metadata='not_disable')]]\n col_new_experiment.extend(self.make_fields())#add fields to the form\n layout = [[sg.Column(col_new_trial), sg.Column(col_new_experiment)], \n [sg.Button(button_text= \"OK\", enable_events= True, key =\"OK\")]]\n \n window = sg.Window('New Data', layout, keep_on_top=True)#Creation of the window\n while True:\n event, values = window.read()\n # End program if user closes window or\n # presses the OK button\n # you can use switch-case here instead of if statements\n if event == sg.WIN_CLOSED:\n #Indicate abort\n return None, None, None, None\n elif event == \"new_exp_radio\":#if new experiment is picked, then disable the elements for the new trial\n #for evey field on which branching logic depends on, disable everything not selected\n window['list'].update(disabled = True)\n for row in col_new_experiment:\n for elem in row:\n if(elem.metadata != 'not_disable' and not isinstance(elem, sg.Text)):#do not block the radio button):\n window[elem.Key].update(disabled = False)\n \n self.clear_disable_all(window, branch_log_dict, col_new_experiment)#we could just enable a few, instead\n elif event == \"new_trial_radio\":#if new trial is picked, disable the elements for the new experiment, enable for the new trua\n #disable everything in the form\n for row in col_new_experiment:\n for elem in row:\n if(elem.metadata != 'not_disable' and not isinstance(elem, sg.Text)):#do not block the radio button and do not update textboxes\n window[elem.Key].update(disabled = True)\n #enable the listbox\n \n window['list'].update(disabled = False)\n elif event == \"OK\":\n field_missing = False\n #Check if the listbox has a value or the form has a value\n if values['new_exp_radio']:#we are doing new expriment\n # printing_params = {\"paxton\":\"\"}\n printing_params = {}\n #Check the all the stuff in the form of the new experiment\n for row in col_new_experiment:\n if(field_missing):\n break#do not check anymore\n for elem in row:\n if(elem.metadata != 'not_disable' and not isinstance(elem, sg.Text)):#do not check labels and the radio button\n if (elem.metadata and values[elem.Key]== \"\"): #value ahs to be filled and not empty\n field_missing = True\n sg.popup_ok('Required fields are missing!')#if at least one field is empty, throw a popup and stop checking\n break # Shows OK button\n #if at least one field does not have a value, then we generate a popup\n elif(values[elem.Key] != \"\"):#add to the dictonary of params\n printing_params[elem.Key] = values[elem.Key]\n \n if not field_missing:\n #if everything is filled, then validate\n \n #if user closes the popup, then the print is considered bad by default\n is_valid, field_name = self.validate_fields(window, values)\n if(is_valid):\n print_result, folderPath = self.getPicturesPrintEval()\n 
window.close()\n #now, we also return print_result\n return \"add_record\", printing_params, print_result, folderPath\n else:\n sg.popup_ok(\"The field could not be validated: \" + field_name)\n \n elif values['new_trial_radio']:#could use else\n if values['list'] == []:\n sg.popup_ok('Required fields are missing!')\n continue#go to while loop\n #we got here, so we now know the record_id of the experiment we want to do the new trial for\n record_lst = GUI.api.get_elements(values['list'][0])\n #create a new window with print quality + pictures\n print_result, folderPath = self.getPicturesPrintEval()\n window.close()\n return \"add_trial\", record_lst, print_result, folderPath\n elif event in branch_log_dict:#if branching logic is dependent on this element\n #we could only enable/disable stuff affected by the element\n self.enable_selected(window, copy.deepcopy(values), branch_log_dict, event)\n self.disable_not_selected(window, copy.deepcopy(values), branch_log_dict, event)",
"def edit_panel_process():\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n tx_version = get_current_preftx_version(s, preftx_id)\n panel_version = get_current_version(s, panel_id)\n if not tx_version:\n tx_version = 0\n if make_live == \"on\":\n print('make_live')\n make_preftx_live(s, preftx_id, tx_version + 1, current_user.id)\n make_panel_live(s, panel_id, panel_version + 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n panel_id = request.args.get('id')\n form = EditPanelProcess()\n panel_info = get_panel_info(s, panel_id)\n project_id = panel_info.project_id\n form.project.choices = [(project_id, panel_info.project_name), ]\n form.panelname.data = panel_info.name\n\n lock_panel(s, current_user.id, panel_id)\n\n genes = get_genes_by_panelid_edit(s, panel_id, panel_info.current_version)\n html = \"\"\n buttonlist = \"\"\n print('hello')\n for gene in genes:\n gene_id = gene.id\n gene_name = gene.name\n preftx_id = get_preftx_by_gene_id\n upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)\n all_tx = get_tx_by_gene_id(s, gene_id)\n\n buttonlist += render_template(\"gene_button.html\", gene_name=gene_name, gene_id=gene_id, added=True)\n tx_html = render_template(\"tx_list.html\", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,\n upcoming=upcoming_preftx, disabled=True)\n html += tx_html\n\n return render_template('panel_createprocess.html', form=form, genes=html, genelist=buttonlist,\n panel_id=panel_id,\n url=url_for('panels.edit_panel_process') + \"?id=\" + panel_id)",
"def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content",
"def customer_add_view_new(h):\n global html\n html = h\n common_elements = customer_common_elements()\n \n css_list = common_elements[\"css_list\"]\n\n javascript_list = [\"js/commonFunctions.js\"]\n javascript_list += common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Add Customers\", \"customer_management.py\", all_btn, css_list, javascript_list)\n\n customer_add_html = \"\"\"\n <table width=\"100%\" class=\"content_tbl\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr>\n <th>Customer Details</th>\n </tr>\n <tr>\n <td>\n <table width=\"100%\" class=\"deatil_tbl\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr>\n <td class=\"label\">Organization Name</td>\n <td colspan=\"3\"><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr>\n <td>Telephone Number</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td class=\"label\">Fax</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr>\n <td>Website</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td>Email</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr>\n <td>Business Type</td>\n <td><select class=\"txt_bx\"><option> Auction </option></select></td>\n <td>Submission</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr>\n <td>Address</td>\n <td colspan=\"3\"><textarea rows=\"4\" cols=\"27\"></textarea></td>\n </tr>\n </tbody>\n </table>\n </td>\n </tr>\n <tr>\n <td>\n <table width=\"100%\" id=\"iDetails_table\" class=\"individual_tbl\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr>\n <th colspan=\"4\">Individual Details <a href=\"\" class=\"addChildTr\"><img class=\"add\" src=\"images/add-gray.png\" align=\"right\" alt=\"\" border=\"0\"></a></th>\n </tr>\n <tr class=\"findInnerHTML\"><td colspan=\"4\"></td></tr>\n <tr class=\"findInnerHTML\">\n <td class=\"label\">Contact Person Name</td>\n <td colspan=\"3\"><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr class=\"findInnerHTML\">\n <td>Email</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td class=\"label\">Contact Number</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr class=\"findInnerHTML\">\n <td>Department</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td>Designation</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr><td colspan=\"4\"></td></tr>\n </tbody>\n </table>\n </td>\n </tr>\n <tr>\n <td>\n <table width=\"100%\" class=\"individual_tbl\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n <tbody>\n <tr>\n <th colspan=\"4\">Customer Portal Configration</th>\n </tr>\n <tr><td colspan=\"4\"></td></tr>\n <tr>\n <td class=\"label\">Portal ID</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td class=\"label\">Portal Link</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr>\n <td>Set Theme</td>\n <td><select class=\"txt_bx\"><option> Theme </option></select></td>\n <td>Port Number</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr><td colspan=\"4\"></td></tr>\n <tr>\n <td colspan=\"4\">\n <table border=\"0\" class=\"add_user_tbl\" width=\"90%\" cellpadding=\"0\" cellspacing=\"0\">\n <tbody>\n <tr>\n <th colspan=\"6\">Add Customer Users <a href=\"#\" class=\"addChildTr\"><img class=\"add\" src=\"images/add-gray.png\" align=\"right\" alt=\"\" 
border=\"0\"></a></th>\n </tr>\n <tr><td colspan=\"6\"></td></tr>\n <tr>\n <td>User Name</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td>Password</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td>Email</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr class=\"findInnerHTML\">\n <td>User Name</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td>Password</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td>Email</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr><td colspan=\"6\"></td></tr>\n </tbody>\n </table>\n </td>\n </tr>\n <tr><td> </td></tr>\n </tbody></table>\n </td>\n </tr>\n <tr>\n <td>\n <table width=\"100%\" id=\"billing_dept_table\" class=\"individual_tbl endof_tbl\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n <tbody><tr>\n <th colspan=\"4\">Billing Departments</th>\n </tr>\n <tr><td colspan=\"4\"></td></tr>\n <tr>\n <td class=\"width_150\">\n Select Billing Department \n <br> \n <em class=\"fs10\">(If TPS selected)</em>\n </td>\n <td colspan=\"3\">\n <select id=\"billing_determinant_select_box\" class=\"txt_bx\">\n <option>TPS</option>\n <option>Bandwidth</option>\n <option>Sim Subscription</option>\n </select>\n </td>\n </tr>\n <tr>\n <td class=\"width_150\">Max TPS</td>\n <td colspan=\"3\"><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr>\n <td class=\"width_150\">Billing Cycle</td>\n <td colspan=\"3\"><select class=\"txt_bx\"><option> Monthly </option></select></td>\n </tr>\n <tr>\n <td class=\"width_150\">Billing Start Date</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n <td class=\"width_150\">Billing Amount</td>\n <td><input type=\"text\" value=\"\" class=\"txt_bx\"></td>\n </tr>\n <tr><td colspan=\"4\"></td></tr>\n </tbody></table>\n </td>\n </tr>\n </tbody></table>\n \n \"\"\"\n html.write(customer_add_html)\n html.write(\"\"\"\n <script>\n customer_add_view_new();\n </script>\n \"\"\")\n html.new_footer()",
"def display_and_process_html_for_modification_of_json_election(request, slug):\n logger = Loggers.get_logger()\n logger.info(\n \"[elections/display_and_process_html_for_json.py \"\n \"display_and_process_html_for_modification_of_json_election()] request.POST=\"\n )\n logger.info(json.dumps(request.POST, indent=3))\n context = create_context_for_election_officer(request, tab=TAB_STRING)\n\n if len(Election.objects.all().filter(slug=slug)) != 1:\n context[ERROR_MESSAGES_KEY] = [f\"Received invalid Election slug of {slug}\"]\n return render(request, 'elections/update_election/update_election_json.html', context)\n\n process_election = (request.method == \"POST\") and (UPDATE_EXISTING_ELECTION__NAME in request.POST)\n election = Election.objects.get(slug=slug)\n return process_existing_election_information_from_json(request, election, context) \\\n if process_election else display_current_json_election_json(request, election, context)",
"def update_demo(self):\n debug = False\n if not self.settings_data:\n lib.OutputLib.update_html_ctrl(self.html, WAITING_MSG)\n return\n if debug: print(self.settings_data)\n ## 1) part before the table-specific items e.g. column names and data\n html = [mg.DEFAULT_HDR % {'title': 'Demonstration table',\n 'css': self.styles, 'dojo_insert': ''}]\n html.append(\"<table cellspacing='0'>\\n<thead>\\n<tr>\")\n ## 2) the table-specific items (inc column labels)\n ## list based on sequence of fields in underlying table\n db_flds_orig_names = [] ## will be the key for any dicts taken from db\n ## lists based on sequence of fields in (re)design\n design_flds_orig_names = [] ## NB will be None for new or inserted flds.\n ## Ordered as per list of variables in design.\n design_flds_new_names = []\n design_flds_col_labels = []\n design_flds_types = []\n for data_dict in self.settings_data:\n ## all must have same num of elements (even if a None) in same order\n fldname = data_dict[mg.TBL_FLDNAME]\n design_flds_new_names.append(fldname)\n design_flds_col_labels.append(\n self.var_labels.get(fldname, fldname.title()))\n design_flds_orig_names.append(data_dict.get(mg.TBL_FLDNAME_ORIG))\n design_flds_types.append(data_dict[mg.TBL_FLDTYPE]) \n if data_dict.get(mg.TBL_FLDNAME_ORIG) is not None:\n db_flds_orig_names.append(data_dict[mg.TBL_FLDNAME_ORIG]) \n if debug:\n print(db_flds_orig_names)\n print(design_flds_orig_names)\n print(design_flds_new_names)\n print(design_flds_col_labels)\n print(design_flds_types)\n ## column names\n for col_label in design_flds_col_labels:\n html.append(f'<th>{col_label}</th>')\n ## get data rows (list of lists)\n display_n = 4 ## demo rows to display\n if self.new:\n rows = []\n for i in range(display_n):\n row_lst = self.get_demo_row_lst(\n i, design_flds_col_labels, design_flds_types)\n rows.append(row_lst)\n else:\n rows = self.get_real_demo_data(display_n, db_flds_orig_names,\n design_flds_orig_names, design_flds_new_names,\n design_flds_col_labels, design_flds_types)\n ## data rows into html\n for row in rows:\n html.append('</tr>\\n</thead>\\n<tbody><tr>')\n for raw_val in row:\n html.append(f'<td>{raw_val}</td>')\n html.append('</tr>')\n html.append('\\n</tbody>\\n</table></body></html>')\n str_content = ''.join(html)\n lib.OutputLib.update_html_ctrl(self.html, str_content)",
"def show_new(thing):\n add_template_variable('thing', thing)\n return my_render_template('generic/create.html')",
"def create_html(self):\n rows = self.check()\n htmlrows = \"\"\n for row in rows:\n data = self._format_row(row)\n htmlrows += data\n \n return self.TEMPLATE.format(content=htmlrows)",
"def buildPage(self):\n Users = [(u['name']) for u in driver.nodes.match(\"User\")]\n Tissues = [(t['name']) for t in driver.nodes.match(\"Tissue\")]\n Diseases = [(d['name']) for d in driver.nodes.match(\"Disease\")]\n self.add_basic_layout()\n layout = [html.Div([\n html.Div([html.H4('Project information', style={'width': '15.5%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.H4('', id='update_project_id', style={'width': '15%', 'verticalAlign': 'top', 'display': 'none'}),\n html.Br(),\n html.Div(children=[html.Label('Project name:*', style={'marginTop': 15}),\n dcc.Input(id='project name', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Acronym:', style={'marginTop': 15}),\n dcc.Input(id='project acronym', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Responsible:*', style={'marginTop': 15})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Participants:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='responsible-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='participant-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Data Types:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Disease:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='data-types-picker', options=[{'label': i, 'value': i} for i in DataTypes], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='disease-picker', options=[{'label': i, 'value': i} for i in Diseases], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Tissue:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Intervention:', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='tissue-picker', options=[{'label': i, 'value': i} for i in Tissues], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='intervention-picker', placeholder='E.g. 
SNOMED identifier|SNOMED identifier|...', type='text', style={'width': '100%', 'height': '54px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Number of subjects:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Timepoints:', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_subjects', placeholder='E.g. 77 (each unique patient counts as 1 subject)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_timepoints', placeholder='E.g. 2 months|15 days|24 hours...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Follows up project:', style={'marginTop': 15}),\n dcc.Input(id='related_to', placeholder='Use the Project Identifier (P000000X)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Description:', style={'marginTop': 15}),\n dcc.Textarea(id='project description', placeholder='Enter description...', style={'width': '100%', 'height': '100px'})]),\n html.Br(),\n html.Div(children=[html.Label('Starting Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-start', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=[html.Label('Ending Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-end', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=html.Button('Create Project', id='project_button', n_clicks=0, className=\"button_link\",\n style={'fontSize': '25px'}), style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(children=[html.A(children=html.Button('Download Clinical Data template', id='download_button', n_clicks=0,\n style={'fontSize': '16px', 'display': 'block'}),\n id='download_link', href='', n_clicks=0)], style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(id='project-creation', style={'fontSize': '20px', 'marginLeft': '70%'}),\n html.Br()]),\n html.Hr()])]\n\n self.extend_layout(layout)",
"def get_additional(cls, obj, **kwargs):\n if \"classifier_results\" in obj.extra_data:\n keywords = obj.extra_data.get('classifier_results').get(\"complete_output\")\n else:\n keywords = []\n prediction_results = obj.extra_data.get(\"arxiv_guessing\", {})\n if prediction_results:\n prediction_results = prediction_results[0].get(\"result\")\n return render_template(\n 'inspire_workflows/styles/harvesting_record_additional.html',\n object=obj,\n keywords=keywords,\n score=prediction_results.get(\"max_score\"),\n decision=prediction_results.get(\"decision\")\n )",
"def display_and_process_html_for_new_nominee_links_election(request):\n logger.info(\n \"[elections/create_election_nominee_links.py display_and_process_html_for_new_nominee_links_election()] \"\n \"request.POST\"\n )\n logger.info(json.dumps(request.POST, indent=3))\n (render_value, error_message, context) = verify_access_logged_user_and_create_context_for_elections(\n request, TAB_STRING\n )\n if render_value is not None:\n request.session[ERROR_MESSAGE_KEY] = '{}<br>'.format(error_message)\n return render_value\n\n if request.method == \"POST\":\n return process_new_election_and_nominee_links(request, context)\n else:\n create_context_for_create_election_nominee_links_html(context, create_new_election=True)\n return render(request, 'elections/create_election/create_election_nominee_links.html', context)",
"def add_row() -> str:\r\n return render_template(\"add_row.html\")",
"def html(self, filebase):\n fieldfile = filebase + '.json'\n fp = open(fieldfile, \"w\")\n fp.write(self.json())\n fp.close()\n\n # _htmlPath = os.path.join(\n # os.path.dirname(__file__),\n # \"..\",\n # \"..\",\n # \"etc\",\n # \"robotGrid.html\"\n # )\n\n # fp = open(os.path.join(os.getenv('KAIJU_DIR'), 'etc',\n # 'robotGrid.html'), \"r\")\n\n fp = open(os.path.join(KAIJU_ETC_DIR, \"robotGrid.html\"), \"r\")\n\n html_str = ''\n for l in fp.readlines():\n l = l.replace(\"fieldfile\", \"'\" + os.path.basename(fieldfile) + \"'\")\n html_str = html_str + l\n fp.close()\n\n fp = open(filebase + '.html', \"w\")\n fp.write(html_str)\n fp.close()\n\n fp = open(os.path.join(KAIJU_ETC_DIR, 'robotGrid.js'), \"r\")\n js_str = ''\n for l in fp.readlines():\n js_str = js_str + l\n fp.close()\n\n fp = open(os.path.join(os.path.dirname(filebase), 'robotGrid.js'), \"w\")\n fp.write(js_str)\n fp.close()\n return",
"def edit_virtual_panel_process():\n form = EditVirtualPanelProcess()\n\n vp_id = request.args.get('id')\n panel_id = get_panel_by_vp_id(s, vp_id)\n if request.method == \"POST\":\n if request.form['make_live'] == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n lock_panel(s, current_user.id, panel_id)\n panel_info = get_panel_details_by_id(s, panel_id)\n panel_name = panel_info.name\n form.panel.choices = [(panel_id, panel_name), ]\n\n panel_version = get_current_version(s, panel_id)\n panel_genes = get_genes_by_panelid(s, panel_id, panel_version)\n vp_info = get_vpanel_details_by_id(s, vp_id)\n vp_version = vp_info.current_version\n vp_name = vp_info.name\n form.vpanelname.data = vp_name\n vp_genes = get_genes_by_vpanelid_edit(s, vp_id, vp_version)\n genelist = \"\"\n vp_list = []\n for i in vp_genes:\n vp_list.append(i.id)\n\n genes = []\n print('new method')\n for i in panel_genes:\n if i.id in vp_list:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": True})\n button = render_template(\"gene_button.html\", gene_name=i.name, gene_id=i.id, added=True)\n genelist += button\n\n else:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": False})\n\n gene_html = render_template(\"panel_genes.html\", panel_genes=genes)\n\n url = url_for('panels.edit_virtual_panel_process') + '?id=' + str(vp_id)\n return render_template('virtualpanels_createprocess.html', form=form, genes=gene_html, genelist=genelist,\n vp_id=vp_id, panel_name=vp_name, current_version=vp_version, url=url)",
"def _format_html(self, file_content):\n old_string = r\"<!-- INSERT JUMP BOX HERE -->\"\n new_string = self._getJumpBoxHtml()\n file_content = string.replace(file_content, old_string, new_string) \n\n additional_head_string = ''' \n<link media=\"screen\" href=\"dataTableMedia/css/demo_table.css\" type=\"text/css\" rel=\"stylesheet\"/>\n<link media=\"screen\" href=\"dataTableMedia/css/TableTools.css\" type=\"text/css\" rel=\"stylesheet\"/>\n<script src=\"util.js\" type=\"text/javascript\"></script>\n<script src=\"jquery.js\" type=\"text/javascript\"></script>\n<script src=\"customTables.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/jquery.dataTables.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/TableTools.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/jquery.dataTables.select.filtering.js\" type=\"text/javascript\" ></script>\n '''\n old_string = r\"<!-- INSERT ADDITIONAL HEAD STRING HERE -->\" \n file_content = string.replace(file_content, old_string, additional_head_string) \n new_string = '''\n <table id=\"dataTables-summaryArchive\" class=\"display\" cellspacing=\"0\" cellpadding=\"0\" border=\"0\"> \n <thead>\n <tr> \n '''\n #Write headers: 'name', 'rog', 'distance_count', 'cs_count', 'chothia_class', 'chain_count', 'res_count'\n for i,_header in enumerate(summaryHeaderList):\n new_string += '\\t<th title=\"{help}\">{header}</th>\\n'.format(header = summaryHeader2List[i],\n help = summaryHeaderTitleList[i])\n # end for \n new_string += '''\n </tr> \n </thead>\n </table>\n '''\n old_string = r\"<!-- INSERT NEW RESULT STRING HERE -->\" \n file_content = string.replace(file_content, old_string, new_string)\n return file_content",
"def create_form_html():\n data_file = os.path.join('data', 'data.csv')\n data = pd.read_csv(data_file, index_col=0)\n example1 = data.iloc[0, :178]\n example2 = data.iloc[4340, : 178]\n placeholder = ', '.join(example1.astype(str))\n example_str1 = textwrap.fill(placeholder, 80)\n example_str2 = textwrap.fill(', '.join(example2.astype(str)), 80)\n form_html = ('''\n <html><body>\n <h1>Binary classifier for Epileptic Seizure Recognition Data \n Set</h1>\n <h2>Please enter features for classification</h1>\n (178 integers, separated by commas)\n <form method=\"post\" action=\"\">\n <textarea name=\"query\" cols=\"80\" rows=\"10\">'''\n + placeholder\n + ''' </textarea>\n <input type=\"submit\">\n </form>\n <p> Example non-seizure data point:\n '''\n + example_str1\n + '''<p> Example seizure data point: '''\n + example_str2\n + '''</body></html>''')\n return form_html",
"def buildHTML(self):\n\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"clean\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n for line in proc.stdout:\n print(\"stdout: \" + str(line.rstrip(), encoding='utf8'))\n print('----------------')\n proc = subprocess.Popen(\n [\"make\", \"html\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + str(line.rstrip(), encoding='utf8'))\n\n # Load corresponding HTML file from newly-built Sphinx docs\n self.preview.load_html(self.output_html_path)",
"def _do_load_page(self, **kwargs):\n _tree = kwargs['tree']\n _row = kwargs['row']\n _error_code = 0\n _user_msg = \"\"\n _debug_msg = \"\"\n\n _data = []\n _model = self.treeview.get_model()\n\n _node = _tree.nodes[SortedDict(_tree.nodes).keys()[0]]\n _entity = _node.data\n try:\n if _entity.is_mission:\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(\n self._dic_icons['mission'], 22, 22)\n _data = [\n _icon, _entity.mission_id, _entity.description, '',\n _entity.time_units, 0.0, _entity.mission_time, 0.0, 0.0,\n _node.identifier, 0, 'mission'\n ]\n _new_row = None\n\n elif _entity.is_phase:\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(\n self._dic_icons['phase'], 22, 22)\n _data = [\n _icon, _entity.phase_id, _entity.name, _entity.description,\n '', _entity.phase_start, _entity.phase_end, 0.0, 0.0,\n _node.identifier, 0, 'phase'\n ]\n\n elif _entity.is_env:\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(\n self._dic_icons['environment'], 22, 22)\n _data = [\n _icon, _entity.environment_id, _entity.name, '',\n _entity.units, _entity.minimum, _entity.maximum,\n _entity.mean, _entity.variance, _node.identifier, 1,\n 'environment'\n ]\n\n try:\n _new_row = _model.append(_row, _data)\n except TypeError:\n _error_code = 1\n _user_msg = _(u\"One or more Usage Profile line items had the \"\n u\"wrong data type in it's data package and is \"\n u\"not displayed in the Usage Profile.\")\n _debug_msg = (\n \"RAMSTK ERROR: Data for Usage Profile ID {0:s} for \"\n \"Revision ID {1:s} is the wrong type for one or \"\n \"more columns.\".format(\n str(_node.identifier), str(self._revision_id)))\n _new_row = None\n except ValueError:\n _error_code = 1\n _user_msg = _(u\"One or more Usage Profile line items was \"\n u\"missing some of it's data and is not \"\n u\"displayed in the Usage Profile.\")\n _debug_msg = (\n \"RAMSTK ERROR: Too few fields for Usage Profile ID \"\n \"{0:s} for Revision ID {1:s}.\".format(\n str(_node.identifier), str(self._revision_id)))\n _new_row = None\n except AttributeError:\n if _node.identifier != 0:\n _error_code = 1\n _user_msg = _(u\"One or more Usage Profile line items was \"\n u\"missing it's data package and is not \"\n u\"displayed in the Usage Profile.\")\n _debug_msg = (\n \"RAMSTK ERROR: There is no data package for Usage \"\n \"Profile ID {0:s} for Revision ID {1:s}.\".format(\n str(_node.identifier), str(self._revision_id)))\n _new_row = None\n\n for _n in _tree.children(_node.identifier):\n _child_tree = _tree.subtree(_n.identifier)\n self._do_load_page(tree=_child_tree, row=_new_row)\n\n _row = _model.get_iter_root()\n self.treeview.expand_all()\n if _row is not None:\n _path = _model.get_path(_row)\n _column = self.treeview.get_column(0)\n self.treeview.set_cursor(_path, None, False)\n self.treeview.row_activated(_path, _column)\n\n return (_error_code, _user_msg, _debug_msg)",
"def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content",
"def include_content_html():\n\n# <div id=\"content\"> \n root_div = etree.Element(\"div\", id=\"content\")\n \n for initial_condition in initial_conditions:\n for flux in fluxes:\n # content_id identifies the results of a particular computation in the HTML document \n content_id = initial_condition + \"_\" + flux\n # <div id=\"content_id\">\n div = etree.SubElement(root_div, \"div\", id=content_id)\n # JQuery function to include content dynamically\n # <script> = include_content(content_id)</script>\n etree.SubElement(div, \"script\").text = \"include_content(\\\"\" + content_id + \"\\\")\"\n #</div> \n# </div>\n\n# Write the generated HTML document to a file\n output_html_file = open(html_path + \"html/computations/include_content.html\", \"w\") \n output_html_file.write(etree.tostring(root_div, pretty_print=True).decode(\"utf-8\"))",
"def customer_add_view(h):\n global html\n html = h\n \n common_elements = customer_common_elements()\n \n css_list = common_elements[\"css_list\"]\n\n javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Add Customers\", \"customer_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div id=\"grid_view_div\">\n <div class=\"yo-tabs\">\n <ul>\n <li>\n <a class=\"active\" href=\"#content_1\" id=\"active_host_tab\">Customer Application Form</a>\n </li>\n <li>\n <a href=\"#content_2\" id=\"disable_host_tab\">Bulk Upload</a>\n </li>\n <li>\n <a href=\"#content_3\" id=\"discovered_host_tab\">CRM Import</a>\n </li>\n </ul>\n <div id=\"content_1\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form action=\"customer_post_ajax.py\" method=\"get\" id=\"add_customer_form\" name=\"add_customer_form\" autocomplete=\"on\" >\n <div class=\"form-div\" style=\"top:30px;\">\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"username\">Customer User Name</label>\n <input type=\"text\" id=\"user_name\" name=\"user_name\" \n title=\"Choose Unique User Name. <br/>Must be at least 5 characters.\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"password\">Password</label>\n <input type=\"password\" id=\"password\" name=\"password\" title=\"Must be at least 8 characters. \"/>\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"groups\">Customer Organisation(Role)</label>\n \"\"\"\n customer_string += (customer_group_customer_widget())\n customer_string += \"\"\"\n </div>\n \"\"\"\n customer_string += \"\"\"\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"first_name\">First Name</label>\n <input type=\"text\" id=\"first_name\" name=\"first_name\" title=\"Please Enter First name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"last_name\">Last Name</label>\n <input type=\"text\" id=\"last_name\" name=\"last_name\" title=\"Please Enter Last name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"company_name\">Company</label>\n <input type=\"text\" id=\"company_name\" name=\"company_name\" title=\"Please Enter Company Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"designation\">Designation</label>\n <input type=\"text\" id=\"designation\" name=\"designation\" title=\"Please Enter Designation.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"mobile_no\">Mobile Number</label>\n <input type=\"text\" id=\"mobile_no\" name=\"mobile_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"telephone_no\">Telephone Number</label>\n <input type=\"text\" id=\"telephone_no\" name=\"telephone_no\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"fax\">Fax</label>\n <input type=\"text\" id=\"fax\" name=\"fax\" \n title=\"Please Enter Mobile Number<br/> Don't include +91 or 0.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"email_id\">E-Mail ID</label>\n <input type=\"text\" id=\"email_id\" name=\"email_id\" title=\"Please Enter E-Mail ID.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"city_id\">City</label>\n <input 
type=\"text\" id=\"city_id\" name=\"city_id\" title=\"Please Enter City Name.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"state_id\">State</label>\n <input type=\"text\" id=\"state_id\" name=\"state_id\" title=\"Please Enter State.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"country_id\">Country</label>\n <input type=\"text\" id=\"country_id\" name=\"country_id\" title=\"Please Enter Country.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"usage\">Usage</label>\n <select id='usage' name='usage'>\n <option value=0>Personal</option>\n <option value=1>Commercial</option>\n </select>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"purpose\">Purpose</label>\n <input type=\"text\" id=\"purpose\" name=\"purpose\" title=\"Please Enter Purpose.\"/>\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"address\">Address</label>\n <textarea id=\"address\" name=\"address\" title=\"Please Enter own Address.\"></textarea>\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Save</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"cancel\">Cancel</span></button>\n </div>\n </form>\n </div>\n <!-- container tab 2 -->\n <div id=\"content_2\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form action=\"#\" method=\"post\">\n <label for=\"bulk\">Upload Customers</label>\n <input name=\"bulk\" id=\"bulk\" type=\"file\" />\n </form>\n </div>\n <!-- container tab 3 -->\n <div id=\"content_3\" class=\"tab-content\" style=\"display:block;height:100%;\">\n <form class=\"form-body\" id=\"crm_conn\" action=\"#\" method=\"get\">\n <div class=\"form-div\" style=\"top:30px;\">\n <div class=\"form-body\">\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_link\">CRM Link Location</label>\n <input type=\"text\" name=\"crm_link\" id=\"crm_link\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_user\">CRM Username</label>\n <input type=\"text\" name=\"crm_user\" id=\"crm_user\" />\n </div>\n <div class=\"row-elem\">\n <label class=\"lbl lbl-big\" for=\"crm_pass\">CRM Password</label>\n <input type=\"password\" name=\"crm_pass\" id=\"crm_pass\" />\n </div>\n </div>\n </div>\n <div class=\"form-div-footer\">\n <button type=\"submit\" class=\"yo-small yo-button\"><span class=\"add\">Test Connection</span></button>\n <button type=\"reset\" class=\"yo-small yo-button\" id=\"close_add_user\"><span class=\"add\">Start Import</span></button>\n </div>\n </form>\n </div>\n </div>\n </div>\n \"\"\" \n customer_string += \"\"\"\n <script>\n post_customers();\n </script>\n \"\"\"\n html.write(customer_string)\n html.new_footer()",
"def registerexperiment(request):\r\n # Obtain the context from the HTTP request.\r\n\r\n context_instance = RequestContext(request)\r\n\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n except LoggedInButFailedGetGeniUserError:\r\n return _show_failed_get_geniuser_page(request)\r\n\r\n\r\n page_top_errors = []\r\n username = user.username\r\n ret =['testA'] #test list\r\n \r\n if request.method == 'POST':\r\n # create a form instance and populate it with data from the request:\r\n r_form = forms.RegisterExperimentForm(request.POST)#glabal data form\r\n battery_form = forms.BatteryForm(request.POST, prefix = 'battery') #form for each sensor.\r\n bluetooth_form = forms.BluetoothForm(request.POST, prefix = 'bluetooth') #form for each sensor.\r\n cellular_form = forms.CellularForm(request.POST, prefix = 'cellular') #form for each sensor.\r\n location_form = forms.LocationForm(request.POST, prefix = 'location') #form for each sensor.\r\n settings_form = forms.SettingsForm(request.POST, prefix = 'settings') #form for each sensor.\r\n sensor_form = forms.SensorForm(request.POST, prefix = 'sensor') #form for each sensor.\r\n signalstrength_form = forms.SignalStrengthForm(request.POST, prefix = 'signalstrength') #form for each sensor.\r\n wifi_form = forms.WifiForm(request.POST, prefix = 'wifi') #form for each sensor.\r\n\r\n if r_form.is_valid(): #if r_form is valid save the data\r\n ret.append(\"valid1\")\r\n geni_user = user #foreign key of the experiment\r\n experiment_name = r_form.cleaned_data['expe_name']\r\n researcher_name = r_form.cleaned_data['researcher_name']\r\n researcher_address = r_form.cleaned_data['researcher_address']\r\n researcher_email = r_form.cleaned_data['researcher_email']\r\n irb = r_form.cleaned_data['researcher_institution_name']\r\n irb_email = r_form.cleaned_data['irb_officer_email']\r\n goal = r_form.cleaned_data['goal']\r\n if r_form.is_required('terms_of_use') == False:\r\n page_top_errors.append(\"Please accept the terms of use\")\r\n\r\n try:\r\n # we should never error here, since we've already finished validation at this point.\r\n # but, just to be safe...\r\n experiment = interface.register_experiment(geni_user,experiment_name,\r\n researcher_name,researcher_address,\r\n researcher_email,irb, irb_email, goal)\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:\r\n #Evreything went good so far and\r\n #now we have to check every sensor form.\r\n if battery_form.is_valid():\r\n if battery_form.is_required('battery'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n if_battery_present = battery_form.is_required('if_battery_present')\r\n battery_health = battery_form.is_required('battery_health')\r\n battery_level = battery_form.is_required('battery_level')\r\n battery_plug_type = battery_form.is_required('battery_plug_type')\r\n battery_status = battery_form.is_required('battery_status')\r\n battery_technology = battery_form.is_required('battery_technology')\r\n #check general sensor atributes\r\n battery_frequency = battery_form.cleaned_data['frequency']\r\n battery_frequency_unit = battery_form.cleaned_data['frequency_unit']\r\n battery_frequency_other = battery_form.cleaned_data['frequency_other']\r\n battery_precision = battery_form.cleaned_data['precision']\r\n battery_truncation = battery_form.cleaned_data['truncation']\r\n battery_precision_other = battery_form.cleaned_data['precision_other']\r\n battery_goal = battery_form.cleaned_data['goal']\r\n \r\n if battery_frequency == 
None: #if the user doesnt set frequency\r\n battery_frequency = 0 #we set it to 0\r\n if battery_frequency_other == '':#if he doesnt provide any other informatio either\r\n page_top_errors.append(\"Please select the frequency in the battery sensor\")#We set an error\r\n\r\n if battery_truncation == None:\r\n if battery_precision == 'truncate':\r\n page_top_errors.append(\"Please select the truncation decimals in the battery sensor\")\r\n else:\r\n battery_truncation = 0\r\n\r\n if battery_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the battery sensor\")\r\n\r\n if if_battery_present == False and battery_health == False and \\\r\n battery_level == False and battery_plug_type == False and \\\r\n battery_status == False and battery_technology == False:\r\n page_top_errors.append(\"Please select any battery attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n battery = interface.register_sensor('battery',experiment,battery_frequency,\r\n battery_frequency_unit,battery_frequency_other,\r\n battery_precision,battery_truncation, battery_precision_other,\r\n battery_goal,[if_battery_present,battery_health,battery_level,\r\n battery_plug_type,battery_status,battery_technology])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when battery form is not valid\r\n page_top_errors.append(\"Battery form is not valid\")\r\n\r\n if bluetooth_form.is_valid():#save data into bluetooth model\r\n if bluetooth_form.is_required('bluetooth'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n bluetooth_state = bluetooth_form.is_required('bluetooth_state')\r\n bluetooth_is_discovering = bluetooth_form.is_required('bluetooth_is_discovering')\r\n scan_mode = bluetooth_form.is_required('scan_mode')\r\n local_address = bluetooth_form.is_required('local_address')\r\n local_name = bluetooth_form.is_required('local_name')\r\n #check general sensor atributes\r\n bluetooth_frequency = bluetooth_form.cleaned_data['frequency']\r\n bluetooth_frequency_unit = bluetooth_form.cleaned_data['frequency_unit']\r\n bluetooth_frequency_other = bluetooth_form.cleaned_data['frequency_other']\r\n bluetooth_precision = bluetooth_form.cleaned_data['precision']\r\n bluetooth_truncation = bluetooth_form.cleaned_data['truncation']\r\n bluetooth_precision_other = bluetooth_form.cleaned_data['precision_other']\r\n bluetooth_goal = bluetooth_form.cleaned_data['goal']\r\n\r\n if bluetooth_frequency == None:\r\n bluetooth_frequency = 0\r\n if bluetooth_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the bluetooth sensor\")\r\n\r\n if bluetooth_precision == 'truncate'and bluetooth_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the bluetooth sensor\")\r\n\r\n if bluetooth_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the bluetooth sensor\")\r\n\r\n if bluetooth_state == False and bluetooth_is_discovering == False and \\\r\n scan_mode == False and local_address == False and \\\r\n local_name == False:\r\n page_top_errors.append(\"Please select any bluetooth attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n bluetooth = interface.register_sensor('bluetooth',experiment,bluetooth_frequency,\r\n bluetooth_frequency_unit,bluetooth_frequency_other,\r\n bluetooth_precision,bluetooth_truncation, bluetooth_precision_other,\r\n bluetooth_goal,[bluetooth_state,bluetooth_is_discovering,scan_mode,\r\n local_address,local_name])\r\n except 
ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when bluetooth form is not valid\r\n page_top_errors.append(\"Bluetooth form is not valid\")\r\n\r\n if cellular_form.is_valid():#save data into cellular model\r\n if cellular_form.is_required('cellular'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n network_roaming = cellular_form.is_required('network_roaming')\r\n cellID = cellular_form.is_required('cellID')\r\n location_area_code = cellular_form.is_required('location_area_code')\r\n mobile_country_code = cellular_form.is_required('mobile_country_code')\r\n mobile_network_code = cellular_form.is_required('mobile_network_code')\r\n network_operator = cellular_form.is_required('network_operator')\r\n network_operator_name = cellular_form.is_required('network_operator_name')\r\n network_type = cellular_form.is_required('network_type')\r\n service_state = cellular_form.is_required('service_state')\r\n signal_strengths = cellular_form.is_required('signal_strengths')\r\n #check general sensor atributes\r\n cellular_frequency = cellular_form.cleaned_data['frequency']\r\n cellular_frequency_unit = cellular_form.cleaned_data['frequency_unit']\r\n cellular_frequency_other = cellular_form.cleaned_data['frequency_other']\r\n cellular_precision = cellular_form.cleaned_data['precision']\r\n cellular_truncation = cellular_form.cleaned_data['truncation']\r\n cellular_precision_other = cellular_form.cleaned_data['precision_other']\r\n cellular_goal = cellular_form.cleaned_data['goal']\r\n\r\n if cellular_frequency == None:\r\n cellular_frequency = 0\r\n if cellular_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the cellular sensor\")\r\n\r\n if cellular_precision == 'truncate'and cellular_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the cellular sensor\")\r\n\r\n if cellular_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the cellular sensor\")\r\n\r\n if network_roaming == False and cellID == False and \\\r\n location_area_code == False and mobile_country_code == False and \\\r\n mobile_network_code == False and network_operator == False and \\\r\n network_operator_name == False and network_type == False and \\\r\n service_state == False and signal_strengths == False:\r\n page_top_errors.append(\"Please select any cellular attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n cellular = interface.register_sensor('cellular',experiment,cellular_frequency,\r\n cellular_frequency_unit,cellular_frequency_other,\r\n cellular_precision,cellular_truncation, cellular_precision_other,\r\n cellular_goal,[network_roaming,cellID,location_area_code,\r\n mobile_country_code,mobile_network_code,network_operator,\r\n network_operator_name, network_type,service_state,signal_strengths])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when cellular form is not valid\r\n page_top_errors.append(\"Cellular form is not valid\")\r\n\r\n if location_form.is_valid():#save data into location model\r\n if location_form.is_required('location'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n location_providers = location_form.is_required('location_providers')\r\n location_provider_enabled = location_form.is_required('location_provider_enabled')\r\n location_data = location_form.is_required('location_data')\r\n last_known_location = location_form.is_required('last_known_location')\r\n geocode = 
location_form.is_required('geocode')\r\n #check general sensor atributes\r\n location_frequency = location_form.cleaned_data['frequency']\r\n location_frequency_unit = location_form.cleaned_data['frequency_unit']\r\n location_frequency_other = location_form.cleaned_data['frequency_other']\r\n location_precision = location_form.cleaned_data['precision']\r\n location_truncation = location_form.cleaned_data['truncation']\r\n location_precision_other = location_form.cleaned_data['precision_other']\r\n location_goal = location_form.cleaned_data['goal']\r\n\r\n if location_frequency == None:\r\n location_frequency = 0\r\n if location_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the location sensor\")\r\n\r\n if location_precision == 'truncate'and location_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the location sensor\")\r\n\r\n if location_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the location sensor\")\r\n\r\n if location_providers == False and location_provider_enabled == False and \\\r\n location_data == False and last_known_location == False and \\\r\n geocode == False:\r\n page_top_errors.append(\"Please select any location attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n location = interface.register_sensor('location',experiment,location_frequency,\r\n location_frequency_unit,location_frequency_other,\r\n location_precision,location_truncation, location_precision_other,\r\n location_goal,[location_providers,location_provider_enabled,\r\n location_data,last_known_location,geocode])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when location form is not valid\r\n page_top_errors.append(\"Location form is not valid\")\r\n\r\n if settings_form.is_valid():#save data into settings model\r\n if settings_form.is_required('settings'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n airplane_mode = settings_form.is_required('airplane_mode')\r\n ringer_silent_mode = settings_form.is_required('ringer_silent_mode')\r\n screen_on = settings_form.is_required('screen_on')\r\n max_media_volume = settings_form.is_required('max_media_volume')\r\n max_ringer_volume = settings_form.is_required('max_ringer_volume')\r\n media_volume = settings_form.is_required('media_volume')\r\n ringer_volume = settings_form.is_required('ringer_volume')\r\n screen_brightness = settings_form.is_required('screen_brightness')\r\n screen_timeout = settings_form.is_required('screen_timeout')\r\n #check general sensor atributes\r\n settings_frequency = settings_form.cleaned_data['frequency']\r\n settings_frequency_unit = settings_form.cleaned_data['frequency_unit']\r\n settings_frequency_other = settings_form.cleaned_data['frequency_other']\r\n settings_precision = settings_form.cleaned_data['precision']\r\n settings_truncation = settings_form.cleaned_data['truncation']\r\n settings_precision_other = settings_form.cleaned_data['precision_other']\r\n settings_goal = settings_form.cleaned_data['goal']\r\n\r\n if settings_frequency == None:\r\n settings_frequency = 0\r\n if settings_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the settings sensor\")\r\n\r\n if settings_precision == 'truncate'and settings_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the settings sensor\")\r\n\r\n if settings_goal == '':\r\n page_top_errors.append(\"Please explain the goal of 
using the settings sensor\")\r\n\r\n if airplane_mode == False and ringer_silent_mode == False and \\\r\n screen_on == False and max_media_volume == False and \\\r\n max_ringer_volume == False and media_volume == False and \\\r\n ringer_volume == False and screen_brightness == False and \\\r\n screen_timeout == False:\r\n page_top_errors.append(\"Please select any settings attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n settings = interface.register_sensor('settings',experiment,settings_frequency,\r\n settings_frequency_unit,settings_frequency_other,\r\n settings_precision,settings_truncation, settings_precision_other,\r\n settings_goal,[airplane_mode,ringer_silent_mode,screen_on,\r\n max_media_volume,max_ringer_volume,media_volume,ringer_volume,\r\n screen_brightness,screen_timeout])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when settings form is not valid\r\n page_top_errors.append(\"Settings form is not valid\")\r\n\r\n if sensor_form.is_valid():#save data into sensor model\r\n if sensor_form.is_required('sensor'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n sensor_data = sensor_form.is_required('sensor_data')\r\n sensors_accuracy = sensor_form.is_required('sensors_accuracy')\r\n light = sensor_form.is_required('light')\r\n accelerometer = sensor_form.is_required('accelerometer')\r\n magnetometer = sensor_form.is_required('magnetometer')\r\n orientation = sensor_form.is_required('orientation') \r\n #check general sensor atributes\r\n sensor_frequency = sensor_form.cleaned_data['frequency']\r\n sensor_frequency_unit = sensor_form.cleaned_data['frequency_unit']\r\n sensor_frequency_other = sensor_form.cleaned_data['frequency_other']\r\n sensor_precision = sensor_form.cleaned_data['precision']\r\n sensor_truncation = sensor_form.cleaned_data['truncation']\r\n sensor_precision_other = sensor_form.cleaned_data['precision_other']\r\n sensor_goal = sensor_form.cleaned_data['goal']\r\n\r\n if sensor_frequency == None:\r\n sensor_frequency = 0\r\n if sensor_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the sensor sensor\")\r\n\r\n if sensor_precision == 'truncate'and sensor_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the sensor sensor\")\r\n\r\n if sensor_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the sensor sensor\")\r\n\r\n if sensor_data == False and sensors_accuracy == False and \\\r\n light == False and accelerometer == False and \\\r\n magnetometer == False and orientation == False:\r\n page_top_errors.append(\"Please select any sensor attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n sensor = interface.register_sensor('sensor',experiment,sensor_frequency,\r\n sensor_frequency_unit, sensor_frequency_other,\r\n sensor_precision,sensor_truncation, sensor_precision_other,\r\n sensor_goal,[sensor_data,sensors_accuracy,light,\r\n accelerometer,magnetometer,orientation])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when concret sensor form is not valid\r\n page_top_errors.append(\"Sensor form is not valid\")\r\n\r\n if signalstrength_form.is_valid():#save data into signalstrenght model\r\n if signalstrength_form.is_required('signalstrength'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n signal_strengths = signalstrength_form.is_required('signal_strengths')\r\n #check general sensor atributes\r\n 
signalstrength_frequency = signalstrength_form.cleaned_data['frequency']\r\n signalstrength_frequency_unit = signalstrength_form.cleaned_data['frequency_unit']\r\n signalstrength_frequency_other = signalstrength_form.cleaned_data['frequency_other']\r\n signalstrength_precision = signalstrength_form.cleaned_data['precision']\r\n signalstrength_truncation = signalstrength_form.cleaned_data['truncation']\r\n signalstrength_precision_other = signalstrength_form.cleaned_data['precision_other']\r\n signalstrength_goal = signalstrength_form.cleaned_data['goal']\r\n\r\n if signalstrength_frequency == None:\r\n signalstrength_frequency = 0\r\n if signalstrength_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the signalstrength sensor\")\r\n\r\n if signalstrength_precision == 'truncate'and signalstrength_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the signalstrength sensor\")\r\n\r\n if signalstrength_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the signalstrength sensor\")\r\n\r\n if signal_strengths == False:\r\n page_top_errors.append(\"Please select any signalstrength attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n signalstrength = interface.register_sensor('signalstrength',experiment,signalstrength_frequency,\r\n signalstrength_frequency_unit,signalstrength_frequency_other,\r\n signalstrength_precision,signalstrength_truncation, signalstrength_precision_other,\r\n signalstrength_goal,[signal_strengths])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when signalstrength form is not valid\r\n page_top_errors.append(\"Signalstrength form is not valid\")\r\n\r\n if wifi_form.is_valid():#save data into wifi model\r\n if wifi_form.is_required('wifi'):#check if the researcher wants to use this sensor\r\n #check sensor checkboxes\r\n wifi_state = wifi_form.is_required('wifi_state')\r\n ip_address = wifi_form.is_required('ip_address')\r\n link_speed = wifi_form.is_required('link_speed')\r\n supplicant_state = wifi_form.is_required('supplicant_state')\r\n ssid = wifi_form.is_required('ssid')\r\n rssi = wifi_form.is_required('rssi')\r\n scan_results = wifi_form.is_required('scan_results')\r\n #check general sensor atributes\r\n wifi_frequency = wifi_form.cleaned_data['frequency']\r\n wifi_frequency_unit = wifi_form.cleaned_data['frequency_unit']\r\n wifi_frequency_other = wifi_form.cleaned_data['frequency_other']\r\n wifi_precision = wifi_form.cleaned_data['precision']\r\n wifi_truncation = wifi_form.cleaned_data['truncation']\r\n wifi_precision_other = wifi_form.cleaned_data['precision_other']\r\n wifi_goal = wifi_form.cleaned_data['goal']\r\n\r\n if wifi_frequency == None:\r\n wifi_frequency = 0\r\n if wifi_frequency_other == '':\r\n page_top_errors.append(\"Please select the frequency in the wifi sensor\")\r\n\r\n if wifi_precision == 'truncate'and wifi_truncation == None:\r\n page_top_errors.append(\"Please select the truncation decimals in the wifi sensor\")\r\n\r\n if wifi_goal == '':\r\n page_top_errors.append(\"Please explain the goal of using the wifi sensor\")\r\n\r\n if wifi_state == False and ip_address == False and \\\r\n link_speed == False and supplicant_state == False and \\\r\n ssid == False and rssi == False:\r\n page_top_errors.append(\"Please select any wifi attribute\")\r\n\r\n if page_top_errors == []:\r\n try:\r\n wifi = interface.register_sensor('wifi',experiment,wifi_frequency,\r\n 
wifi_frequency_unit,wifi_frequency_other,\r\n wifi_precision,wifi_truncation, wifi_precision_other,\r\n wifi_goal,[wifi_state,ip_address,link_speed,\r\n supplicant_state,ssid,rssi,scan_results])\r\n except ValidationError, err:\r\n page_top_errors.append(str(err))\r\n\r\n else:#when bluetooth wifi is not valid\r\n page_top_errors.append(\"Wifi form is not valid\")\r\n\r\n if page_top_errors == []: #all data have been saved succesfully\r\n #redirect to the help page just as a test\r\n return HttpResponseRedirect(reverse(\"viewexperiments\")) \r\n \r\n else: #if r_form is not valid\r\n page_top_errors.append(\"Basic information of the experiment is not valid\")\r\n \r\n # if a GET (or any other method) we'll create a blank form\r\n else:\r\n r_form = forms.RegisterExperimentForm()\r\n battery_form = forms.BatteryForm(prefix = 'battery') #form for each sensor\r\n bluetooth_form = forms.BluetoothForm(prefix = 'bluetooth') #form for each sensor\r\n cellular_form = forms.CellularForm(prefix = 'cellular') #form for each sensor\r\n location_form = forms.LocationForm(prefix = 'location') #form for each sensor\r\n settings_form = forms.SettingsForm(prefix = 'settings') #form for each sensor\r\n sensor_form = forms.SensorForm(prefix = 'sensor') #form for each sensor\r\n signalstrength_form = forms.SignalStrengthForm(prefix = 'signalstrength') #form for each sensor\r\n wifi_form = forms.WifiForm(prefix = 'wifi') #form for each sensor\r\n\r\n return render(request, 'control/registerexperiment.html', {'username' : username,\r\n 'battery_form': battery_form, 'bluetooth_form': bluetooth_form,\r\n 'cellular_form': cellular_form, 'location_form': location_form, \r\n 'settings_form': settings_form, 'sensor_form': sensor_form, \r\n 'signalstrength_form': signalstrength_form, 'wifi_form': wifi_form, \r\n 'r_form': r_form, 'ret': ret, 'page_top_errors':page_top_errors})",
"def gene_detail(request, pk):\n\n try:\n gene = Gene.objects.get(pk=pk)\n except Gene.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'POST':\n serializer = GeneSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n gene.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n elif request.method == 'PUT':\n serializer = GeneSerializer(gene, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == 'GET':\n serializer = GeneSerializer(gene)\n return Response(serializer.data)",
"def preview_handler(self, _, __):\r\n template = self.system.render_template('lti_form.html', self.get_context())\r\n return Response(template, content_type='text/html')",
"def output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_points,\n regressions, requested_width, requested_height, title):\n print '<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"',\n print '\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">'\n print '<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\">'\n print '<head>'\n print '<title>%s</title>' % qe(title)\n print '</head>'\n print '<body>'\n \n output_svg(lines, regressions, requested_width, requested_height)\n\n #output the manipulation controls\n print \"\"\"\n<script type=\"text/javascript\">//<![CDATA[\n function getElementsByClass(node, searchClass, tag) {\n var classElements = new Array();\n var elements = node.getElementsByTagName(tag);\n var pattern = new RegExp(\"^|\\\\s\"+searchClass+\"\\\\s|$\");\n for (var i = 0, elementsFound = 0; i < elements.length; ++i) {\n if (pattern.test(elements[i].className)) {\n classElements[elementsFound] = elements[i];\n ++elementsFound;\n }\n }\n return classElements;\n }\n function getAllLines() {\n var selectElem = document.getElementById('benchSelect');\n var linesObj = {};\n for (var i = 0; i < selectElem.options.length; ++i) {\n var lines = JSON.parse(selectElem.options[i].value);\n for (var j = 0; j < lines.length; ++j) {\n linesObj[lines[j]] = true;\n }\n }\n return linesObj;\n }\n function getOptions(selectElem) {\n var linesSelectedObj = {};\n for (var i = 0; i < selectElem.options.length; ++i) {\n if (!selectElem.options[i].selected) continue;\n \n var linesSelected = JSON.parse(selectElem.options[i].value);\n for (var j = 0; j < linesSelected.length; ++j) {\n linesSelectedObj[linesSelected[j]] = true;\n }\n }\n return linesSelectedObj;\n }\n function objectEmpty(obj) {\n for (var p in obj) {\n return false;\n }\n return true;\n }\n function markSelectedLines(selectElem, allLines) {\n var linesSelected = getOptions(selectElem);\n if (!objectEmpty(linesSelected)) {\n for (var line in allLines) {\n allLines[line] &= (linesSelected[line] == true);\n }\n }\n }\n function updateSvg() {\n var allLines = getAllLines();\n \n var selects = getElementsByClass(document, 'lines', 'select');\n for (var i = 0; i < selects.length; ++i) {\n markSelectedLines(selects[i], allLines);\n }\n \n for (var line in allLines) {\n var svgLine = document.getElementById(line);\n var display = (allLines[line] ? 
'inline' : 'none');\n svgLine.setAttributeNS(null,'display', display);\n }\n }\n \n function mark(markerId) {\n for (var line in getAllLines()) {\n var svgLineGroup = document.getElementById(line);\n var display = svgLineGroup.getAttributeNS(null,'display');\n if (display == null || display == \"\" || display != \"none\") {\n var svgLine = document.getElementById(line+'_line');\n if (markerId == null) {\n svgLine.removeAttributeNS(null,'marker-mid');\n } else {\n svgLine.setAttributeNS(null,'marker-mid', markerId);\n }\n }\n }\n }\n//]]></script>\"\"\"\n\n all_settings = {}\n variant_settings = set()\n for label in lines.keys():\n for key, value in label.settings.items():\n if key not in all_settings:\n all_settings[key] = value\n elif all_settings[key] != value:\n variant_settings.add(key)\n\n print '<table border=\"0\" width=\"%s\">' % requested_width\n #output column headers\n print \"\"\"\n<tr valign=\"top\"><td width=\"50%\">\n<table border=\"0\" width=\"100%\">\n<tr><td align=\"center\"><table border=\"0\">\n<form>\n<tr valign=\"bottom\" align=\"center\">\n<td width=\"1\">Bench Type</td>\n<td width=\"1\">Bitmap Config</td>\n<td width=\"1\">Timer Type (Cpu/Gpu/wall)</td>\n\"\"\"\n\n for k in variant_settings:\n print '<td width=\"1\">%s</td>' % qe(k)\n\n print '<td width=\"1\"><!--buttons--></td></tr>'\n\n #output column contents\n print '<tr valign=\"top\" align=\"center\">'\n print '<td width=\"1\">'\n create_select(lambda l: l.bench, lines, 'benchSelect')\n print '</td><td width=\"1\">'\n create_select(lambda l: l.config, lines)\n print '</td><td width=\"1\">'\n create_select(lambda l: l.time_type, lines)\n\n for k in variant_settings:\n print '</td><td width=\"1\">'\n create_select(lambda l: l.settings.get(k, \" \"), lines)\n\n print '</td><td width=\"1\"><button type=\"button\"',\n print 'onclick=%s' % qa(\"mark('url(#circleMark)'); return false;\"),\n print '>Mark Points</button>'\n print '<button type=\"button\" onclick=\"mark(null);\">Clear Points</button>'\n print '</td>'\n print \"\"\"\n</tr>\n</form>\n</table></td></tr>\n<tr><td align=\"center\">\n<hr />\n\"\"\"\n\n output_ignored_data_points_warning(ignored_revision_data_points)\n print '</td></tr></table>'\n print '</td><td width=\"2%\"><!--gutter--></td>'\n\n print '<td><table border=\"0\">'\n print '<tr><td align=\"center\">%s<br></br>revisions r%s - r%s</td></tr>' % (\n qe(title),\n bench_util.CreateRevisionLink(oldest_revision),\n bench_util.CreateRevisionLink(newest_revision))\n print \"\"\"\n<tr><td align=\"left\">\n<p>Brighter red indicates tests that have gotten worse; brighter green\nindicates tests that have gotten better.</p>\n<p>To highlight individual tests, hold down CONTROL and mouse over\ngraph lines.</p>\n<p>To highlight revision numbers, hold down SHIFT and mouse over\nthe graph area.</p>\n<p>To only show certain tests on the graph, select any combination of\ntests in the selectors at left. (To show all, select all.)</p>\n<p>Use buttons at left to mark/clear points on the lines for selected\nbenchmarks.</p>\n</td></tr>\n</table>\n\n</td>\n</tr>\n</table>\n</body>\n</html>\"\"\"",
"def createchore():\n return render_template(\"newchore.html\")"
]
| [
"0.56408334",
"0.55816025",
"0.52680415",
"0.503966",
"0.488444",
"0.48382986",
"0.47955337",
"0.47820178",
"0.47750244",
"0.47589657",
"0.47389212",
"0.4719479",
"0.46907225",
"0.46665788",
"0.46599403",
"0.4656926",
"0.4655909",
"0.4653246",
"0.46490628",
"0.46456513",
"0.46368492",
"0.4632549",
"0.462015",
"0.4615505",
"0.46102855",
"0.45942473",
"0.4592676",
"0.45596674",
"0.45549637",
"0.45514244"
]
| 0.67516744 | 0 |
Creates a custom region from an AJAX query and adds the region to the versions table for the specified panel. The method checks that the region does not exist before adding it to the database | def create_panel_custom_regions():
panel_id = request.json["panel_id"]
chrom = request.json["chrom"]
start = request.json["start"]
end = request.json["end"]
name = request.json["name"]
regions = select_region_by_location(s, chrom, start, end) # if region already exists, return current entry
if regions:
for i in regions:
add_region_to_panel(s, i.id, panel_id)
s.commit()
continue
else:
create_custom_region(s, panel_id, chrom, start, end, name)
return jsonify("complete") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_panel_regions():\n version_ids = request.json['id_ext']\n panel_id = request.json['panel_id']\n project_id = request.json['project_id']\n gene_name = request.json['gene_name']\n\n try:\n tx_id = request.json['pref_tx_id']\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n except KeyError:\n pass\n\n for i in version_ids:\n if i[\"ext_5\"] == 0:\n ext_5 = None\n else:\n ext_5 = i[\"ext_5\"]\n\n if i[\"ext_3\"] == 0:\n ext_3 = None\n else:\n ext_3 = i[\"ext_3\"]\n add_region_to_panel(s, i[\"id\"], panel_id, ext_3=ext_3, ext_5=ext_5)\n s.commit()\n return jsonify(\"complete\")",
"def initialize_region(self):\n self.new_region_name = \"\"\n self.map.regions.create_new_region()",
"def add_cloud_region(self, position):\n region = self.cloud_region_selector(position)\n self.regions[id(region)] = region",
"def add_region(self, position):\n region = self.region_selector(position)\n self.regions[id(region)] = region",
"def add_all_regions_vp():\n gene_id = request.json['gene_id']\n vpanel_id = request.json['vpanel_id']\n panel_id = request.json['panel_id']\n add_all_regions_to_vp(s, panel_id, gene_id, vpanel_id)\n return jsonify({\"genes\": [gene_id, ]})",
"def create_region(self, region_ref):\n raise exception.NotImplemented() # pragma: no cover",
"def add_new_region(self, image_name: str, region_text: str, region_position: RegionPosition, region_type: str):\n pass",
"def set_region(sender, instance, *args, **kwargs):\n if instance.geocity and not instance.georegion:\n instance.georegion = instance.geocity.region",
"def add_region_feature(data):\n\n data.loc[:, 'region'] = data.loc[:, 'district'].apply(\n lambda x: mapping.SOFIA_NEIGHBOURHOOD_TO_REGION_MAPPING[x]\n )\n\n return data",
"def remove_panel_regions():\n if type(request.json['ids']) is list:\n version_ids = request.json['ids']\n else:\n version_ids = request.json['ids'].replace('[', '').replace(']', '').split(',')\n # TODO does this happen?\n if type(version_ids) is str:\n version_ids = version_ids.split(',')\n panel_id = request.json['panel_id']\n\n for i in version_ids:\n remove_version_from_panel(s, int(panel_id), int(i))\n\n panel = get_panel_by_id(s, panel_id) # check if there are still regions in the panel\n length = len(list(panel))\n return jsonify(length)",
"def add_all_regions():\n gene_id = request.json['gene_id']\n panel_id = request.json['panel_id']\n tx_id = request.json['tx_id']\n gene_name = request.json['gene_name']\n project_id = get_project_id_by_panel_id(s, panel_id)\n\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n add_genes_to_panel_with_ext(s, panel_id, gene_id)\n return jsonify({\"genes\": [gene_id, ]})",
"def putregion(self, *args, **kwargs):\n return _image.image_putregion(self, *args, **kwargs)",
"def create(self):\n assert self.name != \"Settings\", \"Cannot create a new mesh region with this Name\"\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n self.meshmodule.AssignMeshRegion(args)\n return True",
"def createSubdivRegion(*args, **kwargs)->bool:\n pass",
"def add_region_of_interest(self, event: str):\n\n mesh = self.comm.lasif.find_event_mesh(event)\n m = UnstructuredMesh.from_h5(mesh)\n mesh_layers = np.sort(np.unique(m.elemental_fields[\"layer\"]))[::-1].astype(int)\n layers = m.elemental_fields[\"layer\"]\n o_core_idx = layers[np.where(m.elemental_fields[\"fluid\"] == 1)[0][0]]\n o_core_idx = np.where(mesh_layers == o_core_idx)[0][0]\n correct_layers = mesh_layers[o_core_idx:]\n roi = np.zeros_like(layers)\n for layer in correct_layers:\n roi = np.logical_or(roi, layers == layer)\n\n m.attach_field(\"ROI\", roi)\n m.write_h5(mesh)",
"def add_region(self, region):\n self._country_code_whitelist.append(\n phonenumbers.country_code_for_valid_region(region))",
"def __init__(self, region):\r\n self.region = region",
"def add_vp():\n vp_name = request.json['vp_name']\n panel_id = request.json['panel_id']\n vp_id = create_virtualpanel_query(s, vp_name, panel_id)\n if vp_id != -1:\n lock_panel(s, current_user.id, panel_id)\n return jsonify(vp_id)",
"def sync_region(self, region_id):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})[\"res\"]\n objects = editor.getSelected()\n if not objects:\n objects = bpy.data.objects\n for obj in objects:\n obj_uuid = str(self.get_uuid(obj))\n if obj_uuid:\n if obj_uuid in scenedata:\n self.import_group(obj_uuid, scenedata[obj_uuid], 10)",
"def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)",
"def river_region(rr_id):\n r = RiverRegionRenderer(request, rr_id, None)\n return r.render()",
"def add_region(self, address, data):\n region = HexFileRegion(address, data)\n self.regions.append(region)\n self.check()",
"def _import_insee_region(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'reg2011.csv'))\n region_obj = self.pool.get('insee.region')\n with open(filepath, 'rb') as regfile:\n reader = csv.DictReader(regfile)\n for row in reader:\n values = {\n 'region': row['REGION'],\n 'cheflieu': row['CHEFLIEU'],\n 'tncc': row['TNCC'],\n 'ncc': row['NCC'],\n 'nccenr': row['NCCENR'],\n }\n region_obj.create(cr, uid, values, context=context)",
"def river_region(rr_id):\n try:\n r = RiverRegionRenderer(request, rr_id, None)\n except NotFoundError:\n return abort(404)\n return r.render()",
"def AddRegions(self, **kwargs):\n # Addregions use pixel coordinates. listRegions and SaveRegions use RA and Dec.\n n_objs = 0\n objs = []\n # default shape is circle\n if not 'shape' in kwargs:\n kwargs['shape'] = ['circle']\n for k in kwargs.keys():\n n_objs = max(n_objs, len(kwargs[k]))\n for j in range(n_objs):\n temp = {}\n for k in kwargs.keys():\n try:\n temp[k] = kwargs[k][j]\n except IndexError:\n if k == 'shape': \n temp[k] = 'circle'\n objs.append(temp)\n self.all_objs = json.dumps(objs)\n command = \"JS9.AddRegions({objs}, {{display:'{wid}{suffix}'}})\".format(objs=self.all_objs, wid=self.wid, suffix=self.suffix)\n get_ipython().run_cell_magic('javascript', '', command)",
"def region(self, box):\n is_indexbox(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n return self",
"def create_RegionCnv(cur, tname='RegionCnv'):\n # debug: there must be a comma at the end\n create = '''create table %s\n (Chip, Chr, Start integer, End integer, State, Cn real, Im real,\n primary key (Chip, Chr, Start, End))\n ''' % tname\n print create\n cur.execute(create)",
"def helper_test_create_vessel_duplicated(self):\n url = reverse('vessel-create')\n payload = json.dumps({\n \"code\": \"MV101\"\n })\n response = self.post(url, payload)\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)",
"def add_new_region(self, image_name: str, region_text: str, region_position: RegionPosition, region_type: str):\n image_annotation = self._images[image_name] # type: ImageAnnotation\n regions_array = getattr(image_annotation, REGIONS_MAP[region_type])\n regions_array.append(Region(text=region_text, position=region_position))",
"def create(self, validated_data):\n\n region = CourierRegions.objects.create(\n courier_id=Courier.objects.get(courier_id=validated_data['courier_id']),\n region=validated_data['region']\n )\n return region"
]
| [
"0.6851175",
"0.58318996",
"0.5732002",
"0.57220423",
"0.5642025",
"0.5546407",
"0.54592353",
"0.5327448",
"0.52333945",
"0.5185885",
"0.5143218",
"0.5129821",
"0.5091915",
"0.49749365",
"0.4962693",
"0.49568614",
"0.49368486",
"0.49333954",
"0.49035862",
"0.48829305",
"0.48709947",
"0.48601332",
"0.4848405",
"0.48395458",
"0.48337594",
"0.4830667",
"0.4798056",
"0.47759965",
"0.47751606",
"0.4754266"
]
| 0.7340577 | 0 |
Method to add all regions for a gene to the versions table | def add_all_regions():
gene_id = request.json['gene_id']
panel_id = request.json['panel_id']
tx_id = request.json['tx_id']
gene_name = request.json['gene_name']
project_id = get_project_id_by_panel_id(s, panel_id)
add_preftxs_to_panel(s, project_id, [{"gene": gene_name, "tx_id": tx_id}, ])
add_genes_to_panel_with_ext(s, panel_id, gene_id)
return jsonify({"genes": [gene_id, ]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_all_regions_vp():\n gene_id = request.json['gene_id']\n vpanel_id = request.json['vpanel_id']\n panel_id = request.json['panel_id']\n add_all_regions_to_vp(s, panel_id, gene_id, vpanel_id)\n return jsonify({\"genes\": [gene_id, ]})",
"def add_panel_regions():\n version_ids = request.json['id_ext']\n panel_id = request.json['panel_id']\n project_id = request.json['project_id']\n gene_name = request.json['gene_name']\n\n try:\n tx_id = request.json['pref_tx_id']\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n except KeyError:\n pass\n\n for i in version_ids:\n if i[\"ext_5\"] == 0:\n ext_5 = None\n else:\n ext_5 = i[\"ext_5\"]\n\n if i[\"ext_3\"] == 0:\n ext_3 = None\n else:\n ext_3 = i[\"ext_3\"]\n add_region_to_panel(s, i[\"id\"], panel_id, ext_3=ext_3, ext_5=ext_5)\n s.commit()\n return jsonify(\"complete\")",
"def vcf_add_gene(vrecs,gi_by_chr_map):\n def _overlap(gi,vr):\n return gi['end_i'] >= vr.start and gi['start_i'] <= vr.end\n def _genes_for_vr(vr,gi_by_chr_map):\n cm = gi_by_chr_map[vr.CHROM]\n genes = [ gi['gene'] for gi in cm if _overlap(gi,vr) ] \n return genes\n for vr in vrecs:\n vr.genes = _genes_for_vr(vr,gi_by_chr_map)",
"def regions(self, regions):\n self._regions = regions",
"def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]",
"def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)",
"def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # Seasonal 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # Seasonal 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # Seasonal 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order',\n 'ssn_AR_order', 'ssn_I_order', 'ssn_MA_order']",
"def ensembl_genes(species,log=0):\n \n t1=time.time()\n #open ace genes by- ensembl region\n path=os.environ['PYDATA']+'/'+species+'/aceview/'+species+'_genes_by_ensembl_region.bkdb'\n aceGeneDB=bsddb.btopen(path,'r') \n \n path=os.environ['PYDATA']+\"/\"+species+\"/ensembl/\"+species+'_genes_by_region.bkdb'\n otherGenesDB=bsddb.btopen(path,'r')\n '''create ensembl genes by ace gene'''\n path=os.environ['PYDATA']+'/'+species+'/aceview/'+species+'_ensembl_genes_by_gene.bkdb'\n otherGenesByGeneDB=bsddb.btopen(path,'w')\n geneNb=0\n\n for region in aceGeneDB.keys():\n if region not in otherGenesDB.keys():\n print 'region %s not in ensembl/%s_genes_by_region.bkdb'%(region,species)\n else: \n print 'processing region ',region\n aceGenes=cPickle.loads(aceGeneDB[region])\n otherGenes=cPickle.loads(otherGenesDB[region])\n #construct dictionnary of ace genes containing ensembl genes\n ace={}\n '''process each ensembl gene in the current region'''\n for ensemblGeneIndex in range(len(otherGenes.IDs)): \n '''find the index of the ace genes that contains either the start or the end of the current ensembl gene'''\n aceGeneIndexes=[]\n indexes=bintools.owningStructure(otherGenes.starts[ensemblGeneIndex],otherGenes.strands[ensemblGeneIndex],aceGenes.starts,aceGenes.ends,aceGenes.strands,1)\n aceGeneIndexes.extend(indexes) \n indexes=bintools.owningStructure(otherGenes.ends[ensemblGeneIndex],otherGenes.strands[ensemblGeneIndex],aceGenes.starts,aceGenes.ends,aceGenes.strands,1)\n aceGeneIndexes.extend(indexes) \n '''register the current ensembl gene index in the ace dictionnary'''\n for aceGeneIndex in aceGeneIndexes: \n try:\n ace[aceGenes.IDs[aceGeneIndex]].append(ensemblGeneIndex)\n except: \n ace[aceGenes.IDs[aceGeneIndex]]=[ensemblGeneIndex] \n \n '''process each ace gene in the current region''' \n for aceGeneIndex in range(len(aceGenes.IDs)): \n '''find the index of the ensembl genes that contains either the start or the end of the current ace gene'''\n ensemblGeneIndexes=[]\n indexes=bintools.owningStructure(aceGenes.starts[aceGeneIndex],aceGenes.strands[aceGeneIndex],otherGenes.starts,otherGenes.ends,otherGenes.strands,1) \n ensemblGeneIndexes.extend(indexes)\n indexes=bintools.owningStructure(aceGenes.ends[aceGeneIndex],aceGenes.strands[aceGeneIndex],otherGenes.starts,otherGenes.ends,otherGenes.strands,1) \n ensemblGeneIndexes.extend(indexes)\n '''add the ensembl gene indexes found in the previous for loop processing ensembl genes'''\n try:\n ensemblGeneIndexes.extend(ace[aceGenes.IDs[aceGeneIndex]])\n except:\n pass \n ensemblGeneIndexes=set(ensemblGeneIndexes)\n '''recover the corresponding ensembl gene IDs'''\n ensemblGeneIDs=[] \n for i in range(len(ensemblGeneIndexes)):\n ensemblGeneIDs.append(otherGenes.IDs[ensemblGeneIndexes.pop()]) \n '''register the current ace gene'''\n ensemblGeneIDs.sort()\n otherGenesByGeneDB[aceGenes.IDs[aceGeneIndex]]=cPickle.dumps(ensemblGeneIDs,protocol=-1)\n geneNb=geneNb+1\n \n otherGenesByGeneDB.close() \n otherGenesDB.close()\n aceGeneDB.close()\n t2=time.time()\n if log!=0:\n log.write('%s\\t%s\\t\\t17\\t%s_ensembl_genes_by_gene.bkdb\\taceview\\t%u\\t%.2f\\n'%(date.today(),species,species,geneNb,t2-t1))",
"def add_liftover_file(in_file, regions):\n with open(in_file) as in_handle:\n for line in in_handle:\n if not line.startswith(\"#\"):\n chrom, start, end = line.strip().split()\n key = (chrom.replace(\"chr\", \"\"), int(start), int(end))\n regions[key] += 1\n return regions",
"def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)",
"def add_sr_ebi_brc4_names(self,\n seq_region_file: str,\n seq_region_map: dict,\n attrib_type_map: dict,\n work_dir: str,\n unversion: bool = False):\n os.makedirs(work_dir, exist_ok=True)\n\n # return if there's nothing to add\n if not seq_region_file: return\n\n # technical / optimization. get atttib_type_id(s) for \"(EBI|BRC4)_seq_region_name\"\n tagged_sr_name_attrib_id = {\n tag : self.id_from_map_or_die(f\"{tag}_seq_region_name\", attrib_type_map, \"attrib_type_map\") for tag in [\"EBI\", \"BRC4\"]\n }\n\n # load BRC4/EBI name from seq_region file\n brc4_ebi_name_attrib_trios = [] # [ (seq_region_id, attrib_id, value)... ] list of trios for inserting into db \n with open(seq_region_file) as in_file:\n seq_regions = list(json.load(in_file))\n for seq_region in seq_regions:\n # get seq_region_id (perhaps, by using unversioned name)\n seq_region_name, seq_region_id, unversioned_name = \\\n self.name_and_id_from_seq_region_item(seq_region, seq_region_map, try_unversion = unversion)\n # append attribs to the brc4_ebi_name_attrib_trios list\n for tag in [\"BRC4\", \"EBI\"]:\n attrib_name = f\"{tag}_seq_region_name\"\n attrib_id = tagged_sr_name_attrib_id[tag]\n value = seq_region.get(attrib_name, seq_region_name)\n brc4_ebi_name_attrib_trios.append( (seq_region_id, attrib_id, self.quote_or_null(value)) )\n\n # run insertion SQL\n self.insert_to_db(\n brc4_ebi_name_attrib_trios,\n \"seq_region_attrib\",\n [\"seq_region_id\", \"attrib_type_id\", \"value\"],\n self.pjc(work_dir, \"brc4_ebi_seq_region_synonyms\"),\n ignore = True\n )",
"def _add_transform_genes(self):\n self._alleles.add(pu.make_int_gene(1, 1, 10, 1)) # 'AR' backshift (p)\n self._alleles.add(pu.make_choice_gene(1, [0, 1, 2])) # 'I' backshift (d) \n self._alleles.add(pu.make_choice_gene(1, [1, 2, 3])) # 'MA' backshift (q)\n self._loci_list += ['AR_order', 'I_order', 'MA_order']",
"def sync_region(self, region_id):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})[\"res\"]\n objects = editor.getSelected()\n if not objects:\n objects = bpy.data.objects\n for obj in objects:\n obj_uuid = str(self.get_uuid(obj))\n if obj_uuid:\n if obj_uuid in scenedata:\n self.import_group(obj_uuid, scenedata[obj_uuid], 10)",
"def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)",
"def get_regions_from_genes(gene_list, gtf_pr):\n\n regions = []\n\n bad_genes = []\n for gene in gene_list:\n\n gene_pr = gtf_pr[gtf_pr.gene_name == gene]\n\n if gene_pr.empty:\n bad_genes.append(gene)\n continue\n\n chrom = gene_pr.df.Chromosome.to_list()[0]\n start = gene_pr.df.Start.min() - 100\n end = gene_pr.df.End.max() + 100\n\n regions.append(\"{}:{}-{}\".format(chrom, start, end))\n\n if bad_genes:\n print(\n \"\\n!!ERROR!! At least one gene from the list was not found in the gtf file. Please make sure the gene symbol provided is correct and in the gtf file. If the symbol is a correct symbol, check for alternative gene symbols in the gtf file.\"\n )\n print(\"Bad Gene(s):\\n\\t- {}\\n\".format(\"\\n\\t- \".join(bad_genes)))\n sys.exit(1)\n\n return regions",
"def AddRegions(self, **kwargs):\n # Addregions use pixel coordinates. listRegions and SaveRegions use RA and Dec.\n n_objs = 0\n objs = []\n # default shape is circle\n if not 'shape' in kwargs:\n kwargs['shape'] = ['circle']\n for k in kwargs.keys():\n n_objs = max(n_objs, len(kwargs[k]))\n for j in range(n_objs):\n temp = {}\n for k in kwargs.keys():\n try:\n temp[k] = kwargs[k][j]\n except IndexError:\n if k == 'shape': \n temp[k] = 'circle'\n objs.append(temp)\n self.all_objs = json.dumps(objs)\n command = \"JS9.AddRegions({objs}, {{display:'{wid}{suffix}'}})\".format(objs=self.all_objs, wid=self.wid, suffix=self.suffix)\n get_ipython().run_cell_magic('javascript', '', command)",
"def _import_insee_region(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'reg2011.csv'))\n region_obj = self.pool.get('insee.region')\n with open(filepath, 'rb') as regfile:\n reader = csv.DictReader(regfile)\n for row in reader:\n values = {\n 'region': row['REGION'],\n 'cheflieu': row['CHEFLIEU'],\n 'tncc': row['TNCC'],\n 'ncc': row['NCC'],\n 'nccenr': row['NCCENR'],\n }\n region_obj.create(cr, uid, values, context=context)",
"def write_regions(pathfolder, key_firms, regions, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['nif'] = key_firms\n db['regions'] = regions\n db['methodvalues'] = methodvalues\n db.close()",
"def _add_transform_genes(self):\n pass",
"def __init__(self):\n self.regions = []",
"def add(cls, name, supply_centers, excel_table):\r\n # Make and register a lovely new variant\r\n variant = Variant(name, supply_centers)\r\n cls.registry.append(variant)\r\n\r\n # Split string into a list of lists\r\n excel_rows = [line.split('\\t') for line in excel_table.split('\\n')]\r\n\r\n # Remove leading blank rows\r\n while excel_rows[0] == ['']:\r\n excel_rows.pop(0)\r\n \r\n # Remove and store header in CSV format\r\n if excel_rows[0][0] == '':\r\n excel_rows[0].pop(0) # Remove leading tab\r\n variant.header = ','.join(excel_rows.pop(0)).strip()\r\n\r\n # Iterate through pre-computed country data\r\n for row in excel_rows:\r\n if row == ['']:\r\n continue\r\n \r\n # Remove \"Sample \" prefix if William still has it in the country name for some reason\r\n row[0] = row[0][len(\"Sample \"):] if row[0].startswith(\"Sample \") else row[0]\r\n\r\n # Store pre-computed information as CSV string in a `countries` dict with the country name as the key\r\n variant.countries[row.pop(0)] = ','.join(row)",
"def create_genes_table(self, fn_genes):\n log.info(\"Creating table with information about the genes ...\")\n gene_record = GeneParser.GeneRecord()\n names = gene_record.fields_names\n types = gene_record.fields_types\n if len(names) != len(types):\n raise ValueError, \"The number of fields is different from the \"\\\n \"number of types\"\n self.create_table(self.GenesTable,names,types)\n fh = open(fn_genes, \"r\")\n log.debug(\"Reading file %s\",fn_genes)\n reader = csv.reader(fh, delimiter=\"\\t\")\n reader.next() # discard first line\n data = []\n for row in reader:\n if row[0] == \"\":\n continue\n g = GeneParser.GeneRecord()\n g.read(reader, row)\n data.append(g.get_values())\n self.store_data(self.GenesTable,data)",
"def add_karyotype_bands(self,\n seq_region_file: str,\n seq_region_map: dict,\n attrib_type_map: dict,\n work_dir: str,\n unversion: bool = False,\n karyotype_bands_property = \"karyotype_bands\") -> list: # [ (seq_region_name, seq_region_id, unversioned_name) ]\n os.makedirs(work_dir, exist_ok=True)\n\n # return if there's nothing to add\n if not seq_region_file: return\n\n # resulting list of seq regions with bands\n seq_regions_with_karyotype_bands = [] # [ ( seq_region_name, seq_region_id, unversioned_name )... ] \n\n # load BRC4/EBI name from seq_region file\n band_tuples = [] # [ (seq_region_id, seq_region_start, seq_region_end, band|\"NULL\", stain|\"NULL\")... ] list of tuples for inserting into db \n with open(seq_region_file) as in_file:\n seq_regions = list(json.load(in_file))\n for seq_region in filter(lambda sr: sr.get(karyotype_bands_property, False), seq_regions):\n # iterate through all seq_regions having non-empty \"karyotype_bands\" \n\n # get seq_region_id (perhaps, by using unversioned name)\n seq_region_name, seq_region_id, unversioned_name = \\\n self.name_and_id_from_seq_region_item(seq_region, seq_region_map, try_unversion = unversion)\n\n #append trio to the resulting list \n seq_regions_with_karyotype_bands.append( (seq_region_name, seq_region_id, unversioned_name) )\n\n # append bands to the band_tuples list\n for band in seq_region[ karyotype_bands_property ]:\n # print(\"BAND: \" + str(band), file = sys.stderr)\n # coords\n seq_region_start = band[\"start\"]\n seq_region_end = band[\"end\"]\n # band_name and stain\n band_name = band.get(\"name\", None)\n stain = band.get(\"stain\", None)\n # special cases for stain\n structure = band.get(\"structure\", None)\n if structure == \"telomere\":\n stain = \"TEL\"\n elif structure == \"centromere\":\n stain = \"ACEN\"\n\n # append tuple\n band_tuples.append( (seq_region_id, seq_region_start, seq_region_end, self.quote_or_null(band_name), self.quote_or_null(stain)) )\n\n # run insertion SQL\n self.insert_to_db(\n band_tuples,\n \"karyotype\",\n [\"seq_region_id\", \"seq_region_start\", \"seq_region_end\", \"band\", \"stain\"],\n self.pjc(work_dir, \"karyotype_insertion\"),\n ignore = True\n )\n\n # return resulting list of regions with bands trios\n return seq_regions_with_karyotype_bands",
"def update (self) :\n for met in self.gene :\n met(self)",
"def _discoverNewZincRegions(self):\n newRegionCount = 0\n zincChildRef = self._zincRegion.getFirstChild()\n while zincChildRef.isValid():\n childName = zincChildRef.getName()\n neonChild = self._findChildByName(childName)\n if not neonChild:\n neonChild = NeonRegion(childName, zincChildRef, self)\n neonChild._ancestorModelSourceCreated = True\n self._children.append(neonChild)\n newRegionCount += (1 + neonChild._discoverNewZincRegions())\n zincChildRef = zincChildRef.getNextSibling()\n return newRegionCount",
"def add_gene_ids(self, genes_list):\n orig_num_genes = len(self.genes)\n\n for g in list(set(genes_list)):\n if not self.genes.has_id(g):\n new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)\n if self.model:\n self.model.genes.append(new_gene)\n else:\n self.genes.append(new_gene)\n\n log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes))",
"def initialize_region(self):\n self.new_region_name = \"\"\n self.map.regions.create_new_region()",
"def create_panel_custom_regions():\n panel_id = request.json[\"panel_id\"]\n chrom = request.json[\"chrom\"]\n start = request.json[\"start\"]\n end = request.json[\"end\"]\n name = request.json[\"name\"]\n regions = select_region_by_location(s, chrom, start, end) # if region already exists, return current entry\n if regions:\n for i in regions:\n add_region_to_panel(s, i.id, panel_id)\n s.commit()\n continue\n else:\n create_custom_region(s, panel_id, chrom, start, end, name)\n\n return jsonify(\"complete\")",
"def _informRegionChange(self, treeChange):\n rootRegion = self\n while rootRegion._parent:\n rootRegion = rootRegion._parent\n for callback in rootRegion._regionChangeCallbacks:\n callback(self, treeChange)",
"def generateAllRegionVectors():\n\tregionVectors = []\n\tfor i in range(NUM_REGION_VECTORS):\n\t\tregionVectors.append('{0:04x}'.format(i))\n\treturn regionVectors"
]
| [
"0.6766564",
"0.6378839",
"0.5810066",
"0.5718972",
"0.557586",
"0.54156053",
"0.5386809",
"0.53579724",
"0.5317539",
"0.5312527",
"0.5252136",
"0.52013516",
"0.51998466",
"0.51921564",
"0.51599437",
"0.51550764",
"0.5132998",
"0.5104016",
"0.5098529",
"0.50962406",
"0.5076107",
"0.50634766",
"0.5029969",
"0.5025251",
"0.50160724",
"0.5011248",
"0.49889308",
"0.49489906",
"0.4938177",
"0.49292248"
]
| 0.66544604 | 1 |
Method to add selected regions and relevant extensions to a panel. When regions for a gene are added to a panel the preferred tx is also added/updated. The regions are sent within a dictionary containing the region ID and both extensions (zero if no ext to be added). | def add_panel_regions():
version_ids = request.json['id_ext']
panel_id = request.json['panel_id']
project_id = request.json['project_id']
gene_name = request.json['gene_name']
try:
tx_id = request.json['pref_tx_id']
add_preftxs_to_panel(s, project_id, [{"gene": gene_name, "tx_id": tx_id}, ])
except KeyError:
pass
for i in version_ids:
if i["ext_5"] == 0:
ext_5 = None
else:
ext_5 = i["ext_5"]
if i["ext_3"] == 0:
ext_3 = None
else:
ext_3 = i["ext_3"]
add_region_to_panel(s, i["id"], panel_id, ext_3=ext_3, ext_5=ext_5)
s.commit()
return jsonify("complete") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_all_regions():\n gene_id = request.json['gene_id']\n panel_id = request.json['panel_id']\n tx_id = request.json['tx_id']\n gene_name = request.json['gene_name']\n project_id = get_project_id_by_panel_id(s, panel_id)\n\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n add_genes_to_panel_with_ext(s, panel_id, gene_id)\n return jsonify({\"genes\": [gene_id, ]})",
"def create_panel_custom_regions():\n panel_id = request.json[\"panel_id\"]\n chrom = request.json[\"chrom\"]\n start = request.json[\"start\"]\n end = request.json[\"end\"]\n name = request.json[\"name\"]\n regions = select_region_by_location(s, chrom, start, end) # if region already exists, return current entry\n if regions:\n for i in regions:\n add_region_to_panel(s, i.id, panel_id)\n s.commit()\n continue\n else:\n create_custom_region(s, panel_id, chrom, start, end, name)\n\n return jsonify(\"complete\")",
"def add_all_regions_vp():\n gene_id = request.json['gene_id']\n vpanel_id = request.json['vpanel_id']\n panel_id = request.json['panel_id']\n add_all_regions_to_vp(s, panel_id, gene_id, vpanel_id)\n return jsonify({\"genes\": [gene_id, ]})",
"def AddRegions(self, **kwargs):\n # Addregions use pixel coordinates. listRegions and SaveRegions use RA and Dec.\n n_objs = 0\n objs = []\n # default shape is circle\n if not 'shape' in kwargs:\n kwargs['shape'] = ['circle']\n for k in kwargs.keys():\n n_objs = max(n_objs, len(kwargs[k]))\n for j in range(n_objs):\n temp = {}\n for k in kwargs.keys():\n try:\n temp[k] = kwargs[k][j]\n except IndexError:\n if k == 'shape': \n temp[k] = 'circle'\n objs.append(temp)\n self.all_objs = json.dumps(objs)\n command = \"JS9.AddRegions({objs}, {{display:'{wid}{suffix}'}})\".format(objs=self.all_objs, wid=self.wid, suffix=self.suffix)\n get_ipython().run_cell_magic('javascript', '', command)",
"def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)",
"def push_regions(self, regions: [MouseRegion]):\n raise NotImplementedError",
"def add_panel(self, panel):\n assert panel.PANEL_ID not in self.panels\n assert not self.tools, \"tools must be added after panels\"\n self.panels[panel.PANEL_ID] = panel\n panel.register_panel(self)",
"def add_new_region(self, image_name: str, region_text: str, region_position: RegionPosition, region_type: str):\n pass",
"def add_region(self, position):\n region = self.region_selector(position)\n self.regions[id(region)] = region",
"def post(self, request, *args, **kwargs):\n org = request.org\n\n # Load data and validate that it is in the correct format.\n self.raw_data = request.POST.get('data', \"\").strip() or None\n try:\n data = json.loads(self.raw_data)\n except TypeError:\n return self.error(\n \"No data was provided in the `data` parameter.\")\n except ValueError:\n return self.error(\n \"Data must be valid JSON.\")\n if not isinstance(data, dict):\n return self.error(\n \"Data must be a dict that maps panel id to \"\n \"(parent id, boundary id).\")\n if not all(isinstance(v, list) and len(v) == 2 for v in data.values()):\n return self.error(\n \"All data values must be of the format \"\n \"(parent id, boundary id).\")\n\n # Grab all of the org's regions and boundaries at once.\n regions = {str(r.pk): r for r in Region.get_all(org)}\n boundaries = {str(b.pk): b for b in Boundary.objects.by_org(org)}\n\n # Check that the user is updating exactly the regions from this\n # org, and that specified parents and boundaries are valid for\n # this org.\n valid_regions = set(regions.keys())\n valid_boundaries = set(boundaries.keys())\n sent_regions = set(str(i) for i in data.keys())\n sent_parents = set(str(i[0]) for i in data.values() if i[0] is not None)\n sent_boundaries = set(str(i[1]) for i in data.values() if i[1] is not None)\n if sent_regions != valid_regions:\n return self.error(\n \"Data must map panel id to parent id for every panel \"\n \"in this org.\")\n if not sent_parents.issubset(valid_regions):\n return self.error(\n \"Panel parent must be a panel from the same org, \"\n \"or null.\")\n if not sent_boundaries.issubset(valid_boundaries):\n return self.error(\n \"Panel boundary must be a boundary from the same \"\n \"org, or null.\")\n\n # Re-set parent and boundary values for each region,\n # then rebuild the mptt tree.\n with Region.objects.disable_mptt_updates():\n for region_id, (parent_id, boundary_id) in data.items():\n region = regions.get(str(region_id))\n parent = regions.get(str(parent_id)) if parent_id else None\n boundary = boundaries.get(str(boundary_id)) if boundary_id else None\n\n changed = False\n if region.boundary != boundary:\n changed = True\n self.log_change(\"boundary\", region, region.boundary, boundary)\n region.boundary = boundary\n if region.parent != parent:\n changed = True\n self.log_change(\"parent\", region, region.parent, parent)\n region.parent = parent\n\n if changed:\n region.save()\n Region.objects.rebuild()\n\n return self.success(\"{} panels have been updated.\".format(request.org))",
"def remove_panel_regions():\n if type(request.json['ids']) is list:\n version_ids = request.json['ids']\n else:\n version_ids = request.json['ids'].replace('[', '').replace(']', '').split(',')\n # TODO does this happen?\n if type(version_ids) is str:\n version_ids = version_ids.split(',')\n panel_id = request.json['panel_id']\n\n for i in version_ids:\n remove_version_from_panel(s, int(panel_id), int(i))\n\n panel = get_panel_by_id(s, panel_id) # check if there are still regions in the panel\n length = len(list(panel))\n return jsonify(length)",
"def regions(self, regions):\n self._regions = regions",
"def add_sections(base_dict, new_tools):\n base_tools_labels = {}\n # load base tools list in dict\n for tool in base_dict['tools']:\n if 'tool_panel_section_label' in tool.keys():\n if tool['name'] not in base_tools_labels.keys():\n base_tools_labels[tool['name']] = [tool['tool_panel_section_label']]\n else:\n base_tools_labels[tool['name']].append(tool['tool_panel_section_label'])\n\n # iterate over the new tools and update sections in place\n for tool in new_tools['tools']:\n if tool['name'] in base_tools_labels.keys():\n if 'tool_panel_section_label' in tool.keys():\n # new tool already has a label\n if tool['tool_panel_section_label'] not in base_tools_labels[tool['name']]:\n # the label is not in the list of possible ones.\n # Update\n tool['tool_panel_section_label'] = base_tools_labels[tool['name']][0]\n else:\n # assign the first of the possible labels\n tool['tool_panel_section_label'] = base_tools_labels[tool['name']][0]\n\n return new_tools",
"def panelConfiguration(*args, addPanel: Union[List[bool, AnyStr, AnyStr, AnyStr, AnyStr],\n List[List[bool, AnyStr, AnyStr, AnyStr, AnyStr]]]=None, configString:\n Union[AnyStr, bool]=\"\", createStrings: bool=True, defaultImage:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", editStrings:\n bool=True, exists: bool=True, image: Union[AnyStr, bool]=\"\",\n isFixedState: bool=True, label: Union[AnyStr, bool]=\"\", labelStrings:\n bool=True, numberOfPanels: bool=True, removeAllPanels: bool=True,\n removeLastPanel: bool=True, replaceCreateString: List[int, AnyStr]=None,\n replaceEditString: List[int, AnyStr]=None, replaceFixedState: List[int,\n bool]=None, replaceLabel: List[int, AnyStr]=None, replacePanel: List[int,\n bool, AnyStr, AnyStr, AnyStr, AnyStr]=None, replaceTypeString: List[int,\n AnyStr]=None, sceneConfig: bool=True, typeStrings: bool=True,\n useTemplate: AnyStr=\"\", userCreated: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def update_ext():\n panel_id = request.json['panel_id']\n region_id = request.json['region_id']\n e3 = request.json[\"ext_3\"]\n e5 = request.json[\"ext_5\"]\n\n current_version = get_current_version(s, panel_id)\n version = get_version_row(s, panel_id, region_id, current_version)\n version_id = version[0]\n intro = version[1]\n\n if e3 is not None:\n ext_3 = e3\n else:\n ext_3 = version[3]\n if e5 is not None:\n ext_5 = e5\n else:\n ext_5 = version[4]\n if int(intro) > int(current_version):\n update_ext_query(s, version_id, ext_3=ext_3, ext_5=ext_5)\n else:\n update_ext_query(s, version_id, panel_id=panel_id, ext_3=ext_3, ext_5=ext_5, current_version=current_version,\n region_id=region_id)\n\n return jsonify(\"complete\")",
"def handle_panel_update(self, section_dict):",
"def panel_genes(context, panel):\n LOG.info(\"Running scout export panel\")\n adapter = context.obj['adapter']\n \n if not panel:\n LOG.warning(\"Please provide at least one gene panel\")\n context.abort()\n\n LOG.info(\"Exporting panels: {}\".format(', '.join(panel)))\n for line in export_panels(adapter, panel):\n click.echo(line)",
"def add_data(self, skins, method='common', colours=8):\n if not isinstance(skins, list):\n skins = [skins]\n for skin in skins:\n if method == 'common':\n rgb = ImageColour.get_most_common(skin.get_file_path(self.skin_directory, 'loading'), colours)\n else:\n rgb = ImageColour.get_average(skin.get_file_path(self.skin_directory, 'loading'))\n h, radius, _ = rgb_to_hsv(rgb.r, rgb.g, rgb.b)\n angle = h * 2 * np.pi\n img = Image.open(skin.get_file_path(self.skin_directory, 'tiles'))\n ab = AnnotationBbox(OffsetImage(img, zoom=0.13), (angle, radius), frameon=False)\n self.ax.add_artist(ab)\n self.figure = plt.gcf()",
"def compute_regions(bigwigs, bedfiles, plot):\n bedfiles_split = bedfiles.split(',')\n bigwigs_split = bigwigs.split(',')\n\n stack = regions.regionsTwolists(bigwigs_split, bedfiles_split)\n plotarray = regions.create_plotarray(stack,bigwigs_split)\n \n if plot != None:\n regions.plot(plotarray, col_names=bedfiles_split, row_names=bigwigs_split, output=plot)\n return plotarray",
"def register(check_environ=False):\n from mundi.loader import register\n from mundi.types.region import REGION_PLUGINS\n\n if check_environ:\n import os\n\n if os.environ.get(\"MUNDI_DEMOGRAPHY\", \"on\").lower() in (\"off\", \"false\", \"no\"):\n return\n\n for k, v in FUNCTIONS.items():\n register(k, v)\n\n REGION_PLUGINS[\"population\"] = lambda x: population(x.id)\n REGION_PLUGINS[\"age_distribution\"] = lambda x: age_distribution(x.id)\n REGION_PLUGINS[\"age_pyramid\"] = lambda x: age_pyramid(x.id)",
"def Te_ne_P_panel(**kwargs):\n\n GR = glo.global_results()\n gal_indices = np.arange(GR.N_gal)\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n for gal_index in gal_indices:\n fig = plt.figure(figsize=(15,7),constrained_layout=False)\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n cell_data = gal_ob.cell_data.get_dataframe()\n\n gs1 = fig.add_gridspec(nrows=1, ncols=3, wspace=0.0, hspace=0.0)\n\n ax = fig.add_subplot(gs1[0,0])\n h = np.histogram(np.log10(cell_data.Te_mw),bins=100)\n bin_size = (h[1][1]-h[1][0])/2\n ax.fill_between(h[1][0:-1] + bin_size,h[0],color='orange', step='pre',alpha=0.6,label='G%i' % gal_index)\n ax.set_xlabel('log mass-weighted T$_{e}$ per cell')\n ax.set_ylabel('Mass fraction')\n\n ax = fig.add_subplot(gs1[0,1])\n h = np.histogram(np.log10(cell_data.ne_mw_grid),bins=100)\n bin_size = (h[1][1]-h[1][0])/2\n ax.fill_between(h[1][0:-1] + bin_size,h[0],color='orange', step='pre',alpha=0.6,label='G%i' % gal_index)\n ax.set_xlabel('log mass-weighted n$_{e}$ per cell')\n ax.set_ylabel('Mass fraction')\n\n ax = fig.add_subplot(gs1[0,2])\n h = np.histogram(np.log10(cell_data.P_HII),bins=100)\n bin_size = (h[1][1]-h[1][0])/2\n ax.fill_between(h[1][0:-1] + bin_size,h[0],color='orange', step='pre',alpha=0.6,label='G%i' % gal_index)\n ax.set_xlabel('log mass-weighted P$_{HII}$ per cell')\n ax.set_ylabel('Mass fraction')\n\n plt.tight_layout()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'cell_data/pressure/'): os.mkdir(p.d_plot + 'cell_data/pressure/')\n plt.savefig(p.d_plot + 'cell_data/pressure/G%i' % gal_index, dpi=250, facecolor='w')\n plt.close()",
"def load_registration_as_layers() -> None:\n viewer = getattr(widget, \"viewer\").value\n registration_directory = pathlib.Path(\n getattr(widget, \"registration_output_folder\").value\n )\n add_registered_image_layers(\n viewer, registration_directory=registration_directory\n )",
"def addMod(self, newMod):\n newFrame = newMod(self.contentFrame, self.activeConflict)\n self.frameList.append(newFrame)\n newButton = newFrame.makeButton(self.pageSelectFrame, self)\n self.frameBtnList.append(newButton)\n newButton.grid(column=len(self.frameBtnList), row=0, sticky=NSEW)",
"def update_panel_displays(self):\n #\n # The individual gauges\n for name in self.people:\n person = self.people[name]\n panel = self.health_panels[name]\n panel.set_health(person.health)\n panel.set_water(person.water)\n #\n # The vessel of water\n self.vessel.new_value = self.water_container.amount",
"def editPanels(self, panels, **properties):\n\n if type(panels) is not list:\n panels = [panels]\n\n panels.reverse()\n\n panelsXML = []\n for panel in panels:\n panelXML = ET.fromstring(panel)\n\n multiTrackXML = panelXML.find(\"MultiTrackElements\")\n if multiTrackXML is not None:\n self.__saveSetupData(multiTrackDataXMLS=ET.tostring(multiTrackXML), properties=panelXML.attrib)\n\n panelsXML.append({\"properties\":panelXML.attrib, \"multiTrackXML\":multiTrackXML})\n\n if panelsXML:\n if (OSUtils.type == OSUtils.LINUX):\n paths = []\n for panel in panelsXML:\n properties = panel['properties'].copy()\n properties['frame'] = '#'\n mode = Mode(properties.get('show', None), properties.get('sequence', None))\n path = mode.get('[recipeCompedFile]', properties)\n paths.append(path)\n if not self.fileServiceLocal.exists(path):\n raise utils.FlixException(msg=\"Missing File: %s\"%path)\n command = Mode().get(\"[editImageCommand]\")\n log('Edit command %s' % command)\n os.system(command + \" \" + ' '.join(paths))\n else:\n Photoshop().createPhotoshopFileForPanels(panelsXML)\n\n return \"Done\"",
"def _addPanes(self):\n\n self._addPaneMapWindow()\n self._addPaneToolbar(name = 'digitMap')",
"def select_regions(data, region_col, regions, combine_subregions=True):",
"def write_regions(srclist, impath, ext='.reg'):\n fname = impath[:-5] + ext\n with open(fname, 'w') as f:\n f.write('global color=cyan font=\"helvetica 10 normal\" '\n 'select=1 highlite=1 edit=1 move=1 delete=1 '\n 'include=1 fixed=0 source\\n')\n f.write('fk5\\n')\n for src in srclist:\n f.write('ellipse(%f,%f,%.2f\",%.2f\",%.1f) # text={%s}\\n' % (\n src.ra, src.dec, src.maj, src.min, src.pa + 90.0, src.name))",
"def _choose_regions(self, display_regions=False):\n dstl = Load_DSTL()\n if self.class_type == 1:\n # Select regions where there are buildings (with red roofs)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n elif self.class_type == 5:\n train_image, train_mask = dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n else:\n pass\n self.images = {'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))",
"def modify_cube_extend_info(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n # build request\n d = {\n \"ProjectId\": self.config.project_id,\n \"Region\": self.config.region,\n }\n req and d.update(req)\n d = apis.ModifyCubeExtendInfoRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"ModifyCubeExtendInfo\", d, **kwargs)\n return apis.ModifyCubeExtendInfoResponseSchema().loads(resp)"
]
| [
"0.64971155",
"0.64647776",
"0.61680335",
"0.573325",
"0.5657563",
"0.51716113",
"0.49223477",
"0.48954195",
"0.48890984",
"0.4851004",
"0.48300904",
"0.4819779",
"0.47491133",
"0.4738578",
"0.46695167",
"0.46603796",
"0.46551576",
"0.46383616",
"0.46241546",
"0.46217567",
"0.4610524",
"0.45919254",
"0.45901734",
"0.4588969",
"0.45809639",
"0.4562488",
"0.45546937",
"0.4547696",
"0.45394373",
"0.45302278"
]
| 0.7934862 | 0 |
Method to remove regions from a panel. The query associated with this method checks whether the region was live in the panel. If it has been in a live version the region remains in the versions table and the "last" field is populated with the current version. If the region has never been included in a live version of the panel, it is removed from the table. The method returns the number of regions in the panel. If this is zero, locks will be put in place within the wizard as the panel cannot be made live etc. | def remove_panel_regions():
if type(request.json['ids']) is list:
version_ids = request.json['ids']
else:
version_ids = request.json['ids'].replace('[', '').replace(']', '').split(',')
# TODO does this happen?
if type(version_ids) is str:
version_ids = version_ids.split(',')
panel_id = request.json['panel_id']
for i in version_ids:
remove_version_from_panel(s, int(panel_id), int(i))
panel = get_panel_by_id(s, panel_id) # check if there are still regions in the panel
length = len(list(panel))
return jsonify(length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_this_region(self):",
"def cleanup_regions(self, timestamp, bid, ofr):\n regions = []\n\n for region in self.regions:\n if not region.can_delete(timestamp, bid, ofr):\n regions.append(region)\n\n # replace the regions list\n self.regions = regions",
"def add_panel_regions():\n version_ids = request.json['id_ext']\n panel_id = request.json['panel_id']\n project_id = request.json['project_id']\n gene_name = request.json['gene_name']\n\n try:\n tx_id = request.json['pref_tx_id']\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n except KeyError:\n pass\n\n for i in version_ids:\n if i[\"ext_5\"] == 0:\n ext_5 = None\n else:\n ext_5 = i[\"ext_5\"]\n\n if i[\"ext_3\"] == 0:\n ext_3 = None\n else:\n ext_3 = i[\"ext_3\"]\n add_region_to_panel(s, i[\"id\"], panel_id, ext_3=ext_3, ext_5=ext_5)\n s.commit()\n return jsonify(\"complete\")",
"def offer_fix_broken_regions(self, with_window: ProjectWindow = None):\n if with_window:\n result = with_window.CustomDialog(\n title=\"Region Cleanup\",\n message=\"In vanilla Dark Souls, the Duke's Archives has four unused regions that can break event\\n\"\n \"scripts. Would you like Soulstruct to delete those four regions now?\",\n button_names=(\"Yes, delete them\", \"No, leave them be\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n else:\n result = 1 if (\n input(\n \"In vanilla Dark Souls, the Duke's Archives has four unused regions that can break event\\n\"\n \"scripts. Would you like Soulstruct to delete those four regions now? [y]/n\",\n ).lower() == \"n\"\n ) else 0\n if result == 0:\n archives_msb = self.maps.DukesArchives\n repeats = archives_msb.get_repeated_entity_ids() # re-checking just in case\n if {e.entity_id for e in repeats[\"Regions\"]} == {1702745, 1702746, 1702747, 1702748}:\n for entry in repeats[\"Regions\"]:\n archives_msb.regions.delete_entry(entry)\n return True\n else:\n return False",
"def remove_regions_from_codes(self, regions_to_remove):\n reduced_regions = []\n reduced_regions_indx = []\n for indx, r in enumerate(self.Rs):\n if r in regions_to_remove:\n pass\n else:\n reduced_regions_indx.append(indx)\n reduced_regions.append(r)\n\n self.Rs = reduced_regions\n _, nCMs, nDs = self.ActiveCMs.shape\n self.reduce_regions_from_index(reduced_regions_indx)",
"def remove_panel():\n panel_name = request.json['panel_name']\n remove_panel_query(s, panel_name)\n return jsonify('complete')",
"def SubtractRegion(*args, **kwargs):\n return _gdi_.Region_SubtractRegion(*args, **kwargs)",
"def delete_region(self, region):\n\n self.contour_plot.vb.removeItem(region)\n del self.regions[id(region)]",
"def create_panel_custom_regions():\n panel_id = request.json[\"panel_id\"]\n chrom = request.json[\"chrom\"]\n start = request.json[\"start\"]\n end = request.json[\"end\"]\n name = request.json[\"name\"]\n regions = select_region_by_location(s, chrom, start, end) # if region already exists, return current entry\n if regions:\n for i in regions:\n add_region_to_panel(s, i.id, panel_id)\n s.commit()\n continue\n else:\n create_custom_region(s, panel_id, chrom, start, end, name)\n\n return jsonify(\"complete\")",
"def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value",
"def closeRegions(regions):\n for orig,table in regions.items():\n for dest in table['neighbors']:\n if not regions.has_key(dest):\n regions[dest] = {'neighbors': set(),\n 'value': 4}\n regions[dest]['neighbors'].add(orig)\n return regions",
"def getStudyRegions(self):\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n self.cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in self.cursor:\n exclusionRows.append(state[0])\n query = 'SELECT * FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n studyRegions = df[~df['name'].isin(exclusionRows)]['name']\n studyRegions = studyRegions.reset_index()\n studyRegions = studyRegions.drop('index', axis=1)\n self.studyRegions = studyRegions\n return studyRegions",
"def refresh(self, data):\n for region in self.loaded_regions:\n self.regions[region].unload()\n super().clear()\n self.data = data\n self.loaded_regions = set()\n self.draw_visible_area()",
"def delete_region(self, region_id):\n raise exception.NotImplemented() # pragma: no cover",
"def removeRows(self, position, rows=1, index=QModelIndex()):\n self.beginRemoveRows(QModelIndex(), position, position + rows - 1)\n\n del self.Grains[position:position+rows]\n\n self.endRemoveRows()\n self.dataChanged.emit(index, index) \n return True",
"def Subtract(*args, **kwargs):\n return _gdi_.Region_Subtract(*args, **kwargs)",
"def removeRegion(disk, startCoord):\n coordinates = [startCoord]\n while coordinates:\n coord = coordinates.pop()\n coordinates.extend(getNeighbors(disk, coord))\n disk[coord[0], coord[1]] = 0",
"def handle_region(self, region, args):\n result = [\n CHECKMARK, str(region), \"destroyed security group '{}'\".format(GROUP_NAME)\n ]\n\n try:\n resp = region.conn.delete_security_group(GroupName=GROUP_NAME)\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n return result",
"def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions",
"def removeRows(self, row, count, parent=QModelIndex()):\n self.dict_lock.acquire()\n self.list_lock.acquire()\n\n self.beginRemoveRows(parent, row, row + count -1)\n\n for offset in range(count):\n level = self.view_list[row + offset]\n del self.levels_dict[level.code]\n\n del self.view_list[row:row+count]\n if(not self.sorting & Sorting.Reversed):\n del self.view_keys[row:row+count]\n else:\n del self.view_keys[len(self.view_keys) - (row + count): len(self.view_keys) - row]\n\n self.endRemoveRows()\n\n self.list_lock.release()\n self.dict_lock.release()\n\n return True",
"def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)",
"def eliminar_rs(self):\n group = Group.objects.get(name=self.nombre)\n if self.es_utilizado():\n return False\n else:\n group.delete()\n self.delete()\n return True",
"def unload(self):\n for obj in self.objects:\n self.scene.removeItem(obj)\n for plant in self.plants:\n self.scene.removeItem(plant)\n for tile in self.tiles:\n tile.unload()\n self.scene.removeItem(tile)\n if self.region_back:\n self.scene.removeItem(self.region_back)\n self.tiles = []\n self.objects = []\n self.plants = []\n self.region_back = None\n self.loaded = False",
"def __clearRegionFromCurrentState(self,area, view, currentContainer):\r\n x,y,w,h = area\r\n\r\n # Loop through all objects on screen and remove ones which are completely covered by given area and are not in same container as area\r\n for node in view.findall('/*[@center and @container!=\"%s\"]'%currentContainer):\r\n nodeX, nodeY, nodeW, nodeH = [int(p) for p in node.getAttribute('coords').split(\",\")]\r\n if nodeX>=x and (nodeX+nodeW)<=(x+w) and nodeY>=y and (nodeY+nodeH)<=(y+h):\r\n if node.getName()=='label':\r\n debug.out(\"Removing node %s(%s,%s,%s,%s) from area %s,%s,%s,%s\"%(node.getAttribute(\"text\"),nodeX, nodeY,nodeW,nodeH,x,y,w,h))\r\n view.remove(node)\r\n\r\n return view",
"def remove_regions_min_deaths(self, min_num_deaths=100):\n reduced_regions = []\n reduced_regions_indx = []\n for indx, r in enumerate(self.Rs):\n if self.Deaths.data[indx, -1] < min_num_deaths:\n print(f\"Region {r} removed since it has {self.Deaths[indx, -1]} deaths on the last day\")\n elif np.isnan(self.Deaths.data[indx, -1]):\n print(f\"Region {r} removed since it has {self.Deaths[indx, -1]} deaths on the last day\")\n else:\n reduced_regions.append(r)\n reduced_regions_indx.append(indx)\n\n self.Rs = reduced_regions\n self.reduce_regions_from_index(reduced_regions_indx)",
"def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)",
"def check_for_unload(self):\n flag = False\n for i, layer in enumerate(self.chunk_loaded_list):\n for ticket, *data in layer[:]:\n if ticket.value in (0, 1):\n continue\n if (\n ticket == ChunkLoadTicketType.PLAYER_LOADED\n ): # check if player in range, if not, remove ticket\n pass\n else:\n layer.remove(ticket)\n if i != 15:\n self.chunk_loaded_list[i + 1].append(ticket)\n flag = flag or len(layer)\n\n if not flag:\n print(\"unloading chunk\", self)\n self.hide_all(True)\n self.get_dimension().unload_chunk(self)",
"def removeLvcrs(self):\n count = 0\n for cell in self.cells:\n #print cell.getId()\n objects = cell.getObjects()\n for object in objects.list():\n for objRecord in object[3]:\n if objRecord.name == 'LVCR':\n #print ' ',object[:3]\n objects.remove(object)\n count += 1\n break\n return count",
"def remove_measurement(self):\n idx = self.measurementsListWidget.currentRow()\n if len(self.mgr.obj.measurements) > 0:\n key = list(self.mgr.obj.measurements)[idx]\n del self.mgr.obj.measurements[key]\n\n # Flag the Survey as changed\n self.mgr.changed = True\n\n # Refresh lists/tables\n self.load_measurements()\n nmeas = len(self.mgr.obj.measurements)\n if nmeas > 0:\n self.measurementsListWidget.setCurrentRow(min(idx, nmeas-1))",
"def add_all_regions_vp():\n gene_id = request.json['gene_id']\n vpanel_id = request.json['vpanel_id']\n panel_id = request.json['panel_id']\n add_all_regions_to_vp(s, panel_id, gene_id, vpanel_id)\n return jsonify({\"genes\": [gene_id, ]})"
]
| [
"0.604837",
"0.56510043",
"0.545178",
"0.53380364",
"0.51581484",
"0.5128699",
"0.5109236",
"0.50571764",
"0.50442064",
"0.50144815",
"0.48666006",
"0.48078403",
"0.47030452",
"0.4667216",
"0.46481922",
"0.4605013",
"0.45865065",
"0.4569744",
"0.45658997",
"0.45476836",
"0.45310584",
"0.45246872",
"0.45159587",
"0.4485798",
"0.4477862",
"0.44764894",
"0.446919",
"0.44666427",
"0.44653788",
"0.44511226"
]
| 0.7042875 | 0 |
Method to make a panel live given a panel ID | def make_live():
panelid = request.args.get('id')
locked = check_if_locked(s, panelid)
if locked:
unlock_panel_query(s, panelid)
current_version = get_current_version(s, panelid)
if not current_version:
current_version = 0
new_version = current_version + 1
make_panel_live(s, panelid, new_version, current_user.id)
return redirect(url_for('panels.view_panels')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_virtualpanel_live():\n vpanelid = request.args.get('id')\n panelid = get_panel_by_vp_id(s, vpanelid)\n locked = check_if_locked(s, panelid)\n if locked:\n if current_user.id == get_locked_user(s, panelid):\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))\n else:\n make_vp_panel_live(s, vpanelid)\n add_to_starlims(vpanelid)\n return redirect(url_for('panels.view_virtual_panels'))",
"def create_panel_process():\n form = CreatePanelProcess()\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n version = get_current_preftx_version(s, preftx_id)\n if not version:\n version = 0\n if make_live == \"on\":\n make_preftx_live(s, preftx_id, version + 1, current_user.id)\n make_panel_live(s, panel_id, 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n form.project.choices = get_project_choices(s, current_user.id)\n return render_template('panel_createprocess.html', form=form, panel_id=\"main\",\n url=url_for('panels.create_panel_process'))",
"def show(self):\n self.window.run_command(\"show_panel\", {\"panel\": self.full_name})",
"def add_panel(self, panel):\n assert panel.PANEL_ID not in self.panels\n assert not self.tools, \"tools must be added after panels\"\n self.panels[panel.PANEL_ID] = panel\n panel.register_panel(self)",
"def view_panel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_panel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_panel_details_by_id(s, id)\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n panel = get_regions_by_panelid(s, id, version)\n project_id = get_project_id_by_panel_id(s, id)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n # panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet & may also have changes that have not been made live\"\n bed = 'disabled'\n current_version = version\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = range(1, current_version + 1)\n choices = []\n for i in v_list:\n choices.append((i, i))\n form.versions.choices = choices\n form.versions.default = version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n return render_template('panel_view.html', scope='Panel', table=json.dumps(table), panel=table,\n panel_name=panel_name, edit=edit, bed=bed,\n version=version, panel_id=id, project_id=project_id, message=message,\n url=url_for('panels.view_panel'),\n form=form)\n\n else:\n return redirect(url_for('panels.view_panels'))",
"def createPanel(self, LibraryID, Name, **kwargs):\n if self.request(\"createPanel\", LibraryID=LibraryID, Name=Name, **kwargs) is None:\n return None\n return self.json_response[\"Result\"][\"PanelID\"]",
"def create_virtual_panel_process():\n form = CreateVirtualPanelProcess()\n\n if request.method == \"POST\":\n make_live = request.form['make_live']\n vp_id = request.args.get('id')\n if make_live == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n panel_id = get_panel_by_vp_id(s, vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n form.panel.choices = get_panel_choices(s, current_user.id)\n url = url_for('panels.create_virtual_panel_process')\n return render_template('virtualpanels_createprocess.html', form=form, url=url, vp_id=\"main\")",
"def edit_panel_process():\n if request.method == \"POST\":\n make_live = request.form['make_live']\n panel_id = request.args.get('id')\n project_id = get_project_id_by_panel_id(s, panel_id)\n preftx_id = get_preftx_id_by_project_id(s, project_id)\n tx_version = get_current_preftx_version(s, preftx_id)\n panel_version = get_current_version(s, panel_id)\n if not tx_version:\n tx_version = 0\n if make_live == \"on\":\n print('make_live')\n make_preftx_live(s, preftx_id, tx_version + 1, current_user.id)\n make_panel_live(s, panel_id, panel_version + 1, current_user.id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_panel') + \"?id=\" + panel_id)\n elif request.method == \"GET\":\n panel_id = request.args.get('id')\n form = EditPanelProcess()\n panel_info = get_panel_info(s, panel_id)\n project_id = panel_info.project_id\n form.project.choices = [(project_id, panel_info.project_name), ]\n form.panelname.data = panel_info.name\n\n lock_panel(s, current_user.id, panel_id)\n\n genes = get_genes_by_panelid_edit(s, panel_id, panel_info.current_version)\n html = \"\"\n buttonlist = \"\"\n print('hello')\n for gene in genes:\n gene_id = gene.id\n gene_name = gene.name\n preftx_id = get_preftx_by_gene_id\n upcoming_preftx = get_upcoming_preftx_by_gene_id(s, project_id, gene_id)\n all_tx = get_tx_by_gene_id(s, gene_id)\n\n buttonlist += render_template(\"gene_button.html\", gene_name=gene_name, gene_id=gene_id, added=True)\n tx_html = render_template(\"tx_list.html\", gene_name=gene_name, all_tx=all_tx, preftx=preftx_id,\n upcoming=upcoming_preftx, disabled=True)\n html += tx_html\n\n return render_template('panel_createprocess.html', form=form, genes=html, genelist=buttonlist,\n panel_id=panel_id,\n url=url_for('panels.edit_panel_process') + \"?id=\" + panel_id)",
"def add_panel():\n panel_name = request.json['panel_name']\n project_id = request.json['project_id']\n panel_id = create_panel_query(s, project_id, panel_name, current_user.id)\n return jsonify(panel_id)",
"def create_panel(self):\n return\n # return Panel(self)",
"def load_panelapp_panel(adapter, panel_id=None, institute=\"cust000\", confidence=\"green\"):\n panel_ids = [panel_id]\n\n if not panel_id:\n LOG.info(\"Fetching all panel app panels\")\n panel_ids = _panelapp_panel_ids()\n\n for _ in panel_ids:\n parsed_panel = _parse_panelapp_panel(adapter, _, institute, confidence)\n\n if len(parsed_panel[\"genes\"]) == 0:\n LOG.warning(\"Panel %s is missing genes. Skipping.\", parsed_panel[\"display_name\"])\n continue\n\n try:\n adapter.load_panel(parsed_panel=parsed_panel, replace=True)\n except Exception as err:\n raise err",
"def view_panels(id=None):\n if not id:\n id = request.args.get('id')\n\n if id:\n panels = get_panels_by_project_id(s, id)\n else:\n panels = get_panels(s)\n result = []\n project_name = \"All\"\n for i in panels:\n row = dict(zip(i.keys(), i))\n status = check_panel_status(s, row[\"panelid\"])\n row[\"status\"] = status\n permission = check_user_has_permission(s, current_user.id, row[\"projectid\"])\n locked = check_if_locked(s, row[\"panelid\"])\n row['permission'] = permission\n row['locked'] = locked\n\n if id:\n project_name = row['projectname']\n # if check_user_has_permission(s, current_user.id, row[\"projectid\"]):\n # result.append(row)\n result.append(row)\n table = ItemTablePanels(result, classes=['table', 'table-striped'])\n return render_template('panels.html', panels=table, project_name=project_name)",
"def panel(self):\n data_to_track = {}\n for possession in self.possessions_to_track_panel:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_panel:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"panel\",\n data_to_track,\n str(self.id),\n self.group,\n str(self.round)])",
"def liveview(self):\n if self.liveviewButton.isChecked():\n self.save = False\n self.channelsOpen()\n self.liveviewStart()\n\n else:\n self.liveviewStop()",
"def shopify_instances_onboarding_panel(self):\n\n current_company_id = request.httprequest.cookies.get('cids').split(',') if request.httprequest.cookies.get(\n 'cids', []) else []\n company = False\n if len(current_company_id) > 0 and current_company_id[0] and current_company_id[0].isdigit():\n company = request.env['res.company'].sudo().search([('id', '=', int(current_company_id[0]))])\n if not company:\n company = request.env.company\n hide_panel = company.shopify_onboarding_toggle_state != 'open'\n btn_value = 'Create More Shopify Instance' if hide_panel else 'Hide On boarding Panel'\n shopify_manager_group = request.env.ref(\"shopify_ept.group_shopify_manager_ept\")\n if request.env.uid not in shopify_manager_group.users.ids:\n return {}\n return {\n 'html': request.env.ref('shopify_ept.shopify_instances_onboarding_panel_ept')._render({\n 'company': company,\n 'toggle_company_id': company.id,\n 'hide_panel': hide_panel,\n 'btn_value': btn_value,\n 'state': company.get_and_update_shopify_instances_onboarding_state(),\n 'is_button_active': company.is_create_shopify_more_instance\n })\n }",
"def register(self, panel):\n new_instance = panel()\n new_event_type = new_instance._meta.event_type\n if new_event_type in self.__class__._panels:\n raise Exception(\"Two panels with the same event type: %s\" % \\\n new_event_type)\n self.__class__._panels[new_event_type] = new_instance\n self.storage.register_event(new_event_type, new_instance._meta.dimensions.keys())",
"def get_panels(config):\n\n task = TaskPanels(config)\n task.execute()\n\n task = TaskPanelsMenu(config)\n task.execute()\n\n logging.info(\"Panels creation finished!\")",
"def liveview(self):\n if self.liveviewButton.isChecked():\n# self.save = False\n self.paramChangedInitialize()\n self.openShutter(\"red\")\n self.liveviewStart()\n\n else:\n self.liveviewStop()",
"def makeLive(*args, none: bool=True, **kwargs)->None:\n pass",
"def _parse_panelapp_panel(adapter, panel_id, institute, confidence):\n hgnc_map = adapter.ensembl_to_hgnc_mapping()\n json_lines = fetch_resource(PANELAPP_BASE_URL.format(\"get_panel\") + panel_id, json=True)\n parsed_panel = parse_panel_app_panel(\n panel_info=json_lines[\"result\"],\n hgnc_map=hgnc_map,\n institute=institute,\n confidence=confidence,\n )\n if confidence != \"green\":\n parsed_panel[\"panel_id\"] = \"_\".join([panel_id, confidence])\n else: # This way the old green panels will be overwritten, instead of creating 2 sets of green panels, old and new\n parsed_panel[\"panel_id\"] = panel_id\n\n return parsed_panel",
"def main():\n PanelDemo().mainloop()",
"def live(self):\n curses.wrapper(self.__liveActually)",
"def edit_virtual_panel_process():\n form = EditVirtualPanelProcess()\n\n vp_id = request.args.get('id')\n panel_id = get_panel_by_vp_id(s, vp_id)\n if request.method == \"POST\":\n if request.form['make_live'] == \"on\":\n make_vp_panel_live(s, vp_id)\n add_to_starlims(vp_id)\n unlock_panel_query(s, panel_id)\n return redirect(url_for('panels.view_vpanel') + \"?id=\" + vp_id)\n elif request.method == \"GET\":\n lock_panel(s, current_user.id, panel_id)\n panel_info = get_panel_details_by_id(s, panel_id)\n panel_name = panel_info.name\n form.panel.choices = [(panel_id, panel_name), ]\n\n panel_version = get_current_version(s, panel_id)\n panel_genes = get_genes_by_panelid(s, panel_id, panel_version)\n vp_info = get_vpanel_details_by_id(s, vp_id)\n vp_version = vp_info.current_version\n vp_name = vp_info.name\n form.vpanelname.data = vp_name\n vp_genes = get_genes_by_vpanelid_edit(s, vp_id, vp_version)\n genelist = \"\"\n vp_list = []\n for i in vp_genes:\n vp_list.append(i.id)\n\n genes = []\n print('new method')\n for i in panel_genes:\n if i.id in vp_list:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": True})\n button = render_template(\"gene_button.html\", gene_name=i.name, gene_id=i.id, added=True)\n genelist += button\n\n else:\n genes.append({\"name\": i.name, \"id\": i.id, \"vp_list\": False})\n\n gene_html = render_template(\"panel_genes.html\", panel_genes=genes)\n\n url = url_for('panels.edit_virtual_panel_process') + '?id=' + str(vp_id)\n return render_template('virtualpanels_createprocess.html', form=form, genes=gene_html, genelist=genelist,\n vp_id=vp_id, panel_name=vp_name, current_version=vp_version, url=url)",
"def get_panel(self, panel_id):\n return self.panels.get(panel_id, None)",
"def handle_panel_update(self, section_dict):",
"def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')",
"def panel(*args, control: bool=True, copy: AnyStr=\"\", createString: bool=True, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", editString: bool=True, exists: bool=True,\n init: bool=True, isUnique: bool=True, label: Union[AnyStr, bool]=\"\",\n menuBarRepeatLast: bool=True, menuBarVisible: bool=True, needsInit: bool=True,\n parent: AnyStr=\"\", popupMenuProcedure: Union[Script, bool]=None, replacePanel:\n AnyStr=\"\", tearOff: bool=True, tearOffCopy: AnyStr=\"\", tearOffRestore: bool=True,\n unParent: bool=True, useTemplate: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def run(self):\n\n self.window.run_command(\"show_panel\", {\"panel\": \"output.reg_replace\"})",
"def set_upgrade_panel(self):\n lot = self.city_graphics.get_clicked_lot(pg.mouse.get_pos())\n if lot.construct:\n self.upgrade_panel.set_lot(lot)\n self.upgrade_panel.enable()\n else:\n self.upgrade_panel.disable()",
"def _independent_panel(width, height):\n\n from maya import cmds\n\n # center panel on screen\n screen_width, screen_height = _get_screen_size()\n topLeft = [int((screen_height-height)/2.0),\n int((screen_width-width)/2.0)]\n\n window = cmds.window(width=width,\n height=height,\n topLeftCorner=topLeft,\n menuBarVisible=False,\n titleBar=False)\n cmds.paneLayout()\n panel = cmds.modelPanel(menuBarVisible=False,\n label='CapturePanel')\n\n # Hide icons under panel menus\n bar_layout = cmds.modelPanel(panel, q=True, barLayout=True)\n cmds.frameLayout(bar_layout, e=True, collapse=True)\n\n cmds.showWindow(window)\n\n # Set the modelEditor of the modelPanel as the active view so it takes\n # the playback focus. Does seem redundant with the `refresh` added in.\n editor = cmds.modelPanel(panel, query=True, modelEditor=True)\n cmds.modelEditor(editor, e=1, activeView=True)\n\n # Force a draw refresh of Maya so it keeps focus on the new panel\n # This focus is required to force preview playback in the independent panel\n cmds.refresh(force=True)\n\n try:\n yield panel\n finally:\n # Delete the panel to fix memory leak (about 5 mb per capture)\n cmds.deleteUI(panel, panel=True)\n cmds.deleteUI(window)"
]
| [
"0.70315933",
"0.6383759",
"0.6093565",
"0.5932666",
"0.5906493",
"0.5849285",
"0.5781931",
"0.57565033",
"0.5752766",
"0.5693289",
"0.56888175",
"0.5603522",
"0.5540066",
"0.552902",
"0.5507206",
"0.54622364",
"0.54073757",
"0.5386678",
"0.5381173",
"0.5299995",
"0.52436495",
"0.523606",
"0.5219468",
"0.51768804",
"0.5155937",
"0.51507837",
"0.5149201",
"0.5138319",
"0.513595",
"0.5092676"
]
| 0.7831331 | 0 |
Method to unlock a panel so it can be edited by others | def unlock_panel():
panelid = request.args.get('panelid')
unlock_panel_query(s, panelid)
return redirect(url_for('panels.view_panels')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_locked():\n panel_id = request.args.get('id')\n json = False\n if not panel_id:\n json = True\n panel_id = request.json['id']\n project_id = get_project_id_by_panel_id(s, panel_id)\n if current_user.id == get_locked_user(s, panel_id) and json:\n unlock_panel_query(s, panel_id)\n return jsonify(\"complete\")\n elif check_user_has_permission(s, current_user.id, project_id):\n unlock_panel_query(s, panel_id)\n return manage_locked(message=\"Panel Unlocked\")\n else:\n return manage_locked(message=\"Hmmmm you don't have permission to do that\")",
"def unlock_view(self):\r\n self._logger.log('Unlocking {}'.format(self.name), self._logger.INFO)\r\n self._ui.uploadStackedWidget.setCurrentWidget(self._ui.connectedView)",
"def pilotUnlock (self):\n return self.unlock()",
"def lockSliderPanel(self, flag): \n\t\tself.doLockSliderPanel = flag",
"def hide(self):\n self.window.run_command(\"hide_panel\", {\"panel\": self.full_name})",
"def unlock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')",
"def f_unlock(self):\n self._locked = False",
"def okToUnblockVis(self):\n self.cancelUnblockVis()",
"def disable(self):\n if self.active != DISABLED:\n self.uimanager.remove_ui(self.active)\n self.uimanager.remove_action_group(self.action_group)\n self.active = DISABLED",
"def reenable(*args):\n self.controls.disabled = False\n self.disabled = False",
"def deactivate(self, newmode = None):\n\t\tself.urmaswin.Show(0) \n\t\tself.urmaswin.enableRendering(0) \n\t\tself.urmaswin.controlpanel.Show(0)\n\t\tself.visualizer.sliderWin.SetDefaultSize(self.origSliderWinSize)\n\n\t\tif not self.doLockSliderPanel and newmode != \"3d\":\n\t\t\tprint \"\\n\\n*** DEACTIVATING ANIMATOR\\n\"\n\t\t\tself.visualizer.setCurrentSliderPanel(self.visualizer.sliderPanel) \n\t\t\tself.visualizer.sliderPanel.Show(1)\n\t\tif newmode != \"3d\":\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_ADJUST, 1)\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_RESTORE, 1)\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_COLOCALIZATION, 1)\n\t\t\tself.menuManager.mainToolbar.EnableTool(MenuManager.ID_COLORMERGING, 1)\n\t\tself.urmaswin.cleanMenu()",
"def unlock(self):\n raise NotImplementedError",
"def unlock(self):\n self.shell(\"input keyevent MENU\")\n self.shell(\"input keyevent BACK\")",
"def unlock(self):\n self.mainloop().unlock()",
"def disable(self):\r\n self.update(enabled=False)",
"def ToggleLock(self, event):\n pass",
"def processUnlock(self):\r\n self.controller.executionUnlock()",
"def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)",
"def erase(self):\n\tself.state={}\n\tself.display(update_board=0)",
"def disable(self): \n self.feed_button.config(state=\"disabled\")\n self.eat_button.config(state=\"disabled\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"disabled\") \n self.add_population_button.config(state=\"disabled\")\n self.add_body_size_button.config(state=\"disabled\")",
"def set_upgrade_panel(self):\n lot = self.city_graphics.get_clicked_lot(pg.mouse.get_pos())\n if lot.construct:\n self.upgrade_panel.set_lot(lot)\n self.upgrade_panel.enable()\n else:\n self.upgrade_panel.disable()",
"def unlock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_unlock.click()\n\n if check:\n self.close_notification('success')",
"def limit_panel_hide(self):\r\n\r\n self.limit_panel_toggle()\r\n self.optimize_size(height_only=True)",
"def OnPanelEraseBg(self, event):\r\n\r\n pass",
"def _disable(self):\n self.enabled = False",
"def disable(self):",
"def _enable_disable_gui(self, state):\r\n self.mainWidget.standbyPushButton.setDisabled(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)",
"def unlock (self):\n if self.locked:\n self.locked = False\n if self._changed:\n return self._arrange_displays()\n return []",
"def display_cust_unlock(self):\n self.clear_terminal()\n self.unlock_menu_cust()\n self.handle_selection_cust_unlock()",
"def unlock(self):\n\n self.wait = False"
]
| [
"0.6602288",
"0.6435054",
"0.6348038",
"0.6270993",
"0.6200969",
"0.61717916",
"0.60962564",
"0.5970956",
"0.59518844",
"0.5920136",
"0.58595866",
"0.5852581",
"0.583533",
"0.5805647",
"0.5797462",
"0.57789946",
"0.5767917",
"0.5746708",
"0.57098585",
"0.5692884",
"0.5687889",
"0.5684263",
"0.56835175",
"0.56750804",
"0.56287605",
"0.56221336",
"0.5611511",
"0.5607465",
"0.5598212",
"0.55754286"
]
| 0.73430175 | 0 |