Dataset columns (each example row below lists its field values in this order):

    query            string  (lengths 9 to 9.05k)
    document         string  (lengths 10 to 222k)
    metadata         dict
    negatives        list    (length 30)
    negative_scores  list    (length 30)
    document_score   string  (lengths 4 to 10)
    document_rank    string  (2 classes)
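Each row pairs a natural-language query (a short description such as a docstring) with the code document it describes, plus 30 mined negative documents and their similarity scores. Below is a minimal sketch of loading and inspecting one row with the Hugging Face `datasets` library; the repository id "user/code-retrieval-triplets" is a hypothetical placeholder, since the actual hub name is not given here.

# A minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "user/code-retrieval-triplets" is a hypothetical repository id, not the real one.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language docstring, e.g. "Return the list of users, ..."
print(row["document"][:80])         # the positive code snippet (truncated for display)
print(len(row["negatives"]))        # 30 mined negative code snippets
print(len(row["negative_scores"]))  # 30 similarity scores aligned with `negatives`
print(row["document_score"], row["document_rank"])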
Return the list of users, optionally filtered by a predicate.
def users(self, predicate=None):
    if predicate is None:
        return self._get("users").json()
    else:
        return self._get("users/search", params={"predicate": predicate}).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_users(filter, api_site_parameter, page = 1, pagesize = 30, sort = 'reputation'):\n path = \"users\"\n results = __fetch_results(path, api_site_parameter, inname= filter, page = page, pagesize = pagesize, sort = sort)\n return results", "def list_users(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n filter_ = kwargs.get(\"filter\", \"all\")\n\n if verbose:\n attributes = self.engine.all_attributes()\n else:\n attributes = [\"sAMAccountName\", \"objectClass\"]\n\n if filter_ == \"all\":\n results = self.engine.query(self.engine.USER_ALL_FILTER(), attributes)\n elif filter_ == \"spn\":\n results = self.engine.query(self.engine.USER_SPN_FILTER(), attributes)\n elif filter_ == \"enabled\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER_NEG(USER_ACCOUNT_CONTROL[\"ACCOUNTDISABLE\"]), attributes)\n elif filter_ == \"disabled\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"ACCOUNTDISABLE\"]), attributes)\n elif filter_ == \"locked\":\n results = self.engine.query(self.engine.USER_LOCKED_FILTER(), attributes)\n elif filter_ == \"nopasswordexpire\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"DONT_EXPIRE_PASSWORD\"]), attributes)\n elif filter_ == \"passwordexpired\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"PASSWORD_EXPIRED\"]), attributes)\n elif filter_ == \"nokrbpreauth\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"DONT_REQ_PREAUTH\"]), attributes)\n elif filter_ == \"reversible\":\n results = self.engine.query(self.engine.USER_ACCOUNT_CONTROL_FILTER(USER_ACCOUNT_CONTROL[\"ENCRYPTED_TEXT_PWD_ALLOWED\"]), attributes)\n else:\n return None\n\n self.display(results, verbose)", "def get_queryset(self):\n return filterUsersByName( self.request.query_params.get('username', None) )", "def get_users():\n request_filters = request.args.get(\"filters\")\n request_filter_type = request.args.get(\"type\")\n parameters = None\n if request_filter_type and request_filters:\n parameters = {\"type\": request_filter_type, \"filters\": request_filters}\n\n if parameters is None:\n filters = {}\n else:\n if parameters[\"type\"] == \"in\":\n filters = make_filters(FilterType.IN, parameters[\"filters\"])\n elif parameters[\"type\"] == \"and\":\n filters = make_filters(FilterType.AND, parameters[\"filters\"])\n else:\n filters = make_filters(FilterType.OR, parameters[\"filters\"])\n\n users = user_service.get_users(filters)\n if not users:\n resp = make_response(\n dumps({\"status\": False, \"message\": \"No se encontraron usuarios\"}), 404\n )\n resp = make_response(dumps({\"status\": False, \"users\": users}), 200)\n return resp", "def get_users(self, *, Range=None, filter=None, fields=None, **kwargs):\n headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})\n function_endpoint = urljoin(self._baseurl, 'users')\n return self._call('GET', function_endpoint, headers=headers, **kwargs)", "def get_users(name=None):\n filters = create_filters(name=name)\n selection = User.objects.filter(**filters)\n if not len(selection):\n raise ObjectDoesNotFound('There is no users with selected filters.')\n return selection", "def get_all_users():", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def list(cls, context, filters=None, limit=3000, marker=1,\n sort_key='id', sort_dir='asc'):\n #import pdb; pdb.set_trace()\n db_users = 
cls.dbapi.get_user_list(\n context, limit=limit, marker=marker, sort_key=sort_key,\n sort_dir=sort_dir, filters=filters)\n total = db_users.total\n return [User._from_db_object(cls(context), obj) for obj in db_users], total", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def users_list(request):\n users_filter = UserFilter(request.GET, queryset=CustomUser.objects.filter(is_admin=False), request=request)\n return render(request, 'users/list.html', {'filter': users_filter})", "def filter_queryset(self, request, queryset, view):\n if view.action == \"list\":\n users = request.GET.get(\"users\")\n if users:\n users = users.split(\",\")\n return queryset.filter(user__username__in=users)\n if not request.user.is_anonymous:\n return queryset.filter(user__username=request.user.username)\n\n return queryset.none()\n\n return queryset", "def get(self):\n queries = {\"wildcard_properties\": []}\n\n fullname_query = request.args.get(\"fullName\", None)\n email_query = request.args.get(\"email\", None)\n\n if fullname_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{fullname_query}')\"\n queries[\"wildcard_properties\"].append(\"fullName\")\n if email_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{email_query}')\"\n queries[\"wildcard_properties\"].append(\"email\")\n\n users = User.filter(limit=10, **queries)\n response = UserListSchema(many=True).dumps(users).data\n\n return jsonify_response(json.loads(response), 200)", "def get_users_for(self, email):\n # this is a list rather than a generator because we probably want to\n # do a len() on it right away\n return [address.user for address in \\\n self.filter(verified=True, email=email)]", "def iter_users(self, selector: Optional[Callable[[User], bool]]=None) -> Generator[User, None, None]:\n if selector is None:\n for user in self.all_users.values():\n yield user\n else:\n for user in self.all_users.values():\n if selector(user):\n yield user", "def filter_users_by_username():\n username = request.args.get('username').strip().lower()\n users = User.query.all()\n users = [user for user in users if username in user.username.lower()]\n return jsonify([user.json() for user in users])", "def get_queryset(self):\n queryset = User.objects.all()\n username = self.request.query_params.get('username', None)\n if username is not None:\n queryset = queryset.filter(username=username)\n return queryset", "async def get_users(request):\n\n page = request.GET.getone(\"page\", None)\n page_size = request.GET.getone(\"page_size\", None)\n filter_name = request.GET.getone(\"q\", \"\")\n filter_admin = request.GET.getone(\"filter_admin\", \"false\")\n\n try:\n count_only = request.GET.getone(\"count_only\").lower() == \"true\"\n except (ValueError, KeyError):\n count_only = False\n\n if page:\n try:\n page = int(page)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for page\", status=400)\n page = 1 if page < 1 else page\n\n if page_size:\n try:\n page_size = int(page_size)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for page_size\", status=400)\n page_size = 1 if page_size < 1 else page_size\n\n query = request.cirrina.db_session.query(User)\n\n if filter_admin.lower() == \"true\":\n query = query.filter(User.is_admin)\n\n if filter_name:\n query = query.filter(User.username.like(\"%{}%\".format(filter_name)))\n\n 
nb_users = query.count()\n query = query.order_by(User.username)\n\n if page and page_size:\n users = query.limit(page_size).offset((page - 1) * page_size).all()\n else:\n users = query.all()\n\n data = {\"total_result_count\": nb_users}\n if not count_only:\n data[\"results\"] = [\n {\"id\": user.id, \"username\": user.username, \"is_admin\": user.is_admin}\n for user in users\n ]\n\n return web.json_response(data)", "def list_users():\n\n search = request.args.get('q')\n\n if not search:\n users = User.query.all()\n else:\n users = User.query.filter(User.username.like(f\"%{search}%\")).all()\n\n return render_template('users/index.html', users=users)", "def get_list(active=None, order=None, limit=None):\r\n user_query = User.query.order_by(User.username)\r\n\r\n if active is not None:\r\n user_query = user_query.filter(User.activated == active)\r\n\r\n if order:\r\n user_query = user_query.order_by(getattr(User, order))\r\n else:\r\n user_query = user_query.order_by(User.signup)\r\n\r\n if limit:\r\n user_query = user_query.limit(limit)\r\n\r\n return user_query.all()", "def fetch_users(self):\n users = super(type(self), self).fetch_users()\n return list(filter(self._check_active, users))", "def get_users():\n return db.fetch_users()", "def get_users_by_name(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query)).distinct()\n return user_list", "def get_users_for(self, email):\r\n # this is a list rather than a generator because we probably want to do a len() on it right away\r\n return [address.user for address in EmailAddress.objects.filter(verified=True, email=email)]", "def list_users(bookings):\n return[view_user(booking.user) for booking in bookings]", "def get_users():\n coll = data_access.get_user_collection()\n users = [User(**u) for u in coll.find()]\n return users", "def get_all_users(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query) | Q(user_profile__skill_section__skill_items__skill__icontains=query)).distinct()\n return user_list", "def select(self, filter_string):\n\n\t\twith self.lock:\n\t\t\tif filters.ALL == filter_string:\n\t\t\t\tfiltered_users = self.values()\n\n\t\t\telif filters.NONE == filter_string:\n\t\t\t\tfiltered_users = []\n\n\t\t\telif type(filter_string) == type([]):\n\t\t\t\tfiltered_users = filter_string\n\n\t\t\telif type(filter_string) == type(1):\n\t\t\t\tfiltered_users = []\n\n\t\t\t\tif filters.WATCHED == filter_string:\n\t\t\t\t\tfiltered_users.extend(user for user in self\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif user.inotified)\n\n\t\t\t\telif filters.NOT_WATCHED == filter_string:\n\t\t\t\t\tfiltered_users.extend(user for user in self\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif user.is_standard and not user.inotified)\n\n\n\t\t\t\telif filter_string & filters.NOT_SYSTEM \\\n\t\t\t\t\t\tor filter_string & filters.STANDARD:\n\t\t\t\t\tfiltered_users.extend(user for user in self if user.is_standard)\n\n\t\t\t\telif filters.SYSTEM == filter_string:\n\t\t\t\t\tfiltered_users.extend(user for user in self if user.is_system)\n\n\t\t\t\telif filters.SYSTEM_RESTRICTED & filter_string:\n\t\t\t\t\tfiltered_users.extend(user for user in self if user.is_system_restricted)\n\n\t\t\t\telif filters.SYSTEM_UNRESTRICTED & 
filter_string:\n\t\t\t\t\tfiltered_users.extend(user for user in self if user.is_system_unrestricted)\n\n\t\t\telse:\n\t\t\t\t\tuid_re = re.compile(\"^uid=(?P<uid>\\d+)\")\n\t\t\t\t\tuid = uid_re.match(filter_string)\n\t\t\t\t\tif uid is not None:\n\t\t\t\t\t\tif int(uid.group('uid')) in self.iterkeys():\n\t\t\t\t\t\t\tfiltered_users.append(self[uid])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise exceptions.DoesntExistException(\n\t\t\t\t\t\t\t\t_(u'UID {0} does not exist.').format(uid))\n\n\t\t\treturn filtered_users", "def query(self, *args, **kwargs) -> List[str]:\r\n self.logger.info(\"Returning Manual Users\")\r\n\r\n return kwargs['users']", "def get_all_users():\n return UserModel.query.filter_by(deleted_at=None)" ]
[ "0.718876", "0.69944775", "0.69089013", "0.68136847", "0.67803323", "0.6777134", "0.6736814", "0.66396964", "0.66324025", "0.65912765", "0.6577068", "0.6515583", "0.6474354", "0.64336705", "0.64172804", "0.63798445", "0.6375672", "0.6341821", "0.6323259", "0.6260486", "0.6231258", "0.621786", "0.62149036", "0.62137836", "0.6206472", "0.61871696", "0.61664265", "0.61663616", "0.6166009", "0.61488855" ]
0.81320375
0
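The `objective` metadata in each row declares a triplet objective over the (`query`, `document`, `negatives`) columns: the query acts as the anchor, the document as the positive, and each entry of `negatives` as a candidate negative. The sketch below expands one row into plain (anchor, positive, negative) triplets; the score-based filter that drops negatives scoring close to the positive is an assumption (a common false-negative guard), not something the dataset itself specifies.

# Sketch: expand one row into (anchor, positive, negative) triplets.
# The `margin` filter is an assumption, not part of the dataset specification.
def row_to_triplets(row, margin=0.05):
    anchor = row["query"]
    positive = row["document"]
    pos_score = float(row["document_score"])
    triplets = []
    for neg, neg_score in zip(row["negatives"], row["negative_scores"]):
        if float(neg_score) >= pos_score - margin:
            continue  # scores almost as high as the positive: likely a false negative
        triplets.append((anchor, positive, neg))
    return triplets

# Usage: triplets = [t for row in ds for t in row_to_triplets(row)]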
Return the list of components, optionally filtered by a predicate.
def components(self, predicate=None):
    if predicate is None:
        return self._get("components").json()
    else:
        return self._get("components/search", params={"predicate": predicate}).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_components(self,filt):\n comps = [self.components[i] for i in xrange(len(self.header)) if filt == self.header[i]]\n return comps", "def filter(self, predicate):\n def _filter(iterator):\n while True:\n item = next(iterator)\n if predicate(item):\n return item\n return self.__class__(self, _filter)", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def filter(iterable, predicate):\n\n for x in iterable:\n if predicate(x):\n yield x", "def queryComponent(type=None, filter=None, all=0):", "def _extract_predicates(predicate: Union[Callable, Iterable]) -> List[Callable]:\n if isinstance(predicate, collections.Iterable):\n return list(predicate)\n else:\n return [predicate]", "def filter(df, predicate):\n if not df:\n return []\n\n return [row for row in df if predicate(row)]", "def search_resources(self, conditional):\n return list(filter(conditional, self._resources))", "def filter(\n self, items: Iterable[Product], spec: Specification\n ) -> Generator[Product, None, None]:\n return (item for item in items if spec.is_satisfied(item))", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def filter(self, predicate):\n self.children = [c for c in self.children if predicate(c)]\n for c in self.children:\n c.filter(predicate)", "def find(self, predicate):\n return [d for d in self.iter_tree if predicate(d)]", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def filter(\n self, predicate: Union[Callable, Iterable], columns: Optional[List[str]] = None\n ):\n if columns is None:\n return super().filter(predicate)\n\n self._check_columns(columns)\n\n if not isinstance(predicate, Iterable) and not callable(predicate):\n raise TypeError(\n \"predicate must be a unary boolean predicate or iterable of booleans\"\n )\n\n res = Scope._EmptyColumn(self._dtype)\n cols = []\n for n in columns:\n idx = self._data.type().get_child_idx(n)\n cols.append(\n ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[idx].dtype,\n self._data.child_at(idx),\n True,\n )\n )\n if callable(predicate):\n for i in range(len(self)):\n if predicate(*[col[i] for col in cols]):\n res._append(self[i])\n elif isinstance(predicate, Iterable):\n for x, p in zip(self, predicate):\n if p:\n res._append(x)\n else:\n pass\n return res._finalize()", "def filter(self: Catalog, predicate: Callable[[CatalogSource], bool]) -> Catalog:\n cat1 = self.copy()\n\n new_cat = Catalog()\n for k in cat1.keys():\n for ver_id, version in cat1[k].versions.items():\n if predicate(version):\n new_cat[k][ver_id] = version\n return new_cat", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])", "def visitCriteria(self, ctx: ApiQLParser.CriteriaContext):\n return lmap(lambda c: c.accept(self), ctx.getChildren(self.filter_ignored))", "def filterfalse(iterable, predicate):\n for x in iterable:\n if not predicate(x):\n yield x", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def _get_visible_components(self, bounds):\n if bounds is None:\n return [c for c in self.components if c.visible]\n\n return [c for c in self.components\n if intersect_bounds(c.rect, bounds) != empty_rectangle]", "def targets(self, predicate=None):\r\n 
return filter(predicate, self._targets)", "def filter(self, func=bool):\n return _(filter(func, self._))", "def simple_filter(f, l):\n # a list comprehension with an 'if' clause goes the job nicely\n return [ item for item in l if f(item) ]", "def filter(predicate): #pylint: disable=redefined-builtin\n from xpedite.analytics.timelineFilter import TimelineFilter\n profiles = FilteredProfiles(TimelineFilter(predicate).apply(globalProfile()))\n return profiles", "def filter(self, op):\n def op_filter(seqs):\n r = [s for s in seqs if op(s)]\n if len(r) == 0:\n return None\n else:\n return r\n return self.element_wise(op_filter)", "def filter(self, predicate: Callable[[Cut], bool]) -> None:\n self._filter_fn = predicate\n self.provide_len = False", "def filter(data, mask, **kwargs):\n return Component(\n \"Filter\",\n arguments={\n 'data': Component.of(data),\n 'mask': Component.of(mask)\n },\n options={\n \n },\n constraints=kwargs)", "def remove(predicate, coll):\r\n return filter(lambda x: not predicate(x), coll)", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))" ]
[ "0.6674913", "0.59280175", "0.57847", "0.57690585", "0.5722505", "0.5720602", "0.56805724", "0.5619865", "0.56110597", "0.5601963", "0.5591747", "0.5566991", "0.5561191", "0.5532992", "0.5496582", "0.54930145", "0.54552966", "0.54481083", "0.5440294", "0.5438733", "0.5350558", "0.534563", "0.52861804", "0.5277633", "0.52774435", "0.52744865", "0.5255892", "0.52458644", "0.5209125", "0.52091146" ]
0.6776347
0
Returns the component that is the parent of the passed in component.
def component_parent(self, component):
    list = self.components("ANY children.identifier = '%s'" % _obj_id(component))
    if len(list) > 0:
        return list[0]
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parentComponent(self):\n return fusion.Component()", "def get_parent(self):\n return self._find_by_locator().parent", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None", "def get_parent(self):\n return self.parent", "def get_parent(self):\n return self.parent", "def get_parent(self):\n return self.parent", "def get_parent(self) :\n return self.parent", "def get_parent(self):\n return self.__parent", "def get_parent(self):\n return self.__parent", "def get_parent(self):\n return self._parent", "def parent(v=None):\n if v is None or isinstance(v, Widget):\n return v\n else:\n raise ValueError('parent must be a widget or None')", "def get_parent(self): # real signature unknown; restored from __doc__\n pass", "def GetItemParent(self, item):\r\n\r\n return item.GetParent()", "def find_parent_of(self, *args):\n return _ida_hexrays.citem_t_find_parent_of(self, *args)", "def parent(self):\n if self._parent is not None:\n return self._parent()\n else:\n return None", "def parent(self, node):\r\n return self.find_node(node).parent.content", "def parent(self):\n return self.get_parent().specific", "def GetParent(self):\n return self.parent", "def parent(self):\n\t\treturn self._parent", "def find_parent(self):\n pass", "def XPGetParentWidget(inWidget):\n pass", "def find_parent(self):\n parent = self._parent\n if parent:\n return parent\n elif not self.is_root:\n psobj = self.get_sobj().GetFather()\n parent = self.__class__(self._std, self._bld, psobj.GetID())\n self._parent = parent\n return parent", "def get_parent(self):\n return BinaryNode.or_none(self.parent)", "def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent", "def parent(self):\n return getattr(self, \"parent_%s\" % self.discriminator)", "def parent(self):\r\n return self._parent", "def parent(self):\n if self.__parent is None:\n return None\n parent = self.__parent()\n if parent is None:\n self.__parent = parent\n return parent", "def parent(self):\n \n return self._parent", "def getParent():", "def GetParent(self):\r\n\r\n return self._parent" ]
[ "0.7491849", "0.73969966", "0.7335824", "0.7116752", "0.7116752", "0.7116752", "0.7111655", "0.7023323", "0.7023323", "0.69511116", "0.6929134", "0.6915407", "0.69085526", "0.68869364", "0.68481857", "0.67514914", "0.6736015", "0.67348146", "0.67278624", "0.6721226", "0.67155606", "0.6689999", "0.6676756", "0.66730046", "0.666371", "0.6644296", "0.66343", "0.6633125", "0.66260713", "0.6622919" ]
0.8453336
0
Returns the immediate child components of the passed in component
def component_children(self, component):
    return self.components("parent.identifier = '%s'" % _obj_id(component))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def components(self):\n return self._components", "def components(self):\r\n children = self.container.findall(\"ComponentInstance\")\r\n return [XMLComponent(c) for c in children]", "def get_recursive_components (self, comp_name):\r\n comp_list = []\r\n # Current component\r\n comp_data = self.__get_component_structure( comp_name ) \r\n\r\n while comp_data is not None:\r\n comp_list.append(comp_data)\r\n # Parent components \r\n p_comp = comp_data[\"superclass\"]\r\n comp_data = self.__get_component_structure(p_comp)\r\n\r\n return comp_list", "def get_childs(self):\n\t\treturn self.__childs", "def find_children(self, name, recursive=True) -> Sequence['Component']:\n return ()", "def component_parent(self, component):\n \n list = self.components(\"ANY children.identifier = '%s'\" % _obj_id(component))\n if len(list) > 0:\n return list[0]\n else:\n return None", "def components(self):\r\n return list(self._components)", "def get_component():\n\t\tcomponentConsumed = consumed\n\t\tfor i in range(len(components)):\n\t\t\tif componentConsumed < len(components[-i]):\n\t\t\t\treturn components[-i][:-componentConsumed if componentConsumed else None]\n\t\t\telse:\n\t\t\t\tcomponentConsumed -= len(components[-i])\n\t\treturn []", "def components(self):\n return self._components", "def components(self):\n return self._components", "def components(self):\r\n return self.q(css=Component.BODY_SELECTOR).map(\r\n lambda el: Component(self.browser, el.get_attribute('data-locator'))).results", "def get_children(self):\n return self.children", "def components(self):\n return self.__components", "def components(self):\n return self.__components", "def get_child_nodes(node):\r\n return list(iter_child_nodes(node))", "def each_child(\n self,\n search_range=None,\n descended_from_type=_otio.Composable,\n shallow_search=False,\n):\n for child in self.children_if(descended_from_type, search_range, shallow_search):\n yield child", "def components(self):\n # The '_components' attribute is defined according to the\n # subclass of Dyadic the instance belongs to.\n return self._components", "def components_in(containers):\r\n components = []\r\n for container in containers:\r\n instances = container.components()\r\n components.extend(instances)\r\n subcontainers = container.containers()\r\n components.extend(components_in(subcontainers))\r\n\r\n return components", "def getChildren():", "def getChildren(self):\n return []", "def get_children_elements(self):\n\n pass", "def get_children(self):\r\n return self.children", "def getItemsInContainer(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"action\"))\n items.extend(getAllChildrenWithTagName(elm, \"container\"))\n switches = getAllChildrenWithTagName(elm, \"switch\")\n for sw in switches:\n items.extend(getAllChildrenWithTagName(sw, \"action\"))\n items.extend(getAllChildrenWithTagName(sw, \"container\"))\n return items", "def find_children(start_tag, tag_table):\n pure_child = pd.Series([])\n parents = pd.Series([start_tag])\n while parents.shape[0] > 0:\n pure_child = pd.concat([pure_child,\n parents[~parents\n .isin(tag_table['Parent'])]])\n parents = tag_table.loc[tag_table['Parent']\n .isin(parents[parents\n .isin(tag_table['Parent'])]),\n 'Child']\n return pure_child", "def get_children(self):\n return []", "def children(node):\n\n return snd(node)", "def iter_components(self):\n return self.components.values()", "def GetChildren( self ):\n children = [\n cWrpr \n for cWrpr in GameNodePath.GetChildren( self ) \n if not 
cWrpr.data.getPythonTag( TAG_IGNORE )\n ]\n return children", "def get_children_with_tag(self, tag):\n if tag in self._children_tag_table:\n result = self._children_tag_table[tag]\n else:\n result = []\n composite_children = [c for c in self._children.values() if isinstance(c, Composite)]\n grand_children = []\n for c in composite_children:\n grand_children += c.get_children_with_tag(tag)\n return result + grand_children", "def get_root_children(self):\n return self.browser.tags" ]
[ "0.63954175", "0.634758", "0.6310204", "0.6247878", "0.62178755", "0.61164606", "0.61022353", "0.60726184", "0.59098697", "0.59098697", "0.5903744", "0.58811057", "0.5880784", "0.5880784", "0.5841002", "0.58345646", "0.5816388", "0.5808206", "0.5802079", "0.5671715", "0.5667768", "0.56393844", "0.5621664", "0.5584592", "0.55404276", "0.5539223", "0.5538252", "0.548561", "0.54717636", "0.5458978" ]
0.80775076
0
Return the list of all milestones, optionally filtered by a predicate
def milestones(self, predicate=None):
    if predicate is None:
        return self._get("milestones").json()
    else:
        return self._get("milestones/search", params={"predicate": predicate}).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_get_milestones_list(self):\n pass", "def milestones(self):\r\n return milestones.Milestones(self)", "def milestones(self):\r\n return IssueMilestones(self)", "def get_public_milestones(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Milestones/\"))", "def get_milestones(username, skillpath):\n muser = database_controller.get_user(username).id\n mskillid = database_controller.get_skill(skillpath).id\n milestonelist = MilestoneAssociation.query.filter(MilestoneAssociation.milestone_users_id == muser,\n MilestoneAssociation.milestone_skill_id == mskillid).all()\n milestone_models = []\n for milestone in milestonelist:\n date = database_controller.get_date_from_id(milestone.milestone_date_id).date\n level = milestone.level\n milestone_models.append(MilestoneModel(date, milestone.comment, level))\n return milestone_models", "def milestones_active(self, within_component=None):\n if within_component is not None:\n if isinstance(within_component, str):\n within_component = self.components(\"identifier = %s\" % within_component)[0]\n predicate = \"\"\"\n (StartDate == nil || StartDate < NOW()) \n AND \n (EndDate == nil || EndDate > NOW()) \n AND\n (component.identifier == nil OR %s BEGINSWITH component.fullName)\n \"\"\"\n return self.milestones(predicate % (_obj_id(within_component), within_component[\"fullName\"]))\n else:\n predicate = \"\"\"\n (StartDate == nil || StartDate < NOW()) \n AND \n (EndDate == nil || EndDate > NOW())\n \"\"\"\n return self.milestones(predicate)", "def get_wiki_lines(wt, predicate=None):\n return [line for line in wt.contents.split('\\n') if not callable(predicate) or predicate(line)]", "def getModelMilestones(self):\n if self.__jobInfo.engModelMilestones is not None:\n return json.loads(self.__jobInfo.engModelMilestones)\n else:\n return None", "def test_milestone_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def predicates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WorkflowIssuesFilterPredicateArgs']]]]:\n return pulumi.get(self, \"predicates\")", "def milestones(self, milestones):\n\n self._milestones = milestones", "def get_milestones(counts_by_site_per_interval):\n milestones = {}\n for hpo in counts_by_site_per_interval:\n milestones[hpo] = {}\n\n for hpo in counts_by_site_per_interval:\n dates = sorted(counts_by_site_per_interval[hpo].keys())\n for date in dates:\n count = counts_by_site_per_interval[hpo][date]\n if count > 0 and 'first_fp' not in milestones[hpo]:\n milestones[hpo]['first_fp'] = date\n if 'first_fp' not in milestones[hpo]:\n milestones[hpo]['first_fp'] = '0'\n return milestones", "def milestones(request):\n # we need to use {% url %} with an exhibit {{.foo}} as param,\n # fake { and } to be safe in urllib.quote, which is what reverse\n # calls down the line.\n if '{' not in urllib.always_safe:\n always_safe = urllib.always_safe\n urllib.always_safe = always_safe + '{}'\n else:\n always_safe = None\n r = render_to_response('shipping/milestones.html',\n {'login_form_needs_reload': True,\n 'request': request,\n },\n context_instance=RequestContext(request))\n if always_safe is not None:\n urllib.always_safe = always_safe\n return r", "def test_issue_get_milestone(self):\n pass", "def filter(self, fn):\r\n\t\treturn 
FilterProjectedList(self, [fn])", "def backlog_milestone():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"project\", help=\"name of the project\")\n parser.add_argument(\"milestone\", help=\"name of the milestone\")\n\n args = parser.parse_args()\n\n session = GithubSession()\n\n project_data = session.get_project(args.project)\n\n milestone_data = session.get_milestone(args.milestone)\n milestone_title = milestone_data[\"title\"]\n\n backlog_data = session.get_column(project_data, \"backlog\")\n icebox_data = session.get_column(project_data, \"icebox\")\n\n results = session.search(f'repo:openslate/openslate milestone:\"{milestone_title}\"')\n for search_data in results[\"items\"]:\n issue_data = get_issue(search_data[\"number\"]).issue\n issue_card = session.get_card(project_data, issue_data)\n\n if issue_card[\"column_url\"] == icebox_data[\"url\"]:\n session.move_card(issue_card, backlog_data)\n\n print(\".\", end=\"\")", "def github_filter_prs(**args):\n git_hub = Github(args['token'])\n repo = git_hub.get_repo(args['repo'])\n all_prs = [{'pr':repo.get_pull(pr.number), 'issue':repo.get_issue(pr.number)} for pr in repo.get_pulls()]\n filters = {}\n for filter_option in args['filters'].split(','):\n part = filter_option.partition(\"=\")\n filters[part[0]] = part[2]\n\n if 'owner' in filters:\n all_prs = _return_specific_owner_prs(all_prs, filters)\n if 'label' in filters:\n all_prs = _return_specific_labeled_prs(all_prs, filters)\n if 'status' in filters:\n all_prs = _return_specific_status_prs(all_prs, filters)\n if 'comment' in filters:\n all_prs = _return_specific_comment_prs(all_prs, filters)\n\n return all_prs", "def tickets_in_milestone(self, milestone_names, milestone_start, end):\n\n db = self.env.get_read_db()\n cursor = db.cursor()\n try:\n cursor.execute(\"\"\"\n SELECT _snapshottime, id\n FROM ticket_bi_historical\n WHERE milestone IN ({0})\n AND _snapshottime >=%s\n AND _snapshottime <=%s \n ORDER BY _snapshottime ASC\n \"\"\".format(','.join(('%s',)*len(milestone_names))), milestone_names + [milestone_start, end])\n except Exception:\n db.rollback()\n self.log.exception('Unable to query the historical ticket table')\n return []\n\n data = {}\n for key, ticket in groupby(cursor, itemgetter(0)):\n data[key] = set([])\n for i in ticket:\n data[key].update([i[1]]) \n # Note no sorting necessary as qpPlot does this for us\n\n return data", "def milestone(self, milestone_id):\r\n return milestones.Milestone(self, milestone_id)", "def filter(predicate): #pylint: disable=redefined-builtin\n from xpedite.analytics.timelineFilter import TimelineFilter\n profiles = FilteredProfiles(TimelineFilter(predicate).apply(globalProfile()))\n return profiles", "def _getAllMinistries(date):\n session = Session()\n mfilter=sql.or_( \n sql.between(date, schema.groups.c.start_date, schema.groups.c.end_date),\n sql.and_(\n (schema.groups.c.start_date < date ),\n (schema.groups.c.end_date == None)\n )\n )\n query = session.query(domain.Ministry).filter(mfilter)\n return query.all()", "def filter(full_poi_list, type_of_poi):\n pois = []\n if type_of_poi == \"all\":\n for i in full_poi_list:\n entry = i[0]\n pois.append(entry)\n if type_of_poi == \"gym\":\n for i in full_poi_list:\n if i[1] == 2:\n entry = i[0]\n pois.append(entry)\n return pois", "def get_sites_at_milestone(milestones, dates):\n\n # Initialize variable with zero-filled values by date \n sites_at_milestone = {'first_fp': {}}\n for date in dates:\n sites_at_milestone['first_fp'][date] = 0\n\n for hpo in milestones:\n 
first_fp_date = int(milestones[hpo]['first_fp'].replace('-', ''))\n for date in dates:\n first_fp_count = 0\n date_int = int(date.replace('-', ''))\n if first_fp_date > 0 and first_fp_date <= date_int:\n first_fp_count += 1\n sites_at_milestone['first_fp'][date] += 1\n\n return sites_at_milestone", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)", "def label_milestone_issues():\n session = GithubSession()\n\n labels = list(session.get_labels())\n labels_by_name = dict([(x['name'], x) for x in labels])\n\n milestones = list(session.get_milestones())\n\n for milestone in milestones:\n label_data = labels_by_name[f'epic:{milestone[\"title\"].strip()}']\n\n for issue in session.get_issues(milestone=milestone[\"number\"], state='all'):\n session.add_label(label_data, number=issue['number'])", "def get_primaries(exclude_group):", "def test_returns_milestone_if_exists(self):\n repo = gnome.gh.repo_from_callback(MockCallback())\n repo._milestones = (MockFooMilestoneWrapper(),)\n found = repo.get_milestone('foo')\n self.assertTrue(found)", "def filter(self, predicate):\n self.children = [c for c in self.children if predicate(c)]\n for c in self.children:\n c.filter(predicate)", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects" ]
[ "0.6634532", "0.63786525", "0.60176355", "0.60008043", "0.5678339", "0.56678134", "0.52445894", "0.5214817", "0.5151322", "0.51425946", "0.5142451", "0.50417787", "0.49899873", "0.49864954", "0.49168158", "0.486399", "0.4834319", "0.47452897", "0.47217488", "0.4703715", "0.46419692", "0.46295688", "0.45994994", "0.45468175", "0.45390582", "0.45248574", "0.45184848", "0.44928026", "0.44742474", "0.44741696" ]
0.80855364
0
Returns only the milestones that are currently active (that is, those that either omit the start and end dates, or those with start and end dates where start < now < end).
def milestones_active(self, within_component=None):
    if within_component is not None:
        if isinstance(within_component, str):
            within_component = self.components("identifier = %s" % within_component)[0]
        predicate = """
            (StartDate == nil || StartDate < NOW())
            AND
            (EndDate == nil || EndDate > NOW())
            AND
            (component.identifier == nil OR %s BEGINSWITH component.fullName)
        """
        return self.milestones(predicate % (_obj_id(within_component), within_component["fullName"]))
    else:
        predicate = """
            (StartDate == nil || StartDate < NOW())
            AND
            (EndDate == nil || EndDate > NOW())
        """
        return self.milestones(predicate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active (self, after = None, before = None):\n\n active = ActivityList()\n active.list = [actor for actor in self.list\n if (after == None or\n actor[\"period\"].end >= after) and\n (before == None or\n actor[\"period\"].start <= before)]\n return active", "def active(self, **kwargs):\r\n now = date.today()\r\n return self.visible(is_approved=True, start_date__lte=now, end_date__gte=now, **kwargs)", "def active(self) -> models.QuerySet[PersistentMessage]:\n start_date_filter = models.Q(display_from__lte=tz_now())\n end_date_filter = models.Q(display_until__gte=tz_now()) | models.Q(\n display_until__isnull=True\n )\n return self.filter(start_date_filter).filter(end_date_filter)", "def get_active_filter():\n return (Q(start_date__gte=timezone.now()) |\n Q(end_date__gte=timezone.now()))", "def get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )", "def milestones(self):\r\n return milestones.Milestones(self)", "def milestones(self, predicate=None):\n \n if predicate is None:\n return self._get(\"milestones\").json()\n else:\n return self._get(\"milestones/search\", params={\"predicate\":predicate}).json()", "def get_active_tasks(self):\n qry = Task.query.filter_by(user=self.id)\n qry = qry.filter_by(completed_on=None)\n return qry.all()", "def tickets_in_milestone(self, milestone_names, milestone_start, end):\n\n db = self.env.get_read_db()\n cursor = db.cursor()\n try:\n cursor.execute(\"\"\"\n SELECT _snapshottime, id\n FROM ticket_bi_historical\n WHERE milestone IN ({0})\n AND _snapshottime >=%s\n AND _snapshottime <=%s \n ORDER BY _snapshottime ASC\n \"\"\".format(','.join(('%s',)*len(milestone_names))), milestone_names + [milestone_start, end])\n except Exception:\n db.rollback()\n self.log.exception('Unable to query the historical ticket table')\n return []\n\n data = {}\n for key, ticket in groupby(cursor, itemgetter(0)):\n data[key] = set([])\n for i in ticket:\n data[key].update([i[1]]) \n # Note no sorting necessary as qpPlot does this for us\n\n return data", "def milestones(self):\r\n return IssueMilestones(self)", "def todo(self, start_time=None, end_time=None):\n\n if not start_time:\n start_time = datetime.datetime.now()\n if not end_time:\n end_time = datetime.datetime.now() + datetime.timedelta(days=1)\n if start_time > end_time:\n raise ValueError\n queryset = self.model.objects.filter(active=True)\n\n todo = []\n for t in queryset:\n execution_time = t.next_run(start_time)\n while execution_time < end_time:\n todo.append(t)\n execution_time = t.next_run(execution_time + datetime.timedelta(minutes=1))\n return todo", "def get_claimed_objects_in_range(start, stop):\n return RawPlantActivity.objects.filter(\n TS_LOAD__gte=start,\n TS_LOAD__lte=stop,\n POOL_CD__exact='03',\n )", "def test_issue_get_milestones_list(self):\n pass", "def tickets_open_between_dates(self, db, milestone_names,\n milestone_start, end):\n\n self.log.debug('Querying the database for historical tickets open data')\n cursor = db.cursor()\n try:\n cursor.execute(\"\"\"\n SELECT _snapshottime, COUNT(DISTINCT id)\n FROM ticket_bi_historical\n WHERE milestone IN ({0})\n AND _snapshottime >=%s\n AND _snapshottime <=%s\n AND isclosed = 0\n GROUP BY _snapshottime\n ORDER BY _snapshottime ASC\n 
\"\"\".format(','.join(('%s',)*len(milestone_names))), milestone_names + [milestone_start, end])\n except Exception:\n db.rollback()\n self.log.exception('Unable to query the historical ticket table')\n return []\n\n return [(str(i[0]), i[1]) for i in cursor]", "def active(self, when=None):\n if when is None:\n now = object_session(self).query(func.current_timestamp()).scalar()\n when = single(now)\n\n return when.overlaps(closed(self.begins_at, self.ends_at))", "def servicemanage_get_active_by_window(context,\n begin,\n end=None,\n project_id=None):\n session = get_session()\n query = session.query(models.ServiceManage)\n\n query = query.filter(or_(models.ServiceManage.deleted_at == None,\n models.ServiceManage.deleted_at > begin))\n if end:\n query = query.filter(models.ServiceManage.created_at < end)\n if project_id:\n query = query.filter_by(project_id=project_id)\n\n return query.all()", "def active_projects(self):\n return self.projects.filter(active=True)", "def upcoming(self):\n return self.filter(start__gte=timezone.now())", "def current_objs(self, oid):\n\n t = get_time()\n return self.query.filter(self.oid == oid, self.start <= t, t <= self.end).all()", "def active(cls, when=None):\n if when is None:\n now = session.utcnow()\n when = single(now)\n\n return and_(\n or_(cls.begins_at == null(), literal(when.end) == null(),\n cls.begins_at <= literal(when.end)),\n or_(literal(when.begin) == null(), cls.ends_at == null(),\n literal(when.begin) <= cls.ends_at)\n ).label(\"active\")", "def get_active_milestone(self, contract_reference):\n data = {}\n\n url = 'fp/milestones/statuses/active/contracts/{0}'.format(contract_reference)\n return self.get(url, data)", "def recent(self):\n return self.filter(\n start_date__lte=self.current().end_date + timezone.timedelta(days=1),\n end_date__gte=self.current().start_date - timezone.timedelta(days=1),\n )", "def is_active(self):\n return self.start_date <= timezone.now() <= self.end_date", "def active(self):\n return self.filter(active=True)", "def visible(self):\n return self.get_queryset().filter(\n record_status=self.model.ACTIVE, merged_with=None)", "def is_inactive(self):\n now = datetime.datetime.now()\n return not (self.start_date < now < self.end_date)", "def active_comics():\n # FUTURE: Should not include ended comics?\n return Comic.objects.exclude(active=False)", "def running(cls, query_set=None):\n if query_set is None:\n query_set = cls.objects_visible.all()\n return filter(lambda s: s.state.is_running, query_set)", "def get_queryset(self):\n kwargs = {}\n if self.ends_at:\n kwargs.update({'%s__lt' % self.date_field: self.ends_at})\n return super(BeforeMixin, self).get_queryset().filter(**kwargs)", "def instance_get_active_by_window_joined(context, begin, end=None,\n project_id=None, host=None,\n use_slave=False):\n session = get_session(use_slave=use_slave)\n query = session.query(models.Instance)\n\n query = query.options(joinedload('info_cache')).\\\n options(joinedload('security_groups')).\\\n filter(or_(models.Instance.terminated_at == null(),\n models.Instance.terminated_at > begin))\n if end:\n query = query.filter(models.Instance.launched_at < end)\n if project_id:\n query = query.filter_by(project_id=project_id)\n if host:\n query = query.filter_by(host=host)\n\n return _instances_fill_metadata(context, query.all())" ]
[ "0.61798745", "0.61652255", "0.6027968", "0.59534323", "0.5700915", "0.56925315", "0.5610645", "0.56026137", "0.5589813", "0.55476916", "0.55194217", "0.55154073", "0.54779494", "0.54456633", "0.54270303", "0.5396362", "0.53825986", "0.53591615", "0.5356484", "0.5319471", "0.5262411", "0.5255535", "0.5210986", "0.5182204", "0.51724476", "0.5143222", "0.51033294", "0.50910056", "0.50410515", "0.5036274" ]
0.7112044
0
Returns the list of priorities
def priorities(self):
    return self._get("priorities").json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPriorityList(self):", "def priorities(self) -> Optional[Iterable[str]]:\n\n return self.get_project_priorities()", "def getPriority(self):", "def id_priority_list(self):\n return self._id_priority_list", "def init_priority(self):\n arr = []\n priority_dict = dict()\n\n for p in self.processes:\n priority_dict[p.id] = int(p.period)\n\n for key, value in sorted(priority_dict.items(), key=lambda value: value[1]):\n arr.append(key)\n\n return arr", "def getPriorityList(self):\r\n simple_list = [(0, self.s)]\r\n if self.priority == \"fib\":\r\n fib_heap = makefheap()\r\n fheappush(fib_heap, simple_list[0])\r\n return fib_heap\r\n return simple_list", "def priority(self):\n return self._pri", "def prioritizers(self):\n return self._prioritizers", "def priority_db() -> Dict[str, int]:\n priorities = {\"low\": 3, \"medium\": 2, \"high\": 1}\n return priorities", "def getPriority(self):\n return self.priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def get_priority(self):\n return self.options[\"priority\"]", "def load_priority_list(cls):\n rval = []\n cfg = CrawlConfig.get_config()\n priglob = cfg.get_d('cv', 'priority', '')\n if priglob == '':\n return rval\n\n pricomp = cfg.get_d('cv',\n 'completed',\n U.pathjoin(U.dirname(priglob), 'completed'))\n\n for pripath in U.foldsort(glob.glob(priglob)):\n with open(pripath, 'r') as f:\n for line in f.readlines():\n path = line.strip()\n rval.append(Checkable(path=path, type='f'))\n os.rename(pripath, U.pathjoin(pricomp, U.basename(pripath)))\n\n return rval", "def get_priority(self):\n return self.options['priority']", "def priority(self):\n pass # pragma: no cover", "def priority(self):\n pass # pragma: no cover", "def priority(node):\n return node.priority", "def priority(self) -> str:\n return pulumi.get(self, \"priority\")", "def get_priority(self):\n return self._priority", "def get_priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def sort_priors(self):\n return", "def get_priority(self):\n return str(self.priority)", "def priority(name):\n try:\n manager = Actions()\n priority = Priority[name]\n ordered_tasks = manager.order_by_priority(priority)\n click.echo(\"Ordered by priority:\" + click.style(name, bg='red', fg='white'))\n click.echo()\n console_utils.format_print_ordered(ordered_tasks)\n except IndexError as e:\n click.echo(\"IndexError: \"+e)\n except Exception as e:\n click.echo(e)", "def test_priority_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)" ]
[ "0.82035583", "0.79757464", "0.71184015", "0.69953656", "0.6906988", "0.66373295", "0.6556149", "0.6515386", "0.6501138", "0.6481751", "0.6454467", "0.6454467", "0.6454467", "0.6454467", "0.63370156", "0.63306737", "0.63022155", "0.626634", "0.626634", "0.6265599", "0.62434703", "0.6227856", "0.6227856", "0.6227751", "0.6227751", "0.6227751", "0.6221523", "0.61592585", "0.6140235", "0.6088697" ]
0.8173191
1
Return the list of valid state transitions from state.
def state_transitions(self, state):
    return self.states("ANY PreviousStates.identifier = '%s'" % _obj_id(state))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transitions(self, from_state=None):\n return list(self.iter_transitions(from_state))", "def transitions(self, state):\n if len(set(state)) < len(state):\n yield self.STOP_STATE\n return\n for hidx in xrange(self.num_players):\n for lidx in xrange(hidx):\n (lower, higher) = (state[lidx], state[hidx])\n yield self.makestate(((2*lower) if (i == lidx) else ((higher - lower) if (i == hidx) else s)) for (i, s) in enumerate(state))", "def transitions(self) -> List[Dict]:\n return []", "def iter_transitions(self, from_state=None):\n if from_state is None:\n return self._iter_transitions_all_()\n else:\n return iter(self.state(from_state).transitions)", "def get_transitions(self):\n transitions = []\n for row in self.states:\n t_row = []\n for column in self.states:\n t_row.append([row, column])\n transitions.append(t_row)\n return sorted(transitions)", "def can(self, event):\n return [t.new_state for t in self._transitions if t.event.equals(event)]", "def get_next_transitions(\n self, state: State\n ) -> Collection[Tuple[Character, float, State]]:\n _check_is_legal_state(state, self.nb_states)\n return {\n (character, probability, successor)\n for character, (successor, probability) in self.transition_dict[\n state\n ].items()\n }", "def get_active_transitions(self):\n return [t for st in self.get_active_states() for t in st.transitions]", "def state_list(self) -> Sequence[TState]:\n pass", "def get_valid_op_transitions(self, state):\n #assert( isinstance(state, self.STATES) )\n if state in self._tns.keys():\n out = self._tns[state]\n else:\n out = None\n return out", "def get_possible_states(self) -> List[State]:\n next_states = []\n for action in self._legal_moves():\n next_states.append(self.move(action))\n return next_states", "def get_all_states(self):\n return tuple(self._transition_probs.keys())", "def allowable_states(self, user):\n \n # I\"m the relevant state choices.\n choices = []\n \n # I\"m the states already allowed for the users\n existing_states = []\n \n for transition in workflow.STATE_TRANSITIONS:\n \n if self.state != str(transition[0]):\n # if the current state does not match a first element in the\n # state transitions we skip to the next transition\n continue\n \n # Fire the validation function.\n if transition[2](self, user):\n \n # grab the new state and state description\n new_state = str(transition[1])\n description = transition[3]\n \n # build new element\n element = (new_state, description)\n \n # append new element to choices\n choices.append(element)\n \n return choices", "def get_list_of_states(self):\n return self.states", "def valid() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.MIXED,\n AssignmentState.REJECTED,\n AssignmentState.SOFT_REJECTED,\n AssignmentState.EXPIRED,\n ]", "def transitions(self) -> typing.Optional[typing.List[\"Transition\"]]:\n return self._values.get('transitions')", "def states(self):\n knownstates = set(self.keys())\n for possiblestates in self.values():\n for i in possiblestates:\n knownstates.add(i)\n return list(knownstates)", "def CHECK_transition_frames(self):\n tr_frames = []\n for i, frame in enumerate(self.y):\n if not np.all(frame == frame[0]):\n tr_frames.append(frame)\n\n print('there are ', len(tr_frames), ' frames containing a transition')\n return tr_frames", "def _iter_transitions_all_(self):\n for state in self.iter_states():\n for t in state.transitions:\n yield t", "def 
get_possible_actions(self, state):\n return tuple(self._transition_probs.get(state, {}).keys())", "def complete_list_of_states():\n # funny way of getting all the states that are defined in ConcertClientState.msg\n return concert_msgs.ConductorGraph.__slots__", "def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__", "def getLegalActions(self, state):\n actions = [i for i in range(-5, 6)]\n for action in actions:\n if action > state[0] or action < -state[1]:\n actions.remove(action)\n return actions", "def state_sequence(node):\n states = [node.state]\n while node.previous:\n node = node.previous\n states.append(node.state)\n return states[::-1]", "def get_states():\n # Getting all hidden state through time\n all_hidden_states = tf.scan(GRU, processed_input, \n initializer=initial_hidden, name='states')\n return all_hidden_states", "def state_change_times(self) -> typing.List[float]:\n state_change_times = {0.}\n state_change_times.update(self.population.presence_interval().transition_times())\n state_change_times.update(self.ventilation.transition_times(self.room))\n \n return sorted(state_change_times)", "def get_reward_states(self):\n state1 = State(7, 7)\n return [state1]", "def setup_transition_list():\n xn_list = []\n\n xn_list.append( Transition(3, 4, 2., 'left ejection') )\n xn_list.append( Transition(12, 2, 2., 'right ejection') )\n xn_list.append( Transition(19, 20, 2.e8, 'downward ejection, left') )\n xn_list.append( Transition(19, 24, 2.e8, 'downward ejection, right') )\n xn_list.append( Transition(28, 17, 1., 'upward ejection, left') )\n xn_list.append( Transition(28, 18, 1., 'upward ejection, right') )\n xn_list.append( Transition(11, 15, 3.0e7, 'demobilization (right wall)') )\n xn_list.append( Transition(13, 15, 3.0e7, 'demobilization (left wall)') )\n xn_list.append( Transition(29, 31, 2.0e6, 'demobilization (friction)') )\n xn_list.append( Transition(30, 31, 2.0e6, 'demobilization (friction)') )\n xn_list.append( Transition(1, 4, 3.0e8, 'leftward motion') )\n xn_list.append( Transition(8, 2, 3.0e8, 'rightward motion') )\n xn_list.append( Transition(20, 17, 2.0e6, 'upward motion') )\n xn_list.append( Transition(24, 18, 2.0e6, 'upward motion') )\n xn_list.append( Transition(18, 24, 2.0e8, 'downward motion') )\n xn_list.append( Transition(17, 20, 2.0e8, 'downward motion') )\n\n if _DEBUG:\n print()\n print('setup_transition_list(): list has',len(xn_list),'transitions:')\n for t in xn_list:\n print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)\n\n return xn_list", "def get_states(self):\n states = []\n for chords in self.training_data:\n chunks = [chords[x:x+self.order] for x in range(0,\n len(chords), self.order)]\n for chunk in chunks:\n chunk_string = \" \".join(chunk)\n if chunk_string not in states:\n states.append(chunk_string)\n return sorted(states)", "def predecessors(self, state, valid_input=None):\n if valid_input is not None:\n valid_list = list()\n for input in valid_input:\n input_list = input\n if not isinstance(input_list, list):\n input_list = [input]\n valid_list.append(input_list)\n valid_input = valid_list\n\n unhandeled_direct_predecessors = {s:[] for s in self.states() }\n for t in self.transitions():\n if valid_input is None or t.word_in in valid_input:\n unhandeled_direct_predecessors[t.to_state].append(t.from_state)\n done = []\n open = [state]\n while len(open) > 0:\n s = open.pop()\n candidates = unhandeled_direct_predecessors[s]\n if candidates is not None:\n open.extend(candidates)\n 
unhandeled_direct_predecessors[s] = None\n done.append(s)\n return(done)" ]
[ "0.7897553", "0.7058213", "0.6909843", "0.6835081", "0.67144054", "0.67064613", "0.6659277", "0.65004504", "0.6473224", "0.6464462", "0.6434455", "0.64069766", "0.6357915", "0.63360065", "0.6276572", "0.6270292", "0.6219843", "0.6212978", "0.61455226", "0.6142549", "0.6134658", "0.611697", "0.6081639", "0.60540617", "0.60434586", "0.60383946", "0.6035079", "0.60230845", "0.60051036", "0.5993673" ]
0.73548394
1
Fetch a single problem. identifier should be an int representing the problem identifier to fetch.
def problem(self, identifier):
    return self._get("problems/%d" % identifier).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_problem(identifier):\n problem = database.session.query(Problem, ProblemData).join(ProblemData)\n if is_pid(identifier):\n problem = problem.filter(Problem.pid == identifier).first()\n else:\n problem = problem.\\\n filter(Problem.shortname == identifier).first()\n\n cases = list()\n for case in database.session.query(SampleCase).\\\n filter(SampleCase.pid == problem.Problem.pid).\\\n all():\n cases.append({\n 'case_num': case.case_num,\n 'input': case.input,\n 'output': case.output\n })\n\n return serve_response({\n 'pid': problem.Problem.pid,\n 'name': problem.Problem.name,\n 'shortname': problem.Problem.shortname,\n 'appeared': problem.Problem.appeared,\n 'difficulty': problem.Problem.difficulty,\n 'added': problem.Problem.added,\n 'comp_release': problem.Problem.comp_release,\n 'description': problem.ProblemData.description,\n 'input_desc': problem.ProblemData.input_desc,\n 'output_desc': problem.ProblemData.output_desc,\n 'sample_cases': cases\n })", "def get_problem(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/problem/{id}\")", "def get_problem(problem_id):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n if result is None:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND\n else:\n return jsonify(result)", "def details(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n annotator = AdminAnnotator(self.circulation, flask.request.library)\n\n # single_entry returns an OPDSEntryResponse that will not be\n # cached, which is perfect. 
We want the admin interface\n # to update immediately when an admin makes a change.\n return AcquisitionFeed.single_entry(self._db, work, annotator)", "def get(self, identifier):\n fields = \",\".join(self.model.get_fields_name())\n query = \"select {0} from {1} where {2}=?\".format(\n fields,\n self.ressource_config[\"table\"],\n self.model.pk_field.name)\n cursor = self.get_connector().cursor()\n cursor.execute(query, (identifier,))\n obj = cursor.fetchone()\n\n if obj:\n fields = self.model.get_fields_name()\n return dict(zip(fields, obj))\n else:\n raise NotFound", "def Get(id):\n try:\n bug = Bug.get_by_id(id)\n if not bug:\n raise InvalidIdError\n except (db.Error, InvalidIdError), e:\n logging.error('bug.Get: Exception while retrieving bug (%s): %s', id, e)\n raise InvalidIdError('Bug not found [id=%s].%s' % (id, e))\n return bug", "def get_element_from_id(self, identifier):\n classification, org, rel, com = classify_id(identifier)\n if classification == id_classification.org:\n return self.get_org_question(org)\n elif classification == id_classification.rel:\n return self.get_rel_question(org, rel)\n elif classification == id_classification.com:\n return self.get_rel_comment(org, rel, com)\n return None", "def fetch_one(cls: Type[_T], session: Session, identifier: int) -> _T:\n return Query(cls, session=session).get(identifier)", "def node_by_id(self, identifier):\n for node in self.nodes:\n if node.identifier == identifier:\n return node\n raise Exception(\"Node '{0}' not available in {1}\".format(\n identifier, self.name))", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))", "def problem(request, problemid):\n curr_problem = get_object_or_404(Problem, id=problemid)\n context = {'problem': curr_problem}\n return render(request, 'code_challenge/problem.html', context)", "def get_issue(self, issue_id, **kwargs):\n raise NotImplementedError", "def get_issue(self, issue_id, **kwargs):\n raise NotImplementedError", "def get_issue(self):\n issue_id = self.kwargs['issue_id']\n try:\n issue = Issue.objects.get(pk=issue_id)\n except ObjectDoesNotExist:\n raise ObjectNotFound('Not found')\n if issue.project.pk != self.project.pk:\n raise ObjectNotFound('Not found')\n return issue", "def get(self, fetch_number: int):\n return self.results[fetch_number]", "def get_issue(self, issue_id):\n try:\n json = self.get('repos/%(owner)s/%(repo)s/issues/%(issue_id)d' % {\n 'owner': self.repo_owner,\n 'repo': self.repo_name,\n 'issue_id': issue_id,\n })\n\n label_list = [label_dict['name'] for label_dict in json['labels']]\n\n return Issue(json['number'], label_list)\n except ResourceNotFound:\n return None", "def get_bug(self, id, year=None):\n year = self.get_year(id, switch='bugs') if year is None else year\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n bugs = helpers.load_json(path)\n for bug in bugs:\n if id == bug['id']:\n return bug\n raise Exception('No bug identified by {}'.format(id))", "def complaints(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n counter = self._count_complaints_for_work(work)\n response = dict({\n \"book\": {\n \"identifier_type\": identifier_type,\n \"identifier\": identifier\n },\n \"complaints\": counter\n })\n\n return response", "def get(self, problem_id=None):\n problem = 
self.sess.query(\n DetailedProblem,\n func.ST_AsGeoJSON(DetailedProblem.location)).filter(\n DetailedProblem.id == problem_id)\n data = generate_data(problem)[0]\n self.write(data)", "def _load_issue(**args):\n check_required_fields(['token', 'repo', 'number'], **args)\n gh = Github(args['token'])\n repo = gh.get_repo(args['repo'])\n issue = repo.get_issue(args['number'])\n logger.debug(\" ISSUE: %s\", issue.number)\n return issue", "def lookup(self, identifier: str) -> IdentifierDeclaration:\n try:\n return self._declarations[identifier]\n except KeyError:\n if self.parent is None:\n raise errors.NotFoundError(\n f\"identifier {identifier!r} is unbound in scope {self!r}\"\n )\n return self.parent.lookup(identifier)", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))['item']", "async def get_issue(self, issue: int) -> \"AIOGitHubAPIRepositoryIssue\":\n _endpoint = f\"/repos/{self.full_name}/issues/{issue}\"\n\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIRepositoryIssue(self.client, response)", "def find_one_issue(connection, jsql):\n result = connection.retrieve_search(jsql, 0, 1)\n\n if result is None:\n raise ConnectionError(\"Fail to check if issue exist. Http status %d, %s\" %\n (connection.http_status, connection.value(\"errorMessages/0\")))\n\n cnt = int(connection.value(\"total\"))\n\n if cnt == 0:\n return None\n elif cnt == 1:\n return connection.get_issue(0)\n else:\n raise LookupError(\"Many issues are found with '%s'\" % jsql)", "def fetch_issue(repo, issue, issues_url, caching=CACHING_ACTIVE):\n if not caching:\n data = requests.get(issues_url + \"/{}\".format(issue)).json()\n if not response_check(data):\n return {}\n return data\n else:\n cached = get_cached_issue(repo, issue)\n if not cached:\n debug('cache miss issue', yellow)\n data = requests.get(issues_url + \"/{}\".format(issue)).json()\n if not response_check(data):\n return {}\n cache_issue(repo, issue, data)\n return data\n else:\n debug('Cache hit issue', green)\n return cached", "def report(self, identifier_type, identifier):\n\n # TODO: We don't have a reliable way of knowing whether the\n # complaing is being lodged against the work or against a\n # specific LicensePool.\n\n # Turn source + identifier into a set of LicensePools\n library = flask.request.library\n pools = self.load_licensepools(library, identifier_type, identifier)\n if isinstance(pools, ProblemDetail):\n # Something went wrong.\n return pools\n\n if flask.request.method == 'GET':\n # Return a list of valid URIs to use as the type of a problem detail\n # document.\n data = \"\\n\".join(Complaint.VALID_TYPES)\n return Response(data, 200, {\"Content-Type\": \"text/uri-list\"})\n\n data = flask.request.data\n controller = ComplaintController()\n return controller.register(pools[0], data)", "def get():\n id_num = int(input('Enter the ID number of the item you wish to retrieve\\n'))\n db_actions.retrieve(id_num)", "def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)", "def get_item_detail(self, identifier):\n\n try:\n return self.get_billing_item(identifier)\n except SoftLayerAPIError as exception:\n if exception.faultCode == 404:\n return self.get_billing_item_from_invoice(identifier)\n raise", "def retrieve(self, request, pk=None):\n try:\n # `pk` is a parameter to this function, and\n # Django parses it from the URL route parameter\n # 
http://localhost:8000/bugs/2\n #\n # The `2` at the end of the route becomes `pk`\n bug = Bug.objects.get(pk=pk)\n serializer = BugSerializer(bug, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)" ]
[ "0.6831753", "0.6268822", "0.5826658", "0.5733782", "0.5718513", "0.56838435", "0.563037", "0.55231625", "0.5486525", "0.5466694", "0.5442252", "0.5442006", "0.5442006", "0.53945965", "0.5390215", "0.5379254", "0.5360963", "0.5327507", "0.5321211", "0.52903354", "0.52741504", "0.52687687", "0.51949894", "0.5190855", "0.5143604", "0.51206785", "0.51074934", "0.5096703", "0.50904626", "0.5073682" ]
0.7195323
0
Search for a set of problems given a predicate.
def problem_search(self, predicate=None, savedQueryURL=None, includeDetail=False):
        params = {}
        if predicate is not None:
            params["predicate"] = predicate
        elif savedQueryURL is not None:
            params["savedQuery"] = savedQueryURL
        else:
            raise Error("Either predicate or savedQueryURL is required to do a search")
        if includeDetail:
            params["includeDetail"] = "true"
        return self._get("problems/search", params=params).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find(self, predicate):\n return [d for d in self.iter_tree if predicate(d)]", "def search_alg(query, questions_list, tag_list, language, difficulty):\n global user_flag\n list_return = [] # The list which we will be returning\n query_tags = []\n query_titels = []\n\n if not query:\n query_tags = tag_list.all()\n else:\n for item in query:\n if item in map(str, tag_list):\n query_tags.append(get_tag(item, tag_list))\n query_tags.extend(get_children(item, tag_list))\n else:\n query_titels.append(item)\n for question in questions_list:\n assert isinstance(question, Question)\n if question.visible or user_flag:\n # Tag search\n question_tags = list(question.tags.all())\n for soln in question.solutions.all():\n for tag in soln.tags.all():\n question_tags.append(tag)\n for quest_tag in question_tags:\n if quest_tag in query_tags:\n print(\"here\")\n if language_difficulty_check(question, language, difficulty):\n list_return.append(question)\n continue\n # Title search\n for title in query_titels:\n assert isinstance(title, str)\n if title.lower() == question.title.lower() and language_difficulty_check(question, language, difficulty):\n list_return.append(question)\n\n return list(set(list_return)) # This removes duplicates", "def _search_issues(self, summary):\n try:\n issues = self.jira.get_issues_by_summary(summary)\n except Exception as e:\n logging.error(\"Failed searching issues: \"+ str(e))\n return []\n return issues", "def _find(xs, predicate):\n for x in xs:\n if predicate(x):\n return x\n return None", "def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)", "def find_direct_containing(rules, param):\n\n return_list = []\n for rule in rules:\n if param in rules[rule]:\n return_list.append(rule)\n\n return return_list", "def search(self, *queries):\n haystack = self.make_haystack()\n # re.search() returns a match object or None. 
The call to any() will\n # cast everything to booleans.\n return [any([re.search(query, data) for data in haystack])\n for query in queries]", "def solve(grid):\n puzzle_dict = grid_values(grid)\n return search(puzzle_dict)", "def search(self):\n\n term = self.substitute()\n ##print (\"searching:\",term)\n ##print (\"in facts\",self.facts)\n ##input()\n bindings = deepcopy(self.bindings)\n found = False\n for fact in self.facts:\n found = self.unify(term,fact,bindings)\n if found:\n bound_vars = list(bindings.keys())\n n_bound_vars = len(bound_vars)\n for i in range(n_bound_vars):\n for j in range(i+1,n_bound_vars):\n if bindings[bound_vars[i]] == bindings[bound_vars[j]]:\n return False\n self.facts.remove(self.substitute_with_bindings(bindings)) #THINK ABOUT THIS\n break\n return found", "def search_launchpad(project='openstack-ansible', states=None, order_by='-datecreated'):\n # TODO: Move the creation of folders into a context manager if python3 support only (TBD with launchpadlib)\n #Other cases will trigger an exception it's just to remove common cases...\n # (To be improved later when I have access to that lib)\n if not states or len(states) == 0:\n raise ValueError(\"States must be a non-empty list\")\n\n cache_folder = tempfile.mkdtemp()\n # If many tasks have to be done with one login, maybe we should implement a context manager, and/or\n # move this outside the function.\n launchpad = Launchpad.login_anonymously('osa_toolkit', 'production', cache_folder, version='devel')\n lp_project = launchpad.projects[project]\n bugs = lp_project.searchTasks(status=states, order_by=order_by)\n shutil.rmtree(cache_folder)\n return bugs", "def search(self, filtro):\n return [nota for nota in self.notas if nota.match(filtro)]", "def search(self, filter):\n return [note for note in self.notes if note.match(filter)]", "def search(pac, man='all'):\n if man == 'all':\n print \"Searching all package managers\"\n if pacman['fink'] != 'None':\n search_fink(pac)\n if pacman['brew'] != 'None':\n search_brew(pac)\n if pacman['port'] != 'None':\n search_port(pac)\n if pacman['pip'] != 'None':\n search_pip(pac)\n if pacman['gem'] != 'None':\n search_gem(pac)\n if pacman['cpan'] != 'None':\n search_cpan(pac)\n search_whohas(pac)\n else:\n nofunzone = {'fink': search_fink, 'brew': search_brew, 'port': search_port, 'pip': search_pip, 'gem': search_gem, 'cpan': search_cpan} \n #print nofunzone\n try:\n f = nofunzone[man]\n print \"trying to run a search on %s for %s\" % (man, pac)\n f(pac)\n except KeyError:\n print \"Please use search like this: haberdashery.py search package manager: \\nhaberdashery.py search %s %s\" % (man, pac)\n # locals()['search_%s(pac)' % man]", "def find(cards: list, query: list, threshold=0.75) -> list:\n name_results = find_by_name(cards, \" \".join(query), threshold=threshold)\n keyword_results = find_by_keywords(cards, query)\n if len(name_results) == 0:\n return keyword_results\n elif len(name_results) > 1 and len(keyword_results) == 1:\n return keyword_results\n else:\n return name_results", "def search(self, filtr):\n return [note for note in self.notes if note.match(filtr)]", "def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n indexes.append(cont)\n cont += 1\n if 
isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes", "def search(regex, paths, args, ignore_case=False, verbose=False):\n printer = MultiLinePrinter()\n for path in paths:\n if os.path.isdir(path):\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n if not KNOWN_TYPES or any([filename.endswith(suffix) for\n suffix in KNOWN_TYPES]):\n search_file(os.path.join(dirname, filename), regex,\n ignore_case, args.undefined, printer)\n else:\n search_file(path, regex, ignore_case, args.undefined, printer)", "def add_searcher_constraints(md, g, my_vars: dict, start: list, vertices_t: dict, deadline: int):\n # get variables\n X = get_var(my_vars, 'x')\n Y = get_var(my_vars, 'y')\n\n S, m = ext.get_set_searchers(start)\n Tau_ext = ext.get_set_time_u_0(deadline)\n\n # legality of the paths, for all s = {1,...m}\n for s in S:\n # 0, 1, 2... T\n for t in Tau_ext:\n v_t = vertices_t.get((s, t))\n # each searcher can only be at one place at each time (including the start vertex), Eq. (1, 7)\n if t == 0:\n md.addConstr(X[s, v_t[0], 0] == 1)\n\n for u in v_t:\n my_next_v = cm.get_next_vertices(g, s, u, t, vertices_t, Tau_ext)\n my_previous_v = cm.get_previous_vertices(g, s, u, t, vertices_t)\n if my_next_v is not None:\n # (Eq. 9) searcher can only move to: i in delta_prime(v) AND V^tau(t+1)\n # sum == 1 if searcher is at u, sum == zero if searcher is not at u (depends on X[s, u, t])\n md.addConstr(quicksum(Y[s, u, i, t] for i in my_next_v) == X[s, u, t])\n\n if my_previous_v is not None:\n # (Eq. 
8) searcher can only move to v from j in delta_prime(v) AND V^tau(t-1)\n md.addConstr(quicksum(Y[s, i, u, t - 1] for i in my_previous_v) == X[s, u, t])", "def get_triples_with_predicate(self, predicate):\n query = read_query('content exploration/triples_with_predicate') % predicate\n response = self._submit_query(query)\n return [(elem['sname']['value'], elem['oname']['value']) for elem in response]", "def find_all(self, p):\n ln = self.ln\n t = self.t\n occurrences = []\n hints = self.__getHints(p)\n for i in hints:\n # compare rest char in pattern with chars in text after hinted substring\n if t[i + ln:i + len(p)] == p[ln:]:\n occurrences.append(i)\n return occurrences", "def solve(grid):\n return search(grid_values(grid))", "def solve(grid):\n return search(grid_values(grid))", "def testResolveMulti(self):\n subject = \"aff4:/metadata:11\"\n\n predicates = []\n for i in range(0, 100):\n predicate = \"metadata:predicate\" + str(i)\n predicates.append(predicate)\n data_store.DB.Set(subject, predicate, \"Cell \" + predicate, timestamp=1000,\n token=self.token)\n\n results = [x for x in data_store.DB.ResolveMulti(subject, predicates,\n token=self.token)]\n\n self.assertEqual(len(results), 100)\n\n # Value\n for i in range(0, 100):\n self.assertEqual(results[i][1], \"Cell \" + predicates[i])\n self.assertEqual(results[i][0], predicates[i])\n\n # Now try to query for non existent predicates.\n predicates = predicates[:10]\n for i in range(10):\n predicates.append(\"metadata:not_existing\" + str(i))\n\n results = [x for x in data_store.DB.ResolveMulti(subject, predicates,\n token=self.token)]\n\n self.assertEqual(10, len(results))\n for i in range(0, 10):\n self.assertEqual(results[i][1], \"Cell \"+predicates[i])\n self.assertEqual(results[i][0], predicates[i])", "def find(*rules, search=None, ignore=None, **kwargs):\n prefs = kwargs.get('prefs', settings.InternalSettings())\n dryrun = kwargs.get('dryrun', False)\n if search is None:\n search = []\n\n if not ignore:\n ignore = []\n ignore.extend(prefs.get_ignored_paths('find'))\n\n rules = list(flatten(rules))\n\n # use a list so we can iterate more than once\n characters = list(parser.get_characters(flatten(search), ignore))\n\n filtered_chars = find_characters(rules, characters=characters)\n\n paths = [char.path for char in filtered_chars]\n\n if dryrun:\n openable = []\n printables = paths\n else:\n openable = paths\n printables = []\n\n return result.Success(openable=openable, printables=printables)", "def search(self, subject: str, predicate: str, obj: str, last_read: Optional[str] = None, as_of: Optional[datetime] = None) -> Tuple[DBIterator, int]:\n pass", "def solve(grid):\n\n return search(grid_values(grid))", "def solve(grid):\n\n return search(grid_values(grid))", "def find(where, ids):\n if not ids:\n return\n if not isinstance(ids, (set, list, tuple)):\n ids = [ids]\n for key in ids:\n if key in where:\n for i in where[key]:\n yield i", "def search_term():\n search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries", "def solve(self, grid):\n return self.search(self.parse_grid(grid))" ]
[ "0.558807", "0.518133", "0.5118416", "0.510903", "0.51005197", "0.5097114", "0.50305724", "0.48769644", "0.484233", "0.48394457", "0.48163816", "0.48043537", "0.48016292", "0.47515813", "0.47459945", "0.47293434", "0.47038335", "0.46879834", "0.46831623", "0.46495906", "0.46298048", "0.46298048", "0.46059301", "0.46041483", "0.46028835", "0.45903596", "0.45903596", "0.457182", "0.4564316", "0.45623094" ]
0.6172628
0
Create a new problem based on the provided problem data.
def problem_create(self, problem):
        return self._post("problems", json=problem).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_problem():\n # Admin check\n if not current_user.admin == 1:\n return serve_error('You must be an admin to create problems',\n response_code=401)\n\n try:\n # Convert the JSON to python array of dictionaries\n cases = request.form['cases']\n cases = loads(cases)\n for case in cases:\n if 'input' not in case or 'output' not in case:\n return serve_error(\n 'Sample case(s) were not formed correctly',\n response_code=400)\n\n # Create the problem\n name = request.form['name'][:32]\n shortname = name.lower().replace(' ', '')\n problem = Problem(\n name=name,\n shortname=shortname\n )\n if 'difficulty' in request.form:\n problem.difficulty = request.form['difficulty']\n if 'appeared_in' in request.form:\n problem.appeared = request.form['appeared_in']\n\n # Create the problem data and add it to the database\n problem_data = ProblemData(\n description=request.form['description'],\n input_desc=request.form['input_desc'],\n output_desc=request.form['output_desc']\n )\n if 'time_limit' in request.form:\n problem_data.time_limit = request.form['time_limit']\n\n # Create list of sample cases\n case_num = 1\n sample_cases = list()\n for case in cases:\n sample = SampleCase(\n case_num=case_num,\n input=case['input'],\n output=case['output']\n )\n case_num += 1\n sample_cases.append(sample)\n\n in_file = zipfile.ZipFile(request.files['in_file'])\n out_file = zipfile.ZipFile(request.files['out_file'])\n sol_file = request.files['sol_file']\n\n # If any required values were missing, serve an error\n except KeyError as err:\n return serve_error('Form field not found: ' + err[0],\n response_code=400)\n\n # Commit everything to the database\n pid = problem.commit_to_session()\n problem_data.pid = pid\n problem_data.commit_to_session()\n for case in sample_cases:\n case.pid = pid\n case.commit_to_session()\n\n # Store the judge data\n directory = os.path.join(app.config['DATA_FOLDER'],\n 'problems', str(problem.pid))\n in_file.extractall(directory)\n out_file.extractall(directory)\n os.mkdir(os.path.join(directory, 'test'))\n sol_file.save(os.path.join(directory, 'test', sol_file.filename))\n\n return serve_response({\n 'name': problem.name,\n 'shortname': problem.shortname,\n 'description': problem_data.description,\n 'input_desc': problem_data.input_desc,\n 'output_desc': problem_data.output_desc,\n 'sample_cases': cases,\n 'pid': problem.pid,\n 'difficulty': problem.difficulty\n })", "def create_issue(self, data, **kwargs):\n raise NotImplementedError", "def create_issue(self, data, **kwargs):\n raise NotImplementedError", "def Create(data):\n try:\n bug = Bug()\n bug.Patch(data)\n bug.put()\n except (TypeError, db.Error, AssertionError), e:\n logging.error('bug.Create: Exception while creating bug: %s', e)\n raise CreateError('Failed to create a new bug.\\n%s\\n' % e)\n return bug", "def create_problem(self, name=\"\", problem_type=\"\", problem_type_details={},\n data_dir_train=\"\", data_dir_test=\"\", files=[], table_names=[],\n entities_table_name=\"\", entities_featurized_table_name=\"\",\n target_table_name=\"\"):\n\n with self.__orm.session_scope() as session:\n try:\n problem = session.query(Problem).filter(Problem.name == name).one()\n print(\"Problem {} already exists\".format(name))\n return\n except NoResultFound:\n pass # we will create it\n\n problem = Problem(\n name = name,\n problem_type = problem_type,\n problem_type_details = json.dumps(problem_type_details),\n data_dir_train = data_dir_train,\n data_dir_test = data_dir_test,\n files = json.dumps(files),\n table_names = 
json.dumps(table_names),\n entities_table_name = entities_table_name,\n entities_featurized_table_name = entities_featurized_table_name,\n target_table_name = target_table_name,\n )\n session.add(problem)\n print(\"Problem {} successfully created\".format(name))", "def getProblem(self):\n return ProblemInstance(nDays=self.nDays,\n nSC=self.nSC,\n nGS=self.nGS,\n timewindows=self.timewindows,\n requirements=self.requirements)", "def create_problem(request, group_id: int, document_id: int, content_type_id: int):\n content_type = get_object_or_404(ContentType, id=content_type_id)\n document = _get_document_if_allowed(request, group_id, document_id)\n # We create an instance of the generator class to get the example text.\n Generator = content_type.model_class()\n example_problem = Generator()\n example_data = example_problem.example_data()\n default_text = example_problem.render(example_data, default_text=True)\n form = problem_form(content_type, request.POST or None)\n if form.is_valid():\n problem: Problem = form.save(commit=False)\n problem.document = document\n problem.save()\n return redirect(problem.document.get_absolute_url())\n return render(\n request,\n \"problems/create_problem.html\",\n {\n \"document\": document,\n \"form\": form,\n \"example_datum\": example_data[0],\n \"default_text\": default_text,\n },\n )", "def __init__(self, problem_code=None, contest_code=None, creation_time=None, status=None, tags=None, problem_name=None, problem_redirect=None, is_added_to_practice=None, contest_url=None, problem_url=None): # noqa: E501 # noqa: E501\n\n self._problem_code = None\n self._contest_code = None\n self._creation_time = None\n self._status = None\n self._tags = None\n self._problem_name = None\n self._problem_redirect = None\n self._is_added_to_practice = None\n self._contest_url = None\n self._problem_url = None\n self.discriminator = None\n\n if problem_code is not None:\n self.problem_code = problem_code\n if contest_code is not None:\n self.contest_code = contest_code\n if creation_time is not None:\n self.creation_time = creation_time\n if status is not None:\n self.status = status\n if tags is not None:\n self.tags = tags\n if problem_name is not None:\n self.problem_name = problem_name\n if problem_redirect is not None:\n self.problem_redirect = problem_redirect\n if is_added_to_practice is not None:\n self.is_added_to_practice = is_added_to_practice\n if contest_url is not None:\n self.contest_url = contest_url\n if problem_url is not None:\n self.problem_url = problem_url", "def generate_problem_data(P, q, A, l, u, problem_name, sols_data={}):\n # Get problem dimension\n n = P.shape[0]\n m = A.shape[0]\n\n #\n # GENERATE HEADER FILE\n #\n f = open(problem_name + \"/\" + problem_name + \"_\" + \"data.h\", \"w\")\n\n # Add definition check\n f.write(\"#ifndef \" + problem_name.upper() + \"_DATA_H\\n\")\n f.write(\"#define \" + problem_name.upper() + \"_DATA_H\\n\")\n\n # Add Includes\n f.write(\"#include \\\"osqp_api.h\\\"\\n\")\n f.write(\"#include \\\"osqp_tester.h\\\"\\n\")\n f.write(\"\\n\\n\")\n\n f.write(\"/* Test case's QP problem data */\\n\")\n f.write(\"class %s_prob_data : public OSQPTestData {\\n\" % problem_name)\n f.write(\"public:\\n\")\n f.write(\" %s_prob_data();\\n\" % problem_name)\n f.write(\" ~%s_prob_data() = default;\\n\" % problem_name)\n f.write(\"};\\n\\n\")\n\n #\n # Create additional data structure\n #\n f.write(\"/* Test case's additional data and solution */\\n\")\n f.write(\"class %s_sols_data {\\n\" % problem_name)\n 
f.write(\"public:\\n\")\n f.write(\" %s_sols_data();\\n\" % problem_name)\n f.write(\" ~%s_sols_data();\\n\\n\" % problem_name)\n # Generate further data and solutions\n for key, value in sols_data.items():\n if isinstance(value, str):\n # Status test get from C code\n f.write(\" OSQPInt %s;\\n\" % key)\n # Check if it is an array or a scalar\n elif isinstance(value, np.ndarray):\n if isinstance(value.flatten(order='F')[0], int):\n f.write(\" OSQPInt* %s;\\n\" % key)\n elif isinstance(value.flatten(order='F')[0], float):\n f.write(\" OSQPFloat* %s;\\n\" % key)\n else:\n if isinstance(value, int):\n f.write(\" OSQPInt %s;\\n\" % key)\n elif isinstance(value, float):\n f.write(\" OSQPFloat %s;\\n\" % key)\n f.write(\"};\\n\\n\")\n\n #\n # Creator for the QP test data and additional test case data\n #\n f.write(\"/* Create test case data */\\n\")\n f.write(\"class %s_test_fixture : public OSQPTestFixture {\\n\" % problem_name)\n f.write(\"public:\\n\")\n f.write(\" %s_test_fixture() : OSQPTestFixture()\\n\" % problem_name)\n f.write(\" {\\n\")\n f.write(\" data.reset(new %s_prob_data());\\n\" % problem_name)\n f.write(\" sols_data.reset(new %s_sols_data());\\n\" % problem_name)\n f.write(\" }\\n\")\n f.write(\" ~%s_test_fixture() = default;\\n\\n\" % problem_name)\n f.write(\"protected:\\n\")\n f.write(\" std::unique_ptr<%s_sols_data> sols_data;\\n\" % problem_name)\n f.write(\"};\\n\\n\")\n\n # Close header file\n f.write(\"#endif\\n\")\n f.close()\n\n # Open a file to define the problem data\n f = open(problem_name + \"/\" + problem_name + \"_\" + \"data.cpp\", \"w\")\n\n # Write include headers\n f.write('#include \\\"%s_data.h\\\"\\n' % problem_name)\n f.write(\"\\n\\n\")\n\n #\n # Generate QP problem data\n #\n f.write(\"/* Function to generate QP problem data */\\n\")\n f.write(\"%s_prob_data::%s_prob_data() : OSQPTestData() {\\n\" % (problem_name, problem_name))\n\n # Write problem dimensions\n f.write(\"// Problem dimensions\\n\")\n write_int(f, n, \"n\", \"this\")\n write_int(f, m, \"m\", \"this\")\n f.write(\"\\n\")\n\n # Write problem vectors\n f.write(\"// Problem vectors\\n\")\n write_vec_float(f, l, \"l\", \"this\")\n write_vec_float(f, u, \"u\", \"this\")\n write_vec_float(f, q, \"q\", \"this\")\n f.write(\"\\n\")\n\n # Write matrix A\n write_mat_sparse(f, A, \"A\", \"this\")\n write_mat_sparse(f, P, \"P\", \"this\")\n\n f.write(\"}\\n\\n\")\n\n\n #\n # Generate additional problem data for solutions\n #\n f.write(\"/* Function to define solutions and additional data struct */\\n\")\n f.write(\"%s_sols_data::%s_sols_data() {\\n\" % (problem_name, problem_name))\n\n # Generate further data and solutions\n for key, value in sols_data.items():\n if isinstance(value, str):\n # Status test get from C code\n if value == 'optimal':\n f.write(\"%s = %s;\\n\" % (key, 'OSQP_SOLVED'))\n elif value == 'optimal_inaccurate':\n f.write(\"%s = %s;\\n\" % (key, 'OSQP_SOLVED_INACCURATE'))\n elif value == 'primal_infeasible':\n f.write(\"%s = %s;\\n\" % (key, 'OSQP_PRIMAL_INFEASIBLE'))\n elif value == 'primal_infeasible_inaccurate':\n f.write(\"%s = %s;\\n\" %\n (key, 'OSQP_PRIMAL_INFEASIBLE_INACCURATE'))\n elif value == 'dual_infeasible':\n f.write(\"%s = %s;\\n\" % (key, 'OSQP_DUAL_INFEASIBLE'))\n elif value == 'dual_infeasible_inaccurate':\n f.write(\"%s = %s;\\n\" % (key, 'OSQP_DUAL_INFEASIBLE_INACCURATE'))\n\n # Check if it is an array or a scalar\n if type(value) is np.ndarray:\n if isinstance(value.flatten(order='F')[0], int):\n write_vec_int(f, value.flatten(order='F'), key, \"this\")\n 
elif isinstance(value.flatten(order='F')[0], float):\n write_vec_float(f, value.flatten(order='F'), key, \"this\")\n else:\n if isinstance(value, int):\n write_int(f, value, key, \"this\")\n elif isinstance(value, float):\n write_float(f, value, key, \"this\")\n\n # End function\n f.write(\"}\\n\\n\")\n\n\n\n #\n # Clean additional problem data for solutions\n #\n f.write(\"/* Function to clean solutions and additional data struct */\\n\")\n f.write(\"%s_sols_data::~%s_sols_data() {\\n\" % (problem_name, problem_name))\n # Generate further data and solutions\n for key, value in sols_data.items():\n # Check if it is an array or a scalar\n if type(value) is np.ndarray:\n clean_vec(f, key)\n\n f.write(\"}\\n\\n\")\n\n f.close()", "def create(hints=None,\r\n previous_answers=None,\r\n user_submissions=None,\r\n user_voted=None,\r\n moderate=None,\r\n mod_queue=None):\r\n # Should have a single child, but it doesn't matter what that child is\r\n field_data = {'data': CHModuleFactory.sample_problem_xml, 'children': [None]}\r\n\r\n if hints is not None:\r\n field_data['hints'] = hints\r\n else:\r\n field_data['hints'] = {\r\n '24.0': {'0': ['Best hint', 40],\r\n '3': ['Another hint', 30],\r\n '4': ['A third hint', 20],\r\n '6': ['A less popular hint', 3]},\r\n '25.0': {'1': ['Really popular hint', 100]}\r\n }\r\n\r\n if mod_queue is not None:\r\n field_data['mod_queue'] = mod_queue\r\n else:\r\n field_data['mod_queue'] = {\r\n '24.0': {'2': ['A non-approved hint']},\r\n '26.0': {'5': ['Another non-approved hint']}\r\n }\r\n\r\n if previous_answers is not None:\r\n field_data['previous_answers'] = previous_answers\r\n else:\r\n field_data['previous_answers'] = [\r\n ['24.0', [0, 3, 4]],\r\n ['29.0', []]\r\n ]\r\n\r\n if user_submissions is not None:\r\n field_data['user_submissions'] = user_submissions\r\n else:\r\n field_data['user_submissions'] = ['24.0', '29.0']\r\n\r\n if user_voted is not None:\r\n field_data['user_voted'] = user_voted\r\n\r\n if moderate is not None:\r\n field_data['moderate'] = moderate\r\n\r\n descriptor = Mock(weight='1')\r\n # Make the descriptor have a capa problem child.\r\n capa_descriptor = MagicMock()\r\n capa_descriptor.name = 'capa'\r\n capa_descriptor.displayable_items.return_value = [capa_descriptor]\r\n descriptor.get_children.return_value = [capa_descriptor]\r\n\r\n # Make a fake capa module.\r\n capa_module = MagicMock()\r\n capa_module.lcp = MagicMock()\r\n responder = MagicMock()\r\n\r\n def validate_answer(answer):\r\n \"\"\" A mock answer validator - simulates a numerical response\"\"\"\r\n try:\r\n float(answer)\r\n return True\r\n except ValueError:\r\n return False\r\n responder.validate_answer = validate_answer\r\n\r\n def compare_answer(ans1, ans2):\r\n \"\"\" A fake answer comparer \"\"\"\r\n return ans1 == ans2\r\n responder.compare_answer = compare_answer\r\n\r\n capa_module.lcp.responders = {'responder0': responder}\r\n capa_module.displayable_items.return_value = [capa_module]\r\n\r\n system = get_test_system()\r\n # Make the system have a marginally-functional get_module\r\n\r\n def fake_get_module(descriptor):\r\n \"\"\"\r\n A fake module-maker.\r\n \"\"\"\r\n return capa_module\r\n system.get_module = fake_get_module\r\n module = CrowdsourceHinterModule(descriptor, system, DictFieldData(field_data), Mock())\r\n\r\n return module", "def __init__(self, data=None):\n self.problems = {}\n if data is not None:\n self.update(data)", "def load(cls):\n \n # Loop through problems and build patient problem lists:\n probs = 
csv.reader(file(PROBLEMS_FILE,'U'),dialect='excel-tab')\n header = probs.next() \n for prob in probs:\n cls(dict(zip(header,prob))) # Create a problem instance ", "def add_problem(self, problem, build, email):\n sql = \"\"\"INSERT INTO problem(title, description, privacy, solution_r, number_of_seen)\n VALUES(%s, %s, %s, %s, %s) RETURNING problem_id; \"\"\"\n\n sql2_1 = \"\"\"INSERT INTO notifying(problem_id, student_id)\n VALUES(%s, %s);\"\"\"\n sql3 = \"\"\"INSERT INTO build(problem_id, name)\n VALUES(%s, %s); \"\"\"\n sql4 = \"\"\"SELECT student_id FROM student WHERE email = %s;\"\"\"\n\n conn = None\n p_id = None\n s_id = None\n try:\n params = self.config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(sql, (problem.title, problem.description,\n problem.privacy, problem.solution_r, problem.n_seen,))\n p_id = cur.fetchone()[0]\n cur.execute(sql4, (email,))\n s_id = cur.fetchone()[0]\n cur.execute(sql2_1, (p_id, s_id,))\n cur.execute(sql3, (p_id, build.b_name,))\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n return p_id", "def _problem(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"problem\",\r\n display_name=\"Group {} Sees This Problem\".format(group),\r\n data=\"<h1>No Problem Defined Yet!</h1>\",\r\n )", "def create_om_problem(prob):\n ivc = om.IndepVarComp()\n\n # Add subsystems to problem ##\n add_subsystems(prob, ivc)\n\n # Defining problem parameters ##\n add_parameters(prob, ivc)\n\n # Setting up the problem options ##\n driver_setup(prob)\n\n # Setup the model hierarchy for OpenMDAO ##\n prob.setup()", "def _make_problem(self, choices, in_type='radiotextgroup', script=''):\r\n return self.build_problem(\r\n choices=choices,\r\n type=in_type,\r\n script=script\r\n )", "def create(self, data):\n raise NotImplementedError", "def create_problems(problem_type, can_print=False):\n\n if problem_type == KNAPSACK_PACKING_PROBLEM_TYPE:\n return create_knapsack_packing_problems_with_manual_solutions(can_print)\n\n elif problem_type == PACKING_PROBLEM_TYPE:\n return create_packing_problems_with_optimal_solution_values()\n\n return None, None, None", "def _parse_problem(self, f_problem):\n\n parse_tree = PDDL_Tree.create(f_problem)\n\n assert \"problem\" in parse_tree, \"Problem must have a name\"\n self.problem_name = parse_tree [\"problem\"].named_children ()[0]\n\n # objects must be parsed first\n if \":objects\" in parse_tree:\n object_list = PDDL_Utils.read_type(parse_tree[\":objects\"])\n self._add_objects(object_list)\n\n #TODO this may not be valid with a non-flat type hierchy\n obj_map = {obj: list(self.obj_to_type[obj])[0] for obj in self.objects}\n\n # the goal can be expressed in either a formula form, or a direct form\n if len(parse_tree[\":goal\"].children) == 1 and parse_tree[\":goal\"].children[0].name == \"and\":\n self.goal = And([self.to_formula(c, obj_map) for c in parse_tree[\":goal\"].children[0].children])\n else:\n self.goal = And([self.to_formula(c, obj_map) for c in parse_tree[\":goal\"].children])\n\n # it is critical that the formula here be checked against the objects\n if len(parse_tree[\":init\"].children) == 1 and \\\n parse_tree[\":init\"].children[0].name == \"and\":\n self.init = self.to_formula(parse_tree[\":init\"].children[0], obj_map)\n else:\n # initial condition is one big AND\n self.init = And([self.to_formula(c, obj_map) for c in 
parse_tree[\":init\"].children])\n\n # Parse the multiagent stuff\n self.task = parse_tree[\":task\"].children[0].name\n self.depth = int(parse_tree[\":depth\"].children[0].name)\n self.projection = [a.name for a in parse_tree[\":projection\"].children]\n self.init_type = parse_tree[\":init-type\"].children[0].name\n self.plan = []\n if ':plan' in parse_tree:\n self.plan = ['_'.join(map(str, [x.name] + [y.name for y in x.children])) for x in parse_tree[\":plan\"].children]", "def create_from_data(\n self,\n spec: models.ITaskWriteRequest,\n resources: Sequence[StrPath],\n *,\n resource_type: ResourceType = ResourceType.LOCAL,\n data_params: Optional[Dict[str, Any]] = None,\n annotation_path: str = \"\",\n annotation_format: str = \"CVAT XML 1.1\",\n status_check_period: int = None,\n dataset_repository_url: str = \"\",\n use_lfs: bool = False,\n pbar: Optional[ProgressReporter] = None,\n ) -> Task:\n if getattr(spec, \"project_id\", None) and getattr(spec, \"labels\", None):\n raise exceptions.ApiValueError(\n \"Can't set labels to a task inside a project. \"\n \"Tasks inside a project use project's labels.\",\n [\"labels\"],\n )\n\n task = self.create(spec=spec)\n self._client.logger.info(\"Created task ID: %s NAME: %s\", task.id, task.name)\n\n task.upload_data(\n resource_type=resource_type,\n resources=resources,\n pbar=pbar,\n params=data_params,\n wait_for_completion=True,\n status_check_period=status_check_period,\n )\n\n if annotation_path:\n task.import_annotations(annotation_format, annotation_path, pbar=pbar)\n\n if dataset_repository_url:\n git.create_git_repo(\n self._client,\n task_id=task.id,\n repo_url=dataset_repository_url,\n status_check_period=status_check_period,\n use_lfs=use_lfs,\n )\n\n task.fetch()\n\n return task", "async def addprob(self, ctx, problem_name):\n if problem_name in problems:\n await ctx.send('Problem ' + problem_name + ' already exists.')\n return\n problems[problem_name] = Problem()\n problems[problem_name].cases = {}\n await ctx.send('Problem ' + problem_name + ' successfully added.')\n await write_problems()", "def __init__(self):\n self.points = []\n names = ['x1']\n lows = [10**-5]\n highs = [10**5]\n # TODO 2: Use names, lows and highs defined above to code up decision\n # and objective metadata for POM3.\n decisions = [Decision(n, l, h) for n, l, h in zip(names, lows, highs)]\n objectives = [Objective(\"f1\", True), Objective(\"f2\", True)]\n Problem.__init__(self, decisions, objectives)", "def __init__(self):\n self.points = []\n names = ['x1', 'x2', 'x3']\n lows = [-5, -5, -5]\n highs = [5, 5, 5]\n # TODO 2: Use names, lows and highs defined above to code up decision\n # and objective metadata for POM3.\n decisions = [Decision(n, l, h) for n, l, h in zip(names, lows, highs)]\n objectives = [Objective(\"f1\", True), Objective(\"f2\", True)]\n Problem.__init__(self, decisions, objectives)", "def __init__(self):\n self.points = []\n names = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6']\n lows = [0, 0, 1, 0, 1, 0]\n highs = [10, 10, 5, 6, 5, 10]\n # TODO 2: Use names, lows and highs defined above to code up decision\n # and objective metadata for POM3.\n decisions = [Decision(n, l, h) for n, l, h in zip(names, lows, highs)]\n objectives = [Objective(\"f1\", True), Objective(\"f2\", True)]\n Problem.__init__(self, decisions, objectives)", "def __init__(self, domain_file, problem_file = None):\n\n # this is common to domain and problem file\n self.objects = set([])\n self.obj_to_type = {}\n self.type_to_obj = {}\n\n # make sure that domain is parsed 
before the problem\n self._parse_domain(domain_file)\n\n if problem_file is None:\n self.init = None\n self.goal = None\n self.objects = None\n else:\n self._parse_problem(problem_file)", "def create(self):\n # TODO: Properly validate data\n self._proj()\n if self.cfg.align_heading:\n self._align()\n self._griddata()\n if self.cfg.gap_filter[\"algorithm\"] != \"none\":\n self._gap_filter()", "def create_problem(connection, task_type, collection_type, table, vectorizer,\n features, term_vocabulary, doc_vocabulary,\n features_configpath, message_configpath):\n message_parser = TwitterMessageParser(message_configpath, task_type)\n limit = sys.maxint\n labeled_messages = []\n\n for score in [-1, 0, 1]:\n print \"Class:\\t%s\" % (score)\n # getting tweets with the same score\n request = tweets.tweets_filter_sql_request(task_type, table, score,\n limit)\n for row in core.utils.table_iterate(connection, request):\n text = row[0]\n index = row[1]\n\n message_parser.parse(text)\n terms = message_parser.get_terms()\n doc_vocabulary.add_doc(terms, str(score))\n labeled_message = {'score': score,\n 'id': index,\n 'terms': to_unicode(terms),\n 'features': features.vectorize(text)}\n labeled_messages.append(labeled_message)\n\n term_vocabulary.insert_terms(\n labeled_message['features'].iterkeys())\n\n # Create vectors\n problem = []\n for labeled_message in labeled_messages:\n vector = vectorizer(labeled_message, term_vocabulary, doc_vocabulary)\n if (collection_type == 'train'):\n problem.append([labeled_message['score'], vector])\n elif (collection_type == 'test'):\n problem.append([labeled_message['id'], vector])\n else:\n raise ValueError(\n 'Unexpected collection_type={}'.format(collection_type))\n\n return problem", "def post(self):\n arguments = self.request.arguments\n print arguments\n x = arguments['latitude']\n y = arguments['longitude']\n problem = Problem(\n title=arguments['title'],\n content=define_values(arguments,'content'),\n proposal=define_values(arguments,'proposal'),\n severity=define_values(arguments,'severity', '1'),\n status=define_values(arguments,'status','UNSOLVED'),\n location=create_location(x, y),\n problem_type_id=arguments['problem_type_id'],\n region_id=define_values(arguments,'region_id'))\n self.sess.add(problem)\n self.sess.commit()\n activity = ProblemsActivity(\n problem_id=problem.id,\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"ADDED\")\n self.sess.add(activity)\n self.sess.commit()\n if self.get_status() is 200:\n self.write({'id': problem.id})", "def _make_new(request, form):\n if not form.is_valid():\n return (None, None)\n account = models.Account.get_account_for_user(request.user)\n if account.blocked:\n # Early exit for blocked accounts.\n return (None, None)\n\n data_url = _get_data_url(form)\n if data_url is None:\n return (None, None)\n data, url, separate_patches = data_url\n\n reviewers = _get_emails(form, 'reviewers')\n if not form.is_valid() or reviewers is None:\n return (None, None)\n\n cc = _get_emails(form, 'cc')\n if not form.is_valid():\n return (None, None)\n\n base = form.get_base()\n if base is None:\n return (None, None)\n\n first_issue_id, _ = models.Issue.allocate_ids(1)\n issue_key = ndb.Key(models.Issue, first_issue_id)\n\n issue = models.Issue(subject=form.cleaned_data['subject'],\n description=form.cleaned_data['description'],\n project=form.cleaned_data['project'],\n base=base,\n repo_guid=form.cleaned_data.get('repo_guid', None),\n reviewers=reviewers,\n cc=cc,\n 
private=form.cleaned_data.get('private', False),\n n_comments=0,\n key=issue_key)\n issue.put()\n\n first_ps_id, _ = models.PatchSet.allocate_ids(1, parent=issue.key)\n ps_key = ndb.Key(models.PatchSet, first_ps_id, parent=issue.key)\n patchset = models.PatchSet(issue_key=issue.key, data=data, url=url, key=ps_key)\n patchset.put()\n\n if not separate_patches:\n try:\n patches = engine.ParsePatchSet(patchset)\n except:\n # catch all exceptions happening in engine.ParsePatchSet,\n # engine.SplitPatch. With malformed diffs a variety of exceptions could\n # happen there.\n logging.exception('Exception during patch parsing')\n patches = []\n if not patches:\n patchset.key.delete()\n issue.key.delete()\n errkey = url and 'url' or 'data'\n form.errors[errkey] = ['Patch set contains no recognizable patches']\n return (None, None)\n\n ndb.put_multi(patches)\n\n if form.cleaned_data.get('send_mail'):\n msg = _make_message(request, issue, '', '', True)\n issue.put()\n msg.put()\n return (issue, patchset)", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)" ]
[ "0.6951319", "0.6625024", "0.6625024", "0.6603956", "0.6591881", "0.63943565", "0.63517785", "0.5923711", "0.5809345", "0.5769485", "0.5701272", "0.56902915", "0.5676844", "0.5644496", "0.5614058", "0.56054026", "0.55882806", "0.556935", "0.556253", "0.5546477", "0.5523147", "0.54934514", "0.5490188", "0.54874736", "0.5470787", "0.5464085", "0.54057664", "0.5387208", "0.5361658", "0.5354641" ]
0.7535201
0
Update an existing problem and add/update a keyword.
def problem_keyword_set(self, identifier, keyword, value=None):
        self._put("problems/%d/keywords/%s" % (identifier, quote(keyword, safe="")), json=value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, problem_id):\n args = self.request.arguments\n x = args.pop('latitude')\n y = args.pop('longitude')\n args['location'] = create_location(x, y)\n self.sess.query(Problem).filter_by(id=int(problem_id)). \\\n update(args)\n\n self.sess.commit()\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"UPDATED\"\n )\n self.sess.add(activity)\n self.sess.commit()", "def update_problem(problem_id, problem):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n \n try:\n if result is not None:\n \tproblem = Problem.from_dict(connexion.request.get_json())\n \treturn jsonify(Firebase.put('/problems', 'id_' + str(problem_id), problem))\n except ValueError:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND", "def update(self,haiku, typenum):\n self.occurrences += 1\n for i in range(2):\n for x in (haiku.triple[i]).wordarray:\n if (self.wordtype == dictionary.wordtype(x) and \n dictionary.word_filter(x) != self.word):\n self.update_adj_dict(x, i==typenum)", "def update_keywords(self, keyword, where=None):\n rowcount = 0\n if keyword is not None:\n self.update_generic_data(keyword, TABLE_NAME_KW, where)\n # done\n return rowcount", "def update_knowledge(self):\n pass", "def update_problem(identifier): # pylint: disable=too-many-branches\n # Admin check\n if not current_user.admin == 1:\n return serve_error('You must be an admin to update a problem',\n response_code=401)\n\n pid, problem = None, database.session.query(Problem)\n if is_pid(identifier):\n pid = identifier\n problem = problem.filter(Problem.pid == pid).first()\n else:\n problem = problem.filter(Problem.shortname == identifier).first()\n pid = problem.pid\n\n data = database.session.query(ProblemData).filter(ProblemData.pid == pid).first()\n if 'name' in request.form:\n problem.name = request.form['name'][:32]\n problem.shortname = request.form['name'][:32].replace(' ', '').lower()\n if 'description' in request.form:\n data.description = request.form['description']\n if 'input_desc' in request.form:\n data.input_desc = request.form['input_desc']\n if 'output_desc' in request.form:\n data.output_desc = request.form['output_desc']\n if 'appeared_in' in request.form:\n problem.appeared = request.form['appeared_in']\n if 'difficulty' in request.form:\n problem.difficulty = request.form['difficulty']\n\n # Save the changes\n problem.commit_to_session(database.session)\n data.commit_to_session(database.session)\n\n # If sample cases were uploaded, delete cases and go with the new ones\n case_lst = list()\n if 'cases' in request.form:\n for old_case in database.session.query(SampleCase).\\\n filter(SampleCase.pid == pid).all():\n database.session.delete(old_case)\n database.session.flush()\n database.session.commit()\n case_num = 1\n cases = loads(request.form['cases'])\n for case in cases:\n SampleCase(\n pid=pid,\n case_num=case_num,\n input=case['input'],\n output=case['output']\n ).commit_to_session()\n case_lst.append({\n 'case_num': case_num,\n 'input': case['input'],\n 'output': case['output']\n })\n case_num += 1\n\n directory = os.path.join(app.config['DATA_FOLDER'], 'problems', pid)\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n # Add judge data if supplied\n if 'in_file' in request.files:\n in_file = zipfile.ZipFile(request.files['in_file'])\n in_file.extractall(directory)\n\n if 'out_file' in 
request.files:\n out_file = zipfile.ZipFile(request.files['out_file'])\n out_file.extractall(directory)\n\n if 'sol_file' in request.files:\n if os.path.exists(directory + '/test'):\n rmtree(directory + '/test')\n os.mkdir(os.path.join(directory, 'test'))\n request.files['sol_file'].save(\n os.path.join(directory, 'test', request.files['sol_file'].filename))\n\n\n return serve_response({\n 'pid': problem.pid,\n 'name': problem.name,\n 'shotrname': problem.shortname,\n 'description': data.description,\n 'input_desc': data.input_desc,\n 'output_desc': data.output_desc,\n 'difficulty' : problem.difficulty,\n 'cases': case_lst\n })", "async def setprob(self, ctx, problem_name=None):\n if problem_name:\n if not await problem_exists(ctx, problem_name):\n return\n current_problem[ctx.author.id] = problem_name\n if problem_name:\n await ctx.send('Problem successfully set.')\n else:\n await ctx.send('The bot will no longer check your submissions.')", "def add_keyword(self, new_word):\n\n self.keywords.append(new_word)\n words = Keywords()\n words.add(new_word)\n mongo.db.users.update({\"name\": self.username},\n {\"$set\": {\"keywords\": self.keywords}})", "def register_problem(self, location, name):\r\n self.problems[location] = name", "def problem_keyword_delete(self, identifier, keyword):\n self._delete(\"problems/%d/keywords/%s\" % (identifier, quote(keyword, safe=\"\")))", "def update(self, attrs):\n if attrs.get('name'):\n self.name = string.capwords(attrs.get('name'))\n if attrs.get('description'):\n self.description = attrs.get('description')\n if attrs.get('author'):\n self.author = attrs.get('author')\n\n try:\n db.session.add(self)\n db.session.commit()\n except IntegrityError as err:\n if isinstance(err.orig, UniqueViolation):\n raise Conflict(\"Name already used by another exercise.\")\n raise UnexpectedError(DATABASE_ERROR_MSG)\n except DBAPIError as err:\n raise UnexpectedError(DATABASE_ERROR_MSG)", "def update_word(self, word):\n self.word = word", "def edit_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def problem_update(self, identifier, updates):\n return self._patch(\"problems/%d\" % identifier, json=updates).json()", "def update_follow_set(model: Dict[str, Set[str]], word: str, follow_word: str) -> None:\n if word not in model:\n model[word] = {follow_word}\n\n else:\n model[word].add(follow_word)", "def update(ctx, name, description, tags):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n update_dict = {}\n\n if name:\n update_dict['name'] = name\n\n if description:\n update_dict['description'] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict['tags'] = tags\n\n if not update_dict:\n Printer.print_warning('No argument was provided to update the experiment.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment.update_experiment(\n user, project_name, _experiment, update_dict)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not update experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n Printer.print_success(\"Experiment updated.\")\n get_experiment_details(response)", "def problem(self, problem):\n\n self._problem = problem", "def update(self, data):\n if not isinstance(data, dict):\n raise ProblemConfigError('Config file error: content must be a dictionary, but is %s.' 
% (type(data)))\n\n for (problem_name, problem_spec) in data.items():\n if not isinstance(problem_name, str):\n raise ProblemConfigError('Config file error: problem names must be strings, but %s is %s.' % (\n problem_name, type(problem_name)))\n\n if not isinstance(problem_spec, dict):\n raise ProblemConfigError('Config file error: problem spec must be a dictionary, but spec of problem %s is %s.'\n % (problem_name, type(problem_spec)))\n\n if problem_name not in self.problems:\n self.problems[problem_name] = Problem(problem_name, problem_spec)\n else:\n self.problems[problem_name].update(problem_spec)", "def insert_keyword(kwd, pkg_id):\n global kwd_index\n try:\n sql = 'INSERT INTO keywords VALUES (' + str(kwd_index) + ',\"' + kwd + '\",' + str(pkg_id) + ',0)'\n print sql\n util.executeSQL(conn, sql)\n kwd_index += 1\n except Exception as e:\n print e\n return", "def update_general(info, key, val):\n\n info[\"model_params\"][key] = val", "def update(self, request, pk=None):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project.objects.get(pk=pk)\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n #project.projectNote = Note.objects.get(pk=request.data['projectNote'])\n\n project.lotId = lot\n project.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def edit_toml(file, key_word, phrase_to_replace):\n f = open(file, \"r+\")\n\n phrase = \"\"\n while True:\n line = f.readline()\n if not line:\n break\n if line.count(key_word) > 0:\n print('%s found in the code, about to change with given phrase' % key_word)\n phrase += phrase_to_replace+'\\n'\n else:\n phrase += line\n f.close()\n f = open(file, \"w+\")\n f.write(phrase)\n print(\"Successfully replaced with given phrase!\")\n f.close()", "def add_keyword(self,\r\n index,\r\n keywords):\r\n\r\n if isinstance(keywords, str):\r\n keywords = {keywords}\r\n\r\n self.edit(index,\r\n self.get_keys_from_note(index).union(keywords),\r\n self.get_text_from_note(index))", "async def addprob(self, ctx, problem_name):\n if problem_name in problems:\n await ctx.send('Problem ' + problem_name + ' already exists.')\n return\n problems[problem_name] = Problem()\n problems[problem_name].cases = {}\n await ctx.send('Problem ' + problem_name + ' successfully added.')\n await write_problems()", "async def setprobdetails(self, ctx, problem_name, *, arg):\n if not await problem_exists(ctx, problem_name):\n return\n problems[problem_name].details = arg\n await ctx.send('Problem details set.')\n await write_problems()", "def put(self, request, *args, **kwargs):\n message = self.check_word_id()\n if message is not None:\n return Response(message, status=status.HTTP_400_BAD_REQUEST)\n word_id = request.data.pop('word')\n queryset = models.Word.objects.all()\n word = get_object_or_404(queryset, id=word_id)\n serializer = serializers.Word(\n word, data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)", "def cypher_problemItemTag_keyword(self, variable_tagProblemItem=\"problemitem_tag\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagProblemItem}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def __editProjectPWL(self):\n pwl = e5App().getObject(\"Project\").getProjectDictionaries()[0]\n self.__editSpellingDictionary(pwl)", "def cmd_update(self, 
text):\n self.update(text)", "def cypher_problemTag_keyword(self, variable_tagProblem=\"tag_problem\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagProblem}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"" ]
[ "0.61769944", "0.58378965", "0.56088656", "0.5605392", "0.5590597", "0.54668194", "0.5348828", "0.5304439", "0.52890366", "0.526431", "0.5261165", "0.52269745", "0.5192112", "0.5146122", "0.5120042", "0.5079348", "0.506978", "0.50642264", "0.505732", "0.50392693", "0.50340545", "0.50296193", "0.50295335", "0.50162697", "0.50159407", "0.49883053", "0.4982857", "0.4948941", "0.49364778", "0.49357015" ]
0.6982569
0
Update an existing problem by removing a keyword.
def problem_keyword_delete(self, identifier, keyword): self._delete("problems/%d/keywords/%s" % (identifier, quote(keyword, safe="")))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def problem_keyword_set(self, identifier, keyword, value=None):\n self._put(\"problems/%d/keywords/%s\" % (identifier, quote(keyword, safe=\"\")), json=value)", "def delete(self, keyword, key):", "async def delprob(self, ctx, problem_name):\n if not await problem_exists(ctx, problem_name):\n return\n del problems[problem_name]\n await ctx.send('Problem ' + problem_name + ' successfully deleted.')\n await write_problems()", "def removeWord(self,category,word):\n\t\ttry:\n\t\t\tlist = self.dictData[category]\n\t\t\tlist.pop(list.index(word))\n\t\t\tself.__save()\n\t\texcept KeyError:\n\t\t\tprint (\"La categoría ingresada no existe.\")\n\t\texcept ValueError:\n\t\t\tprint (\"La palabra ingresada no existe en la categoría \" + category + \".\")", "def remove(self, keyword):\n tag = self._find(keyword)\n if tag is not None:\n self.meta.remove(tag)", "def update_problem(identifier): # pylint: disable=too-many-branches\n # Admin check\n if not current_user.admin == 1:\n return serve_error('You must be an admin to update a problem',\n response_code=401)\n\n pid, problem = None, database.session.query(Problem)\n if is_pid(identifier):\n pid = identifier\n problem = problem.filter(Problem.pid == pid).first()\n else:\n problem = problem.filter(Problem.shortname == identifier).first()\n pid = problem.pid\n\n data = database.session.query(ProblemData).filter(ProblemData.pid == pid).first()\n if 'name' in request.form:\n problem.name = request.form['name'][:32]\n problem.shortname = request.form['name'][:32].replace(' ', '').lower()\n if 'description' in request.form:\n data.description = request.form['description']\n if 'input_desc' in request.form:\n data.input_desc = request.form['input_desc']\n if 'output_desc' in request.form:\n data.output_desc = request.form['output_desc']\n if 'appeared_in' in request.form:\n problem.appeared = request.form['appeared_in']\n if 'difficulty' in request.form:\n problem.difficulty = request.form['difficulty']\n\n # Save the changes\n problem.commit_to_session(database.session)\n data.commit_to_session(database.session)\n\n # If sample cases were uploaded, delete cases and go with the new ones\n case_lst = list()\n if 'cases' in request.form:\n for old_case in database.session.query(SampleCase).\\\n filter(SampleCase.pid == pid).all():\n database.session.delete(old_case)\n database.session.flush()\n database.session.commit()\n case_num = 1\n cases = loads(request.form['cases'])\n for case in cases:\n SampleCase(\n pid=pid,\n case_num=case_num,\n input=case['input'],\n output=case['output']\n ).commit_to_session()\n case_lst.append({\n 'case_num': case_num,\n 'input': case['input'],\n 'output': case['output']\n })\n case_num += 1\n\n directory = os.path.join(app.config['DATA_FOLDER'], 'problems', pid)\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n # Add judge data if supplied\n if 'in_file' in request.files:\n in_file = zipfile.ZipFile(request.files['in_file'])\n in_file.extractall(directory)\n\n if 'out_file' in request.files:\n out_file = zipfile.ZipFile(request.files['out_file'])\n out_file.extractall(directory)\n\n if 'sol_file' in request.files:\n if os.path.exists(directory + '/test'):\n rmtree(directory + '/test')\n os.mkdir(os.path.join(directory, 'test'))\n request.files['sol_file'].save(\n os.path.join(directory, 'test', request.files['sol_file'].filename))\n\n\n return serve_response({\n 'pid': problem.pid,\n 'name': problem.name,\n 'shotrname': problem.shortname,\n 'description': data.description,\n 'input_desc': data.input_desc,\n 
'output_desc': data.output_desc,\n 'difficulty' : problem.difficulty,\n 'cases': case_lst\n })", "def update_problem(problem_id, problem):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n \n try:\n if result is not None:\n \tproblem = Problem.from_dict(connexion.request.get_json())\n \treturn jsonify(Firebase.put('/problems', 'id_' + str(problem_id), problem))\n except ValueError:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND", "def delete_keyword(self,\r\n index,\r\n keywords):\r\n\r\n if isinstance(keywords, str):\r\n keywords = {keywords}\r\n self.edit(index,\r\n self.get_keys_from_note(index).difference(keywords),\r\n self.get_text_from_note(index))", "def remove(self, item, issue):\n try:\n self[item].remove(issue)\n return 1\n except ValueError:\n pass", "def put(self, problem_id):\n args = self.request.arguments\n x = args.pop('latitude')\n y = args.pop('longitude')\n args['location'] = create_location(x, y)\n self.sess.query(Problem).filter_by(id=int(problem_id)). \\\n update(args)\n\n self.sess.commit()\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"UPDATED\"\n )\n self.sess.add(activity)\n self.sess.commit()", "def delete(self):\n try:\n from_table = self.get_from_table(self.content_type.name)\n\n if from_table is not None:\n combined_obj = CombinedTeledata.objects.get(id=self.object_id, from_table=from_table)\n combined_obj.keywords_combined.remove(self)\n except:\n logger.warn('Cannot remove keywords_combined record for {0} - {1}. Record may not exist.'.format(self.phrase, self.content_object.name))\n combined_obj = None\n\n super(Keyword, self).delete()", "def update(self,haiku, typenum):\n self.occurrences += 1\n for i in range(2):\n for x in (haiku.triple[i]).wordarray:\n if (self.wordtype == dictionary.wordtype(x) and \n dictionary.word_filter(x) != self.word):\n self.update_adj_dict(x, i==typenum)", "def __delitem__(self, key: str) -> None:\n del self.variables[key]", "def remove(self, key, category=\"inputline\", **kwargs):\n super().remove(key, category=category, **kwargs)", "def mRemove(self, **kw):\n kw = copy_non_reserved_keywords(kw)\n for key, val in kw.items():\n # It would be easier on the eyes to write this using\n # \"continue\" statements whenever we finish processing an item,\n # but Python 1.5.2 apparently doesn't let you use \"continue\"\n # within try:-except: blocks, so we have to nest our code.\n try:\n orig = self._dict[key]\n except KeyError:\n # No existing variable in the environment, so just skip it\n pass\n else:\n try:\n # Most straightforward: just try to substract it.\n # But this will not work in most cases :-(\n self._dict[key] = orig - val\n except TypeError:\n try:\n # It orig and val is dictionaties:\n for k in val.keys():\n del orig[k]\n # May be some recursion ?\n except AttributeError:\n try:\n # Check if the original is a list.\n remove_from_orig = orig.remove\n except AttributeError:\n # Can't do nothing more\n pass\n else:\n # The original is a list, so remove\n # value from it.\n try:\n i = val[0]\n except TypeError:\n val = [ val ]\n for i in val:\n try:\n remove_from_orig(i)\n except ValueError:\n pass\n self.scanner_map_delete(kw)", "async def delcase(self, ctx, problem_name, *, arg):\n if not await problem_exists(ctx, problem_name):\n return\n if arg not in problems[problem_name].cases:\n await 
ctx.send('The specified case does not exist.')\n del problems[problem_name].cases[arg]\n await ctx.send('Case deleted.')\n await write_problems()", "def remove_index_from_word(self,word,index):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].remove(str(index))\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname,word,str(index),)\r\n db_cursor.execute(\"DELETE FROM\"\r\n +\" word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?\"\r\n +\" AND note_index=?;\",\r\n value_tuple)\r\n\r\n db_cursor.execute(\"SELECT * FROM word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" and word=?;\",\r\n value_tuple[0:2])\r\n if db_cursor.fetchone():\r\n db_cursor.execute(\"DELETE FROM\"\r\n +\" all_words\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple[0:2])", "def cypher_problemTag_keyword(self, variable_tagProblem=\"tag_problem\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagProblem}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def remove(self, key):", "def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)", "def remove(ctx, command_id):\n _check_for_commands(ctx.obj[\"keep_path\"])\n keep = ctx.obj[\"keep\"]\n for cid in command_id:\n for kw, command_ids in keep[\"keyword2Ids\"].items():\n if int(cid) in command_ids:\n command_ids.remove(int(cid))\n if len(keep[\"keyword2Ids\"][kw]) == 0:\n del keep[\"keyword2Ids\"][kw]\n if cid in keep[\"id2Command\"]:\n command = keep[\"id2Command\"][cid]\n del keep[\"id2Command\"][cid]\n del keep[\"command2Id\"][command]\n if cid in keep[\"id2Explanation\"]:\n del keep[\"id2Explanation\"][cid]\n click.echo(\"The command #%s has been removed.\" % cid)\n _save_document(ctx.obj[\"keep_path\"], keep)", "def delete_word(event):\n get_by_name(\"backward-kill-word\").call(event)", "def cypher_problemItemTag_keyword(self, variable_tagProblemItem=\"problemitem_tag\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagProblemItem}{self.label}' + \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def delete(self, key):", "def DeconflictKeyword (s, aux_keywords=frozenset()):\n if (s in _Keywords) or (s in aux_keywords):\n return '%s_' % (s,)\n return s", "def delete_field(self, field):\n if field.type == 'keyword':\n query = '''select _keyword_id from keywords\n where _keyword=\"%s\"''' %field.name\n keyword_id = self.connection.execute(query).fetchall()[0][0]\n query = 'delete from keyword_x_file where _keyword_id=%d'%keyword_id\n self.connection.execute(query)\n query = 'delete from keywords where _keyword_id=%d'%keyword_id\n self.connection.execute(query)\n self.keywords.remove(field.name)\n else:\n query = 'alter table files drop column \"%s\"' % field.name\n self.connection.execute(query)\n self.connection.commit()\n self.init_fields()", "def delete_word(self,word):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n del self.word_dict[word]", "def replace_word(self):\n wordlist_path = self.get_wordlist_path()\n with open(wordlist_path) as f:\n data = json.load(f)\n\n for index, exist_word in data[\"words\"].items():\n if self.word == exist_word:\n new_word = 
input(\"New word:\\n\")\n if not check_word_format(new_word):\n exit()\n if exists_already(data,new_word):\n exit()\n # write new_word in\n data[\"words\"][index] = new_word\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1]))\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n print(f\"[{self.word}] has been replaced by [{new_word}]!\")\n return\n\n print(f\"[{self.word}] does not exist in list!\")", "def _detach_skill_keywords(self, skill_id):\n skill_id = _entity_skill_id(skill_id)\n\n def match_skill_entities(data):\n return data and data[1].startswith(skill_id)\n\n self.engine.drop_entity(match_func=match_skill_entities)" ]
[ "0.6466727", "0.6054319", "0.55775964", "0.55644643", "0.55415493", "0.53721416", "0.5298688", "0.52898467", "0.5261552", "0.5184182", "0.5147284", "0.5146864", "0.51035714", "0.5096608", "0.50747263", "0.5059537", "0.5046001", "0.5015993", "0.50112444", "0.5003859", "0.50038", "0.49928552", "0.49560207", "0.49214396", "0.49163917", "0.49116847", "0.4911403", "0.4904485", "0.48974234", "0.48875076" ]
0.71324074
0
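For illustration only: the positive document in the row above URL-encodes the keyword with quote(keyword, safe="") before interpolating it into the DELETE path. The self-contained sketch below demonstrates just that encoding step with Python's standard library; the problem id and keyword are made up and no HTTP request is issued.

from urllib.parse import quote

identifier = 42                      # hypothetical problem id
keyword = "dynamic programming/dp"   # hypothetical keyword containing characters that must be escaped

# safe="" means even "/" is percent-encoded, so the keyword cannot break the URL path
path = "problems/%d/keywords/%s" % (identifier, quote(keyword, safe=""))
print(path)  # problems/42/keywords/dynamic%20programming%2Fdp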
Returns the set of relationships that the provided problem is a part of.
def problem_relationships(self, identifier): return self._get("problems/%d/relationships" % identifier).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relations(self):\n return set(self.triples()[\"relation\"])", "def relationships(self):", "def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }", "def relations(cls):\n return [c.key for c in cls.__mapper__.iterate_properties\n if isinstance(c, RelationshipProperty)]", "def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret", "def get_all_relations(\n self,\n node: Tuple[str, str],\n relation: Optional[str] = None,\n ) -> List[Relation]:\n source_rels = self.get_source_relations(target=node, relation=relation)\n target_rels = self.get_target_relations(source=node, relation=relation)\n all_rels = source_rels + target_rels\n return all_rels", "def get_pt_relations(self, pt_set1, pt_set2):\n raise NotImplementedError('Abstract Method.')", "def relationship_types(self):\n return frozenset(self._relationships_by_type.keys())", "def getrelations(self):\n return self.getfieldnames('ONE')", "def relations(self):\n\n def functions_helper(returned_set: set()):\n if is_relation(self.root): # Populate self.root and self.arguments\n returned_set.add((self.root, len(self.arguments)))\n\n elif is_equality(self.root): # Populate self.first and self.second\n return\n elif is_quantifier(self.root): # Populate self.variable and self.predicate\n returned_set.update(self.predicate.relations())\n\n elif is_unary(self.root): # Populate self.first\n returned_set.update(self.first.relations())\n\n else: # Populate self.first and self.second\n returned_set.update(self.first.relations())\n returned_set.update(self.second.relations())\n return\n\n \"\"\" Return a set of pairs (function_name, arity) for all function names\n that appear in this formula \"\"\"\n returned_set = set()\n functions_helper(returned_set)\n return returned_set\n\n # Ex12", "def relationships(self, r_type=None, n_ids=()):\n if r_type is None:\n r_sets = []\n else:\n r_sets = [self._relationships_by_type.get(r_type, frozenset())]\n if not n_ids or (hasattr(n_ids, \"__iter__\") and all(n_id is None for n_id in n_ids)):\n pass\n elif isinstance(n_ids, Sequence):\n for n_index, n_id in enumerate_nodes(n_ids):\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())\n if i == n_index})\n elif isinstance(n_ids, Set):\n for n_id in n_ids:\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())})\n else:\n raise TypeError(\"Nodes must be supplied as a Sequence or a Set\")\n if r_sets:\n return iter(reduce(and_operator, r_sets))\n else:\n return iter(self._relationships)", "def get_goterms_upper(self):\n # Requires GODag is created with 'relationship' in optional_attrs argument\n # pylint: disable=no-member\n return set.union(self.parents, *self.relationship.values())", "def relations(self):\n\t\treturn [(self.factions[k][0], self._faction_affinity.get(k, 50)) for k in self.factions.keys()]", "def relationship_views(self) -> Iterable[RelationshipView]:\n return set(self._relationship_views)", "def find_relationship(person1, person2):\n lines1 = get_ancestor_lines(person1)\n lines2 = get_ancestor_lines(person2)\n mrcas = 
find_most_recent(set(lines1).intersection(set(lines2)))\n\n relationships = []\n for anc in mrcas:\n relationships.append((lines1[anc], lines2[anc]))\n return relationships", "def get_relations(self):\n if not hasattr(self, '_BasePublication__relations_cache'):\n tree_opts = Rubric._mptt_meta\n self.__relations_cache = self.forward_relations.select_related('rubric', 'to_publication').order_by(\n 'rubric__%s' % tree_opts.tree_id_attr, 'rubric__%s' % tree_opts.left_attr)\n return self.__relations_cache", "def _parts(self):\n return [part for part in Package.__walkparts(self.__relationships)]", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def get_target_relations(\n self,\n source: Tuple[str, str],\n relation: Optional[str] = None,\n ) -> List[Relation]:\n return self.get_relations(source=source, target=None, relation=relation)", "def related(self):\n return [ch for ch in self.sentence.chunks \n if ch != self and intersects(unzip(0, ch.relations), unzip(0, self.relations))]", "def related_names(self) -> Set[str]:\n result = set()\n if self.default:\n result.update(self.default.related_names)\n if self.type_hint:\n result.update(self.type_hint.related_names)\n\n return result", "def selected_relationships(self):\n return self._selected_relationships", "def references(self):\n return self._get_related_resources(False)", "def get_relationships_by_parent_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def get_relatives(\n self, reltypes=None, relfilter=None, fetch_objects=True, ignore_missing=True\n ):\n ret = defaultdict(set)\n relations = self.icalendar_component.get(\"RELATED-TO\", [])\n if not isinstance(relations, list):\n relations = [relations]\n for rel in relations:\n if relfilter and not relfilter(rel):\n continue\n reltype = rel.params.get(\"RELTYPE\", \"PARENT\")\n if reltypes and not reltype in reltypes:\n 
continue\n ret[reltype].add(str(rel))\n\n if fetch_objects:\n for reltype in ret:\n uids = ret[reltype]\n ret[reltype] = []\n for obj in uids:\n try:\n ret[reltype].append(self.parent.object_by_uid(obj))\n except error.NotFoundError:\n if not ignore_missing:\n raise\n return ret", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def get_rel_elements(self):\n return self.merged_root.findall('OrgQuestion/Thread/RelQuestion')", "def get_relations(\n self,\n source: Optional[Tuple[str, str]] = None,\n target: Optional[Tuple[str, str]] = None,\n relation: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> List[Relation]:\n if not source and not target:\n raise ValueError(\"source or target should be specified\")\n source = norm_id(*source) if source else None\n target = norm_id(*target) if target else None\n query = \"\"\"\n MATCH p=(%s)-[%s]->(%s)\n RETURN DISTINCT p\n %s\n \"\"\" % (\n \"{id: '%s'}\" % source if source else \"s\",\n \"\" if not relation else \":%s\" % relation,\n \"{id: '%s'}\" % target if target else \"t\",\n \"\" if not limit else \"LIMIT %s\" % limit,\n )\n rels = [self.neo4j_to_relation(res[0]) for res in self.query_tx(query)]\n return rels", "def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)", "def fm_all_parents(self):\n return self._relation_lst[self.PARENT].copy()" ]
[ "0.70870245", "0.6588074", "0.6436843", "0.6173217", "0.6168484", "0.6018063", "0.5981662", "0.5978576", "0.5977961", "0.59650034", "0.5942173", "0.5903671", "0.58759755", "0.57877916", "0.57769465", "0.576169", "0.5740245", "0.57243073", "0.56790555", "0.56746703", "0.5666522", "0.5630037", "0.562982", "0.5607453", "0.5598985", "0.55850995", "0.5547679", "0.55035114", "0.5475347", "0.5438184" ]
0.69632775
1
Create a relationship between two problems. It is only necessary to establish one side of the relationship. The complementary side of the relationship will be established automatically by the server.
def problem_relationship_add(self, src_identifier, relation_type, dst_identifier): self._put("problems/%d/relationships" % src_identifier, json={"type" : relation_type, "problemIdentifier" : dst_identifier })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_relation(self, left_node, rel, right_node):\n rel = Relationship(left_node, rel, right_node)\n self.graph.merge(rel)\n return", "def relate(self, other):\n ...", "def allow_relation(self, obj1, obj2, **hints):\n return True", "def allow_relation(self, obj1, obj2, **hints):\n return None", "def create_relationship(self, source_node: Node, target_node: Node):\n if target_node.node_id not in source_node.out_nodes_ids:\n source_node.out_nodes_ids.add(target_node.node_id)\n self._collection.put_record(source_node.node_id, self._node_serializer.to_data(source_node))\n if source_node.node_id not in target_node.in_nodes_ids:\n target_node.in_nodes_ids.add(source_node.node_id)\n self._collection.put_record(target_node.node_id, self._node_serializer.to_data(target_node))", "def add_fact_relationship(self, table_from: str, entry_from: dict, table_to: str, entry_to: dict):\n\n table_lut = {'p': \"10\", # procedure\n 'c': \"19\", # condition\n 'm': \"21\", # measurement\n 'o': \"27\"} # observation\n self.fact_relations.append((table_lut[table_from], entry_from, table_lut[table_to], entry_to))", "def problem_create(self, problem): \n return self._post(\"problems\", json=problem).json()", "def post(self, request, *args, **kwargs):\n frompath = urlparse(request.DATA.get('from_person')).path\n topath = urlparse(request.DATA.get('to_person')).path\n\n #print(request.DATA)\n if type(frompath) is str and type(topath) is str:\n frompath_elements = frompath.split('/')\n topath_elements = topath.split('/')\n else:\n return Response({'error: invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n fromPerson = get_object_or_404(Person, username=frompath_elements[-2])\n toPerson = get_object_or_404(Person, username=topath_elements[-2])\n count = Relationship.objects.filter(from_person=fromPerson, to_person=toPerson).count()\n\n #Reject a request to create Relationship with self\n if request.user.person.username == toPerson.username or count > 0:\n return Response({'error: Relationship with self not permitted'}, status=status.HTTP_400_BAD_REQUEST)\n\n if request.user.person.username == fromPerson.username or request.user.is_staff:\n return self.create(request, *args, **kwargs)\n return Response({'error': 'from_user does not match authenticated User'}, status=status.HTTP_400_BAD_REQUEST)", "def relationship(cls):\n return relationship.many_to_one(cls, 'relationship')", "def add_connection(self, room1_id, room2_id, direction):\n opposite_direction = {'n': 's', 's': 'n', 'e': 'w', 'w': 'e'}\n if room1_id in self.rooms and room2_id in self.rooms:\n self.rooms[room1_id]['exits'][direction] = room2_id\n self.rooms[room2_id]['exits'][opposite_direction[direction]] = room1_id\n else:\n raise IndexError('That room does not exist!')", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel", "def relationships(self):", "def add_relationship(self, rel: ResourceRelationshipDescriptor) -> None:\n self._relationships[assert_not_none(rel.name)] = rel.bind(self)", "def allow_relation(self, obj1, obj2, **hints):\n\n if obj1._state.db == obj2._state.db:\n return True\n return False", "def post(self, *args, **kwargs):\n json_data = request.get_json()\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n\n if 'data' not in json_data:\n raise BadRequest('/data', 'You must provide data with a \"data\" route node')\n if 
isinstance(json_data['data'], dict):\n if 'type' not in json_data['data']:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in json_data['data']:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if json_data['data']['type'] != related_type_:\n raise InvalidType('/data/type', 'The type field does not match the resource type')\n if isinstance(json_data['data'], list):\n for obj in json_data['data']:\n if 'type' not in obj:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in obj:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if obj['type'] != related_type_:\n raise InvalidType('/data/type', 'The type provided does not match the resource type')\n\n self.before_post(args, kwargs, json_data=json_data)\n\n obj_, updated = self._data_layer.create_relationship(json_data,\n model_relationship_field,\n related_id_field,\n kwargs)\n\n qs = QSManager(request.args, self.schema)\n includes = qs.include\n if relationship_field not in qs.include:\n includes.append(relationship_field)\n schema = compute_schema(self.schema, dict(), qs, includes)\n\n if updated is False:\n return '', 204\n\n result = schema.dump(obj_).data\n if result.get('links', {}).get('self') is not None:\n result['links']['self'] = request.path\n self.after_post(result)\n return result, 200", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'emissions' or \\\n obj2._meta.app_label == 'emissions':\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n return self._route_by_model_type(obj1) == self._route_by_model_type(obj2)", "def __push_relation(self, id1, id2, id1_name, id2_name, table):\n # case: No entry about relation is in DB yet\n if not self.__postgre_db.is_in_table(table, id1_name + \"=\" + str(\n id1)):\n self.__postgre_db.insert(table, {\n id1_name: id1, id2_name: [id2], \"aggregation\": 0})\n\n # case: Entry about single_pattern is in DB\n else:\n old_list = self.__postgre_db.get(table, id1_name + \"=\" + str(\n id1), id2_name)\n new_list = list(set(old_list + [id2]))\n self.__postgre_db.update(\n table, id2_name + \"=\" + add_quotes(replace_brackets(str(new_list))), id1_name + \"=\" + str(id1))", "def test_multi_dependency(self):\n actoritem1 = self.create(ActorItem, UML.Actor)\n actoritem2 = self.create(ActorItem, UML.Actor)\n actor1 = actoritem1.subject\n actor2 = actoritem2.subject\n dep = self.create(DependencyItem)\n\n self.connect(dep, dep.head, actoritem1)\n self.connect(dep, dep.tail, actoritem2)\n\n assert dep.subject\n assert 1 == len(actor1.supplierDependency)\n assert actor1.supplierDependency[0] is dep.subject\n assert 1 == len(actor2.clientDependency)\n assert actor2.clientDependency[0] is dep.subject\n\n # Do the same thing, but now on a new diagram:\n\n diagram2 = self.element_factory.create(UML.Diagram)\n actoritem3 = diagram2.create(ActorItem, subject=actor1)\n actoritem4 = diagram2.create(ActorItem, subject=actor2)\n dep2 = diagram2.create(DependencyItem)\n\n self.connect(dep2, dep2.head, actoritem3)\n cinfo = diagram2.canvas.get_connection(dep2.head)\n assert cinfo is not None\n assert cinfo.connected is actoritem3\n self.connect(dep2, dep2.tail, actoritem4)\n assert dep2.subject is not None\n assert 1 == len(actor1.supplierDependency)\n assert actor1.supplierDependency[0] is dep.subject\n assert 1 == len(actor2.clientDependency)\n assert actor2.clientDependency[0] is dep.subject\n\n assert dep.subject is dep2.subject", "def create_problem(self, name=\"\", 
problem_type=\"\", problem_type_details={},\n data_dir_train=\"\", data_dir_test=\"\", files=[], table_names=[],\n entities_table_name=\"\", entities_featurized_table_name=\"\",\n target_table_name=\"\"):\n\n with self.__orm.session_scope() as session:\n try:\n problem = session.query(Problem).filter(Problem.name == name).one()\n print(\"Problem {} already exists\".format(name))\n return\n except NoResultFound:\n pass # we will create it\n\n problem = Problem(\n name = name,\n problem_type = problem_type,\n problem_type_details = json.dumps(problem_type_details),\n data_dir_train = data_dir_train,\n data_dir_test = data_dir_test,\n files = json.dumps(files),\n table_names = json.dumps(table_names),\n entities_table_name = entities_table_name,\n entities_featurized_table_name = entities_featurized_table_name,\n target_table_name = target_table_name,\n )\n session.add(problem)\n print(\"Problem {} successfully created\".format(name))", "def _create_new_relation_concept(self, rc_type, data_dict):\n # generate name, create individual with role assignments\n i = self.auto_generated_name_numbers[rc_type]\n self.auto_generated_name_numbers[rc_type] += 1\n relation_name = f\"i{rc_type.name}_{i}\"\n\n kwargs = {}\n for key, value in data_dict.items():\n res = self._handle_key_for_individual(key, value, relation_name, None)\n if res is not None:\n kwargs.update(res)\n\n relation_individual = self._create_individual(rc_type, relation_name, relation_name, label=None, kwargs=kwargs)\n\n return relation_individual", "def allow_relation(self, obj1, obj2, **hints):\n if (\n obj1._meta.app_label in self.route_app_labels or\n obj2._meta.app_label in self.route_app_labels\n ):\n return True\n return None", "def test_linked_problem(self):\r\n\r\n # Setup the peer grading module with the proper linked location.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)\r\n\r\n # Ensure that it is properly setup.\r\n self.assertTrue(peer_grading.use_for_single_location)", "def createRelationships(RelationShipList, mainMesh, otherMesh):\r\n\r\n # We first get a list of border edges the mainMesh has\r\n cmds.select(mainMesh)\r\n cmds.polySelectConstraint(m=3, t=0x8000, w=1)\r\n MainBorderEdges = cmds.ls(sl=True)\r\n MainBorderEdges = cmds.filterExpand(MainBorderEdges, ex=True, sm=32)\r\n cmds.polySelectConstraint(dis=True)\r\n\r\n # We then convert these edges to vertices\r\n MainBorderVert = cmds.polyListComponentConversion(fe=True, tv=True)\r\n MainBorderVert = cmds.filterExpand(MainBorderVert, ex=True, sm=31)\r\n\r\n # With the vertices that we got, we get the position they are in\r\n MainBorderPosition = []\r\n\r\n for vert in MainBorderVert:\r\n position = cmds.xform(vert, q=True, t=True, ws=True)\r\n #Remove Maya's annoying exponential which is basically 0\r\n if \"e\" in str(position[0]):\r\n position[0] = 0.0\r\n if \"e\" in str(position[1]):\r\n position[1] = 0.0\r\n if \"e\" in str(position[2]):\r\n position[2] = 0.0\r\n MainBorderPosition.append([vert, position])\r\n\r\n\r\n # We then get a list of the border edges of the other mesh\r\n cmds.select(otherMesh)\r\n cmds.polySelectConstraint(m=3, t=0x8000, w=1)\r\n OtherBorderEdges = cmds.ls(sl=True)\r\n OtherBorderEdges = cmds.filterExpand(OtherBorderEdges, ex=True, sm=32)\r\n cmds.polySelectConstraint(dis=True)\r\n\r\n # We then convert these edges to vertices\r\n OtherBorderVert = cmds.polyListComponentConversion(fe=True, tv=True)\r\n OtherBorderVert = cmds.filterExpand(OtherBorderVert, ex=True, sm=31)\r\n\r\n # With the 
vertices that we got, we get the position they are in\r\n OtherBorderPosition = []\r\n\r\n for vert in OtherBorderVert:\r\n position = cmds.xform(vert, q=True, t=True, ws=True)\r\n # Remove Maya's annoying exponential which is basically 0\r\n if \"e\" in str(position[0]):\r\n position[0] = 0.0\r\n if \"e\" in str(position[1]):\r\n position[1] = 0.0\r\n if \"e\" in str(position[2]):\r\n position[2] = 0.0\r\n OtherBorderPosition.append([vert, position])\r\n\r\n\r\n # We then compare the positions. If they match, we get the matching vertices and create a relationship\r\n MatchVertices = []\r\n for i in range(0, len(MainBorderPosition)):\r\n similar = 0\r\n for j in range(0, len(OtherBorderPosition)):\r\n if MainBorderPosition[i][1] == OtherBorderPosition[j][1]:\r\n if checkUniqueChild(RelationShipList, MainBorderPosition[i][0]) == 1:\r\n MatchVertices.append([MainBorderPosition[i][0], OtherBorderPosition[j][0]])\r\n\r\n for i in MatchVertices:\r\n RelationShipList.append(i)", "def set_relation(\n self, other, reltype=None, set_reverse=True\n ): ## TODO: logic to find and set siblings?\n ##TODO: test coverage\n reltype = reltype.upper()\n reltype_reverse = {\"CHILD\": \"PARENT\", \"PARENT\": \"CHILD\", \"SIBLING\": \"SIBLING\"}[\n reltype\n ]\n if isinstance(other, CalendarObjectResource):\n if other.id:\n uid = other.id\n else:\n uid = other.icalendar_component[\"uid\"]\n else:\n uid = other\n if set_reverse:\n other = self.parent.object_by_uid(uid)\n if set_reverse:\n other.set_relation(other=self, reltype=reltype_reverse, set_reverse=False)\n\n existing_relation = self.icalendar_component.get(\"related-to\", None)\n existing_relations = (\n existing_relation\n if isinstance(existing_relation, list)\n else [existing_relation]\n )\n for rel in existing_relations:\n if rel == uid:\n return\n\n self.icalendar_component.add(\n \"related-to\", uid, parameters={\"RELTYPE\": reltype}, encode=True\n )\n\n self.save()", "def create_relationships(self, source_nodes, target_nodes):\n source_nodes = (source_nodes,) if isinstance(source_nodes, Node) else source_nodes\n target_nodes = (target_nodes,) if isinstance(target_nodes, Node) else target_nodes\n for source_node in source_nodes:\n for target_node in target_nodes:\n self.create_relationship(source_node, target_node)", "def createRelation(rid, rlabel, list, x, y):\n relation = Relation(rid, rlabel, x, y)\n list.append(relation)", "def _do_relation(self):\n if self.chunks:\n ch = self.chunks[-1]\n for relation, role in ch.relations:\n if role == \"SBJ\" or role == \"OBJ\":\n self.relations[role][relation] = ch\n if ch.type in (\"VP\",):\n self.relations[ch.type][ch.relation] = ch", "def derive_relationship(\n self,\n variable_follower,\n variable_leaders,\n ):\n return super().derive_relationship(\n variable_follower=variable_follower,\n variable_leaders=variable_leaders,\n interpkind=\"linear\",\n )", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == self.app_label or \\\n obj2._meta.app_label == self.app_label:\n return True\n return None" ]
[ "0.61737007", "0.6095098", "0.5975486", "0.5956993", "0.5893868", "0.5735954", "0.57261753", "0.56114084", "0.55028147", "0.5449277", "0.538438", "0.53711104", "0.5345818", "0.5305271", "0.52893794", "0.5283024", "0.52736306", "0.5254689", "0.5251221", "0.52238345", "0.5218089", "0.5213674", "0.5203576", "0.51942015", "0.5183119", "0.5181911", "0.51792973", "0.5176579", "0.51472485", "0.51304173" ]
0.6961868
0
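As an illustrative aside (not part of the dataset): the row above describes a client method that submits one side of a problem relationship via PUT and relies on the server to create the complementary side. A minimal sketch of such a client follows, assuming the requests library, a made-up base URL, and a made-up "duplicate-of" relation label; the endpoint layout simply mirrors the document above.

import requests

class ProblemClientSketch:
    """Hypothetical minimal client; only the relationship-creation call is sketched."""

    def __init__(self, base_url):
        self.base_url = base_url.rstrip("/")
        self.session = requests.Session()

    def _put(self, path, json=None):
        # small wrapper so the method below reads like the dataset's document
        response = self.session.put("%s/%s" % (self.base_url, path), json=json)
        response.raise_for_status()
        return response

    def problem_relationship_add(self, src_identifier, relation_type, dst_identifier):
        # a single, one-sided call; the reverse edge is expected to appear server-side
        self._put("problems/%d/relationships" % src_identifier,
                  json={"type": relation_type, "problemIdentifier": dst_identifier})

# client = ProblemClientSketch("https://judge.example.org/api")   # assumed URL
# client.problem_relationship_add(7, "duplicate-of", 13)          # assumed relation label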
Delete a relationship as returned by problem_relationships.
def problem_relationship_delete(self, src_identifier, relation_dict): self._delete("problems/%d/relationships" % src_identifier, json=relation_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n _unset_related_objects_relations(self)\n self.deleted = now()\n self.save()\n\n return self", "def delete_relationship(tx, node_value_1=None, node_value_2=None, node_type_1=None, node_type_2=None, relationship=None):\n if node_value_1 is None and node_type_1 is None:\n cql = \"MATCH ()-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))\n elif node_value_2 is None and node_type_2 is None:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-() \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1)\n except Exception as e:\n print(str(e))\n else:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))", "def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)", "def DeleteConceptRelations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self):\n for obj in self:\n _unset_related_objects_relations(obj)\n\n self.update(deleted=now())", "def delete(self, purpose, using, sender, recipient):\n Relation = self.models[using]\n self.session.query(Relation)\\\n .filter(Relation.purpose == purpose)\\\n .filter(Relation.using == using)\\\n .filter(Relation.sender == sender)\\\n .filter(Relation.recipient == recipient)\\\n .delete()\n self.session.flush()", "def delete_resource_relation_by_user(self, *,\n id: str,\n user_id: str,\n relation_type: UserResourceRel,\n resource_type: ResourceType) -> None:\n if resource_type not in resource_relation_model:\n raise NotImplementedError(f'The resource_type {resource_type.name} is not define!')\n\n if relation_type not in resource_relation_model[resource_type]:\n raise NotImplementedError(f'the relation type {relation_type} is not defined!')\n\n res_rel_model = resource_relation_model[resource_type][relation_type]\n res_key = f'{resource_type.name.lower()}_rk'\n user_attr = getattr(res_rel_model, 'user_rk')\n res_attr = getattr(res_rel_model, res_key)\n try:\n with self.client.create_session() as session:\n session.query(res_rel_model).filter(user_attr == user_id, res_attr == id).delete()\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to delete relation between user {user_id} and resource {id}')\n raise e", "def remove_relation(self, qid, relation, qid2):\n if self._kg_symbols is not None:\n self._kg_symbols.remove_relation(qid, relation, qid2)", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def delete(self):\n self.graph._del(handle=self.handle)", "def delete(self):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant delete 
abstract elements')\r\n if self.eid is None:\r\n return self\r\n query = \"\"\"\r\n e = g.e(eid)\r\n if (e != null) {\r\n g.removeEdge(e)\r\n g.stopTransaction(SUCCESS)\r\n }\r\n \"\"\" \r\n results = execute_query(query, {'eid':self.eid})", "def delete(self, problem_id):\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type='REMOVED')\n self.sess.add(activity)\n self.sess.commit()", "def delete_parent(sender, instance, **kwargs):\n ItemRelation.objects.filter(child_id=instance.item_id).delete()", "def delete_rel(source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source.id, target.id))\n ss = source\n source.synset_relations = [\n r for r in ss.synset_relations if r.target != target.id]\n if change_list:\n change_list.change_synset(source)", "def delete_related(request, scheme_id):\n scheme = get_object_or_404(ShamirSS, pk=scheme_id)\n documents = get_list_or_404(Document, scheme=scheme)\n form = DeleteRelatedForm()\n if request.method == 'POST':\n Document.objects.filter(scheme=scheme).delete()\n return redirect('/s')\n else:\n return render(request, 'shared_secret/del_related.html', {\n 'scheme': scheme,\n 'documents': documents,\n 'form': form\n })", "def remove_relation(request, id):\n user = request.user\n relation = get_object_or_404(User, id=id)\n user.profile.relations.remove(relation)\n user.profile.friends.add(relation)\n messages.success(\n request,\n 'Family member removed to your friends list'\n )\n return redirect('profiles:my_friends')", "def delete(self):\n DATABASE_CONNECTION.delete(self.__class__.__name__, self.id)", "def delete(self):\n if self.parent:\n assert isinstance(self.parent, Collection) # only know how to delete from Collection parents\n self.parent.delete_child(self)\n else:\n self._mark_deleted()", "def clear_relation(self, relation_name):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(sql.SQL(\"DELETE FROM {}\").format(sql.Identifier(relation_name)))\n cur.execute(\"ALTER SEQUENCE {0}_index_seq RESTART WITH 1;\".format(relation_name))\n conn.commit()\n cur.close()\n except Exception as e:\n print(e)", "def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])", "def delete(self, session):\n # ForeignKey constrains CASCADE on delete\n session.delete(self)\n session.commit()\n # Clean up any orphan CatalogStars\n session.query(CatalogStar)\\\n .filter(CatalogStar.catalog_id.is_(None))\\\n .delete(synchronize_session=False)\n session.commit()\n # Clean up any orphan Observations\n session.query(Observation)\\\n .filter(Observation.catalog_star_id.is_(None))\\\n .delete(synchronize_session=False)\n session.commit()", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self, id=None, **kwargs):\r\n rm = ResourceManager()\r\n pt = self.db.auth_permission\r\n if id and not isinstance(id, (list, tuple, set)):\r\n id = [id]\r\n\r\n # removing private args\r\n if self.private_args:\r\n private_args = self.private_args.table\r\n self.private_args.delete(id)\r\n else:\r\n private_args = None\r\n\r\n # # removing many to many references\r\n # m2ms = set()\r\n # for reference in (tuple(x.split('/')) for x in imap(itemgetter('indexName'),self.many_to_many)):\r\n # resource = rm.m2m(reference)\r\n # if resource:\r\n # m2ms.add(resource.table)\r\n # resource.delete(self,collection = 
id)\r\n\r\n # getting table names and field names to delete\r\n cascading_deletion = tuple((field.table, field) for field in self.table._referenced_by if\r\n field.ondelete == 'CASCADE' and field.table != private_args) # and field.table not in m2ms)\r\n # deleting all related objects\r\n for table, field in cascading_deletion:\r\n res = rm.resource(table)\r\n if res:\r\n # fetch all id of related rows\r\n ids = set(chain(*self.sql(field.belongs(id), table._id, as_dict=False)))\r\n if ids:\r\n # if related entitiy is a many to many relation delete reference with other objects, but not related objects\r\n if isinstance(res, ManyToManyRelation):\r\n # making deletion simpy by forign related attribute\r\n res.delete(self, resource_id=ids)\r\n else:\r\n res.delete(id=ids, _check_permissions=False)\r\n\r\n self.db(self.table.id.belongs(id)).delete()\r\n # deleting all directly related permissions\r\n self.db((pt.table_name == self.table._tablename) & pt.record_id.belongs(id)).delete()\r\n # if realtime_enabled and self.minimal_permissions:\r\n # sync_permissions(self.table._tablename, id, self.minimal_permissions)\r\n # perms = sql(pt.record_id.belongs(id) & (pt.table_name == self.table._tablename))\r\n # if perms:\r\n # rt_sync_permissions(self.table, id, perms)\r", "def revoke_qualification(self, qualification_id: str, worker_id: str) -> None:\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"DELETE FROM granted_qualifications\n WHERE (qualification_id = ?1)\n AND (worker_id = ?2);\n \"\"\",\n (int(qualification_id), int(worker_id)),\n )", "def delete(self, identifier):\n self.get(identifier)\n conn = self.get_connector()\n cursor = conn.cursor()\n\n query = \"delete from {0} where {2}={1}\".format(\n self.ressource_config[\"table\"],\n identifier,\n self.model.pk_field.name)\n try:\n cursor.execute(query)\n except sqlite3.IntegrityError, e:\n message = \"\"\n if \"foreign\" in e.message:\n message = \"\"\"another ressource depends on this\n object. Cloud not delete before all ressources\n depending on it are also deleted\"\"\"\n\n raise BadRequest(message)\n\n conn.commit()\n conn.close()", "def delete_required(required):\n required.delete_required()", "def delete_question(request, question_id):\n raise NotImplementedError", "def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()", "def delete(self, *args, **kwargs):\n self.delete_relatives()\n old_content = self.content\n super().delete(*args, **kwargs)\n if old_content.isOrphaned():\n old_content.delete()", "def delete_policy(self, policy_ref: str) -> None:\n self.batch_write(\n [self.batch_detach_policy(policy_ref, obj_ref) for obj_ref in self.list_policy_attachments(\n policy_ref,\n ConsistencyLevel=ConsistencyLevel.SERIALIZABLE.name)])\n self.batch_write(\n [self.batch_detach_object(parent_ref, link_name) for parent_ref, link_name in self.list_object_parents(\n policy_ref,\n ConsistencyLevel=ConsistencyLevel.SERIALIZABLE.name)])\n retry(**cd_read_retry_parameters)(cd_client.delete_object)(\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': policy_ref})" ]
[ "0.62468123", "0.6075937", "0.60754156", "0.6045664", "0.5829113", "0.57965726", "0.5750567", "0.57431644", "0.57239985", "0.5719725", "0.56914794", "0.563315", "0.5614135", "0.5602807", "0.5601607", "0.55926967", "0.5554488", "0.5542166", "0.5538604", "0.5506461", "0.55014753", "0.5453554", "0.54124045", "0.5397391", "0.5394182", "0.5390756", "0.53792065", "0.53704923", "0.536811", "0.5337826" ]
0.842289
0
Access the list of comments on the requested problem.
def problem_comments(self, identifier): return self._get("problems/%d/comments" % identifier).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_comments(self):\n raise NotImplementedError", "def get_comments(self):\n\t\treturn self._client.get_comments(self)", "def comments(self):\r\n return IssueComments(self)", "def comments(self):\r\n return IssueComments(self)", "def comments(self) -> list:\n return self._node[\"app_data\"][\"ui_data\"].get(\"comments\", [])", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\n return self.container['comments']", "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "def comments(self):\n return self._comments", "def comments(self):\n return self._comments", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def comments(self):\r\n return comments.Comments(self)", "def test_issue_get_comments(self):\n pass", "def comments(self):\r\n return comments.ForumSuggestionComments(self)", "def comments(self):\n return comments.Comments(self)", "def get_comments(self):\n\t\tself.comments = graph.get_connections(post['id'], 'comments')", "def comments(self):\r\n return Comments(self)", "def comments(self):\r\n return Comments(self)", "def comments(self):\r\n return Comments(self)", "def get_comments(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/comments\".format(project.id,\n story.id)\n params = {\"fields\": Comment.FIELDS}\n comments = self._request(\"get\", resource, params=params)\n\n for comment in comments:\n ret_val.append(Comment(comment))\n\n return ret_val", "def get_comments(qint,conn):\n\n comms = ('SELECT DISTINCT ip.value '\n 'FROM interaction i, interactionprop ip, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.type_id = cvt.cvterm_id '\n 'AND cvt.is_obsolete=0 AND cvt.name != \\'comments on source\\' '\n 'AND cvt.name != \\'internalnotes\\' AND i.uniquename = %s')\n comnts = connect(comms, qint, conn)\n return(comnts)", "def test_issue_get_comment(self):\n pass", "def getAllComments(self):\r\n return [(ind, comment) for ind, comment in enumerate(self.comments)]", "def get_comment(self, index):\r\n\r\n # Get request to get all the comments for all exercises\r\n comments = requests.get(API.url_comment, headers = self.headers).json()\r\n # Parse the response\r\n for my_comment in comments:\r\n if my_comment['id'] == index:\r\n print(my_comment['comment'])", "def issues_comments_list(self, mar, request):\n issue = self._services.issue.GetIssueByLocalID(\n mar.cnxn, mar.project_id, request.issueId)\n comments = self._services.issue.GetCommentsForIssue(\n mar.cnxn, issue.issue_id)\n comments = [comment for comment in comments if not comment.approval_id]\n visible_comments = []\n for comment in comments[\n request.startIndex:(request.startIndex + request.maxResults)]:\n visible_comments.append(\n api_pb2_v1_helpers.convert_comment(\n issue, comment, mar, self._services, mar.granted_perms))\n\n return api_pb2_v1.IssuesCommentsListResponse(\n kind='monorail#issueCommentList',\n totalResults=len(comments),\n items=visible_comments)", "def get_queryset(self):\n queryset = Comment.objects.filter(issue_id=self.issue.pk)\n return queryset", "async def get_todo_comments(self, *, todo: TodoInDB) -> List[CommentInDB]:\n 
comments = await self.db.fetch_all(query=GET_ALL_TODO_COMMENTS_QUERY, values={\"todo_id\": todo.id})\n return [CommentInDB(**comment) for comment in comments]", "def get_commentaire(self):\n return self.commentaire" ]
[ "0.7727881", "0.7471885", "0.73331463", "0.73331463", "0.73330116", "0.73051596", "0.73051596", "0.73051596", "0.7230867", "0.7223843", "0.7126679", "0.7126679", "0.7103531", "0.7061553", "0.7025779", "0.70209825", "0.6987613", "0.68704796", "0.68501776", "0.68501776", "0.68501776", "0.67420727", "0.67336166", "0.6666261", "0.6646312", "0.6617097", "0.6615102", "0.65750545", "0.65696734", "0.6568436" ]
0.8174743
0
Returns a chunk of length of window_size and the end of the window size
def get_chunks(sequence, window_size, step=1): # get the sequence length k = len(sequence) # get the index for each end and chunk for i in range(0, k - window_size + 1, step): # generate the end of the window end = i + window_size # get the slice of the sequence chunk = sequence[i:i + window_size] # assure the the chunk is the expected size assert len(chunk) == window_size yield chunk, end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chunks(sequence, window_size, step=1):\n k = len(sequence)\n for i in range(0, k - window_size + 1, step):\n end = i + window_size\n chunk = sequence[i:i + window_size]\n assert len(chunk) == window_size\n yield chunk, end", "def next_window(self, window_size: int) -> Iterator[Optional[np.ndarray]]:\n if self._count < window_size:\n yield None\n else:\n # Preserve what we want to return by copying it.\n p1 = np.copy(self._data_store[:window_size, :])\n # Remove the data we don't need any more from the front of the buffer.\n frames_to_keep = self._count - window_size\n self._data_store[:frames_to_keep,\n :] = self._data_store[window_size:self._count, :]\n self._count -= window_size\n yield p1", "def window_partition(x, window_size):\n B, D, H, W, C = x.shape\n x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)\n windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)\n return windows", "def rolling_window(seq, window_size):\n for i in xrange(len(seq) - window_size + 1):\n yield seq[i:i+window_size]", "def window(seq, size=2, stride=1):\n it = iter(seq)\n result = []\n for elem in it:\n result.append(elem)\n if len(result) == size:\n yield result\n result = result[stride:]", "def window_data(X, window_length):\n return X[int(len(X)/2-window_length/2):int(len(X)/2+window_length/2)]", "def windows(self,windowSize):\n for i in range(0,len(self)-windowSize):\n yield (i,i+windowSize)", "def windowify(window_size):\n def windowed(iterable):\n return WindowedIterable(iterable, window_size)\n return windowed", "def _sample_window(n, startwindow, stopwindow, window=\"hann\"):\n swindow = np.ones(n)\n\n if startwindow is not None:\n length = startwindow[1] - startwindow[0]\n w = get_window(window, 2 * length, fftbins=False)[:length]\n swindow[: startwindow[0]] = 0\n swindow[startwindow[0] : startwindow[1]] = w\n\n if stopwindow is not None:\n # stop window\n length = stopwindow[1] - stopwindow[0]\n w = get_window(window, 2 * length, fftbins=False)[length:]\n swindow[stopwindow[0] + 1 : stopwindow[1] + 1] = w\n swindow[stopwindow[1] + 1 :] = 0\n\n return swindow", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def sliding_window_offsets(data, window_size=500, shift_size=1):\n offsets = np.asarray(_sliding_window_chunkoffsets(data, window_size, shift_size))\n return offsets", "def slidingWindow(sequence,winSize,step):\n \n # Verify the inputs\n try: it = iter(sequence)\n except TypeError:\n raise Exception(\"**ERROR** sequence must be iterable.\")\n if not ((type(winSize) == type(0)) and (type(step) == type(0))):\n raise Exception(\"**ERROR** type(winSize) and type(step) must be int.\")\n if step > winSize:\n raise Exception(\"**ERROR** step must not be larger than winSize.\")\n if winSize > len(sequence):\n raise Exception(\"**ERROR** winSize must not be larger than sequence\\\n length.\")\n # Pre-compute number of chunks to emit\n numOfChunks = ((len(sequence)-winSize)/step)+1\n # Do the work\n for i in range(0,int(numOfChunks)*step,step):\n yield 
sequence[i:i+winSize]", "def sliding_window(top, step=10, window_size=(20,20)):\n\tfor x in range(0, top.shape[0], step):\n\t\tif x + window_size[0] > top.shape[0]:\n\t\t\tx = top.shape[0] - window_size[0]\n\t\tfor y in range(0, top.shape[1], step):\n\t\t\tif y + window_size[1] > top.shape[1]:\n\t\t\t\ty = top.shape[1] - window_size[1]\n\t\t\tyield x, y, window_size[0], window_size[1]", "def next_window(self) -> Iterator[Optional[np.ndarray]]:\n while self._count >= self._window_width:\n # Preserve what we want to return by copying it.\n p1 = np.copy(self._data_store[:self._window_width, :])\n\n # Remove the data we don't need any more from the front of the buffer.\n frames_to_keep = self._count - self._window_step\n self._data_store[:frames_to_keep,\n :] = self._data_store[self._window_step:self._count, :]\n self._count -= self._window_step\n yield p1", "def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin", "def sliding_window_rms(data, window=None, window_size=500, shift_size=1):\n num_chunks = len(_sliding_window_chunkoffsets(data, window_size, shift_size))\n if num_chunks == 0:\n return np.asarray([])\n if window is None:\n return _numba_sliding_window_rms(data, num_chunks, window_size, shift_size)\n else:\n return _numba_sliding_window_rms_with_window(data, num_chunks, window, window_size, shift_size)", "def sliding_window_analysis(sequence, function,\n window_size=100000, step_size=50000):\n for start in range(0, len(sequence), step_size):\n end = start + window_size\n if end > len(sequence):\n break\n yield start, end, function(sequence[start:end])", "def windows_partition(x, window_size):\n\n B, H, W, C = x.shape\n x = x.reshape([B, H//window_size, window_size, W//window_size, window_size, C])\n x = x.transpose([0, 1, 3, 2, 4, 5])\n x = x.reshape([-1, window_size, window_size, C]) #(num_windows*B, window_size, window_size, C)\n return x", "def window_blocks(large_array, window_size):\n y_size = large_array.shape[0]/window_size\n blocks_array = large_array.reshape(y_size, window_size)\n return blocks_array", "def window(spectrogram: np.ndarray, wlength: int) -> Iterator[np.ndarray]:\n\n y = spectrogram.shape[1]\n for j in range(y):\n ymin = j\n ymax = j + wlength if j + wlength <= y else y\n if ymax == y:\n break\n yield spectrogram[:, ymin:ymax]", "def sliding_window(image, stepSize, windowSize):\n # slide a window across the image\n for y in xrange(0, image.shape[0], stepSize):\n for x in xrange(0, image.shape[1], stepSize):\n # yield the current window\n yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])", "def win_slide(stream, start_time, win_size, step_size, max_windows):\n stream_list=[]\n for i in range(max_windows):\n ts = start_time + (i*step_size)\n st = stream.slice(ts, ts+win_size)\n # skip missing data\n if len(st)!=3: continue\n if not st[0].stats.starttime == st[1].stats.starttime == st[2].stats.starttime: continue\n if not st[0].stats.endtime == st[1].stats.endtime == st[2].stats.endtime: continue\n if len(st[0])!=int(win_size*100+1): continue\n if st.max()[0]==0.0 or st.max()[1]==0.0 or st.max()[2]==0.0: continue\n # add preprocessed time window\n st = preprocess_stream(st)\n stream_list.append(st)\n return stream_list", "def windows(self, size, overlap=0, 
rm_offset=False):\r\n rows = self.data.shape[0]\r\n if (0 < size <= rows) and (0 <= overlap < size):\r\n n = (rows - size) // (size - overlap) + 1\r\n\r\n for i in range(n):\r\n start = (size - overlap) * i\r\n end = start + size\r\n win = self.data.iloc[start:end, :]\r\n if rm_offset:\r\n win_offset = win - win.mean()\r\n win_offset['t'] = win['t']\r\n yield win_offset\r\n\r\n yield win\r\n\r\n else:\r\n raise IndexError(f\"{self} no possible window of size '{size}'.\")", "def movingWindow(rawData, n):\n data = np.array([rawData[i:i+n] for i in range(rawData.shape[0] - (n-1))])\n return data", "def window_index(serieslength,windowsize,overlap):\r\n\r\n p1=0\r\n p2=p1 + windowsize\r\n pt1=[p1]\r\n pt2=[p2]\r\n while p2 < serieslength:\r\n p1 = p2 - overlap\r\n p2 = min((p1 + windowsize, serieslength))\r\n pt1.append(p1)\r\n pt2.append(p2)\r\n \r\n return pt1, pt2", "def window(iterable, stride=3):\n for i in range(len(iterable) - stride + 1):\n yield iterable[i: i + stride]", "def sliding_window(frame_length, step, Xsampleslist, ysampleslist):\n Xsamples = []\n ysamples = []\n for j in range(len(Xsampleslist)):\n X = Xsampleslist[j]\n ybinary = ysampleslist[j]\n for i in range(0, X.shape[0] - frame_length, step):\n xsub = X[i:i + frame_length, :]\n ysub = ybinary\n Xsamples.append(xsub)\n ysamples.append(ysub)\n return Xsamples, ysamples", "def windows(X, width, skip_last):\n ret = []\n n = X.shape[0]\n for i in range(n - width + 1 - skip_last):\n window = X[i:i + width, :]\n ret.append([tuple(x) for x in window[:]])\n return np.array(ret)", "def extract_window_data(df, window_len=30, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_min_max(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)\n #return window_data", "def _sliding_windows(a, N):\n a = np.asarray(a)\n p = np.zeros(N - 1, dtype=a.dtype)\n b = np.concatenate((p, a, p))\n s = b.strides[0]\n return np.lib.stride_tricks.as_strided(\n b[N - 1:],\n shape=(N, len(a) + N - 1),\n strides=(-s, s),\n )" ]
[ "0.6993197", "0.6883627", "0.6766788", "0.67095524", "0.6618862", "0.65909404", "0.6495501", "0.64910275", "0.64154613", "0.63859916", "0.6315355", "0.6278746", "0.6257563", "0.62292266", "0.6201356", "0.6188974", "0.61796284", "0.6144587", "0.6143719", "0.6140929", "0.6123296", "0.61091036", "0.610659", "0.61050767", "0.6071474", "0.60459787", "0.60124743", "0.59848684", "0.59785557", "0.59759885" ]
0.69789374
1
Function to count all ambiguous bases in a sequence. Ambiguous bases are bases that are not in the sequence alphabet, i.e. 'ACGT' for DNA sequences.
def count_umbiguous_bases(sequence):
    sequence = sequence.upper()
    amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']
    return sum({base: sequence.count(base) for base in amb}.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_all_bases(sequence):\n # create a set of bases\n bases = set(sequence)\n all_bases = defaultdict(int)\n # iterates in the base set\n for base in bases:\n # count the bases in the sequence\n all_bases[base] = sequence.count(base)\n return all_bases", "def get_bases_stats(sequence, alphabet, start):\n seq = sequence.upper()\n seq_len = len(seq)\n half_seq = seq_len // 2\n ter = start + half_seq\n # as a circular genome\n if ter > seq_len:\n ter = ter - seq_len + 1\n counts = defaultdict(int)\n for base in alphabet:\n total = seq.count(base)\n if ter > start: # start ---> ter\n f_count = seq[start:ter].count(base)\n r_count = total - f_count\n else: # ter ---> start\n r_count = seq[ter:start].count(base)\n f_count = total - r_count\n counts[base] = (total, f_count, r_count)\n return counts", "def base_frequencies(seq):\n\n # Get the length of the sequence\n sequence_len = len(seq)\n\n # Initialize base frequencies\n base_frequencies = {\n 'A': 0,\n 'C': 0,\n 'T': 0,\n 'G': 0\n }\n\n # Count bases\n for base in seq:\n base_frequencies[base] += 1\n\n # Normalize count\n for base in base_frequencies:\n base_frequencies[base] = base_frequencies[base]/sequence_len\n\n return base_frequencies", "def get_base_usage(sequences):\n usage = {\"A\": 0, \"C\": 0, \"G\": 0, \"U\": 0}\n for sequence in sequences:\n for base in usage:\n usage[base] += sequence.count(base)\n return usage", "def countBasesInFasta(fastaFile):\n recordRE=re.compile(r'^>')\n whiteSpaceRE=re.compile(r'\\s+')\n totalBases=0\n totalSeqs=0\n with open(fastaFile) as f:\n for line in f:\n if recordRE.match(line):\n totalSeqs+=1\n continue\n totalBases+=len(whiteSpaceRE.sub('',line))", "def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += up_seq.count(vchar)\r\n return len(curr_seq) - total", "def test_count_ab(self):\n AB = get_moltype(\"ab\")\n seq = AB.make_array_seq(\"aaba-\", alphabet=AB.alphabet.with_gap_motif())\n c = seq.counts()\n self.assertEqual(c.to_dict(), {\"a\": 3, \"b\": 1})\n c = seq.counts(allow_gap=True)\n self.assertEqual(c.to_dict(), {\"a\": 3, \"b\": 1, \"-\": 1})", "def count(self, base):\n return self._dna.count(base)", "def count_abbas(str):\r\n i = 0\r\n count = 0\r\n for i in range(0, len(str)):\r\n if str.startswith(\"abba\", i):\r\n count += 1\r\n return count", "def Solve(bases):\r\n n = 1\r\n while 1:\r\n n += 1\r\n done = True\r\n for b in bases:\r\n if not Happy(n, b):\r\n done = False\r\n break\r\n if done:\r\n return n", "def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()", "def 
count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n", "def uracil_count(RNAsequence):\n uracil = 0\n for nucleotide in RNAsequence:\n if nucleotide == 'U':\n uracil += 1\n return uracil", "def count_padding_bases(seq1, seq2):\n if len(seq1) <= len(seq2):\n shorter, longer = seq1, seq2\n else:\n shorter, longer = seq2, seq1\n\n n = 0\n for base1, base2 in zip(shorter, longer[: len(shorter)]):\n if base1 == base2:\n n += 1\n else:\n break\n\n return n", "def determineIdenticalBases(string1, string2):\n S = 0\n D = 0\n if len(string1) != len(string2):\n return -1\n for i in range(len(string1)):\n if checkForNOrGap(string1[i]) and checkForNOrGap(string2[i]) :\n if string1[i] == string2[i]:\n S += 1\n else:\n D += 1\n return S, D", "def determineIdenticalBases(string1, string2):\n S = 0\n D = 0\n if len(string1) != len(string2):\n return -1\n for i in range(len(string1)):\n if checkForNOrGap(string1[i]) and checkForNOrGap(string2[i]) :\n if string1[i] == string2[i]:\n S += 1\n else:\n D += 1\n return S, D", "def base_codes(self):\n bases = []\n\n if self.is_gas_giant:\n bases.append(\"G\")\n if self.is_naval_base:\n bases.append(\"N\")\n if self.is_scout_base:\n bases.append(\"S\")\n if self.is_research_base:\n bases.append(\"R\")\n if self.is_tas:\n bases.append(\"T\")\n if self.is_consulate:\n bases.append(\"I\")\n if self.is_pirate_base:\n bases.append(\"P\")\n\n return \" \".join(bases)", "def codon_counts(self):\n # Removing 5' UTR and 3' UTR sequences\n sequence = self.sequence.replace(self.five_prime_utr_sequence, \"\").replace(self.three_prime_utr_sequence, \"\")\n return len(sequence) / 3", "def base_stats(sequence, alphabet, as_count=False, as_dict=False):\n # make the sequence upper case\n seq = sequence.upper()\n # count all bases in sequence and collect as an array\n counts = np.array([seq.count(i) for i in alphabet])\n # if is onle the counts\n if as_count:\n freqs = counts\n # other wise as frequencies\n else:\n freqs = counts / sum(counts * 1.0)\n # or as a dictionary like object\n if as_dict:\n return dict(zip(alphabet, freqs))\n else:\n return freqs", "def count_mapped_bases(bam):\n\n for read in open_bam(bam):\n if not read.is_secondary:\n count = Counter(read.query_alignment_sequence)\n yield(count)", "def generatebasepairs(self, x):\n currentbases = \"\"\n for u, v in zip(x, range(len(x))):\n if u == 0:\n currentbases += '_'\n else:\n currentbases += self.sequences[v][u-1]\n\n return currentbases", "def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases", "def get_alts_in_hom_pileup(pileup_str, ref_base):\n alts = {'A':0, 'C':0, 'G':0, 'T':0}\n for base in pileup_str:\n if base != ref_base and base in alts.keys():\n alts[base] += 1\n\n return max(alts, key=alts.get), alts[max(alts, key=alts.get)]", "def test_counts(self):\n # test DNA seq\n orig = \"AACCGGTTAN-T\"\n seq = self.DNA(orig)\n # no gaps, no ambiguities\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n # gaps allowed\n got = seq.counts(allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n # ambig allowed\n got = seq.counts(include_ambiguity=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap 
allowed\n got = seq.counts(include_ambiguity=True, allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n\n # test DNA seq motif length of 2\n got = seq.counts(motif_length=2)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n self.assertEqual(dict(got), expect)\n # gap allowed\n got = seq.counts(motif_length=2, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n expect.update({\"-T\": 1})\n # ambig allowed\n got = seq.counts(motif_length=2, include_ambiguity=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(motif_length=2, include_ambiguity=True, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n expect.update({\"-T\": 1})\n self.assertEqual(dict(got), expect)\n\n # test base -- no concept of ambiguity, but understands gap\n orig = \"AACCGGTTAN-T\"\n seq = self.SEQ(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n\n # handle '?'\n orig = \"AACCGGTTAN-T?\"\n seq = self.DNA(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n got = seq.counts(allow_gap=True, include_ambiguity=True)\n expect.update({\"-\": 1, \"N\": 1, \"?\": 1})\n self.assertEqual(dict(got), expect)", "def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))", "def _contiguous_accounts_complete_fraction(self) -> float:\n starting_index = bytes_to_nibbles(self._next_trie_root_hash)\n unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes\n if len(unknown_prefixes) == 0:\n return 1\n\n # find the nearest unknown prefix (typically, on the right)\n nearest_index = unknown_prefixes.bisect(starting_index)\n\n # Get the nearest unknown prefix to the left\n if nearest_index == 0:\n left_prefix = (0, ) * 64\n else:\n left_prefix = unknown_prefixes[nearest_index - 1]\n if key_starts_with(starting_index, left_prefix):\n # The prefix of the starting index is unknown, so the index\n # itself is unknown.\n return 0\n\n # Get the nearest unknown prefix to the right\n if len(unknown_prefixes) == nearest_index:\n right_prefix = (0xf, ) * 64\n else:\n right_prefix = unknown_prefixes[nearest_index]\n\n # Use the space between the unknown prefixes to estimate the completed contiguous fraction\n\n # At the base, every gap in the first nibble is a full 1/16th of the state complete\n known_first_nibbles = right_prefix[0] - left_prefix[0] - 1\n completed_fraction_base = (1 / 16) * known_first_nibbles\n\n # Underneath, you can count completed subtrees on the right, each child 1/16 of the parent\n right_side_completed = sum(\n nibble * (1 / 16) ** nibble_depth\n for nibble_depth, nibble\n in enumerate(right_prefix[1:], 2)\n )\n # Do the same on the left\n left_side_completed = sum(\n (0xf - nibble) * (1 / 16) ** nibble_depth\n for nibble_depth, nibble\n in enumerate(left_prefix[1:], 2)\n )\n\n # Add up all completed areas\n return left_side_completed + completed_fraction_base + right_side_completed", "def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)", "def n_count(dna_string):\n a_count = 0\n c_count = 0\n g_count = 0\n t_count= 0\n for nuc in dna_string:\n if nuc.upper() == 'A':\n a_count += 1\n elif nuc.upper() == 'C':\n c_count += 1\n elif 
nuc.upper() == 'G':\n g_count += 1\n elif nuc.upper() == 'T':\n t_count += 1\n else:\n continue\n print(a_count, c_count, g_count, t_count)", "def get_biomarkes(abundant, xxx):\n cc = []\n bk = set()\n lvl = 0\n\n for _, t in abundant:\n cc.append(t.split('.'))\n\n while lvl < len(max(cc)):\n bk = set()\n\n for c in cc:\n if lvl < len(c):\n bk |= set([c[lvl]])\n\n if len(bk) >= xxx:\n break\n\n lvl += 1\n\n return bk", "def length_uc(x):\r\n return sum(length(m) for m in metamer(x))" ]
[ "0.7385681", "0.6816655", "0.65618616", "0.60834855", "0.6077257", "0.60242873", "0.6008956", "0.59128505", "0.5824625", "0.5747893", "0.56719303", "0.5662238", "0.5654305", "0.5579492", "0.5552459", "0.5552459", "0.5543022", "0.55394393", "0.5512835", "0.5503739", "0.54584694", "0.5437764", "0.5326231", "0.53243273", "0.5320083", "0.52598995", "0.5256034", "0.52469075", "0.5238744", "0.52158993" ]
0.7984436
0
Calculates the AT/GC ratio of a genome.
def get_at_gc_ratio(at, gc): return at / gc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gc_rate(dna: str, percent=False):\n c = Counter(dna)\n result = (c[\"G\"] + c[\"C\"]) / len(dna)\n return result * 100 if percent else result", "def gc(self) -> float:\n g = self.count(\"G\")\n c = self.count(\"C\")\n return (g + c) / len(self) * 100", "def calculate_gear_ratio(front_gear, back_gear):\n return front_gear/back_gear", "def gc_content(seq):\n result = float(str(seq).count('G') + str(seq).count('C'))/len(seq) *100\n return result", "def getGCpercentage(DNA):\n dnaLength = len(DNA) #counts the length of the DNA string\n findG = DNA.count(\"G\") #finds the letter G in DNA string\n findC = DNA.count(\"C\") #finds the letter C in DNA string\n print(findG)\n print(findC)\n print(dnaLength)\n GCpercent = ((findC + findG)/dnaLength) * 100 #calculates percentage of Gs and Cs\n print(\"Percentage of G and C:\",\" %6.2f\" % GCpercent)\n \n return getGCpercentage", "def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100", "def _gc_prop(s, length):\n\n gc = sum(map(s.count, [\"c\", \"g\"]))\n\n return gc / length", "def gc(self):\n g = self.seq.count('G')\n g += self.seq.count('g')\n c = self.seq.count('C')\n c += self.seq.count('c')\n return (g + c) / len(self.seq)", "def assemblyCoverage(self, genomeSize):\n\t\tstats = self.scores()\n\t\tassemblySize = stats['lengthOfAssembly']\n\t\treturn float(assemblySize)/genomeSize", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))", "def get_occupancy(self):\n # Compute logo for current alignment\n logo = self.get_logo()\n # Compute occupancy denominator by summing number of occurriencies\n den = np.sum(logo, axis=0)\n # Compute occupancy numerator by subtracting gaps from total\n num = den - logo[-1, :]\n # Compute occupancy as fraction of non-gap aligned residues\n return num / den", "def compute_gc(seq): # seq should be a string\n num_GC = list(seq).count('g')+list(seq).count('c')+list(seq).count('G')+list(seq).count('C')\n amount_GC = num_GC/len(seq)\n return amount_GC", "def getGC(self):\n numGC = self.sequence.upper().count(\"G\") + self.sequence.upper().count(\"C\")\n self.gc = float(numGC)/len(self.sequence)\n return self.gc", "def golden_ratio():\n return 1.61803398875", "def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + sc['C'] + sc['G'] + sc['T']) * 100, 2)", "def _calc_coverage(self, cds_aln):\n # Aligned region is part of a read that intersects with cds.\n coverage = 0\n for aln_reg in cds_aln.aligned_regions.values(): # aln_reg is of type CdsAlnSublocation\n location = aln_reg.location # location is of type Location\n coverage += location.length()\n coverage = coverage / float(Location.from_location_str(cds_aln.cds.location).length())\n return coverage", "def calc_GC(filepath):\n liste=['small.exon.piRNA_2.fa', 'small.exon.piRNA_1.fa', 'small.exon.piRNA_3.fa']\n \n length=list(range(0,34))\n d={}\n for i in length:\n d[i]={'A':0, 'G':0, 'T':0, 'C':0}\n for i in liste:\n with open(filepath+'/'+i, 'r') as f:\n for line in f:\n #fasta header starts with >\n if line.startswith('>'):\n pass\n else:\n line_l=list(line)\n for el in range(len(line_l)):\n if line_l[el]=='A':\n d[el]['A']+=1\n elif 
line_l[el]=='T':\n d[el]['T']+=1\n elif line_l[el]== 'G':\n d[el]['G']+=1\n elif line_l[el]== 'C':\n d[el]['C']+=1\n\n df=pd.DataFrame.from_dict(d)\n df=df.transpose()\n df.index = np.arange(1, len(df) + 1)\n \n\n df['A [%]']=df['A']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['G [%]']=df['G']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['T [%]']=df['T']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['C [%]']=df['C']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def reciprocity_ratio(gd):\r\n reciprocal = 0.0\r\n edge_list = gd.get_edgelist()\r\n for i in it.permutations(range(gd.vcount()),2):\r\n if i in edge_list and i[::-1] in edge_list:\r\n reciprocal += 1.0\r\n return reciprocal/gd.ecount()", "def get_genome_coverage(file_path, chrom_lengths_file):\n lines_in_input = float(get_number_of_lines(file_path))\n chrom_lengths = get_chrom_lengths(chrom_lengths_file)\n genome_size = float(get_genome_size(chrom_lengths))\n if genome_size == 0:\n # Use default.\n genome_size = float(MAX_GENOME_SIZE)\n genome_coverage = '%.4f' % float(lines_in_input / genome_size)\n return float(genome_coverage)", "def golden_ratio():\n print((1+math.sqrt(5))/2)", "def get_camb_total_ratio(matpow, transfer):\n omegam = 0.288\n mnu=0.3\n kt,total = get_camb_power(matpow)\n kc,cdm = get_camb_cdm_power(matpow, transfer)\n kn,nu = get_camb_nu_power(matpow, transfer)\n total_sum = ((omegam-omeganu(mnu))*cdm**0.5/omegam+nu**0.5*omeganu(mnu)/omegam)**2\n return kt, total,total_sum", "def gpa(self):\n try:\n return sum(self.courses.values()) / len(self.courses)\n except ZeroDivisionError:\n return 0", "def get_gc_count(dataset):\n\n gc_count_dict = {}\n\n for sequence in SeqIO.parse(dataset, 'fasta'):\n c_count = sequence.seq.count('C')\n g_count = sequence.seq.count('G')\n gc_count = ((c_count + g_count)/len(sequence))*100\n gc_count_dict[sequence.id] = gc_count\n\n\n return gc_count_dict", "def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit", "def _ratio(sim: xr.DataArray, ref: xr.DataArray) -> 
xr.DataArray:\n out = sim / ref\n out.attrs[\"units\"] = \"\"\n return out", "def calc_allocation_fraction(gen_pm_fuel, drop_interim_cols=True):\n # break out the table into these four different generator types.\n no_pm_mask = gen_pm_fuel.net_generation_mwh_fuel_missing_pm.notnull()\n no_pm = gen_pm_fuel[no_pm_mask]\n all_gen = gen_pm_fuel.loc[gen_pm_fuel.in_g_tbl_all & ~no_pm_mask]\n some_gen = gen_pm_fuel.loc[\n gen_pm_fuel.in_g_tbl_any & ~gen_pm_fuel.in_g_tbl_all &\n ~no_pm_mask]\n gf_only = gen_pm_fuel.loc[~gen_pm_fuel.in_g_tbl_any & ~no_pm_mask]\n\n logger.info(\"Ratio calc types: \\n\"\n f\" All gens w/in generation table: {len(all_gen)}#, {all_gen.capacity_mw.sum():.2} MW\\n\"\n f\" Some gens w/in generation table: {len(some_gen)}#, {some_gen.capacity_mw.sum():.2} MW\\n\"\n f\" No gens w/in generation table: {len(gf_only)}#, {gf_only.capacity_mw.sum():.2} MW\\n\"\n f\" GF table records have no PM: {len(no_pm)}#\")\n if len(gen_pm_fuel) != len(all_gen) + len(some_gen) + len(gf_only) + len(no_pm):\n raise AssertionError(\n 'Error in splitting the gens between records showing up fully, '\n 'partially, or not at all in the generation table.'\n )\n\n # In the case where we have all of teh generation from the generation\n # table, we still allocate, because the generation reported in these two\n # tables don't always match perfectly\n all_gen = all_gen.assign(\n frac_net_gen=lambda x: x.net_generation_mwh_g_tbl /\n x.net_generation_mwh_g_tbl_pm_fuel,\n frac=lambda x: x.frac_net_gen)\n # _ = _test_frac(all_gen)\n\n # a brief explaination of the equations below\n # input definitions:\n # ng == net generation from the generation table (by generator)\n # ngf == net generation from the generation fuel table (summed by PM/Fuel)\n # ngt == total net generation from the generation table (summed by PM/Fuel)\n #\n # y = ngt / ngf (fraction of generation reporting in the generation table)\n # z = ng * ngt (fraction of generation from generation table by generator)\n # g = y * z (fraction of generation reporting in generation table by generator - frac_gen)\n\n some_gen = some_gen.assign(\n # fraction of the generation that should go to the generators that\n # report in the generation table\n frac_from_g_tbl=lambda x:\n x.net_generation_mwh_g_tbl_pm_fuel / x.net_generation_mwh_gf_tbl,\n # for records within these mix groups that do have net gen in the\n # generation table..\n frac_net_gen=lambda x:\n x.net_generation_mwh_g_tbl / # generator based net gen from gen table\n x.net_generation_mwh_g_tbl_pm_fuel,\n frac_gen=lambda x:\n x.frac_net_gen * x.frac_from_g_tbl,\n\n # fraction of generation that does not show up in the generation table\n frac_missing_from_g_tbl=lambda x:\n 1 - x.frac_from_g_tbl,\n capacity_mw_missing_from_g_tbl=lambda x: np.where(\n x.in_g_tbl, 0, x.capacity_mw),\n frac_cap=lambda x:\n x.frac_missing_from_g_tbl * \\\n (x.capacity_mw_missing_from_g_tbl / x.capacity_mw_in_g_tbl_group),\n\n # the real deal\n # this could aslo be `x.frac_gen + x.frac_cap` because the frac_gen\n # should be 0 for any generator that does not have net gen in the g_tbl\n # and frac_cap should be 0 for any generator that has net gen in the\n # g_tbl.\n frac=lambda x: np.where(\n x.in_g_tbl,\n x.frac_gen,\n x.frac_cap)\n )\n # _ = _test_frac(some_gen)\n\n # Calculate what fraction of the total capacity is associated with each of\n # the generators in the grouping.\n gf_only = gf_only.assign(\n frac_cap=lambda x: x.capacity_mw / x.capacity_mw_pm_fuel,\n frac=lambda x: x.frac_cap)\n # _ = 
_test_frac(gf_only)\n\n no_pm = no_pm.assign(\n # ratio for the records with a missing prime mover that are\n # assocaited at the plant fuel level\n frac_net_gen_fuel=lambda x:\n x.net_generation_mwh_gf_tbl\n / x.net_generation_mwh_g_tbl_fuel,\n frac_cap_fuel=lambda x:\n x.capacity_mw / x.capacity_mw_fuel,\n frac=lambda x: np.where(\n x.frac_net_gen_fuel.notnull() | x.frac_net_gen_fuel != 0,\n x.frac_net_gen_fuel, x.frac_cap_fuel)\n )\n\n # squish all of these methods back together.\n gen_pm_fuel_ratio = pd.concat([all_gen, some_gen, gf_only, no_pm])\n # null out the inf's\n gen_pm_fuel_ratio.loc[abs(gen_pm_fuel_ratio.frac) == np.inf] = np.NaN\n _ = _test_frac(gen_pm_fuel_ratio)\n\n # drop all of the columns we needed to get to the `frac` column\n if drop_interim_cols:\n gen_pm_fuel_ratio = gen_pm_fuel_ratio[\n IDX_PM_FUEL +\n ['generator_id', 'energy_source_code_num', 'frac',\n 'net_generation_mwh_gf_tbl', 'net_generation_mwh_g_tbl',\n 'capacity_mw', 'fuel_consumed_mmbtu']]\n return gen_pm_fuel_ratio", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0" ]
[ "0.6823025", "0.6695999", "0.6404715", "0.6330023", "0.6317178", "0.6289386", "0.6241849", "0.6231716", "0.6224794", "0.6191345", "0.603549", "0.6034941", "0.6000487", "0.5982671", "0.597742", "0.58961", "0.5885077", "0.58844954", "0.5851501", "0.58212274", "0.57130134", "0.56896114", "0.5688394", "0.56398714", "0.5628468", "0.56117743", "0.55987334", "0.5561946", "0.55583835", "0.5534555" ]
0.799837
0
GC content in a DNA/RNA subsequence of length k, in overlapping windows of length k.
def gc_content_sequence_window(sequence, as_overlap=False, k=20):
    # make sequence upper case and get the length of it
    sequence, seq_len = sequence.upper(), len(sequence)
    # the array-like object to collect the data
    gc_content = []
    # non overlap sequence length
    non_overlap = range(0, len(sequence) - k + 1, k)
    # overlap sequence length
    overlap = range(0, seq_len - k + 1)
    # overlap is needed
    if as_overlap:
        # iterates over the overlap region
        for i in overlap:
            # creates the substring to count the gc_content
            subseq = sequence[i:i + k]
            # count and sum up the Gs and Cs counts
            g_c = subseq.count('C') + subseq.count('G')
            # collect the data in the array container
            gc_content.append(round(g_c / len(subseq), 4) * 100)
    # if non overlap is chosen
    else:
        # iterates over the non overlap region
        for j in non_overlap:
            # creates the substring to count the gc_content
            subseq = sequence[j:j + k]
            # count and sum up the Gs and Cs counts
            g_c = subseq.count('C') + subseq.count('G')
            # collect the data in the array container
            gc_content.append(round(g_c / len(subseq), 4) * 100)
    return gc_content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gc_content_along_the_chain(dna_sequence, window_size):\n sub_sequences = extract_sub_sequences(dna_sequence, window_size)\n gc_results = []\n for sub_sequence in sub_sequences:\n gc_results.append(gc_content(sub_sequence))\n return gc_results", "def gc_var(sequence, as_overlap=False, k=20):\n # calculates the percent of gc content\n gc = get_gc_content(sequence) * 100\n # get the gc content in the window space as an array\n gc_i = np.array(gc_content_sequence_window(sequence, as_overlap, k=k))\n # get the len of the gc content in the window space\n len_gc_i = np.shape(gc_i)[0]\n # check the difference of each point \n dif = gc_i - gc\n return np.log((1 / len_gc_i) * sum(abs(dif)))", "def GC_Content(self):\n GC_content = lambda dna: (dna.count('G')+dna.count('C'))\\\n /self.length\n return round(GC_content(self.sequence),4)", "def gc_content(seq):\n return round( (seq.count('C') + seq.count('G')) / len(seq) * 100 , 6 )", "def gc_blocks(seq, block_size):\n\n # Make all capital\n seq = seq.upper()\n iterations = len(seq) // block_size\n\n # Iterate through finding the GC content\n gc = []\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n gc.append((block.count('G') + block.count('C')) / block_size)\n return tuple(gc)", "def get_gc_content(sequence):\n # get the sequence length and \n # make all the sequence characters upper case\n seq_len, sequence = len(sequence), sequence.upper()\n # count all gs and cs\n c = sequence.count('C')\n g = sequence.count('G')\n # returns the gc content from a sequence\n # sum up the |Gs and Cs counts and divide \n # by the sequence length\n return round((c + g) / seq_len, 4)", "def _iRep_gc_content(seq, window = 5000, slide = 100):\n # convert GC\n replacements = {'G':1, 'C':1, 'A':0, 'T':0, 'N':0}\n GC = [] # G - C\n for base in seq:\n try:\n GC.append(replacements[base.upper()])\n except:\n GC.append(0)\n # calculate gc content over sliding windows\n i = 0\n weights = np.ones(window)\n table = defaultdict(list)\n for gc in scipy.signal.fftconvolve(GC, weights, 'valid').tolist()[0::slide]:\n table['index'].append(i)\n table['GC_content'].append(gc/window)\n i += slide\n return pd.DataFrame(table)", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def get_gc_content(sequence):\n len_seq = len(sequence) - sum(alternative_bases_counter(sequence).values())\n sequence = sequence.upper()\n c = sequence.count('C')\n g = sequence.count('G')\n return round((c + g) / len_seq, 4)", "def get_gc_content(cst, nmsk, segs):\n assert isinstance(cst, ChromStruct)\n\n # load the reference chromosome\n # ref = fasta_array(cst.chrom, cst.refg_files)\n ref = fasta_array(cst.chrom, cst.ancs_files)\n\n # get the GC content at neutral sites for each segment\n gc = []\n for (start, end) in segs:\n cur_msk = nmsk[start:end]\n if not np.sum(cur_msk > 0):\n gc.append(0)\n else:\n cur_ref = ref[start:end]\n cur_neut = cur_ref[cur_msk > 0]\n gc_count = np.sum(np.in1d(cur_neut, ['C', 'G']))\n gc_fract = 1.0 * gc_count / len(cur_neut)\n gc.append(gc_fract)\n\n return np.array(gc)", "def kmer_count(self,size):\n if size == 1:\n return ['A','T','C','G']\n else:\n result = []\n for seq in Analyze_DNA_Sequence.kmer_count(self,size-1):\n for base in ['A','T','C','G']:\n result.append(seq+base)\n return result", "def gc_map(seq, block_size, 
gc_thresh):\n\n # Get the GC content for each block\n gc_cont = gc_blocks(seq, block_size)\n\n # Iterate through the sequence adding the appropriate cased block_size\n new_seq = ''\n iterations = len(seq) // block_size\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n if gc_cont[i] >= gc_thresh:\n new_seq += block.upper()\n else:\n new_seq += block.lower()\n return new_seq", "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def compute_gc(seq): # seq should be a string\n num_GC = list(seq).count('g')+list(seq).count('c')+list(seq).count('G')+list(seq).count('C')\n amount_GC = num_GC/len(seq)\n return amount_GC", "def count_subsequence_in_sliding_window(kmin, kmax, sequence):\n if isinstance(sequence, str):\n for n in range(kmin, kmax + 1):\n for sub in zip(*(deque(itertools.islice(it, i), 0) or\n it for i, it in enumerate(itertools.tee(sequence,\n n)))):\n yield ''.join(sub)", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def next_rgs(seq, n, k):\n # b[i] = max(seq[i - 1], b[0], ..., b[i - 1]) = max(seq[i - 1], b[i - 1])\n # All restricted growth sequences start with 0\n b = [0]\n result = seq[:]\n for i in range(1, n):\n b.append(max(seq[i - 1], b[i - 1]))\n # Find the earliest index when previous and next sequence are diverging\n for j in range(n - 1, 0, -1):\n if seq[j] + 1 > k:\n continue\n if seq[j] > b[j]:\n continue\n break\n # Create components of new result\n # prefix - maximal common prefix of original and new sequence\n prefix = seq[:j]\n # incremented - the value at j-th place that was incremented\n incremented = seq[j] + 1\n # suffix_length - how many nonzero numbers should we put at the end\n # of new sequence to make it restricted-growing\n # and to have all numbers 0..(k-1) in it.\n suffix_length = k - max(b[j], incremented)\n zeroes = [0] * (n - j - suffix_length - 1)\n suffix = list(range(k - suffix_length + 1, k + 1))\n # Construct new sequence\n result = prefix + [incremented] + zeroes + suffix\n return result", "def kmer(text, i, k):\r\n return text[i:(i+k)]", "def kmer(text, i, k):\r\n return text[i:(i+k)]", "def get_gc_sliding(self, window=500):\n\n gc_res = []\n\n # Get contigID for each window position\n labels, xbars = self._get_window_labels(window)\n\n # Get complete sequence to calculate sliding window values\n complete_seq = \"\".join(self.contigs.values()).lower()\n\n for p, i in enumerate(range(0, len(complete_seq), window)):\n\n seq_window = complete_seq[i:i + window]\n\n # Get GC proportion\n 
gc_res.append(self._gc_prop(seq_window, len(seq_window)))\n\n return gc_res, labels, xbars", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n yield sequence[i:i+kmer_size]", "def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100", "def gc(self):\n g = self.seq.count('G')\n g += self.seq.count('g')\n c = self.seq.count('C')\n c += self.seq.count('c')\n return (g + c) / len(self.seq)", "def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + sc['C'] + sc['G'] + sc['T']) * 100, 2)", "def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))", "def build_kmers(\n sequence, \n ksize):\n\n kmers = list()\n n_kmers = len(sequence) - ksize + 1\n # Loop to store khmers in each sequence\n for i in range(n_kmers):\n kmer = sequence[i:i + ksize]\n kmers.append(kmer)\n \n return kmers, n_kmers\n\n # It is an example that needs to say the size of Kmer you would like.", "def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''", "def find_GC_content(fasta_file_name):\n\twith open(fasta_file_name) as fasta:\n\t\tGC_content = {}\n\t\tfor line in fasta:\n\n\t\t\t# Each line (bar the last) ends with '\\n'\n\t\t\tloc_line = line.replace('\\n', '')\n\n\t\t\t# Finds '>' at opening of line (FASTA seq title)\n\t\t\tif re.match(r'^>', loc_line):\n\t\t\t\tGC_content[loc_line] = 0\n\t\t\t\tG_count = 0\n\t\t\t\tC_count = 0\n\t\t\t\tcount = 0\n\t\t\t\tcurrent = loc_line\n\t\t\telse:\n\t\t\t\tG_count += loc_line.count('G')\n\t\t\t\tC_count += loc_line.count('C')\n\t\t\t\tcount += len(loc_line)\n\t\t\t\tGC_content[current] = float((G_count + C_count)) / count\n\treturn GC_content", "def randomKmers(dna, k):\n kmers = []\n for seq in dna:\n n = len(seq)\n i = random.randint(0, n-k)\n kmer = seq[i:i+k]\n kmers.append( kmer)\n return kmers", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd" ]
[ "0.6752567", "0.64076644", "0.6161453", "0.6118112", "0.5984678", "0.58789796", "0.5851222", "0.5846533", "0.58085054", "0.57129765", "0.5636258", "0.5616689", "0.5599156", "0.557875", "0.5574082", "0.5573126", "0.5561715", "0.5556962", "0.5556962", "0.5536127", "0.5490504", "0.54763186", "0.54339343", "0.54249555", "0.54240215", "0.5398924", "0.5387752", "0.5344335", "0.53298104", "0.52887017" ]
0.74124485
0
Function to calculate the frequency of the codons in a sequence.
def codon_frequency(sequence, codon_table):
    # initialize the counter with the list of triplets from codon_table
    counter = Counter(dict([(c, 0) for c in codon_table]))
    # create a list/array of all possible codons found in the input sequence
    triplets = [sequence.upper()[i:i + 3] for i in range(0, len(sequence), 3)]
    # filters the triplets list from sequences that don't have length of 3 nucleotides
    triplets = filter(lambda x: len(x) == 3, triplets)
    # updates counter with the triplets counts and return it
    return counter + Counter(triplets)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def codon_frequency(seq, aminoacid):\n tmpList = []\n for i in range(0, len(seq) - 2, 3):\n if CodonTable[seq[i:i + 3]] == aminoacid:\n tmpList.append(seq[i:i + 3])\n\n freqDict = dict(Counter(tmpList))\n totalScore = sum(freqDict.values())\n for seq in freqDict:\n freqDict[seq] = round(freqDict[seq] / totalScore, 2)\n return freqDict", "def freq(self) -> int:", "def get_cod_freq(gene):\r\n header = gene.iloc[:,0].values[0].split(' ')\r\n geneID=header[0][1:]\r\n\r\n\r\n #get coding sequence\r\n cds = gene.iloc[:,1].values[0].upper().replace('T','U')\r\n codon_count=dict() \r\n \r\n #build dictionary to accumulate codon counts; ignore with stop codons\r\n for codon in list(codon_aa.keys()):\r\n if codon not in [ \"UAA\",\"UAG\", \"UGA\" ]:\r\n codon_count[codon]=0\r\n \r\n ##count codons in cds\r\n codons = []\r\n for c in range(0,len(cds),3): #O(len cds)\r\n cod=cds[c:c+3]\r\n try:\r\n codon_count[cod]+=1\r\n except KeyError:\r\n continue\r\n \r\n #store the fractional freq of each codon in the codon dictionary\r\n total_cod=len(cds)/3 #total number of codons in the cds\r\n for c in list(codon_count.keys()): #O(len codondict)\r\n codon_count[c]/=total_cod\r\n \r\n df_codcnt=pd.DataFrame(list(codon_count.items()) )\r\n df_codcnt.columns=['Codon', 'Fractional_Freq']\r\n df_codcnt=df_codcnt.set_index('Codon').T.reset_index(drop=True)\r\n \r\n df_codcnt['GeneID']=geneID\r\n\t#reorder columns\r\n cols2=[df_codcnt.columns[-1]]+sorted(df_codcnt.columns[:61])\r\n df_codcnt=df_codcnt[cols2]\r\n return df_codcnt", "def codon_bgfreq(codon_seqs, data_mm):\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts", "def freq():", "def codon_counts(self):\n # Removing 5' UTR and 3' UTR sequences\n sequence = self.sequence.replace(self.five_prime_utr_sequence, \"\").replace(self.three_prime_utr_sequence, \"\")\n return len(sequence) / 3", "def codonfreqs_kmerdf(kmertable): \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! 
hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)", "def count_codon(self, codon):\n return sum([1 for c in self if c == codon])", "def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result", "def gc_frequency(self):\n result = str(self.seq).count(\"G\") + str(self.seq).count(\"C\")\n return result", "def codon_usage(self):\n codons_dict = CodonUsage.CodonsDict.copy()\n codons = [str(self.sequence[i:i+3]) for i in range(0, len(self.sequence), 3)]\n for codon in codons:\n codons_dict[codon] += 1\n return codons_dict", "def frequency(self):\n return self._pca.frequency", "def get_frequency_dict(sequence):\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x, 0) + 1\n return freq", "def transitionfreq(molecule, transition) :\n return s.transitionFreq(molecule, transition)", "def freq(self, value: int, /) -> None:", "def cnt_freq(filename):\n freq = [0] * 256\n try:\n f_in = open(filename,'r')\n except:\n raise FileNotFoundError\n for line in f_in:\n for char in line:\n freq[ord(char)] = freq[ord(char)] + 1\n f_in.close()\n return freq", "def count(seq):\n\treturn sum(1 for x in seq)", "def cnt_freq(filename):\r\n counts = [0] * 256\r\n fp_file = open(filename, \"r\")\r\n lines = fp_file.readlines()\r\n for i in lines:\r\n for j in i: # count every letter in the line\r\n counts[ord(j)] += 1 # counts[ord(j)] + 1\r\n counts[0] = 1\r\n fp_file.close()\r\n return counts", "def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x,0) + 1\n return freq", "def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x,0) + 1\n return freq", "def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x,0) + 1\n return freq", "def frequencies(seq):\n d = dict()\n for item in seq:\n try:\n d[item] += 1\n except KeyError:\n d[item] = 1\n return d", "def Counting(seq):\n\n #Scan the sequence, looking for motifs\n\n counting = {k: 0 for k in MOT} # Initialize the counting dictionary.\n # Scan all the motifs and find them in the sequence\n for motif in MOT:\n if len(seq) > len(motif): # Check if the sequence is longer than the motif itself.\n for i in range(len(seq)-len(motif)+1):\n if i == 0: # In case the motif is in the beginning of the sequence\n # print(\"start: \" + seq[i:i+len(motif)] + \" next nuc: \" + seq[i+len(motif)])\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0]: # Check if the next nucleotide is in not part of the motif.\n counting[motif] += 1\n elif i == len(seq)-len(motif): # In case the motif is in the end of the sequence\n \n if seq[i:i+len(motif)] == motif and seq[i-1] != motif[0]: # Check if the previuos nucleotide is in not part of the motif.\n counting[motif] += 1\n elif len(seq) > len(motif)+1: # In case the motif is in the middle of the sequence.\n # Check if the motif is not part of another motif (e.g. 
TT is in TTT).\n\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0] and seq[i-1] != motif[0]:\n counting[motif] += 1\n for nuc_nr in NUC_NR:\n counting[nuc_nr+\"_NR\"] = seq.count(nuc_nr)\n\n return counting", "def base_frequencies(seq):\n\n # Get the length of the sequence\n sequence_len = len(seq)\n\n # Initialize base frequencies\n base_frequencies = {\n 'A': 0,\n 'C': 0,\n 'T': 0,\n 'G': 0\n }\n\n # Count bases\n for base in seq:\n base_frequencies[base] += 1\n\n # Normalize count\n for base in base_frequencies:\n base_frequencies[base] = base_frequencies[base]/sequence_len\n\n return base_frequencies", "def GetFrequency(self):\n ...", "def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency", "def getFrequencyDict(sequence: Union[str, list]) -> d_si:\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x,0) + 1\n return freq", "def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def countFreq(self,document):\n self.document = document\n vocab=['python','js','android','php','django','javascript','oracle','ruby','rails','java']\n cnt_vector = CountVectorizer(vocabulary=vocab)\n self.freq_term_matrix = cnt_vector.fit_transform(self.document)\n return self.freq_term_matrix.toarray()", "def seqfreqs(seqs):\n #if \"seqfreqs\" in options.debug:\n # print(\"There are {} seqs\".format(len(seqs)))\n x = []\n #this block calculates the frequencies of each sequence\n for i in range(len(seqs)):\n this_x = 0\n for j in range(len(seqs)):\n if str(seqs[i]) == str(seqs[j]):\n #if \"seqfreqs\" in options.debug:\n # print(\"{} == {}\".format(i, j))\n this_x += 1\n x.append(this_x/len(seqs))\n #print(\"done with these seqfreqs\\n\")\n #if \"seqfreqs\" in options.debug:\n # print(\"the frequencies are {}\".format(x))\n return x" ]
[ "0.7567119", "0.72417057", "0.71978146", "0.7188881", "0.7131045", "0.70670915", "0.6903608", "0.68454266", "0.66870964", "0.6682764", "0.661773", "0.6477869", "0.6368345", "0.63172907", "0.62968326", "0.62675095", "0.62490714", "0.62382305", "0.6193833", "0.6193833", "0.6193833", "0.6193301", "0.6171558", "0.6119968", "0.6097287", "0.60880035", "0.60540485", "0.60154337", "0.6005897", "0.59665006" ]
0.81066716
0
Calculates the gc content variance in a sequence according to a window of length k.
def gc_var(sequence, as_overlap=False, k=20):
    # calculates the percent of gc content
    gc = get_gc_content(sequence) * 100
    # get the gc content in the window space as an array
    gc_i = np.array(gc_content_sequence_window(sequence, as_overlap, k=k))
    # get the len of the gc content in the window space
    len_gc_i = np.shape(gc_i)[0]
    # check the difference of each point
    dif = gc_i - gc
    return np.log((1 / len_gc_i) * sum(abs(dif)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gc_content_sequence_window(sequence, as_overlap=False, k=20):\n # make sequence upper case and getting the length of it\n sequence, seq_len = sequence.upper(), len(sequence)\n # the array-like object to collect the data\n gc_content = []\n # non overlap sequence length\n non_overlap = range(0, len(sequence) - k + 1, k)\n # overlap sequence length\n overlap = range(0, seq_len - k + 1)\n # overlap is needed\n if as_overlap:\n # iterates to the overlap region\n for i in overlap:\n # creates the substring to count the gc_content\n subseq = sequence[i:i + k]\n # count and sum up the Gs and Cs counts\n g_c = subseq.count('C') + subseq.count('G')\n # collect the data in the array container\n gc_content.append(round(g_c / len(subseq), 4) * 100)\n # if non overlap is choosed\n else:\n # iterates to the mon overlap region\n for j in non_overlap:\n # creates the substring to count the gc_content\n subseq = sequence[j:j + k]\n # count and sum up the Gs and Cs counts\n g_c = subseq.count('C') + subseq.count('G')\n # collect the data in the array container\n gc_content.append(round(g_c / len(subseq), 4) * 100)\n return gc_content", "def recovered_variance_proportion(self, S, k): # [5pts]\n if S.ndim == 1:\n recovered_var = 0\n denom = np.sum(S**2)\n for i in range(k):\n recovered_var += ((S[i]**2)/denom) \n \n elif S.shape[1] == 3:\n recovered_var = []\n for col in range(S.shape[1]):\n var_temp = 0\n denom = np.sum(S[:,col]**2)\n for i in range(k):\n var_temp += (S[i,col]**2)/denom\n recovered_var.append(var_temp)\n \n return recovered_var", "def expected_jk_variance(K):\r\n\r\n kf = float(K)\r\n return ((1 / kf) * (kf - 1) / (kf - 0.5) *\r\n ((kf - 1) / (kf - 2)) ** 2 * (kf - 3) / (kf - 2))", "def mass_variance(self, logM, k = [], pk = [], var = 'cb', window = 'th', **kwargs):\n return self.mass_variance_multipoles(logM = logM, k = k, pk = pk, var = var, window = window, **kwargs)", "def get_gc_sliding(self, window=500):\n\n gc_res = []\n\n # Get contigID for each window position\n labels, xbars = self._get_window_labels(window)\n\n # Get complete sequence to calculate sliding window values\n complete_seq = \"\".join(self.contigs.values()).lower()\n\n for p, i in enumerate(range(0, len(complete_seq), window)):\n\n seq_window = complete_seq[i:i + window]\n\n # Get GC proportion\n gc_res.append(self._gc_prop(seq_window, len(seq_window)))\n\n return gc_res, labels, xbars", "def variance(self):\n return self.k * self.theta ** 2", "def dimension_reduction_np(X, k=10):\n cov = cov_generation(X)\n eig, eigv = np.linalg.eig(cov)\n sort_args = np.argsort(np.abs(eig))[::-1]\n projection_matrix = np.real(eigv[sort_args][:, :k])\n reduce_x = np.dot(X, projection_matrix)\n\n return projection_matrix, reduce_x", "def get_cumulative_explained_variances(self):\n tot = sum(self.eigen_vals)\n var_exp = [(i / tot) for i in sorted(self.eigen_vals, reverse=True)]\n selected_var_exp = var_exp[:self.n_components]\n \n return np.cumsum(selected_var_exp)", "def dimension_reduction(X, k=10):\n cov = cov_generation(X)\n eig, eigv, _, _ = jacobi_loop(cov)\n sort_args = np.argsort(np.abs(eig))[::-1]\n projection_matrix = eigv[sort_args][:, :k]\n reduce_x = np.dot(X, projection_matrix)\n \n return projection_matrix, reduce_x", "def _get_cum_variance(self) -> np.ndarray:\n return np.cumsum(self.pca.explained_variance_ratio_)", "def dcg_at_k(cls, r, k):\n assert k >= 1\n r = np.asfarray(r)[:k]\n if r.size:\n return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n return 0.", "def conditional_variance(self, gp):\n raise 
NotImplementedError", "def rand_k(self, k):\n\n k_N = self.prior.k_0 + self.counts[k]\n v_N = self.prior.v_0 + self.counts[k]\n m_N = self.m_N_numerators[k]/k_N\n S_N = self.S_N_partials[k] - k_N*np.square(m_N)\n\n mean = np.zeros(self.D)\n var = np.zeros(self.D)\n\n for i in range(self.D):\n var[i] = invchisquared_sample(v_N, S_N[i]/v_N, 1)[0]\n mean[i] = np.random.normal(m_N[i], np.sqrt(var[i]/k_N))\n\n return mean, var", "def lac(X, conts, k, nsteps=30, window_size=1):\n dim = len(conts)\n\n #np.random.seed(42)\n # Initialize parameters\n priors = np.ones(k) / k\n\n\n import sys; sys.stdout.flush()\n if X is not None:\n means, covars = initialize_sample_kmeans(X, k)\n else:\n means, covars = initialize_random(conts, k)\n\n #means, covars = initialize_kmeans(conts, k)\n\n w = [np.empty((k, len(c[0]),)) for c in conts]\n\n\n for i in range(1, nsteps + 1):\n for l, (c, cw) in enumerate(conts):\n lower = l - window_size if l - window_size >= 0 else None\n upper = l + window_size + 1 if l + window_size + 1 <= dim else None\n dims = slice(lower, upper)\n active_dim = min(l, window_size)\n\n x = c\n\n # E step\n for j in range(k):\n if any(np.abs(covars[j, dims]) < 1e-15):\n assert False, 'covars should be fixed'\n\n det = covars[j, dims].prod()\n inv_covars = 1. / covars[j, dims]\n xn = x - means[j, dims]\n factor = (2.0 * np.pi) ** (x.shape[1]/ 2.0) * det ** 0.5\n w[l][j] = priors[j] * np.exp(np.sum(xn * inv_covars * xn, axis=1) * -.5) / factor\n wsum = w[l].sum(axis=0)\n wsum[wsum == 0] = 1.\n w[l] /= wsum\n\n # M step\n n = np.sum(w[l], axis=1)\n priors = n / np.sum(n)\n for j in range(k):\n if n[j]:\n mu = np.dot(w[l][j, :] * cw, x[:, active_dim]) / (w[l][j, :] * cw).sum()\n\n xn = x[:, active_dim] - mu\n sigma = np.sum(xn ** 2 * w[l][j] * cw, axis=0) / (w[l][j, :] * cw).sum()\n sigma = sigma if sigma > 1e-3 else 1e-3\n\n if np.isnan(mu).any() or np.isnan(sigma).any():\n return w, means, covars, priors\n else:\n mu = means[j, l]\n sigma = MIN_COVARIANCE\n means[j, l] = mu\n covars[j, l] = sigma\n\n # w = np.zeros((k, m))\n # for j in range(k):\n # if active[j]:\n # det = covars[j].prod()\n # inv_covars = 1. / covars[j]\n # xn = X - means[j]\n # factor = (2.0 * np.pi) ** (xn.shape[1] / 2.0) * det ** 0.5\n # w[j] = priors[j] * exp(-.5 * np.sum(xn * inv_covars * xn, axis=1)) / factor\n # w[active] /= w[active].sum(axis=0)\n\n return w, means, covars, priors", "def findMaximumSubarraySlidingWindow(self, k, nums):\n window_start, window_sum, window_max= 0, 0, 0\n for i in range(len(nums)):\n window_sum += nums[i] #add the next element\n # slide the window, we don't need to slide if we have not hit the required window size of K\n if i >= k-1:\n window_max = max(window_sum, window_max) # calculate the maximum sum\n window_sum -= nums[window_start] #substract the element going out\n window_start += 1 #slide the window ahead\n return window_max", "def get_var(df=songs_df):\n n_years = len(years)\n n_songs = len(songs_df['page'])\n variances = np.zeros((n_songs, n_songs))\n annual_diffs = np.zeros((n_songs, n_songs, n_years))\n\n # Figure out how to just get upper/lower triangle rather than populating w dups\n for s1 in range(n_songs):\n for s2 in range(n_songs):\n s1_ranks = songs_df['ranks'][s1]\n s2_ranks = songs_df['ranks'][s2]\n\n # Set up an offset/normalizer so that we're just looking at\n # functional form, not call totals. 
Maybe do this as a frac instead.\n offset = s1_ranks[0] - s2_ranks[0]\n\n annual_difference = [s1_ranks[year] - s2_ranks[year] - offset for year in range(n_years)]\n variance = sum( (annual_difference - np.mean(annual_difference))**2)/float(n_years)\n\n variances[s1][s2] = variance\n annual_diffs[s1][s2] = annual_difference\n\n\n mask = np.zeros_like(variances)\n mask[np.triu_indices_from(mask)] = True\n corr_matrix=variances.corr()\n\n sns.heatmap(variances, mask=mask) #, vmin=510, vmax=530)\n plt.show()\n return variances", "def precision_at_k(r, k):\n assert k >= 1\n r = np.asarray(r)[:k] != 0\n if r.size != k:\n raise ValueError('Relevance score length < k')\n return np.mean(r)", "def calc_variances(ds):\n if ds.size <= 1:\n print 'Fail: not enough items for calculation %d' % ds.size\n return 0,1\n obs_var = ((ds.storage - ds.storage.sum()/ds.size)**2).sum()/(ds.size-1)\n rep_var = ds.var.sum()/ds.size\n return obs_var,rep_var", "def _compute_variance(params):\n batch_grad = self._fetch_batch_grad(params, aggregate=True)\n grad = self._fetch_grad(params, aggregate=True)\n batch_size = batch_grad.size(0)\n\n if self._use_double:\n batch_grad = batch_grad.double()\n grad = grad.double()\n\n return (1 / (batch_size - 1)) * ((batch_size * batch_grad - grad) ** 2).sum(\n 0\n )", "def precision_at_k(r, k):\n assert k >= 1\n r = np.asarray(r)[:k] != 0\n if r.size != k:\n raise ValueError('Relevance score length < k')\n return np.mean(r)", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def _variance(self,gp):\r\n return self.variance", "def shrunk_covariance(self, delta=0.2):\n self.delta = delta\n N = self.S.shape[1]\n # Shrinkage target\n mu = np.trace(self.S) / N\n F = np.identity(N) * mu\n # Shrinkage\n shrunk_cov = delta * F + (1 - delta) * self.S\n return self.format_and_annualise(shrunk_cov)", "def reconstructions_variance(self):\n self.assert_sampled()\n return [[j.variance().numpy() for j in i] for i in self._reconstructions]", "def stat_variance(echantillon):\n \n n=len(echantillon) #CG# size\n mq=stat_moyenne(echantillon)**2\n s=sum([x**2 for x in echantillon])\n variance=s/n-mq\n return variance", "def recalculate_emission(self, i, k, corpus):\n num = sum(sum(self.gamma(i, t, O) for t in xrange(len(O)) if O[t] == k) for O in corpus)\n denom = sum(sum(self.gamma(i,t, O) for t in xrange(len(O))) for O in corpus)\n\n return num / denom", "def get_kNN_measure(curr_vec, pop_vecs, archive_vecs, k):\n\n\t# vectorize this computation to do it all at once\n\t# each row of pop_vecs should be an output/phenotype\n\tpop_vec_dist = get_euclid_dist(curr_vec, pop_vecs)\n\tif not archive_vecs.size == 0:\t\n\t\tarchive_vec_dist = get_euclid_dist(curr_vec, archive_vecs)\n\t\t# sort the distances and take the top k for average\n\t\tall_dist = sorted(np.concatenate((pop_vec_dist, archive_vec_dist)))\n\telse:\n\t\tall_dist = sorted(pop_vec_dist)\n\n\t# return average of the k smallest distances\n\treturn (np.mean(all_dist[:k]),)", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd", "def for_loop(self, S, K):\n if len(S) < K:\n return 0\n i, count = 0, 0\n window_set = set()\n for j in range(len(S)):\n if S[j] in window_set:\n while i < j and S[j] in window_set:\n window_set.remove(S[i])\n i += 1\n window_set.add(S[j])\n if len(window_set) == K and j - i + 1 == K:\n window_set.remove(S[i])\n i, count = i + 1, count + 
1\n j += 1\n return count", "def kday_moving_average(x, k):\n if not isinstance(k, int):\n raise ValueError('k must be int.')\n # temp = np.append(np.zeros(k - 1), x)\n temp = np.append(np.nan*np.ones(k-1), x)\n y = np.convolve(temp, np.ones(k, dtype=int), 'valid') / k\n return y" ]
[ "0.63860196", "0.6235115", "0.60946256", "0.57806844", "0.57092375", "0.5630119", "0.5332496", "0.5323007", "0.5286635", "0.5262322", "0.52159446", "0.5187272", "0.5152176", "0.5151929", "0.5133126", "0.5115792", "0.5113375", "0.50864863", "0.5030746", "0.5030157", "0.50121015", "0.500299", "0.4997339", "0.49660826", "0.49656978", "0.49589023", "0.4931923", "0.49175656", "0.49069056", "0.48923984" ]
0.755605
0
Returns the complement strand of the genome.
def get_strand_complement(sequence): # make the sequence upper case seq = sequence.upper() # table to change the complement characters change = str.maketrans('ACGT', 'TGCA') return seq.translate(change)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_complement(nucleotide):\n if nucleotide=='A':\n \treturn 'T'\n if nucleotide=='C':\n \treturn 'G'\n if nucleotide=='T':\n \treturn 'A'\n if nucleotide=='G':\n \treturn 'C'", "def get_strand_complement(sequence):\n seq = sequence.upper()\n change = str.maketrans('ACGT', 'TGCA')\n return seq.translate(change)", "def get_complement(nucleotide):\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'T':\n return 'A'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'", "def get_complement(nucleotide):\n\n if nucleotide == 'T':\n return 'A'\n elif nucleotide == 'A':\n return 'T'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'", "def complement_strand(dna):\n reverse_complement = \"\"\n\n for character in dna[::-1]:\n if character == \"A\":\n reverse_complement += \"T\"\n elif character == \"T\":\n reverse_complement += \"A\"\n elif character == \"C\":\n reverse_complement += \"G\"\n elif character == \"G\":\n reverse_complement += \"C\"\n\n return reverse_complement", "def get_complement(nucleotide):\n #if statements change nucleotide inputs to their complementary nucleotide\n if nucleotide == \"A\":\n return \"T\"\n if nucleotide == \"T\":\n return \"A\" \n if nucleotide == \"C\":\n return \"G\"\n if nucleotide == \"G\":\n return \"C\"", "def get_complement(nucleotide):\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'\n elif nucleotide == 'T':\n return 'A'\n else:\n return None", "def get_reverse_complement(sequence):\n return get_strand_complement(sequence)[::-1]", "def get_complement(c):\n if c == 'A':\n return 'T'\n if c == 'C':\n return 'G'\n if c == 'G':\n return 'C'\n if c == 'T':\n return 'A'", "def get_complement(nucleotide):\n\t# TODO: implement this\n\tletter = str(nucleotide)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# set letter = parameter (make sure it's a string)\n\tif letter == 'A':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is A\n\t\treturn 'T'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return T\n\telif letter == 'T':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is T\n\t\treturn 'A'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return A\n\telif letter == 'G':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is G\n\t\treturn 'C'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return C\n\telif letter == 'C':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is C\n\t\treturn 'G'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return G\n\telse:\n\t\treturn None", "def get_reverse_complement(sequence):\n seq = sequence.upper()\n return get_strand_complement(seq)[::-1]", "def get_reverse_complement(dna):\r\n\r\n seq = Seq(dna)\r\n rev_seq = seq.reverse_complement()\r\n return str(rev_seq)", "def reverse_complement_strand(dna):\n reverse_complement = \"\"\n\n for character in dna[::-1]:\n if character == \"A\":\n reverse_complement += \"T\"\n elif character == \"T\":\n reverse_complement += \"A\"\n elif character == \"C\":\n reverse_complement += \"G\"\n elif character == \"G\":\n reverse_complement += \"C\"\n\n return reverse_complement", "def get_reverse_complement(dna):\n dna2 = get_complement(dna)\n dna3 = dna2[::-1]\n return str(dna3)", "def reverse_complement_strand(dna):\n assert (is_dna(dna))\n return ''.join(_rev_mapping[nn] for nn in dna[::-1])", "def get_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, [ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ] ))", "def 
get_reverse_complement(dna):\n l = [get_complement(char) for char in dna[::-1]]\n return ''.join(l)", "def get_complement(nucleotide):\n\n nucDict={'A':'T','G':'C','T':'A','C':'G'}\n return(nucDict[nucleotide])", "def get_complement(nucleotide): # This one works\n nuc = list(nucleotide)\n count = 0\n complement = ''\n for element in nuc:\n if element == 'A':\n nuc[count] = 'T'\n elif element == 'T':\n nuc[count] = 'A'\n elif element == 'C':\n nuc[count] = 'G'\n elif element == 'G':\n nuc[count] = 'C'\n complement = complement + nuc[count]\n count = count + 1\n return complement", "def complement(seq):\n complement_dict = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}\n seq_list = list(seq)\n seq_list = [complement_dict[base] for base in seq_list]\n return ''.join(seq_list)", "def get_reverse_complement(dna):\n res = \"\";\n for c in dna:\n if c == 'A':\n res = 'T' + res\n elif c == 'T':\n res = 'A' + res\n elif c == 'G':\n res = 'C' + res\n elif c == 'C':\n res = 'G' + res\n return res", "def get_reverse_complement(cls, pattern: str) -> str:\n return ''.join(reversed([cls.dna_complement[nuc] for nuc in pattern]))", "def complement_RNA(RNAsequence):\n complement = \"\"\n for nucleotide in RNAsequence:\n if nucleotide == \"A\":\n complement += \"U\"\n if nucleotide == \"C\":\n complement += \"G\"\n if nucleotide == \"G\":\n complement += \"C\"\n if nucleotide == \"U\":\n complement += \"A\"\n return complement", "def complement(seq):\n if PY3:\n table = str.maketrans('ACTGNactg', 'TGACNtgac')\n elif PY2:\n table = string.maketrans('ACTGNactg', 'TGACNtgac')\n return str(seq).translate(table)", "def get_reverse_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, reversed([ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ])))", "def get_complement(sequence):\n #Convert all rna_sequence to upper case:\n sequence=sequence.upper()\n # Conver RNA sequence into a list\n rna_list=list(sequence)\n #Create an empty list to store complement sequence:\n comlement_sequence=[]\n #Complement code corresponsing for all RNA bases\n complement= {'A' : 'U', 'C' : 'G', 'G': 'C', 'U': 'A'}\n # Looping through all the bases in RNA seq. to convert to its complement seq using dictionary values.\n for i in rna_list:\n comlement_sequence.append(complement[i])\n return ''.join(comlement_sequence)", "def get_reverse_complement(dna):\n reversed_dna_string = ''\n dna_reverse = dna[::-1]\n for i in range(len(dna)):\n dna_nucleotide = dna_reverse[i]\n reversed_dna_string += get_complement(dna_nucleotide)\n return reversed_dna_string", "def complement(self):\n comp = self.__class__(self.name, complement(self.seq),\n start=self.start, end=self.end)\n comp.comp = False if self.comp else True\n return comp", "def get_reverse_complement(dna):\n \n dna = dna.replace('T','N')\n dna = dna.replace('A','T')\n dna = dna.replace('N','A')\n dna = dna.replace('C','N')\n dna = dna.replace('G','C')\n dna = dna.replace('N','G')\n dna = dna[::-1]\n return dna", "def find_complement(num):\n pass" ]
[ "0.71287143", "0.7112783", "0.7098898", "0.7065957", "0.70642453", "0.700329", "0.69763005", "0.68346596", "0.6797956", "0.673517", "0.6713025", "0.66734636", "0.6657223", "0.6595245", "0.6563522", "0.6555067", "0.6521378", "0.6405061", "0.63837147", "0.632016", "0.63072795", "0.6271428", "0.6208313", "0.62066215", "0.6152841", "0.6152722", "0.6115556", "0.6113569", "0.6061335", "0.60531074" ]
0.73389566
0
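For readability, a minimal self-contained sketch of the translation-table complement used in the record above; the input sequence and variable names are illustrative only and are not part of the dataset.

seq = "AAATCG"                          # made-up example input
table = str.maketrans("ACGT", "TGCA")   # map each base to its complementary base
print(seq.upper().translate(table))     # -> "TTTAGC"

str.maketrans builds the per-character mapping once, so translate complements the whole sequence in a single pass.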
Returns the reverse complement strand of the genome.
def get_reverse_complement(sequence): return get_strand_complement(sequence)[::-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reverse_complement_strand(dna):\n assert (is_dna(dna))\n return ''.join(_rev_mapping[nn] for nn in dna[::-1])", "def get_reverse_complement(sequence):\n seq = sequence.upper()\n return get_strand_complement(seq)[::-1]", "def get_reverse_complement(dna):\n dna2 = get_complement(dna)\n dna3 = dna2[::-1]\n return str(dna3)", "def get_reverse_complement(dna):\r\n\r\n seq = Seq(dna)\r\n rev_seq = seq.reverse_complement()\r\n return str(rev_seq)", "def get_reverse_complement(dna):\n l = [get_complement(char) for char in dna[::-1]]\n return ''.join(l)", "def get_reverse_complement(cls, pattern: str) -> str:\n return ''.join(reversed([cls.dna_complement[nuc] for nuc in pattern]))", "def reverse_complement_strand(dna):\n reverse_complement = \"\"\n\n for character in dna[::-1]:\n if character == \"A\":\n reverse_complement += \"T\"\n elif character == \"T\":\n reverse_complement += \"A\"\n elif character == \"C\":\n reverse_complement += \"G\"\n elif character == \"G\":\n reverse_complement += \"C\"\n\n return reverse_complement", "def get_reverse_complement(dna):\n reversed_dna_string = ''\n dna_reverse = dna[::-1]\n for i in range(len(dna)):\n dna_nucleotide = dna_reverse[i]\n reversed_dna_string += get_complement(dna_nucleotide)\n return reversed_dna_string", "def get_reverse_complement(dna):\n rdna = dna[::-1]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# reverses input\n\n rev_dna = \"\"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# initializes empty string\n index = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize index\n while index < len(rdna):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# while loop, ends at len(dna)-1\n \treverse_letter = get_complement(rdna[index])\t\t\t\t\t\t\t\t\t\t# gets the complement for the string\n \trev_dna = rev_dna + reverse_letter\t\t\t\t\t\t\t\t\t\t\t\t\t# adds the new letter to the string\n \tindex += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# indexes up 1\n return rev_dna \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns string", "def get_reverse_complement(dna):\n \n dna = dna.replace('T','N')\n dna = dna.replace('A','T')\n dna = dna.replace('N','A')\n dna = dna.replace('C','N')\n dna = dna.replace('G','C')\n dna = dna.replace('N','G')\n dna = dna[::-1]\n return dna", "def get_reverse_complement(dna):\n rev_comp = ''\n for i in range(0, len(dna)):\n nucleo = dna[i]\n comp = get_complement(nucleo)\n rev_comp = comp + rev_comp\n return rev_comp", "def complement_strand(dna):\n reverse_complement = \"\"\n\n for character in dna[::-1]:\n if character == \"A\":\n reverse_complement += \"T\"\n elif character == \"T\":\n reverse_complement += \"A\"\n elif character == \"C\":\n reverse_complement += \"G\"\n elif character == \"G\":\n reverse_complement += \"C\"\n\n return reverse_complement", "def get_reverse_complement(dna):\n L=dna\n rdna=L[::-1]\n print rdna\n newrdna=\"\"\n for i in range(0,len(rdna)):\n if rdna[i]=='A':\n newrdna='T'+newrdna\n elif rdna[i]=='G':\n newrdna='C'+newrdna\n elif rdna[i]=='T':\n newrdna='A'+newrdna\n elif rdna[i]=='C':\n newrdna='G'+newrdna\n S=newrdna\n P=S[::-1]\n return P", "def get_reverse_complement(dna):\n\n n = len(dna)\n\n i = 0\n\n reverse_dna = []\n\n for i in range(n):\n reverse_dna.append (get_complement(dna[n - 1 - i]))\n reverse_complement = ''.join(reverse_dna)\n return reverse_complement", "def get_reverse_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, reversed([ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ])))", "def reverse_complement(seq):\n 
return ''.join([BASE_TO_COMP[b] for b in seq][::-1])", "def get_reverse_complement(dna):\n res = \"\";\n for c in dna:\n if c == 'A':\n res = 'T' + res\n elif c == 'T':\n res = 'A' + res\n elif c == 'G':\n res = 'C' + res\n elif c == 'C':\n res = 'G' + res\n return res", "def get_strand_complement(sequence):\n # make the sequence upper case\n seq = sequence.upper()\n # table to change the complement characters\n change = str.maketrans('ACGT', 'TGCA')\n return seq.translate(change)", "def get_reverse_complement(dna):\n reverse=[None]*len(dna)\n for i in range(0,len(dna)):\n \treverse[len(dna)-i-1]=get_complement(dna[i])\n s=''\n reverse=s.join(reverse)\n return reverse", "def get_strand_complement(sequence):\n seq = sequence.upper()\n change = str.maketrans('ACGT', 'TGCA')\n return seq.translate(change)", "def reverseComplement(seq):\n seq=seq.upper()\n # complement\n compl = complement(seq)\n # reverse\n return compl[::-1]", "def get_reverse_complement(dna):\n reverseDNA = ''\n newDNA = ''\n for i in range(len(dna)): \n reverseDNA+= dna[-1-i]\n for k in range(len(dna)): \n if reverseDNA[k] == 'A': \n newDNA+='T'\n elif reverseDNA[k] =='T':\n newDNA+= 'A' \n elif reverseDNA[k] =='G':\n newDNA+= 'C'\n elif reverseDNA[k] =='C':\n newDNA+= 'G' \n return newDNA", "def reverse_complement_RNA(RNAsequence):\n complement = \"\"\n for nucleotide in RNAsequence:\n if nucleotide == \"A\":\n complement = \"U\" + complement\n if nucleotide == \"C\":\n complement = \"G\" + complement\n if nucleotide == \"G\":\n complement = \"C\" + complement\n if nucleotide == \"U\":\n complement = \"A\" + complement\n return complement", "def reverse_complement(seq):\n if sys.version_info.major == 2:\n conversion = string.maketrans('ACGTacgt','TGCAtgca')\n else:\n conversion = str.maketrans('ACGTacgt','TGCAtgca')\n\n comp = seq.translate(conversion)\n rev_comp = comp[::-1]\n return rev_comp", "def get_reverse_complement(dna):\n #set up initial empty strings for later use\n complement = \"\"\n reverse_complement = \"\"\n\n #for each element in the list of dna, get the complement and add it to an empty list\n for i in range(len(dna)):\n complement = complement + get_complement(dna[i]) \n\n #for each element in the list of complement, return the complement by calling the list in reverse\n for i in range(len(dna)):\n reverse_complement = reverse_complement + complement[len(dna)-1-i]\n\n return reverse_complement", "def get_reverse_complement(dna):\n dna=\"ATCG\"\n b = list(dna)\n b.reverse()\n \"\".join(b)\n\n for n in range(0, len(b), 1):\n if b[n] == \"A\":\n b[n] = \"T\"\n elif b[n] == \"C\":\n b[n] = \"G\"\n elif b[n] == \"T\":\n b[n] = \"A\"\n elif b[n] == \"G\":\n b[n] = \"C\"\n \n print \"\".join(b)", "def reverse_rna_complement(seq):\n\n seq_upper = seq.isupper()\n\n seq = seq[::-1]\n\n seq = seq.upper()\n\n #compute complement\n seq = seq.replace('A','u')\n seq = seq.replace('T','a')\n seq = seq.replace('G','c')\n seq = seq.replace('C','g')\n\n if seq_upper:\n return seq.upper()\n else:\n return seq", "def reverse_complement(seq):\n seq = reverse(seq)\n seq = complement(seq)\n return seq", "def get_complement(nucleotide):\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'T':\n return 'A'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'", "def get_complement(nucleotide):\n if nucleotide=='A':\n \treturn 'T'\n if nucleotide=='C':\n \treturn 'G'\n if nucleotide=='T':\n \treturn 'A'\n if nucleotide=='G':\n \treturn 'C'" ]
[ "0.764635", "0.76432216", "0.76206285", "0.75781304", "0.7459168", "0.7373911", "0.7336976", "0.7114455", "0.7025804", "0.70229846", "0.7021352", "0.7019306", "0.7013256", "0.69664097", "0.69514114", "0.6932707", "0.6928373", "0.69146174", "0.68675524", "0.6778184", "0.6766795", "0.6743321", "0.67363673", "0.6695526", "0.6689073", "0.66885304", "0.6646064", "0.6645604", "0.6635013", "0.6632064" ]
0.7836419
0
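As above, a small illustrative check of the complement-then-reverse idiom in this record's document; the sequence is a made-up example, not taken from the data.

seq = "AAATCG"                                  # made-up example input
table = str.maketrans("ACGT", "TGCA")
rev_comp = seq.upper().translate(table)[::-1]   # complement, then reverse with slicing
print(rev_comp)                                 # -> "CGATTT"

Reversing the complement (equivalently, complementing the reversed string) yields the opposite strand read 5' to 3'.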
Calculates a position in a sequence minimizing the skew.
def get_minimum_skew(sequence): # start the array min_skew = [] # calculates the sequence gc skew skew = get_sequence_skew(sequence) # get the minimized skew values m_skew = min(skew) # iterates to the length of the sequence # to get the index positions for idx in range(len(sequence) + 1): # if the position i has the same value # as the minimum appende to the array if skew[idx] == m_skew: min_skew.append(idx) return min_skew
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_min_skew_position(genome):\n assert (is_dna(genome))\n skew = get_skew(genome)\n min_skew = min(skew)\n return [pos for (pos, sk) in enumerate(skew) if sk == min_skew]", "def min_skew_positions(genome: str) -> list:\n\n min_skew = 0\n result = [0]\n\n curr_skew = 0\n for i in range(len(genome)):\n n = genome[i]\n\n if n == SKEW_INCREASER:\n curr_skew += 1\n elif n == SKEW_DECREASER:\n curr_skew -= 1\n\n if curr_skew < min_skew:\n min_skew = curr_skew\n result.clear()\n\n if curr_skew == min_skew:\n result.append(i + 1)\n\n return result", "def getStartPosMapper(seq, subst=None):\n if subst is None:\n subst = make_identity_substitution_matrix(1, -1, alphabet=AALPHABET)\n def findPos(pep):\n d = ssw(pep)\n return int(d['query_begin'] - d['target_begin'])\n \n ssw = StripedSmithWaterman(query_sequence=seq,\n protein=True,\n substitution_matrix=subst)\n return findPos", "def min_entropy_pos(self):\n min_entropy = float(\"inf\")\n for Knot in self.wait_to_collapse:\n noise = random.random() / 1000\n # Add some noise to mix things up a little\n if self[Knot].entropy - noise < min_entropy:\n position = Knot[:]\n min_entropy = self[position].entropy - noise\n return position", "def calc_nearest_ind(self, robot_pose):\n pass", "def compute_offset_pos(seq, pos):\n \n nogap_seq = transform_seq(seq)\n assert(pos >= 0 and pos < len(nogap_seq))\n\n maps = dict()\n cnt = 0\n maxi = 0\n for i in range(len(seq)):\n if seq[i] not in msa_characters:\n maps[i-cnt] = i\n maxi = i\n else:\n cnt += 1\n return maps.get(pos, maxi)\n \n #cnt = 0\n #k = 0\n #while k<len(seq):\n #print(k, cnt, seq[k])\n #offset = 0\n #while k+offset < len(seq) and seq[k+offset] in msa_characters:\n #offset += 1\n #else:\n #cnt += 1\n #k+=offset+1\n #if cnt == pos:\n #break\n #return k\n \n #k = 0 \n #cnt = 0 if seq[k] not in msa_characters else -1\n #while cnt != pos and k < len(seq):\n #if seq[k] not in msa_characters:\n #cnt += 1\n #k += 1 \n ##print(pos, cnt, k, seq)\n #return k", "def relative_positions(positions, point, box_size):\n\n return (np.array(positions) - np.array(point)\n + np.array(box_size)/2)%np.array(box_size) - np.array(box_size)/2", "def find_rotation_point( word_list ):", "def lmin(scape, start):\n i = start\n while scape[i - 1] < scape[i] - 0.06:\n i -= 1\n while scape[i + 1] < scape[i] - 0.06:\n i += 1\n return i", "def compute_revoffset_pos(seq, pos):\n\n cnt = 0 \n for c in seq:\n if c in msa_characters:\n cnt += 1\n return pos - cnt", "def distinter(self,pos):\n\t\tdist = 0\n\t\taux = self.posbase\n\t\twhile not self.eob():\n\t\t\tif self.checkintercambio(pos):\n\t\t\t\tdist = self.posbase - aux\n\t\t\tself.posbase +=1\n\t\tself.posbase = aux\n\t\treturn dist", "def compute_pos_seq2msa(msaseq, start, stop):\n new_start = compute_offset_pos(msaseq, start)\n new_stop = compute_offset_pos(msaseq, stop-1)+1 \n return new_start, new_stop", "def get_sequence_skew(sequence):\n skew = [0]\n for idx, element in enumerate(sequence):\n if sequence[idx] == 'G':\n skew.append(skew[idx] + 1)\n elif sequence[idx] == 'C':\n skew.append(skew[idx] - 1)\n else:\n skew.append(skew[idx])\n return skew", "def s_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += 1 - ayxx\n return running_total", "def get_s0_xy_equipartition(N, angle):\n s0x= [1]\n s0y = [0]\n s0z = [0]\n 
for i in range(1,N):\n s_old = [s0x [-1],s0y [-1],s0z [-1] ]\n #final_angle = random.choice([-1,1]) * np.radians(angle - 3 + 6*random.random())\n #final_angle = random.choice([-1,1]) * np.radians(angle * 2*random.random())\n final_angle = random.choice([-1,1]) * np.radians(random.gauss(0, 1.2 * angle))\n sin_final = sin(final_angle)\n cos_final = cos(final_angle)\n s_new = [cos_final*s_old[0] +sin_final*s_old[1], -sin_final*s_old[0] + cos_final*s_old[1],0]\n s0x.append(s_new[0])\n s0y.append(s_new[1])\n s0z.append(s_new[2])\n norm = np.linalg.norm([s0x [-1],s0y [-1],s0z [-1] ])\n s0x [-1] = s0x[-1] / norm\n s0y [-1] = s0y[-1] / norm\n s0z [-1] = s0z[-1] / norm\n\n return np.concatenate((s0x,s0y,s0z),axis = 0)", "def pos(self):\n return (self.raw - self.raw_zero) / self.ratio", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def position_adjustment(self, position):\n changed_position = 0\n if position in self.ladders:\n return self.ladders[position] - position\n elif position in self.snakes:\n return self.snakes[position] - position\n return changed_position", "def calc_pos(x):\n a = torch.arange(1, x.shape[1] + 1).unsqueeze(0).to(x.device)\n p = a.expand(x.shape[0], -1)\n mask = (x != 0).long()\n return p * mask", "def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx", "def aa_pos(self, pos):\n return self.nt_pos(pos) // 3", "def position_from_seed(seed):\n random.seed(seed)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n offset = random.randint(1, 100)\n start_position = (math.log(ascii_character_sum / 100) + offset, math.log(ascii_character_sum / 100) + offset)\n end_positon = (start_position[0] + 100, start_position[1] + 100)\n square_position = (start_position, end_positon)\n print(square_position)\n \n return square_position", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n 
if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def __find_next_position_in_degrees(self, movement: int) -> int:\n next_position = (\n self.counterpoint[-1].scale_element.position_in_degrees\n + movement\n )\n return next_position", "def compute_new_position_from_dice(self, player_index, thr):\r\n new_position = self.player_positions[player_index] + thr.get_amount()\r\n\r\n if new_position >= len(self.squares):\r\n new_position -= len(self.squares)\r\n\r\n return new_position", "def question7(seq_x, seq_y):\n \n diag_score = 2\n off_diag_score = 1\n dash_score = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n score_matrix = student.build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score)\n \n align_matrix = student.compute_alignment_matrix(seq_x, seq_y, score_matrix, True)\n score, align_x, align_y = student.compute_global_alignment(seq_x, seq_y, score_matrix, align_matrix)\n \n edit_distance = len(seq_x) + len(seq_y) - score\n \n print \"Edit distance: \" + str(edit_distance)\n print align_x\n print align_y", "def _calculate_position(self, lookup, alignment):\n index = 0 # Index of our split CIGAR string\n if alignment.get_rc() or lookup.get_rc(): # If we're reverse complementing\n qpos = lookup.get_reverse_position() - 1 # Start with the reverse position of the SNP, must subtract one\n else: # Otherwise\n qpos = lookup.get_forward_position() # Start with the forward posittion\n while True: # Endless loop to do weird things...\n try: # While we have a CIGAR string to parse\n old = qpos # Store our previously calculated SNP position\n # Seach the CIGAR string as a list, starting with index 0, for indels\n if re.search('M', alignment.get_cigar()[index]): # If we have a perfect match\n if qpos < int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))): # If our SNP is in the perfect match\n break # Exit the loop, we have our position\n if re.search('D', alignment.get_cigar()[index]): # If we have a deletion relative to reference\n qpos += int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))) # Add the deletion to our SNP position\n if re.search('[IS]', alignment.get_cigar()[index]): # If we have an insertion relative to reference\n qpos -= 
int(''.join(re.findall(r'\\d+', alignment.get_cigar()[index]))) # Subtract the insertion from our SNP postion\n index += 1 # Increase the index\n if qpos <= 0 or qpos >= lookup.get_length(): # If we've gone beyond the scope of our lookup: 0 is before the sequence, lookup.get_length() is after\n qpos = old # Go back to our previously calculated SNP postion\n break # Exit the loop, we have our position\n except IndexError: # If we run out of CIGAR string codes\n break # Exit the loop, we have our position\n self._position = alignment.get_position() + qpos # Our SNP position is at the mapping position plus the SNP position", "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of variables\n x_pos = -1\n y_pos = -1\n result_seq_x = ''\n result_seq_y = ''\n score = 0\n\n #determine start position in alignment_matrix as position with maximum value \n for row in range(len(seq_x) + 1):\n for col in range(len(seq_y) + 1):\n if alignment_matrix[row][col] > score:\n score = alignment_matrix[row][col]\n x_pos = row\n y_pos = col\n\n #start in start position and go upwards till we reach first entry with value 0\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 and y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n if current_value == 0:\n break\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)", "def get_sequence_skew(sequence):\n # make the sequence upper case\n sequence = sequence.upper()\n # start the array\n skew = [0]\n # iterates to the sequence elements and it indexes\n for idx, element in enumerate(sequence):\n # check if element[i] is a G\n # if so add 1\n if sequence[idx] == 'G':\n skew.append(skew[idx] + 1)\n # if the element[i] is a C\n # add to the array -1\n elif sequence[idx] == 'C':\n skew.append(skew[idx] - 1)\n else:\n # if it is not G or C add 0\n skew.append(skew[idx])\n return skew", "def findIndVarOfMinAbsQuantity(self, indVar, point, quantity,\n function=(lambda x, q: q), target=0.0):\n assert indVar in self.indVars, \"Input indVar %s is not a valid indVar!\" % indVar\n\n indVarsTable = self.getIndVarsTable(omitTheseIndVars=(indVar,))\n closestIndVar = None\n closestQuantity = 1.0e300\n\n for i, var in enumerate(self.h5file[indVar][:]):\n index = self.tableIndexer(indVar, i)\n thisQuantity = function(var,\n multidimInterp(point, indVarsTable,\n self.h5file[quantity][index],\n linInterp, 2)\n ) - target\n if abs(thisQuantity) < closestQuantity:\n closestIndVar = var\n closestQuantity = abs(thisQuantity)\n return closestIndVar" ]
[ "0.67086536", "0.62083524", "0.5892266", "0.5836606", "0.56663465", "0.5659582", "0.5613757", "0.55811", "0.5522433", "0.5512218", "0.5506874", "0.54924464", "0.54878193", "0.5467683", "0.54343665", "0.5357526", "0.53443265", "0.5343145", "0.5330511", "0.5313186", "0.53067976", "0.5294906", "0.5289677", "0.52863985", "0.5283365", "0.5206636", "0.5204437", "0.5193716", "0.5186433", "0.5179981" ]
0.65810394
1
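A compact reimplementation of the skew/minimum-skew logic from the record above, worked on a toy sequence so the returned positions can be checked by hand; the function and variable names here are my own, not from the dataset.

def sequence_skew(seq):
    # running G-minus-C count; index 0 corresponds to the empty prefix
    skew = [0]
    for base in seq.upper():
        skew.append(skew[-1] + (base == "G") - (base == "C"))
    return skew

skew = sequence_skew("GCCG")                                # toy sequence
print(skew)                                                 # [0, 1, 0, -1, 0]
print([i for i, s in enumerate(skew) if s == min(skew)])    # [3]

In bacterial genomes the minimum of the GC skew is commonly used as an estimate of the replication origin, which is why the record's function collects every index attaining the minimum.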
Make a plot from the base frequency distribution in a DNA sequence.
def plot_base_frequency_genome(x_data, y_data, x_label, y_label): # color for the bases base_markers = {"A": "b-", "C": "r-", "G": "g-", "T": "y-", "N": "k-"} # drawing the plot fig = plt.figure(figsize=(16, 8)) ax = fig.add_subplot(111) y_names = [] for y in y_data: y_names.append(y) # adding colors to the lines representing the bases # the x and y data and the labels ax.plot(x_data, y_data[y], base_markers[y], label=y) plt.xlabel(x_label) plt.ylabel(y_label) ax.legend(y_names) plt.grid(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()", "def generate_plot(tokens):\n\n return FreqDist(word for word in tokens if len(word) > 4).plot(50, cumulative=True)", "def generate_counthist(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n sizes = np.arange(1,max_size+1)\n freqs = np.float_(sizes) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=sizes)\n idxs = hist > 0\n \n fig = plt.figure()\n \n ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(freqs[idxs],hist[idxs],marker='o',linestyle='None',color='#e31a1c',markeredgewidth=0,markersize=4,clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def generateFreqGraph( invertedIndex ):\n print('Printing plot for Step 3 frequencies')\n print('----------------------------------------------------------------')\n tempList = sorted( invertedIndex, key=lambda element: element[1], reverse = True )\n freqDict = {}\n count = 1\n for term, freq in tempList:\n freqDict[count] = freq\n count+=1\n \n #Plot the frequency based graph\n plt.figure()\n plt.xlabel('$\\log_{10}(i)$ for $i^{th}$ most frequent term')\n plt.ylabel('$\\log_{10}(y_i)$ for freq of $i^{th}$ term')\n plt.title('$\\log_{10} y_i$ vs $\\log_{10}i$')\n plt.plot(np.log10(list(freqDict.keys())), np.log10(list(freqDict.values())), '-o')", "def generate_counthistline(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n bins = np.logspace(0,np.log10(max_size),21)\n bins_freqs = np.float_(bins) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=bins)\n \n fig = plt.figure()\n \n 
ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(bins_freqs,list(hist)+[hist[-1]],color='#e31a1c',drawstyle='steps-post',clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def plot_sample_length_distribution(sample_texts):\n plt.hist([len(s) for s in sample_texts], 50)\n plt.xlabel('Length of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample length distribution')\n plt.show()", "def plot_sample_length_distribution(sample_texts):\n plt.hist([len(s) for s in sample_texts], 50)\n plt.xlabel('Length of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample length distribution')\n plt.show()", "def plot_sample_length_distribution(sample_texts):\n plt.hist([len(s) for s in sample_texts], 50)\n plt.xlabel('Length of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample length distribution')\n plt.show()", "def plot_frequency(word_frequency, n, output_name=\"output.png\"):\r\n # partially completed for you, complete the rest according to the instructions.\r\n # setting up plot variables\r\n words = tuple(zip(*word_frequency))[0]\r\n frequencies = tuple(zip(*word_frequency))[1]\r\n y_pos = np.arange(len(words))\r\n fig, ax = plt.subplots(figsize=(15, 10))\r\n # set up color spectrum\r\n colors = [\r\n \"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\",\r\n \"violet\"\r\n ]\r\n rvb = mcolors.LinearSegmentedColormap.from_list(\"\", colors)\r\n nlist = np.arange(n).astype(float)\r\n ax.barh(y_pos, frequencies, align='center', color=rvb(nlist/n))\r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(words)\r\n ax.invert_yaxis()\r\n ax.set_xlabel('Frequency')\r\n ax.set_title(\"Word Frequency: Top {}\".format(n))\r\n # Only comment below line when debugging. 
Uncomment when submitting\r\n plt.savefig(output_name)", "def plot_sample_distribution(samples):\n plt.hist(samples, 50)\n plt.xlabel('Value of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample distribution')\n plt.show()", "def plot_base_cycle_distributions(df_, cycles=4, size=2.5, codes=24, lines=2):\n short_barcode = 'barcode_'\n cycle_col = 'cycle'\n base_col = 'first_base'\n value_col = 'first'\n \n grey_for_means = (0.4, 0.4, 0.4, 1)\n \n cycle_names = sorted(set(df_['cycle']))[:cycles]\n df_[short_barcode] = df_['barcode'].apply(lambda x: x[:cycles])\n # over all barcodes\n df_mean = df_.copy()\n df_mean[short_barcode] = ['mean_%s' % x for x in df_mean[base_col]]\n means = sorted(set(df_mean[short_barcode]))\n barcodes = list(df_[short_barcode].value_counts().index[:codes])\n\n df_plot = pd.concat([df_, df_mean])\n filt = df_plot[cycle_col].isin(cycle_names)\n filt &= df_plot[short_barcode].isin(barcodes + means)\n\n sns.set_context(font_scale=size * 0.45)\n palette = sns.color_palette('Set2', codes)\n palette += [grey_for_means,]*len(means)\n fg = sns.FacetGrid(data=df_plot[filt], row='cycle', col=base_col\n , row_order=cycle_names, col_order=list('TCGA')\n , hue=short_barcode, size=size\n , palette=palette)\n\n (fg.map(sns.distplot, value_col, kde=True, hist=False)\n .set_titles(\"{col_name}\"))\n [ax.set_title('') for ax in fg.axes[1:].flat[:]]\n for name, ax in zip(fg.row_names, fg.axes[:,0]):\n ax.set_ylabel(name)\n legend_min_lines(fg, lines)\n fg.fig.tight_layout()\n return fg", "def dendogram(self):\r\n \r\n plt.figure(figsize=(20, 7))\r\n dendrogram = sch.dendrogram(sch.linkage(self.X, method='ward'))\r\n plt.title(\"Dendograms\")\r\n plt.axhline(linestyle='--', y=5) \r\n plt.show()", "def GenFrequencies(alignment):\n bases = {'A':0,'C':0,'G':0,'T':0,'-':0}\n FreqArray = []\n SeqLen = getLen(alignment)\n for i in range(SeqLen):\n FreqArray.append(bases.copy())\n count = 0\n SeqNum = 0\n with open(alignment,'rU') as F:\n data = 'placeHolder'\n while data:\n data = F.readline().strip()\n if data and not data[0] == '>':\n for char in data:\n FreqArray[count][char] += 1\n count +=1\n elif data:\n count = 0\n SeqNum += 1\n else:\n break\n for position in FreqArray:\n for base in position:\n position[base] /= float(SeqNum)\n return FreqArray", "def plot_freq_spec(data, title):\n plt.title(title)\n\n def plot_freq_spec(axis, line, label):\n n = len(axis)\n fft = fftpack.fft(axis) / n\n fft = fft[range(int(n / 2))]\n plt.plot(range(int(n / 2)), abs(fft), line, label=label)\n plot_freq_spec(data[:, 0], 'r-', label='x')\n plot_freq_spec(data[:, 1], 'g-', label='y')\n plot_freq_spec(data[:, 2], 'b-', label='z')", "def plot_codon_usage(sequence, ax):\n\n x1 = x2 = numpy.arange(len(sequence.codons))\n bar_width = 0.5\n xlabels = []\n\n origin_f = []\n target_f = []\n\n # extract data to plot from sequence object\n for c in sequence.codons:\n origin_f.append(c['origin_f'])\n target_f.append(c['target_f'])\n xlabels.append(c['aa'])\n\n # convert lists to numpy arrays\n origin_f = numpy.array(origin_f)\n target_f = numpy.array(target_f)\n\n # plot data\n p1 = ax.bar(x1, origin_f, color='b', width=bar_width)\n p2 = ax.bar(x2 + (0.5 * bar_width), target_f, color='r', width=bar_width)\n\n # hide top and right axes\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n # set tick parameters\n ax.tick_params(axis='both', which='both', direction='out')\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n # position xticks and labels on x axis to 
be centered for both bars\n ax.set_xticks(x1 + bar_width / 2)\n ax.set_xticklabels(xlabels, **{'family': 'monospace'})\n ax.set_xlabel('amino acid')\n # add a legend to the plot\n ax.legend((p1, p2), ('Origin organism', 'Host organism'), loc=2, bbox_to_anchor=(1, 1))\n ax.hlines(sequence.lower_threshold, 0, len(x1), colors='k', linestyles='solid', **{'linewidth': 1})\n\n if not sequence.use_frequency:\n # set the y axis label\n ax.set_ylabel('codon usage [fraction]')\n # specify the distance between the ticks on the y axis\n major_locator = matplotlib.ticker.MultipleLocator(0.1)\n minor_locator = matplotlib.ticker.MultipleLocator(0.01)\n else:\n # set the y axis label if frequency is used instead of fractions\n ax.set_ylabel('codon usage [frequency/1000]')\n # specify the distance between the ticks on the y axis\n major_locator = matplotlib.ticker.MultipleLocator(10)\n minor_locator = matplotlib.ticker.MultipleLocator(1)\n\n # set the distance between the ticks on the y axis\n ax.yaxis.set_major_locator(major_locator)\n ax.yaxis.set_minor_locator(minor_locator)", "def display_gender_freq(d, title):\n he_val = []\n she_val = []\n authors = []\n\n for entry in d:\n authors.append(entry)\n he_val.append(d[entry][0])\n she_val.append(d[entry][1])\n\n fig, ax = plt.subplots()\n plt.ylim(0, 1)\n\n index = np.arange(len(d.keys()))\n bar_width = 0.35\n opacity = 0.4\n\n he_val = tuple(he_val)\n she_val = tuple(she_val)\n authors = tuple(authors)\n\n rects1 = ax.bar(index, he_val, bar_width, alpha=opacity, color='b', label='He')\n rects2 = ax.bar(index + bar_width, she_val, bar_width, alpha=opacity, color='r', label='She')\n\n ax.set_xlabel('Authors')\n ax.set_ylabel('Frequency')\n ax.set_title('Gendered Pronouns by Author')\n ax.set_xticks(index + bar_width / 2)\n plt.xticks(fontsize=8, rotation=90)\n ax.set_xticklabels(authors)\n ax.legend()\n\n fig.tight_layout()\n filepng = \"visualizations/he_she_freq\" + title + \".png\"\n filepdf = \"visualizations/he_she_freq\" + title + \".pdf\"\n plt.savefig(filepng, bbox_inches='tight')\n plt.savefig(filepdf, bbox_inches='tight')", "def plot_codon_usage_differences(sequence, ax):\n\n # Generate a range of residues out of the length of the sequence array\n x1 = numpy.arange(len(sequence.codons))\n\n # Set the threshold according to use_frequency\n if sequence.use_frequency:\n threshold = 5\n else:\n threshold = 0.2\n\n # Set width of bars\n bar_width = 0.8\n # Initialize array of labels for the x axis\n xlabels = []\n\n # Initialize arrays of data and labels for the bars\n df = []\n bar_labels = []\n\n # walk over the codons in sequence\n for c in sequence.codons:\n # add final_df to data array\n df.append(c['final_df'])\n # add residue to xlabels\n xlabels.append(c['aa'])\n # generate bar label and add to list\n label = u'{} → {}'.format(c['original'], c['new'])\n bar_labels.append(label)\n\n # convert lists to numpy arrays\n bar_labels = numpy.array(bar_labels)\n df = numpy.array(df)\n # find bars that exceed the threshold\n mask1 = numpy.ma.where(df > threshold)\n mask2 = numpy.ma.where(df <= threshold)\n\n # plot and color bars accordingly\n p1 = ax.bar(x1[mask1], df[mask1], color='r', width=bar_width)\n autolabel(p1, ax, bar_labels[mask1], vertical=True)\n\n p2 = ax.bar(x1[mask2], df[mask2], color='b', width=bar_width)\n autolabel(p2, ax, bar_labels[mask2], vertical=True)\n\n # hide top and right axis\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(axis='both', which='both', direction='out')\n 
ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n\n # set x axis labels to be centered and to use a monospaced font\n ax.set_xticks(x1 + bar_width / 2)\n ax.set_xticklabels(xlabels, **{'family': 'monospace'})\n ax.set_xlabel('amino acid')\n\n ax.set_ylabel(r'Differential codon usage $f_{origin} - f_{host}$')\n\n if not sequence.use_frequency:\n major_locator = matplotlib.ticker.MultipleLocator(0.05)\n minor_locator = matplotlib.ticker.MultipleLocator(0.01)\n else:\n major_locator = matplotlib.ticker.MultipleLocator(10)\n minor_locator = matplotlib.ticker.MultipleLocator(1)\n\n ax.legend((p1, p2), (u'Δf > {}'.format(threshold), u'Δf ≤ {}'.format(threshold)), loc=2, bbox_to_anchor=(1, 1))\n\n ax.yaxis.set_major_locator(major_locator)\n ax.yaxis.set_minor_locator(minor_locator)\n\n ax.hlines(threshold, 0, len(x1), colors='k', linestyles='dotted', **{'linewidth': 1})", "def plot_frequency(self, x='age') -> None:\n data = PreprocessData.impute(self.data)\n strokers = data[data['stroke'] == 1]\n print(strokers.head())\n fig = plt.figure()\n sns.distplot(strokers[x], norm_hist=False, kde=False,\n hist_kws=dict(edgecolor='black', linewidth=2),\n color='green')\n sns.despine(fig=fig, top=True, right=True)\n plt.ylabel('Stroke Frequency')\n plt.title('Distribution of stroke incidence by {}'.format(x))\n\n if self.savefig:\n fname = os.path.join(stroke_assessment.HIST_PLOTS_DIR, f'{x}.png')\n plt.savefig(fname, dpi=300, bbox_inches='tight')\n else:\n plt.show()", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def plot_barcode_distribution(ax, barcodes, expected, barcode_map=barcode_map):\n colors = cm.rainbow(np.linspace(0, 1, len(barcodes)))\n\n #maximum barcodes at any one location\n max_count = max(barcodes.apply(max))\n filtered_barcodes = {barcode : name for barcode, name in barcode_map.items() if barcode in barcodes.index}\n for color, (barcode, label) in zip(colors, sorted(filtered_barcodes.items(), key=lambda x: int(x[1][-2:]))):\n if label in expected:\n marker = \"^\"\n else:\n marker = \".\"\n try:\n ax.scatter(barcodes.ix[barcode].index,\n barcodes.ix[barcode].values,\n s=70,\n marker=marker,\n label=label,\n lw=0,\n c=color)\n except IndexError:\n pass\n\n #ax.set_xticks(range(0, 47), range(1, 48))\n ax.set_xlim(0, 46)\n ax.set_ylim(bottom=0)\n ax.set_xlabel(\"Base\", fontsize=18)\n ax.set_xticks(range(0,46))\n ax.set_ylabel(\"Number of Barcodes at Base\")\n ax.legend(loc=0)", "def histogramOfContigLengths(self):\n\t\tseqLengths = []\n\t\tfor x in self.contigsInfo.keys():\n\t\t\tseq = self.contigsInfo[x]\n\t\t\tseqLengths.append(len(seq))\n\n\t\tseqLengths = sorted(seqLengths)\n\t\tl = seqLengths[0:540000]\n\t\tmatplotlib.use('Agg')\n\t\tpylab.hist(l, bins=50)\n\t\tpylab.title(\"Contigs historgram\")\n\t\tpylab.xlabel('Sequence Length (bp)')\n\t\tpylab.ylabel('Count')\n\t\tpylab.savefig('contig_histogram.png')", "def plot_distribution(d, start=0.01, stop=10.0, resolution=0.1):\n import pylab\n X = numpy.arange(start, stop, resolution)\n Y = [math.exp(d.log_pdf(x)) for x in X]\n pylab.plot(X, Y)", "def plot_top_ngrams(corpus, title, ylabel, xlabel=\"Number of Occurences\", n=2):\n true_b = (pd.Series(nltk.ngrams(corpus.split(), n)).value_counts())[:20]\n 
true_b.sort_values().plot.barh(color='blue', width=.9, figsize=(12, 8))\n plt.title(title)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.show()", "def plot_ungrounded_frequencies(counts_list, labels, colors, plot_filename):\n bin_interval = 1\n fracs_total_list = []\n bin_starts_list = []\n for counts in counts_list:\n freq_dist = []\n bin_starts = list(range(0, len(counts), bin_interval))\n bin_starts_list.append(bin_starts)\n for bin_start_ix in bin_starts:\n bin_end_ix = bin_start_ix + bin_interval\n if bin_end_ix < len(counts):\n freq_dist.append(np.sum(counts[bin_start_ix:bin_end_ix]))\n else:\n freq_dist.append(np.sum(counts[bin_start_ix:]))\n freq_dist = np.array(freq_dist)\n fracs_total = np.cumsum(freq_dist)\n fracs_total_list.append(fracs_total)\n\n fig = plt.figure(figsize=(2.3, 2.2), dpi=300)\n plt.ion()\n ax = fig.gca()\n for i, (bin_starts, fracs_total) in \\\n enumerate(zip(bin_starts_list, fracs_total_list)):\n xvals = np.array(bin_starts) / len(counts_list[i])\n yvals = fracs_total / float(np.sum(counts_list[i]))\n ax.plot(xvals, yvals, color=colors[i])\n ax.plot(xvals, xvals, color='gray', linestyle='dotted')\n labels = list(labels)\n labels.append('Uniform distribution')\n pf.format_axis(ax)\n ax.legend(labels, loc='lower right', frameon=False, fontsize=pf.fontsize)\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.subplots_adjust(left=0.18, bottom=0.15, right=0.96, top=0.92)\n ax.set_xlabel('String rank (normalized)')\n ax.set_ylabel('Rel. freq. of occurrences')\n plt.savefig(plot_filename)", "def plot_density(sampler, threshold, sigma, width, n_random_samples = 10000):\n recX, labels = sampler.sample(n_random_samples)\n rec_t0 = recX[:,0]\n rec_amplitude = recX[:,1]\n generator.generate_pdf(threshold, sigma, width)\n fig = plt.figure(figsize = (12, 12))\n # pdf and random samples go to bottom right, margins on appropriate sides\n ax1 = plt.subplot2grid((12,12),(4,0), colspan = 9, rowspan = 8)\n pdf_map = ax1.contourf(generator.t0s, generator.amplitudes, generator.pdf, 10, cmap = 'Blues')\n ax1.scatter(rec_t0, rec_amplitude, s = 0.03, c = 'y')\n ax1.set_title('Probability density and random samples'.format(n_random_samples))\n ax1.set_xlabel('t0 [ns]')\n ax1.set_ylabel('amplitude [S/N]')\n ax1c = plt.subplot2grid((12,12), (1,9), rowspan = 3, colspan = 2)\n plt.colorbar(pdf_map, cax = ax1c, format = ticker.FuncFormatter(_fmt))\n ax2 = plt.subplot2grid((12,12),(1,0), colspan = 9, rowspan = 3, sharex = ax1)\n ax2.plot(generator.t0s[:,-1], generator.pdfu)\n ax2.hist(rec_t0, bins = generator.t0s[:,0], normed = True, alpha = 0.5)\n ax2.set_title('t0 margin distribution')\n ax2.set_ylabel('P(1 over)')\n plt.setp(ax2.get_xticklabels(), visible = False)\n ax3 = plt.subplot2grid((12,12),(4,9), rowspan = 8, colspan = 3, sharey = ax1)\n ax3.plot(generator.pdfv, generator.amplitudes[-1,:])\n ax3.hist(rec_amplitude, bins = generator.amplitudes[0,:], normed = True, orientation = 'horizontal', alpha = 0.5)\n ax3.set_title('Amplitude margin distribution')\n ax3.set_xlabel('P(1 over)')\n plt.setp(ax3.get_yticklabels(), visible = False)\n ax4 = plt.subplot2grid((12,12),(0,0), colspan = 9)\n ax4.text(0.5, 1.0, 'Exact P(one over) distribution and {0} random samples \\nthreshold : {1}, sigma : {2}, width : {3}'.format(n_random_samples, threshold, sigma, width), horizontalalignment = 'center', verticalalignment = 'top', fontsize = 18)\n ax4.set_axis_off()\n plt.tight_layout()\n plt.savefig('{0}/rng_test_thr{1}_sig{2}_w{3}.png'.format(plotdir, threshold, sigma, width))", "def display(self, 
bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()", "def make_plot(counts):\n cn1 = []\n cn2 = []\n time = []\n\n for x in counts:\n y1 = x[0]\n cn1.append(y1[1])\n y2 = x[1]\n cn2.append(y2[1])\n\n for i in range(len(counts)):\n time.append(i)\n\n posLine = plt.plot(time, cn1,'bo-', label='Positive')\n negLine = plt.plot(time, cn2,'go-', label='Negative')\n plt.axis([0, len(counts), 0, max(max(cn1), max(cn2))+50])\n plt.xlabel('Time step')\n plt.ylabel('Word count')\n plt.legend(loc = 'upper left')\n plt.show()\n plt.savefig(\"plot.png\", format=\"png\")", "def base_frequencies(seq):\n\n # Get the length of the sequence\n sequence_len = len(seq)\n\n # Initialize base frequencies\n base_frequencies = {\n 'A': 0,\n 'C': 0,\n 'T': 0,\n 'G': 0\n }\n\n # Count bases\n for base in seq:\n base_frequencies[base] += 1\n\n # Normalize count\n for base in base_frequencies:\n base_frequencies[base] = base_frequencies[base]/sequence_len\n\n return base_frequencies", "def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()", "def plotting(self, figsize=(12, 12), types=['freqs']):\n ax = plt.figure(figsize=figsize)\n if 'freqs' in types:\n count_dict = self.count_freq(types=1)\n plt.title(\n f'Total keys in count_dict: {sum(list(count_dict.values()))}')\n barh = plt.barh(list(count_dict.keys()), list(count_dict.values()), color=[\n np.random.rand(3,) for _ in range(self.categories)])\n for rect in barh:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_height()/2.0, height,\n '%d' % int(height), ha='center', va='bottom')\n\n plt.legend()\n plt.show()" ]
[ "0.6750949", "0.6312379", "0.6261643", "0.60959035", "0.6060645", "0.60301715", "0.60301715", "0.60301715", "0.6027653", "0.6027587", "0.5995123", "0.5985474", "0.5812763", "0.58085936", "0.58050704", "0.57880205", "0.5758242", "0.57470506", "0.57378995", "0.57230926", "0.57209146", "0.5720402", "0.5719483", "0.5718986", "0.57113236", "0.5680573", "0.5672786", "0.5636147", "0.5622539", "0.5616502" ]
0.64471364
1
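A stripped-down sketch of the kind of per-base frequency plot produced by the record above, using made-up positions and frequencies; it assumes only matplotlib and mirrors the record's one-line-per-base layout.

import matplotlib.pyplot as plt

positions = [10, 20, 30, 40]                        # window end positions (kb), made up
freqs = {"A": [30, 28, 31, 29], "C": [20, 22, 19, 21],
         "G": [21, 20, 22, 20], "T": [29, 30, 28, 30]}
markers = {"A": "b-", "C": "r-", "G": "g-", "T": "y-"}
fig, ax = plt.subplots(figsize=(8, 4))
for base, values in freqs.items():
    ax.plot(positions, values, markers[base], label=base)
ax.set_xlabel("Genome (kb)")
ax.set_ylabel("Frequency (%)")
ax.legend()
ax.grid(True)
plt.show()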
Calculates the base/nucleotide frequencies in a window of size window and step and makes a plot of the base distribution along the sequence length.
def base_content_slide_window(sequence, path, name, alphabet, window, step, plot=False): # sequence as a string of upper cases characters # bases as a set of upper cases characters sequence, bases = sequence.upper(), alphabet # initialize the dictionary container and the array base_freqs = defaultdict(list) sizes = [] # iterates to the bases and start filling the dictionary # with the keys and a empty array for base in bases: base_freqs[base] = base_freqs.get(base, []) # iterates to the sequence windows for i in range(0, len(sequence) - window + 1, step): # gets the sequence of length of the desired window subseq = sequence[i:i + window] # check if the length of the window is correct assert (len(subseq) == window), 'The lenght of the subsequence must have the same size of the window' # start calculating the frequencies # and feeding the containers for base in bases: freq = subseq.count(base) / len(subseq) * 100 base_freqs[base].append(round(freq, 4)) sizes.append((i + window)) # if it is to plot the data if plot: plot_base_frequency_genome(sizes, base_freqs, 'Genome (kb)', 'Frequencies') plt.title(f"Base Distribuition in {name} genome") plt.savefig(f"{path}/{name}_base_freq_slidewindow_plot.png") # return the data return base_freqs, sizes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_counthist(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n sizes = np.arange(1,max_size+1)\n freqs = np.float_(sizes) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=sizes)\n idxs = hist > 0\n \n fig = plt.figure()\n \n ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(freqs[idxs],hist[idxs],marker='o',linestyle='None',color='#e31a1c',markeredgewidth=0,markersize=4,clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def generate_counthistline(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n bins = np.logspace(0,np.log10(max_size),21)\n bins_freqs = np.float_(bins) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=bins)\n \n fig = plt.figure()\n \n ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(bins_freqs,list(hist)+[hist[-1]],color='#e31a1c',drawstyle='steps-post',clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def get_freq_grid():\n (bins_per_octave, n_octaves, _, _, f_min, _) = get_hcqt_params()\n freq_grid = librosa.cqt_frequencies(\n bins_per_octave*n_octaves, f_min, bins_per_octave=bins_per_octave\n )\n return freq_grid", "def freq():", "def freq_window(self, startwindow, stopwindow, window=\"hann\"):\n n = self.times.size\n fwindow = _freq_window(self.fs, n, startwindow, stopwindow, window=window)\n new_response = self.from_freq(self.fs, self.in_freq * fwindow)\n\n return new_response", "def frequency_generator(N,min_period,max_period,n_changepoints):\n # vector of random indices < N, padded with 0 and N at the ends:\n changepoints = 
np.insert(np.sort(rng.randint(0,N,n_changepoints)),[0,n_changepoints],[0,N])\n # list of interval boundaries between which the control sequence should be constant:\n const_intervals = zip(changepoints,np.roll(changepoints,-1))[:-1]\n # populate a control sequence\n frequency_control = np.zeros((N,1))\n for (t0,t1) in const_intervals:\n frequency_control[t0:t1] = rng.rand()\n periods = frequency_control * (max_period - min_period) + max_period\n\n # run time through a sine, while changing the period length\n frequency_output = np.zeros((N,1))\n z = 0\n for i in range(N):\n z = z + 2 * np.pi / periods[i]\n frequency_output[i] = (np.sin(z) + 1)/2\n return np.hstack([np.ones((N,1)),1-frequency_control]),frequency_output", "def plot_ungrounded_frequencies(counts_list, labels, colors, plot_filename):\n bin_interval = 1\n fracs_total_list = []\n bin_starts_list = []\n for counts in counts_list:\n freq_dist = []\n bin_starts = list(range(0, len(counts), bin_interval))\n bin_starts_list.append(bin_starts)\n for bin_start_ix in bin_starts:\n bin_end_ix = bin_start_ix + bin_interval\n if bin_end_ix < len(counts):\n freq_dist.append(np.sum(counts[bin_start_ix:bin_end_ix]))\n else:\n freq_dist.append(np.sum(counts[bin_start_ix:]))\n freq_dist = np.array(freq_dist)\n fracs_total = np.cumsum(freq_dist)\n fracs_total_list.append(fracs_total)\n\n fig = plt.figure(figsize=(2.3, 2.2), dpi=300)\n plt.ion()\n ax = fig.gca()\n for i, (bin_starts, fracs_total) in \\\n enumerate(zip(bin_starts_list, fracs_total_list)):\n xvals = np.array(bin_starts) / len(counts_list[i])\n yvals = fracs_total / float(np.sum(counts_list[i]))\n ax.plot(xvals, yvals, color=colors[i])\n ax.plot(xvals, xvals, color='gray', linestyle='dotted')\n labels = list(labels)\n labels.append('Uniform distribution')\n pf.format_axis(ax)\n ax.legend(labels, loc='lower right', frameon=False, fontsize=pf.fontsize)\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.subplots_adjust(left=0.18, bottom=0.15, right=0.96, top=0.92)\n ax.set_xlabel('String rank (normalized)')\n ax.set_ylabel('Rel. freq. 
of occurrences')\n plt.savefig(plot_filename)", "def plot_sample_length_distribution(sample_texts):\n plt.hist([len(s) for s in sample_texts], 50)\n plt.xlabel('Length of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample length distribution')\n plt.show()", "def plot_sample_length_distribution(sample_texts):\n plt.hist([len(s) for s in sample_texts], 50)\n plt.xlabel('Length of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample length distribution')\n plt.show()", "def plot_sample_length_distribution(sample_texts):\n plt.hist([len(s) for s in sample_texts], 50)\n plt.xlabel('Length of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample length distribution')\n plt.show()", "def histogramOfContigLengths(self):\n\t\tseqLengths = []\n\t\tfor x in self.contigsInfo.keys():\n\t\t\tseq = self.contigsInfo[x]\n\t\t\tseqLengths.append(len(seq))\n\n\t\tseqLengths = sorted(seqLengths)\n\t\tl = seqLengths[0:540000]\n\t\tmatplotlib.use('Agg')\n\t\tpylab.hist(l, bins=50)\n\t\tpylab.title(\"Contigs historgram\")\n\t\tpylab.xlabel('Sequence Length (bp)')\n\t\tpylab.ylabel('Count')\n\t\tpylab.savefig('contig_histogram.png')", "def display(self, bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()", "def callback_freq_cut(val):\n global plot_mode\n global idx_freq\n last_plot_mode = plot_mode\n plot_mode = 'freq_cut'\n# print( 'scale_freq', scale_freq)\n idx_freq = freq_to_idx( val, scale_freq )\n val_freq = idx_freq * scale_freq\n# print( 'val idx_freq val_freq', val, idx_freq, val_freq )\n update_num_shadow(int(sld['neighbors'].val))\n #plot 121\n lcutfreq.set_ydata( [val_freq, val_freq])\n lcuttime.set_alpha( 0.0 )\n lcutfreq.set_alpha( alpha_hm )\n #plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_freq )\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True])\n replot_light()\n reform_axis()\n \n fig.canvas.draw_idle()", "def generate_plot(tokens):\n\n return FreqDist(word for word in tokens if len(word) > 4).plot(50, cumulative=True)", "def plot_freq_spec(data, title):\n plt.title(title)\n\n def plot_freq_spec(axis, line, label):\n n = len(axis)\n fft = fftpack.fft(axis) / n\n fft = fft[range(int(n / 2))]\n plt.plot(range(int(n / 2)), abs(fft), line, label=label)\n plot_freq_spec(data[:, 0], 'r-', label='x')\n plot_freq_spec(data[:, 1], 'g-', label='y')\n plot_freq_spec(data[:, 2], 'b-', label='z')", "def make_obs_phase_plot(data_file, period, ref_mjd=58369.30, nbins=40, save=False,\n show=False, log=False, min_freq=200, max_freq=2500):\n\n burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)\n\n bursts = []\n for k in burst_dict.keys():\n bursts = bursts + burst_dict[k]\n\n obs_duration = []\n for k in obs_duration_dict.keys():\n obs_duration = obs_duration + obs_duration_dict[k]\n\n obs_startmjds = []\n for k in obs_startmjds_dict.keys():\n obs_startmjds = 
obs_startmjds + obs_startmjds_dict[k]\n\n assert len(obs_startmjds) == len(obs_duration)\n\n bursts = np.array(bursts)\n obs_duration = np.array(obs_duration)\n obs_startmjds = np.array(obs_startmjds)\n\n obs_start_phases = get_phase(obs_startmjds, period, ref_mjd=ref_mjd)\n hist, bin_edges_obs = np.histogram(obs_start_phases, bins=nbins)\n\n obs_start_phases_dict = {}\n duration_per_phase_dict = {}\n burst_per_phase_dict = {}\n duration_per_phase_tot = np.empty(nbins)\n for k in obs_startmjds_dict.keys():\n obs_start_phases_dict[k] = get_phase(np.array(obs_startmjds_dict[k]),\n period, ref_mjd=ref_mjd)\n durations = np.array(obs_duration_dict[k])\n start_phases = obs_start_phases_dict[k]\n\n d_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n dur = durations[(start_phases < bin_edges_obs[i]) &\n (start_phases > bin_edges_obs[i-1])].sum()\n d_hist.append(dur)\n duration_per_phase_tot[i-1] += dur\n duration_per_phase_dict[k] = np.array(d_hist)\n\n obs_duration = np.array(obs_duration)\n duration_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n duration_hist.append(\n obs_duration[(obs_start_phases < bin_edges_obs[i]) &\n (obs_start_phases > bin_edges_obs[i-1])].sum())\n\n duration_hist = np.array(duration_hist)\n bin_mids = (bin_edges_obs[:-1] + bin_edges_obs[1:])/2\n phase_lst = []\n for i,k in enumerate(burst_dict.keys()):\n print(\"phase list\", k, len(burst_dict[k]))\n phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,\n ref_mjd=ref_mjd)))\n burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],\n bins=nbins, range=(0,1))\n\n phase_tot = [p for l in phase_lst for p in l]\n phase_tot.sort()\n burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))\n\n # PRINTING AVERAGE RATE PER INSTRUMENT\n for i,k in enumerate(burst_dict.keys()):\n tobs = np.sum(obs_duration_dict[k])\n nbursts = len(burst_dict[k])\n rate = nbursts / tobs\n print(\"Average rate {}: {:.3f} / h\".format(k, rate))\n\n # off = np.where(burst_per_phase_dict[k] == 0)[0]\n # on = np.where(burst_per_phase_dict[k] > 0)[0]\n # print(\"Hours Apertif observed TOTAL: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k])))\n # print(\"Hours Apertif observed during on phase: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k][on])))\n # print(\"Hours Apertif observed during off phase: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k][off])))\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n\n burst_hist_colors = []\n obs_hist_colors = {}\n if 'uGMRT650' in obs_duration_dict.keys():\n fcen_dict['uGMRT650'] = 1000\n for i,k in enumerate(obs_duration_dict.keys()):\n freq = np.log10(fcen_dict[k])\n col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n color = cm(col)\n print(k, mpl.colors.to_hex(color))\n if k in burst_dict.keys():\n burst_hist_colors.append(color)\n obs_hist_colors[k] = color\n rate_colors = {\n 'high': cm((np.log10(max_freq)-np.log10(1800))/(np.log10(max_freq)-np.log10(min_freq))),\n 'middle': cm((np.log10(max_freq)-np.log10(500))/(np.log10(max_freq)-np.log10(min_freq))),\n 'low': cm((np.log10(max_freq)-np.log10(300))/(np.log10(max_freq)-np.log10(min_freq)))\n }\n if 'uGMRT650' in obs_duration_dict.keys():\n fcen_dict['uGMRT650'] = 650\n\n # PLOTTING\n fig, ax = plt.subplots(2, 1, sharex=True, figsize=(9,7),\n gridspec_kw={'height_ratios': [1,1]})\n ax1 = ax[0]\n yhist,xhist,_ = ax1.hist(phase_lst, bins=bin_edges_obs, stacked=True,\n density=False, label=burst_dict.keys(),\n edgecolor='black', linewidth=0.5, 
color=burst_hist_colors)\n\n ax1.set_ylabel('N. Bursts')\n ax1.set_xlim(0,1)\n print(\"YLIM\", 0, int(yhist[-1].max()*1.1))\n ax1.set_ylim(0, max(int(yhist[-1].max()*1.1), 4))\n ax1.legend(loc=2)\n ax1.text(-0.07, 0.95, \"a\", transform=ax1.transAxes, weight='bold')\n\n ax2 = ax[1]\n cum_ds = np.zeros(nbins)\n for i, k in enumerate(duration_per_phase_dict):\n d = duration_per_phase_dict[k]\n ax2.bar(bin_edges_obs[:-1], d, width=bin_edges_obs[1]-bin_edges_obs[0],\n align='edge', bottom=cum_ds, alpha=1,\n label=\"{} {:d} MHz\".format(k, int(fcen_dict[k])),\n edgecolor='black', linewidth=0.2, color=obs_hist_colors[k])\n cum_ds += d\n ax2.set_xlabel('Phase')\n ax2.set_ylabel('Obs. Duration (h)')\n ax2.legend(loc=2)\n ax2.text(-0.07, 0.95, \"b\", transform=ax2.transAxes, weight='bold')\n plt.tight_layout()\n\n if save:\n print('Plot saved: ./burst_obs_phase_hist.png')\n plt.savefig('./burst_obs_phase_hist.png', pad_inches=0,\n bbox_inches='tight', dpi=200)\n plt.savefig('./burst_obs_phase_hist.pdf', pad_inches=0,\n bbox_inches='tight', dpi=200)\n if show:\n plt.show()\n\n # SAVING COUNTS, OBS_DURATION AND PHASE BIN\n if log:\n print(\"Writing log\")\n dir_out = '/home/ines/Documents/projects/R3/periodicity/burst_phases/'\n with open(dir_out+'counts_per_phase_p{:.2f}.txt'.format(period), 'w') as f:\n f.write(\"# phase_bin counts chime_counts arts_counts lofar_counts obs_duration chime_duration arts_duration lofar_duration\\n\")\n for i in range(nbins):\n f.write(\"{:.3f} {} {} {} {} {:.3f} {:.3f} {:.3f} {:.3f}\\n\".format(\n bin_mids[i], burst_tot[i],\n burst_per_phase_dict[\"CHIME/FRB\"][i],\n burst_per_phase_dict[\"Apertif\"][i],\n burst_per_phase_dict[\"LOFAR\"][i],\n duration_per_phase_tot[i],\n duration_per_phase_dict[\"CHIME/FRB\"][i],\n duration_per_phase_dict[\"Apertif\"][i],\n duration_per_phase_dict[\"LOFAR\"][i]))\n for i,k in enumerate(burst_dict.keys()):\n if k == \"CHIME/FRB\":\n inst = k.replace(\"/FRB\", \"\")\n else:\n inst = k\n np.save(dir_out + 'phase_{}_p{:.2f}_f{:.1f}'.format(inst, period,\n fcen_dict[k]), [burst_dict[k], phase_lst[i]])", "def _configure_frequencies(self) -> None:\n i = 3\n while i < len(self._lora_frequencies):\n self.set_ch_parameters(i, self._lora_frequencies[i], 0, 5, True)\n i += 1\n self.set_ch_parameters(i, 868800000, 7, 7, True)", "def make_plot_all(data_file, period, ref_mjd=58369.30, nbins=40, save=False,\n show=False, log=False, min_freq=200, max_freq=2500):\n\n burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)\n\n bursts = []\n for k in burst_dict.keys():\n bursts = bursts + burst_dict[k]\n\n obs_duration = []\n for k in obs_duration_dict.keys():\n obs_duration = obs_duration + obs_duration_dict[k]\n\n obs_startmjds = []\n for k in obs_startmjds_dict.keys():\n obs_startmjds = obs_startmjds + obs_startmjds_dict[k]\n\n bursts = np.array(bursts)\n obs_duration = np.array(obs_duration)\n obs_startmjds = np.array(obs_startmjds)\n\n obs_start_phases = get_phase(obs_startmjds, period, ref_mjd=ref_mjd)\n hist, bin_edges_obs = np.histogram(obs_start_phases, bins=nbins)\n\n obs_start_phases_dict = {}\n duration_per_phase_dict = {}\n burst_per_phase_dict = {}\n duration_per_phase_tot = np.empty(nbins)\n for k in obs_startmjds_dict.keys():\n obs_start_phases_dict[k] = get_phase(np.array(obs_startmjds_dict[k]),\n period, ref_mjd=ref_mjd)\n durations = np.array(obs_duration_dict[k])\n start_phases = obs_start_phases_dict[k]\n\n d_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n dur = 
durations[(start_phases < bin_edges_obs[i]) &\n (start_phases > bin_edges_obs[i-1])].sum()\n d_hist.append(dur)\n duration_per_phase_tot[i-1] += dur\n duration_per_phase_dict[k] = np.array(d_hist)\n\n obs_duration = np.array(obs_duration)\n duration_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n duration_hist.append(\n obs_duration[(obs_start_phases < bin_edges_obs[i]) &\n (obs_start_phases > bin_edges_obs[i-1])].sum())\n\n duration_hist = np.array(duration_hist)\n bin_mids = (bin_edges_obs[:-1] + bin_edges_obs[1:])/2\n phase_lst = []\n for k in burst_dict.keys():\n phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,\n ref_mjd=ref_mjd)))\n burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],\n bins=nbins, range=(0,1))\n\n phase_tot = [p for l in phase_lst for p in l]\n phase_tot.sort()\n burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))\n\n # Defining phase and cycle\n l = {}\n c = {}\n for k in burst_dict:\n l[k] = get_phase(burst_dict[k], period, ref_mjd=ref_mjd)\n c[k] = get_cycle(burst_dict[k], period, ref_mjd=ref_mjd)\n n_cycles = int(max([m for k in c.keys() for m in c[k]]))+1\n\n obs_start_phases = {}\n obs_start_cycles = {}\n obs_duration_phase = {}\n for k in obs_startmjds_dict.keys():\n obs_start_phases[k] = get_phase(obs_startmjds_dict[k], period,\n ref_mjd=ref_mjd)\n obs_start_cycles[k] = get_cycle(obs_startmjds_dict[k], period,\n ref_mjd=ref_mjd)\n obs_duration_phase[k] = np.array(obs_duration_dict[k])/(24*period)\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n burst_hist_colors = []\n obs_hist_colors = {}\n for i,k in enumerate(obs_duration_dict.keys()):\n freq = np.log10(fcen_dict[k])\n col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n # c = i/len(obs_duration_dict.keys())\n color = cm(col)\n if k in burst_dict.keys():\n burst_hist_colors.append(color)\n obs_hist_colors[k] = color\n\n # PLOTTING\n fig = plt.figure(figsize=(7,7))\n gs = gridspec.GridSpec(3,1, wspace=0.01, height_ratios=[1,2,1])\n\n ax1 = fig.add_subplot(gs[0, 0])\n ax1.hist(phase_lst, bins=bin_edges_obs, stacked=True, density=False, label=burst_dict.keys(),\n edgecolor='black', linewidth=0.5, color=burst_hist_colors)\n\n ax1.set_xlabel('Phase')\n ax1.set_ylabel('N. Bursts')\n ax1.set_xlim(0,1)\n ax1.legend()\n ax1.tick_params(axis='x', which='both', direction='in', bottom=True,\n top=True)\n ax1.tick_params(axis='y', which='both', direction='in', left=True,\n right=True)\n\n ax2 = fig.add_subplot(gs[2, 0], sharex=ax1)\n cum_ds = np.zeros(nbins)\n for i, k in enumerate(duration_per_phase_dict):\n d = duration_per_phase_dict[k]\n ax2.bar(bin_edges_obs[:-1], d, width=bin_edges_obs[1]-bin_edges_obs[0],\n align='edge', bottom=cum_ds, alpha=1,\n label=\"{} {:d} MHz\".format(k, int(fcen_dict[k])),\n edgecolor='black', linewidth=0.2, color=obs_hist_colors[k])\n cum_ds += d\n ax2.set_xlabel('Phase')\n ax2.set_ylabel('Obs. 
Duration (h)')\n ax2.legend()\n ax2.tick_params(axis='x', which='both', direction='in', bottom=True,\n top=True)\n ax2.tick_params(axis='y', which='both', direction='in', left=True,\n right=True)\n plt.tight_layout()\n\n ax3 = fig.add_subplot(gs[1, 0], sharex=ax1)\n for i,k in enumerate(burst_dict.keys()):\n ax3.scatter(l[k], c[k], color=obs_hist_colors[k],\n edgecolors='k', linewidth=0.5, label=k, zorder=10)\n ax3.hlines(range(n_cycles), [0 for i in range(n_cycles)],\n [1 for i in range(n_cycles)], linestyles='-', alpha=0.1, zorder=0)\n\n for i, k in enumerate(obs_duration_dict.keys()):\n obs_patches = []\n for j,s in enumerate(obs_start_phases[k]):\n obs = Rectangle((s, obs_start_cycles[k][j]-0.5),\n obs_duration_phase[k][j], 1)\n obs_patches.append(obs)\n pc = PatchCollection(obs_patches, facecolor=obs_hist_colors[k],\n alpha=0.5, edgecolor=obs_hist_colors[k], label=k, zorder=5)\n ax3.add_collection(pc)\n\n ax3.text(0.05, 0.95, \"P = {0} days\".format(period),\n transform=ax1.transAxes, verticalalignment='top', fontsize=14)\n\n ax3.set_ylabel('Cycle')\n ax3.set_ylim(-0.5, n_cycles+0.5)\n ax3.legend()\n ax3.tick_params(axis='x', which='both', direction='in', bottom=True,\n top=True)\n ax3.tick_params(axis='y', which='both', direction='in', left=True,\n right=True)\n\n plt.setp(ax1.get_xticklabels(), visible=False)\n plt.setp(ax3.get_xticklabels(), visible=False)\n\n if save:\n plt.savefig('./R3_obs_detections.png', pad_inches=0,\n bbox_inches='tight', dpi=200)\n plt.savefig('./R3_obs_detections.pdf', pad_inches=0,\n bbox_inches='tight', dpi=200)\n if show:\n plt.show()", "def plot_frequency(word_frequency, n, output_name=\"output.png\"):\r\n # partially completed for you, complete the rest according to the instructions.\r\n # setting up plot variables\r\n words = tuple(zip(*word_frequency))[0]\r\n frequencies = tuple(zip(*word_frequency))[1]\r\n y_pos = np.arange(len(words))\r\n fig, ax = plt.subplots(figsize=(15, 10))\r\n # set up color spectrum\r\n colors = [\r\n \"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\",\r\n \"violet\"\r\n ]\r\n rvb = mcolors.LinearSegmentedColormap.from_list(\"\", colors)\r\n nlist = np.arange(n).astype(float)\r\n ax.barh(y_pos, frequencies, align='center', color=rvb(nlist/n))\r\n ax.set_yticks(y_pos)\r\n ax.set_yticklabels(words)\r\n ax.invert_yaxis()\r\n ax.set_xlabel('Frequency')\r\n ax.set_title(\"Word Frequency: Top {}\".format(n))\r\n # Only comment below line when debugging. 
Uncomment when submitting\r\n plt.savefig(output_name)", "def get_Frequency_Range(cubestat_contsub):\n \n line = subprocess.check_output(['sed', '-n', '3p', cubestat_contsub])\n TOKS = line.split()\n start_freq = round(float(TOKS[1]), 3)\n \n line = subprocess.check_output(['tail', '-1', cubestat_contsub])\n TOKS = line.split()\n end_freq = round(float(TOKS[1]), 3)\n \n return start_freq, end_freq", "def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()", "def plot_frequency(self):\n canvas = xboa.common.make_root_canvas(\"frequency vs time\")\n canvas.Draw()\n freq_list = [freq for freq in self.freq_list]\n hist, graph = xboa.common.make_root_graph(\"frequency vs time\",\n self.time_list, \"time [ns]\",\n freq_list, \"f [GHz]\")\n hist.Draw()\n graph.Draw(\"sameL\")\n fit = ROOT.TF1(\"fit\", \"pol4\", 0, 20*1e6)\n fit.FixParameter(0, freq_list[0])\n graph.Fit(fit)\n canvas.Update()", "def get_freqs(Fs, n):\r\n\r\n return np.linspace(0, float(Fs) / 2, float(n) / 2 + 1)", "def make_obs_phase_plot_csv(data_file, period, ref_mjd=58369.30, nbins=40,\n save=False, show=True, min_freq=900, max_freq=6000):\n\n burst_dict, fmin_dict, fmax_dict, fcen_dict = open_csv(data_file)\n\n burst_per_phase_dict = {}\n phase_lst = []\n for i,k in enumerate(burst_dict.keys()):\n print(\"phase list\", k, len(burst_dict[k]))\n phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,\n ref_mjd=ref_mjd)))\n burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],\n bins=nbins, range=(0,1))\n\n phase_tot = [p for l in phase_lst for p in l]\n phase_tot.sort()\n burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n\n # burst_hist_colors = {}\n burst_hist_colors = []\n for i,k in enumerate(burst_dict.keys()):\n freq = np.log10(fcen_dict[k])\n col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n # freq = fcen_dict[k]\n # col = (max_freq-freq)/(max_freq-min_freq))\n color = cm(col)\n # burst_hist_colors[k] = color\n burst_hist_colors.append(color)\n\n # PLOTTING\n fig, ax1 = plt.subplots(1, 1, sharex=True, figsize=(9,7))\n yhist,xhist,_ = ax1.hist(phase_lst, bins=nbins, range=(0,1), stacked=True,\n density=False, label=burst_dict.keys(),\n edgecolor='black', linewidth=0.5, color=burst_hist_colors)\n\n ax1.set_ylabel('N. 
Bursts')\n ax1.set_xlim(0,1)\n ax1.set_ylim(0, int(yhist[-1].max()*1.1))\n ax1.legend(loc=2)\n\n if save:\n print('Plot saved: ./burst_obs_phase_hist.png')\n plt.savefig('./burst_obs_phase_hist.png', pad_inches=0,\n bbox_inches='tight', dpi=200)\n plt.savefig('./burst_obs_phase_hist.pdf', pad_inches=0,\n bbox_inches='tight', dpi=200)\n if show:\n plt.show()", "def plot_spectrogram_lengths(feature, path, idx):\n lengths = []\n if 'raw' in feature.lower():\n for file in os.listdir(path+'/train_curated/'):\n _, data = wavfile.read(path+'/train_curated/'+file)\n lengths.append(data.shape)\n for file in os.listdir(path+'/train_noisy/'):\n _, data = wavfile.read(path+'/train_noisy/'+file)\n lengths.append(data.shape)\n else:\n for file in os.listdir(path+'/train_curated/'):\n lengths.append(np.load(path+'/train_curated/'+file).shape)\n for file in os.listdir(path+'/train_noisy/'):\n lengths.append(np.load(path+'/train_noisy/'+file).shape)\n lengths = np.asarray(lengths)\n plt.figure(figsize=(10,10))\n plt.xlabel('Lenghts')\n plt.ylabel('Occurences')\n plt.title('Distribution of {} lengths'.format(feature))\n n, bins, patches = plt.hist(lengths[:, idx], color=cm.get_cmap('viridis')(0.))\n plt.axvline(x=np.median(lengths[:, idx]), color=cm.get_cmap('viridis')(1.), linestyle='dashed', linewidth=2)\n print('Maximum: ' + str(max(lengths[:, idx])))\n print('Minimum: ' + str(min(lengths[:, idx])))\n print('Median: ' + str(np.median(lengths[:, idx])))\n plt.gcf().savefig('../../plots/{}_lengths.png'.format(feature))\n plt.close()", "def fft_frequencies(sr=22050, n_fft=2048):\n\n return np.linspace(0, float(sr) / 2, int(1 + n_fft // 2), endpoint=True)", "def generateFreqGraph( invertedIndex ):\n print('Printing plot for Step 3 frequencies')\n print('----------------------------------------------------------------')\n tempList = sorted( invertedIndex, key=lambda element: element[1], reverse = True )\n freqDict = {}\n count = 1\n for term, freq in tempList:\n freqDict[count] = freq\n count+=1\n \n #Plot the frequency based graph\n plt.figure()\n plt.xlabel('$\\log_{10}(i)$ for $i^{th}$ most frequent term')\n plt.ylabel('$\\log_{10}(y_i)$ for freq of $i^{th}$ term')\n plt.title('$\\log_{10} y_i$ vs $\\log_{10}i$')\n plt.plot(np.log10(list(freqDict.keys())), np.log10(list(freqDict.values())), '-o')", "def update_frequencies():\n pass", "def make_snr_time_plot(data_file, period, width, n_min=0, n_max=30,\n min_freq=200, max_freq=2500, ref_mjd=58369.30,\n cmap=None, title=None, save=False, show=False):\n\n burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)\n\n # Defining duty cycle\n frequency_hours = '%fH' % (24 * period)\n t = Time(ref_mjd, format='mjd')\n t0 = t+((period/2)*u.day)\n tf = Time('2020-12-15T00:00:00', format='isot') #datetime.datetime.now()\n\n t0_low = t+((period/2)*u.day) - (2.6 * u.day)\n t0_high = t+((period/2)*u.day) + (2.6 * u.day)\n\n df_period = [t0]\n df_duty_low = [t0_low]\n df_duty_high = [t0_high]\n t_activity, t_low, t_high = t0, t0_low, t0_high\n while t_activity <= tf:\n t_activity += period\n t_low += period\n t_high += period\n df_period.append(t_activity)\n df_duty_low.append(t_low)\n df_duty_high.append(t_high)\n\n n_periods = len(df_period)\n print(n_periods)\n\n t_min = ref_mjd + period * n_min\n t_max = ref_mjd + period * n_max\n\n # Instruments to show\n telescopes = ['CHIME/FRB']\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n obs_hist_colors = {}\n for i,k in 
enumerate(obs_duration_dict.keys()):\n freq = np.log10(fcen_dict[k])\n c = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n color = cm(c)\n obs_hist_colors[k] = color\n\n # PLOTTING\n fig = plt.figure(figsize=(15,8))\n gs = gridspec.GridSpec(2,1, hspace=0., height_ratios=[5,1])\n\n ax1 = fig.add_subplot(gs[0, 0]) #ax[0]\n for i,k in enumerate(telescopes):\n inst = ''.join([i for i in k if not i.isdigit()])\n print(inst, len(burst_dict[k]), len(snr_dict[k]))\n if k in ['Apertif', 'LOFAR']:\n s = 50\n else:\n s = 25\n ax1.scatter(burst_dict[k], snr_dict[k],\n color=obs_hist_colors[k], label=\"{} {} MHz\".format(inst, int(fcen_dict[k])),\n marker='o', s=s, zorder=10, edgecolors='k', linewidth=0.5)\n\n max_snr = max([m for k in snr_dict.keys()\n for m in snr_dict[k]])*1.1\n ax1.set_ylim(0, max_snr)\n ax1.set_ylabel('SNR')\n\n\n ax2 = fig.add_subplot(gs[1, 0], sharex=ax1) #ax[1]\n for i, k in enumerate(telescopes):\n obs_patches = []\n inst = ''.join([i for i in k if not i.isdigit()])\n for j,start in enumerate(obs_startmjds_dict[k]):\n #obs = Rectangle((start,i), max(0.2,obs_duration_dict[k][j]/24), 1)\n obs = Rectangle((start,i), obs_duration_dict[k][j]/24, 1)\n obs_patches.append(obs)\n pc = PatchCollection(obs_patches, facecolor=obs_hist_colors[k],\n alpha=1, edgecolor=obs_hist_colors[k], label=inst, linewidth=0.1)\n ax2.add_collection(pc)\n\n max_mjdstart = max([m for k in obs_startmjds_dict.keys()\n for m in obs_startmjds_dict[k]])\n min_mjdstart = min([m for k in obs_startmjds_dict.keys()\n for m in obs_startmjds_dict[k]])\n max_f = max(fmax_dict.values())+1e3\n min_f = min(fmin_dict.values())-10\n ax2.set_xlim(t_min, t_max)\n ax2.set_ylim(-0.5, len(telescopes)+1)\n ax2.set_xlabel('MJD')\n #ax2.set_yticks([])\n ax2.set_yticks([i+0.5 for i in range(len(telescopes))])\n ax2.set_yticklabels(telescopes, fontsize=10)\n ax2.tick_params(axis='y', which='both', length=0)\n\n # duty cycle\n for low, high in zip(df_duty_low, df_duty_high):\n ax1.axvspan(low.value, high.value, facecolor='#0f0f0f', alpha=0.1)\n for ii,peak in enumerate(df_period):\n ax1.vlines(peak.value, 0, max_snr, linestyles='dashed', alpha=0.2)\n if peak.value >= t_min and peak.value <= t_max:\n ax1.text(peak.value, 128, ii, horizontalalignment='center')\n\n ax1.legend(loc=1)\n plt.setp(ax1.get_xticklabels(), visible=False)\n for ax in (ax1,ax2):\n ax.tick_params(axis='x', which='both', direction='in', bottom=True,\n top=True)\n ax.tick_params(axis='y', which='both', direction='in', left=True,\n right=True)\n plt.show()\n\n # plt.savefig(plt_out, pad_inches=0, bbox_inches='tight', dpi=200)\n # print('Plot saved:', plt_out)", "def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]" ]
[ "0.62747514", "0.61388403", "0.6041006", "0.5977068", "0.5822721", "0.58037317", "0.5766557", "0.5721363", "0.5721363", "0.5721363", "0.5663317", "0.5646493", "0.5640953", "0.5637996", "0.56258035", "0.5622705", "0.56148934", "0.55990183", "0.5596233", "0.5590827", "0.55875367", "0.5574809", "0.5560631", "0.55583084", "0.55296206", "0.55248874", "0.55227876", "0.5516907", "0.5511269", "0.55083734" ]
0.6481207
0
Calculates the DNA strand base statistics over a sequence.
def strand_stats(sequence, alphabet, start):
    # assure the characters are upper case
    alphabet = alphabet.upper()
    # get the sequence length and assure the characters are upper case
    seq_len, seq = len(sequence), sequence.upper()
    # get the middle position of the sequence
    half_gen = (seq_len // 2)
    # get the final position
    ter = (start + half_gen)
    # initialize the container
    strand_stat = defaultdict(tuple)
    # for circular genomes
    if ter > seq_len:
        ter = ter - (seq_len + 1)
    # iterate through the alphabet and count the bases
    for base in alphabet:
        base_total = seq.count(base)
        # check the strand
        if ter > start:
            for_strand = seq[start:ter].count(base)
            rev_strand = (base_total - for_strand)
        else:
            rev_strand = seq[ter:start].count(base)
            for_strand = (base_total - rev_strand)
        # calculate the difference between strands
        dif = (for_strand - rev_strand)
        # store the result in the container
        strand_stat[base] = (base_total, for_strand, rev_strand, dif)
    return strand_stat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bases_stats(sequence, alphabet, start):\n seq = sequence.upper()\n seq_len = len(seq)\n half_seq = seq_len // 2\n ter = start + half_seq\n # as a circular genome\n if ter > seq_len:\n ter = ter - seq_len + 1\n counts = defaultdict(int)\n for base in alphabet:\n total = seq.count(base)\n if ter > start: # start ---> ter\n f_count = seq[start:ter].count(base)\n r_count = total - f_count\n else: # ter ---> start\n r_count = seq[ter:start].count(base)\n f_count = total - r_count\n counts[base] = (total, f_count, r_count)\n return counts", "def base_frequencies(seq):\n\n # Get the length of the sequence\n sequence_len = len(seq)\n\n # Initialize base frequencies\n base_frequencies = {\n 'A': 0,\n 'C': 0,\n 'T': 0,\n 'G': 0\n }\n\n # Count bases\n for base in seq:\n base_frequencies[base] += 1\n\n # Normalize count\n for base in base_frequencies:\n base_frequencies[base] = base_frequencies[base]/sequence_len\n\n return base_frequencies", "def get_base_usage(sequences):\n usage = {\"A\": 0, \"C\": 0, \"G\": 0, \"U\": 0}\n for sequence in sequences:\n for base in usage:\n usage[base] += sequence.count(base)\n return usage", "def base_stats(sequence, alphabet, as_count=False, as_dict=False):\n # make the sequence upper case\n seq = sequence.upper()\n # count all bases in sequence and collect as an array\n counts = np.array([seq.count(i) for i in alphabet])\n # if is onle the counts\n if as_count:\n freqs = counts\n # other wise as frequencies\n else:\n freqs = counts / sum(counts * 1.0)\n # or as a dictionary like object\n if as_dict:\n return dict(zip(alphabet, freqs))\n else:\n return freqs", "def compute_strand_balance(record):\n try:\n info = record.info\n except:\n info = record.INFO\n\n strand_bal = [strand_ratio(info[\"SAF\"][i], info[\"SAR\"][i]) for i in range(len(info[\"SAF\"]))]\n\n return strand_bal", "def rs1_score(sequence):\n import math\n import numpy as np\n seq = str(sequence).upper()\n seq = list(seq)\n matrix1 = np.zeros([len(sequence),4], dtype=int)\n for i,item in enumerate(sequence):\n if item == 'A':\n matrix1[i,0] = 1\n if item == 'T':\n matrix1[i,1] = 1\n if item == 'U':\n matrix1[i,1] = 1\n if item == 'C':\n matrix1[i,2] = 1\n if item == 'G':\n matrix1[i,3] = 1\n\n\n \"\"\"\n Generates a binary matrix for DNA/RNA sequence, where each column is a possible\n pair of adjacent bases, and each row is a position along the sequence.\n Matrix column order is AA, AT, AC, AG, TA, TT, TC, TG, CA, CT, CC, CG, GA, GT, GC, GG\n \"\"\"\n sequence = sequence.replace('U','T')\n pairwise_sequence = []\n for i in range(len(sequence)):\n if i < len(sequence)-1:\n basepair = sequence[i]+sequence[i+1]\n pairwise_sequence.append(basepair)\n matrix2 = np.zeros([len(pairwise_sequence),16], dtype=int)\n for i,item in enumerate(pairwise_sequence):\n if item == 'AA':\n matrix2[i,0] = 1\n if item == 'AT':\n matrix2[i,1] = 1\n if item == 'AC':\n matrix2[i,2] = 1\n if item == 'AG':\n matrix2[i,3] = 1\n if item == 'TA':\n matrix2[i,4] = 1\n if item == 'TT':\n matrix2[i,5] = 1\n if item == 'TC':\n matrix2[i,6] = 1\n if item == 'TG':\n matrix2[i,7] = 1\n if item == 'CA':\n matrix2[i,8] = 1\n if item == 'CT':\n matrix2[i,9] = 1\n if item == 'CC':\n matrix2[i,10] = 1\n if item == 'CG':\n matrix2[i,11] = 1\n if item == 'GA':\n matrix2[i,12] = 1\n if item == 'GT':\n matrix2[i,13] = 1\n if item == 'GC':\n matrix2[i,14] = 1\n if item == 'GG':\n matrix2[i,15] = 1\n\n\n \"\"\"\n Scoring matrix\n \"\"\"\n intersect = 0.59763615\n low_gc = -0.2026259\n high_gc = -0.1665878\n\n first_order = 
['G02','A03','C03','C04','C05',\n 'G05','A06','C06','C07','G07',\n 'A12','A15','C15','A16','C16',\n 'T16','A17','G17','C18','G18',\n 'A19','C19','G20','T20','G21',\n 'T21','C22','T22','T23','C24',\n 'G24','T24','A25','C25','T25',\n 'G28','T28','C29','G30']\n first_scores = [-0.2753771,-0.3238875,0.17212887,-0.1006662,-0.2018029,\n 0.24595663,0.03644004,0.09837684,-0.7411813,-0.3932644,\n -0.466099,0.08537695,-0.013814,0.27262051,0.1190226,\n -0.2859442,0.09745459,-0.1755462,-0.3457955,-0.6780964,\n 0.22508903,-0.5077941,-0.4173736,-0.054307,0.37989937,\n -0.0907126,0.05782332,-0.5305673,-0.8770074,-0.8762358,\n 0.27891626,-0.4031022,-0.0773007,0.28793562,-0.2216372,\n -0.6890167,0.11787758,-0.1604453,0.38634258]\n first_order_scores = dict(zip(first_order,first_scores))\n\n second_order = ['GT02','GC05','AA06','TA06','GG07',\n 'GG12','TA12','TC12','TT12','GG13',\n 'GA14','GC14','TG17','GG19','TC19',\n 'CC20','TG20','AC21','CG21','GA21',\n 'GG21','TC22','CG23','CT23','AA24',\n 'AG24','AG25','CG25','TG25','GT27',\n 'GG29']\n second_scores = [-0.6257787,0.30004332,-0.8348362,0.76062777,-0.4908167,\n -1.5169074,0.7092612,0.49629861,-0.5868739,-0.3345637,\n 0.76384993,-0.5370252,-0.7981461,-0.6668087,0.35318325,\n 0.74807209,-0.3672668,0.56820913,0.32907207,-0.8364568,\n -0.7822076,-1.029693,0.85619782,-0.4632077,-0.5794924,\n 0.64907554,-0.0773007,0.28793562,-0.2216372,0.11787758,\n -0.69774]\n second_order_scores = dict(zip(second_order,second_scores))\n\n\n # order 1 score matrix\n \"\"\" row order == A T/U C G \"\"\"\n first_matrix = np.zeros([4,30], dtype=float)\n def posit(key):\n return int(key[1:])-1\n for k,v in first_order_scores.items():\n if k[0] == 'A':\n first_matrix[0,posit(k)] = v\n elif k[0] == 'T':\n first_matrix[1,posit(k)] = v\n elif k[0] == 'C':\n first_matrix[2,posit(k)] = v\n elif k[0] == 'G':\n first_matrix[3,posit(k)] = v\n\n\n # order 2 score matrix\n \"\"\" row order == AA AT AC AG TA TT TC TG CA CT CC CG GA GT GC GG \"\"\"\n second_matrix = np.zeros([16,29], dtype=float)\n for k,v in second_order_scores.items():\n if k[0:2] == 'AA':\n second_matrix[0,int(k[2:])-1] = v\n if k[0:2] == 'AT':\n second_matrix[1,int(k[2:])-1] = v\n if k[0:2] == 'AC':\n second_matrix[2,int(k[2:])-1] = v\n if k[0:2] == 'AG':\n second_matrix[3,int(k[2:])-1] = v\n if k[0:2] == 'TA':\n second_matrix[4,int(k[2:])-1] = v\n if k[0:2] == 'TT':\n second_matrix[5,int(k[2:])-1] = v\n if k[0:2] == 'TC':\n second_matrix[6,int(k[2:])-1] = v\n if k[0:2] == 'TG':\n second_matrix[7,int(k[2:])-1] = v\n if k[0:2] == 'CA':\n second_matrix[8,int(k[2:])-1] = v\n if k[0:2] == 'CT':\n second_matrix[9,int(k[2:])-1] = v\n if k[0:2] == 'CC':\n second_matrix[10,int(k[2:])-1] = v\n if k[0:2] == 'CG':\n second_matrix[11,int(k[2:])-1] = v\n if k[0:2] == 'GA':\n second_matrix[12,int(k[2:])-1] = v\n if k[0:2] == 'GT':\n second_matrix[13,int(k[2:])-1] = v\n if k[0:2] == 'GC':\n second_matrix[14,int(k[2:])-1] = v\n if k[0:2] == 'GG':\n second_matrix[15,int(k[2:])-1] = v\n\n item_gc = sequence[0][5:-5]\n gc_count = item_gc.count('G') + item_gc.count('C')\n if gc_count < 10:\n gc_score = low_gc\n else:\n gc_score = high_gc\n first_first = np.matmul(first_matrix,matrix1)\n score_first = np.trace(first_first)\n score_second = np.trace(np.matmul(second_matrix,matrix2))\n score = (1/(1 + math.exp(-(intersect + gc_score + score_first + score_second))))\n return score", "def get_base_frequencies(self):\n return get_base_frequencies(self._dna)", "def process_sequences(sequences: list):\n transformed_sequences = 
transform_sequences(sequences)\n profile = calculate_profile(transformed_sequences)\n consensus = get_consensus(profile)\n\n print(consensus)\n bases = [\"A\", \"C\", \"G\", \"T\"]\n for i, counts in enumerate(profile):\n counts = [str(count) for count in counts]\n print(\"{}: {}\".format(bases[i], \" \".join(counts)))", "def dnds(seq1, seq2):\n # Strip any whitespace from both strings\n seq1 = clean_sequence(seq1)\n seq2 = clean_sequence(seq2)\n # Check that both sequences have the same length\n assert len(seq1) == len(seq2)\n # Check that sequences are codons\n assert len(seq1) % 3 == 0\n syn_sites = syn_sum(seq1, seq2)\n non_sites = len(seq1) - syn_sites\n logging.info('Sites (syn/nonsyn): {}, {}'.format(syn_sites, non_sites))\n syn_subs, non_subs = substitutions(seq1, seq2)\n pn = non_subs / non_sites\n ps = syn_subs / syn_sites\n dn = -0.75 * log(1 - (4 * pn / 3))\n ds = -0.75 * log(1 - (4 * ps / 3))\n logging.info('dN: {}\\t\\tdS: {}'.format(round(dn, 3), round(ds, 3)))\n return round(float(pn), 3), round(float(ps), 3), round(float(dn), 3), round(float(ds), 3)", "def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())", "def design_grna(seq):\n\n transcript = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A'}\n grna = \"\".join(transcript[n] for n in seq)\n\n return grna", "def coding_strand_to_AA(dna):\n coding_strand = ''\n for i in range(len(dna)/3):\n aa = dna[3*i:(3*i)+3]\n coding_strand += aa_table[aa]\n return coding_strand", "def calculate_fitness(dna: str) -> float:\r\n # make minimum fitness 1 to avoid possible division by zero later\r\n fitness = 1\r\n for c in range(DNA_SIZE):\r\n fitness += abs(ord(dna[c]) - ord(OPTIMAL[c]))\r\n return 1 / fitness", "def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + sc['C'] + sc['G'] + sc['T']) * 100, 2)", "def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()", "def calculateSNR(self):\n pass", "def count(self, base):\n return self._dna.count(base)", "def coding_strand_to_AA(dna):\n list1 = get_codons(dna)\n string = ''\n for codon in list1:\n try:\n string = string + aa_table[codon]\n except KeyError:\n continue\n return string", "def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the 
string of amino acids\n return protein", "def get_length(dna):\n return len (dna)", "def print_strand_stats(strand_statistics):\n print(' Total\\tFor\\tRev\\tDif')\n for base, count in strand_statistics.items():\n print(f'{base}: {str(count[0])}\\t{str(count[1])}\\t{str(count[2])}\\t{str(count[3])}')", "def coding_strand_to_AA(dna):\n protein=''\n for i in range(0,len(dna),3):\n\t if dna[i:i+3] in aa_table.keys():\n\t \tprotein += aa_table[dna[i:i+3]]\n return protein", "def processNT(organism, chain, nuc, quals):\n\n ch = chain.lower()\n\n quals = np.array(quals.split('.')).astype(int)\n res = parse_unpaired_dna_sequence_blastn(organism, chain, nuc, info='',\n nocleanup=False, hide_nucseq=False,\n extended_cdr3=True,\n return_all_good_hits=True,\n max_bit_score_delta_for_good_hits=50)\n genes,evalues,status,all_good_hits_with_scores = res\n labels = ['v%s_gene','v%s_rep', 'v%s_mm', 'j%s_gene', 'j%s_rep', 'j%s_mm', 'cdr3%s_plus']\n tmp = {g:v for g,v in zip([lab % ch for lab in labels], genes)}\n tmp.update({'%s_evalue' % k.lower():evalues[k][0] for k in evalues.keys()})\n tmp.update({'%s_bitscore_gap' % k.lower():evalues[k][1] for k in evalues.keys()})\n\n tmp['%s_status' % ch] = 'OK' if not status else status\n tmp['%s_good_hits' % ch] = all_good_hits_with_scores\n\n tmp['cdr3%s' % ch],tmp['cdr3%s_nucseq' % ch] = tmp['cdr3%s_plus' % ch].split('-')\n tmp['cdr3%s_quals' % ch] = get_qualstring( tmp['cdr3%s_plus' % ch], nuc, quals )\n tmp['v%s_mismatches' % ch] = tmp['v%s_mm' % ch][0]\n tmp['j%s_mismatches' % ch] = tmp['j%s_mm' % ch][0]\n tmp['v%s_alignlen' % ch] = np.sum(tmp['v%s_mm' % ch])\n tmp['j%s_alignlen' % ch] = np.sum(tmp['j%s_mm' % ch])\n\n hits = tmp['%s_good_hits' % ch]\n if hits and len(hits) == 2 and hits[0] and hits[1]:\n tmp['v%s_blast_hits' % ch] = ';'.join( '{}:{}'.format(x[0],x[1]) for x in hits[0] )\n tmp['j%s_blast_hits' % ch] = ';'.join( '{}:{}'.format(x[0],x[1]) for x in hits[1] )\n va_genes = util.get_top_genes( tmp['v%s_blast_hits' % ch] ) ## a set\n ja_genes = util.get_top_genes( tmp['j%s_blast_hits' % ch] )\n tmp['v%s_genes' % ch] = ';'.join( sorted( va_genes ) )\n tmp['j%s_genes' % ch] = ';'.join( sorted( ja_genes ) )\n tmp['v%s_reps' % ch] = ';'.join( sorted( util.get_top_reps( tmp['v%s_blast_hits' % ch], organism ) ) )\n tmp['j%s_reps' % ch] = ';'.join( sorted( util.get_top_reps( tmp['j%s_blast_hits' % ch], organism ) ) )\n tmp['v%s_countreps' % ch] = ';'.join( sorted( set( (util.get_mm1_rep_gene_for_counting(x,organism) for x in va_genes ))))\n tmp['j%s_countreps' % ch] = ';'.join( sorted( set( (util.get_mm1_rep_gene_for_counting(x,organism) for x in ja_genes ))))\n\n chain = TCRChain(**tmp)\n return chain", "def get_length(dna):\n return len(dna)", "def get_gc_content(sequence):\n len_seq = len(sequence) - sum(alternative_bases_counter(sequence).values())\n sequence = sequence.upper()\n c = sequence.count('C')\n g = sequence.count('G')\n return round((c + g) / len_seq, 4)", "def stats(self, s_sample, d_sample, x_sample, wav_len):\n\t\ts_STDCT_sample, d_STDCT_sample, x_STDCT_sample = self.transfrom_stats(s_sample,\n\t\t\td_sample, x_sample, wav_len)\n\t\txi_sample = self.xi(s_STDCT_sample, d_STDCT_sample)\n\t\tself.xi_map.stats(xi_sample)\n\t\tcd_sample = self.cd(s_STDCT_sample, d_STDCT_sample)\n\t\tself.cd_map.stats(cd_sample)", "def calc_GC(filepath):\n liste=['small.exon.piRNA_2.fa', 'small.exon.piRNA_1.fa', 'small.exon.piRNA_3.fa']\n \n length=list(range(0,34))\n d={}\n for i in length:\n d[i]={'A':0, 'G':0, 'T':0, 'C':0}\n for i in liste:\n with 
open(filepath+'/'+i, 'r') as f:\n for line in f:\n #fasta header starts with >\n if line.startswith('>'):\n pass\n else:\n line_l=list(line)\n for el in range(len(line_l)):\n if line_l[el]=='A':\n d[el]['A']+=1\n elif line_l[el]=='T':\n d[el]['T']+=1\n elif line_l[el]== 'G':\n d[el]['G']+=1\n elif line_l[el]== 'C':\n d[el]['C']+=1\n\n df=pd.DataFrame.from_dict(d)\n df=df.transpose()\n df.index = np.arange(1, len(df) + 1)\n \n\n df['A [%]']=df['A']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['G [%]']=df['G']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['T [%]']=df['T']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['C [%]']=df['C']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100", "def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases", "def complexity(s, **kwargs):\n num, den = 1, 1\n for k in range(1, len(s)):\n k4 = 4**k # For DNA\n num += min(len(set(s[i:i+k] for i in range(len(s) - k + 1))), k4)\n den += min(len(s) - k + 1, k4)\n return num / den", "def runstats(fasta_file):\n loc = locale.setlocale(locale.LC_ALL, '')\n locale.setlocale(locale.LC_ALL, loc)\n\n seqs = [seq for seq in SeqIO.parse(fasta_file, \"fasta\")]\n lengths_list = [len(i.seq) for i in seqs]\n\n total_size = locale.format(\"%d\", np.sum(lengths_list), grouping=True)\n ctg_n50 = locale.format(\"%d\", calculate_N50(lengths_list), grouping=True)\n minsize = locale.format(\"%d\", min(lengths_list), grouping=True)\n maxsize = locale.format(\"%d\", max(lengths_list), grouping=True)\n gc_percent = np.average([GC(i.seq) for i in seqs])\n\n print '{0:25}\\t{1}'.format(\"Total number of contigs:\", locale.format(\"%d\", len(seqs), grouping=True))\n print '{0:25}\\t{1}'.format(\"Total size of all contigs:\", total_size)\n print '{0:25}\\t{1}'.format(\"N50 of all contigs:\", ctg_n50)\n print '{0:25}\\t{1}'.format(\"Largest contig:\", maxsize)\n print '{0:25}\\t{1}'.format(\"Smallest contig:\", minsize)\n print '{0:25}\\t{1:.2f}'.format(\"G+C % of contigs:\", gc_percent)" ]
[ "0.6844656", "0.5866069", "0.5852457", "0.5767654", "0.5659145", "0.56240505", "0.5556784", "0.54912585", "0.54409754", "0.5419822", "0.5405742", "0.5402327", "0.53218985", "0.52998936", "0.528539", "0.5226978", "0.5214499", "0.51802236", "0.5163815", "0.5162196", "0.51621354", "0.51608473", "0.5142463", "0.51265246", "0.5117232", "0.51003236", "0.50802183", "0.5077999", "0.5065051", "0.5062793" ]
0.7544242
0
Prints the strand statistics.
def print_strand_stats(strand_statistics):
    print(' Total\tFor\tRev\tDif')
    for base, count in strand_statistics.items():
        print(f'{base}: {count[0]}\t{count[1]}\t{count[2]}\t{count[3]}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_stats(self, output_type='count'):\n if not self._stats:\n raise TypeError(\"self._stats is not defined. Try running run_parser first!\")\n self._stats.print_spec(output_type)", "def print_stat(self):\n all_stat = self.get_all_stat()\n for stat_type, stat in all_stat.items():\n print(stat_type,\":\",stat, end=' / ')", "def __str__(self):\n #{{{ Nicely print of elements in class.\n\n if config.verbose: print \"Stations():\"\n\n for st in self.stachan_cache.keys():\n chans = self.stachan_cache[st].keys()\n print \"\\t%s: %s\" % (st,chans)", "def show_current_scattering_statistics(self, out=sys.stdout):\n print(\"\", file=out)\n print(\"Model and map statistics:\", file=out)\n print(\" mean mFo map height @ carbon: %s\" % format_value(\"%.2f\",\n flex.max(self.carbon_fo_values)), file=out)\n if (self.calpha_mean_two_fofc > 0):\n print(\" mean 2mFo-DFc map height @ C-alpha: %s\" % format_value(\n \"%.2f\", self.calpha_mean_two_fofc), file=out)\n print(\" mean B-factor: %s\" % format_value(\"%.2f\", self.b_mean_all), file=out)\n if (self.b_mean_calpha > 0):\n print(\" mean C-alpha B-factor: %s\" % format_value(\"%.2f\",\n self.b_mean_calpha), file=out)\n print(\" mean water B-factor: %s\" % format_value(\"%.2f\",\n self.b_mean_hoh), file=out)\n n_water_fofc_peaks = 0\n n_water_anom_peaks = 0\n water_sel = self.water_selection()\n print(\" %d water molecules\" % len(water_sel), file=out)\n for i_seq in water_sel :\n map_stats = self.map_stats(i_seq)\n if (map_stats.fofc >= 3.0):\n n_water_fofc_peaks += 1\n if (map_stats.anom is not None) and (map_stats.anom >= 3.0):\n n_water_anom_peaks += 1\n print(\" %d waters have mFo-DFc map >= 3.0 sigma\" % \\\n n_water_fofc_peaks, file=out)\n if (self.anomalous_flag):\n print(\" %d waters have anomalous map >= 3.0 sigma\" % \\\n n_water_anom_peaks, file=out)\n print(\"\", file=out)", "def print_stats(self):\n if self.n_iter % 5 != 0:\n return\n\n s_iter = \"%7i - \" % self.n_iter\n s_stat = ' || '.join([\n '{}: {:7.4f}'.format(k, np.mean(v)) for k, v in self.stats.items()\n if type(v) is list and len(v) > 0\n ])\n for k in self.stats.keys():\n if type(self.stats[k]) is list:\n del self.stats[k][:]\n\n # transformer learning rate\n # learning rates\n s_lr = \" - \"\n for k, v in self.optimizers.items():\n s_lr = s_lr + (\" - %s LR: \" % k) + \" / \".join(\n \"{:.4e}\".format(group['lr']) for group in v.param_groups)\n\n # processing speed\n new_time = time.time()\n diff = new_time - self.last_time\n s_speed = \"{:7.2f} sent/s - {:8.2f} words/s - \".format(\n self.stats['processed_s'] * 1.0 / diff,\n self.stats['processed_w'] * 1.0 / diff\n )\n self.stats['processed_s'] = 0\n self.stats['processed_w'] = 0\n self.last_time = new_time\n\n # log speed + stats + learning rate\n logger.info(s_iter + s_speed + s_stat + s_lr)", "def print_statistics(self):\n print 'Ran %s iterations in %0.3f seconds\\n' % (\n self.iterations, self.elapsed_time)\n\n print 'Overall Equity'\n for index in range(len(self.holdem_ranges)):\n range_short_form = '%r' % self.holdem_ranges[index]\n print 'P%s) %-15s %0.3f' % (\n index,\n range_short_form,\n float(self.win_stats.get(index, 0))/self.iterations)\n print '\\n'\n print 'Hand distribution for each player'\n for stats in self.player_stats:\n stats.print_report()", "def showStat(self):\n print \">>[Stat Information]:\"\n if self.gid != DEFALUT_GROUP_ID:\n print \"Gid = %u\" % self.gid\n print \"[Queries] Arp = %u, Original_to_controller= %u, Current_to_controller = %u\" % (self.query_arp, self.query_control_origin, 
self.query_control_current)\n print \"TP = %u, TN = %u, FP = %u\" % (self.tp, self.tn, self.fp)\n print \"[Flow] local_switch = %u, within the group = %u,across groups = %u\" % (self.flow_local, self.flow_within_group, self.flow_cross_group)\n print \"[Traffic] local_switch = %u byte, within the group = %u byte,across groups = %u byte\" % (self.byte_local, self.byte_within_group, self.byte_cross_group)", "def show_stats(self):\n print(\"\\nName: \" + self.name)\n print(\"Element Type: \" + self.element)\n print(\"Health: \" + str(self.current_health) + \" / \" + str(self.max_health))\n print(\"Speed: \" + str(self.speed))", "def displayStatistics(self):\n return \"\"", "def print_stats():\n if spritegroup_stats[0] > 0:\n generic.print_info(\"Concurrent spritegroups: {}/{} ({})\".format(spritegroup_stats[0], total_action2_ids, str(spritegroup_stats[1])))\n if a2register_stats[0] > 0:\n generic.print_info(\"Concurrent Action2 registers: {}/{} ({})\".format(a2register_stats[0], total_tmp_locations, str(a2register_stats[1])))", "def __str__(self):\n\t\tprint \"generating graph stats...\\n\"\n\t\tstart_time = time.time()\n\t\tbf = self.branching_factor()\n\t\treq_time = float(time.time() - start_time)\n\t\tb = str(bf[0])\n\t\tn = str(bf[1])\n\t\terr = str(100.0 * float(self.default_num_samples-bf[1])/self.default_num_samples)\n\t\tsize = str(self.num_articles())\n\t\tpg_time = str(req_time/bf[1])\n\t\tt = str(time.time() - start_time)\n\t\treturn \"_______Wikipedia Graph Stats_______\\n\" + \\\n\t\t\t\t\"# of nodes:\\t\\t\"+size+\"\\n\" + \\\n\t\t\t\t\"Avg. branching factor\\t\"+b+\"\\n\" + \\\n\t\t\t\t\"\\t\\t\\t(n=\"+n+\")\\n\" + \\\n\t\t\t\t\"Page Req. Fail Rate:\\t\"+err+\"%\\n\" + \\\n\t\t\t\t\"Avg. Page Req. Time:\\t\"+pg_time+\" sec\\n\" + \\\n\t\t\t\t\"<stats generated in \"+t+ \" sec>\"", "def print_global_statistics(stats):\n\n print('Final Results')\n print('LED: {} WED: {}'.format(stats.global_letter_edit_distance,stats.global_word_edit_distance))", "def statistics(self, **kwargs) -> None:\n print(\n tabulate.tabulate(\n list(self._iter_statistics(**kwargs)),\n headers=[\"path\", \"type\", \"occurences\", \"%\"],\n floatfmt=\".3f\",\n )\n )", "def printStations(self):\n print(\"Bus numero \" + str(self._num) + \" :\")\n for i in range(len(self._stations)) :\n print(self._stations[i])\n print('\\n')", "def stat(**kwargs):\n print(\"output stats\")", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def print_stats(self):\n if self.df_avg is None:\n self.collect_stats()\n\n print(\"Simulation Results\")\n print(tabulate(self.df_avg, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"FleetManager stats\")\n print(tabulate(self.manager_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Customer stats\")\n print(tabulate(self.customer_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Transport stats\")\n print(tabulate(self.transport_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Station stats\")\n print(tabulate(self.station_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))", "def printSummary(self):\n pass", "def printResults(self, stream=sys.stdout):\n # Only master 
writes.\n if MPICommons.isMaster():\n stream.write(\"%15s %15s %15s %12s\\n\"%(\" time (t)\", \" count (n)\", \"(dn/dt) \", \"stdErr\"))\n n_tot = 0\n\t actualTot = 0\n t = 0.0\n for i,n in enumerate(self.__data):\n # Calculate the values to present.\n t = i * self.__time_interval\n actualTot += n\n dt = self.__time_interval\n n_tot += n\n dn = n\n rateEst = self.__floatAnalInterval*dn/dt\n stdErr = self.__floatAnalInterval*math.sqrt(dn)/dt\n # Only for times != zero.\n if (i > 0):\n stream.write(\"%15.5f %15i\"%(t, n_tot) +\" \"+ \"{:.6E}\".format(rateEst) +\" \"+\"{:.3E}\".format(stdErr) +\"\\n\")\n eqTime = self.__finalTime - self.__initialTime\n stream.write(\"\\nOverall we counted the following number of counts in the following amount of time: \" + \"%6i\"%(actualTot) + \" \" + \"{:.6E}\".format(eqTime))", "def show_stats(self):\n print(self.team_one.name + \" stats: \")\n self.team_one.stats()\n print(self.team_two.name + \" stats: \")\n self.team_two.stats()", "def printLenStats(data):\n print \"statistics of training trips length: mean\",\n print data[\"triplen\"].mean(), # Mean of values\n print \"std\",\n print data[\"triplen\"].std(), # Unbiased standard deviation\n print \"var\",\n print data[\"triplen\"].var(), # Unbiased variance\n print \"max\",\n print data[\"triplen\"].max(),\n print \"min\",\n print data[\"triplen\"].min()", "def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)", "def pretty_print(self):\n output = \"Count: \"\n if self.soft:\n output += \"S\"\n output += str(self.count)\n if self.can_double:\n output += \", can double\"\n if self.can_split:\n output += \", can split\"\n print(output)", "def string_stats(self):\n sys.stdout.flush()\n sys_stdout = sys.stdout\n output = StringIO()\n try:\n sys.stdout = output\n self.print_stats()\n finally:\n sys.stdout = sys_stdout\n sys.stdout.flush()\n return output.getvalue()", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))", "def display_results_line(stats):\n # Line output.\n template = ' %5d |%6.2f |%6.2f %6.2f %6.2f |%3d %3d %3d'\n\n num_bytes = stats['data_size']\n\n P_times = stats['P_times']\n val = [num_bytes]\n for p in P_times:\n val.append(p*1000.)\n\n val.append(stats['count_lost'])\n val.append(stats['count_timeout'])\n val.append(stats['count_corrupt'])\n val = tuple(val)\n\n print(template % val)", "def print_performance_info(self):\n pass", "def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)" ]
[ "0.6812522", "0.6772878", "0.66958606", "0.66482264", "0.65283674", "0.65146375", "0.64881164", "0.6446352", "0.64255565", "0.6423256", "0.62889", "0.62166005", "0.6209804", "0.6180282", "0.6165024", "0.61648023", "0.61368006", "0.6129409", "0.61040974", "0.6102065", "0.6075413", "0.6061142", "0.6046407", "0.6043358", "0.60401165", "0.6013992", "0.5995773", "0.59933233", "0.5982576", "0.59682024" ]
0.74928737
0
Returns a list of all possible combinations of kmers of length k from an input alphabet.
def get_all_possible_kmers(alphabet, kmin, kmax): kmers = [''.join(letters) for n in range(kmin, kmax + 1) for letters in product(alphabet, repeat=n)] return kmers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_kmers(k):\n\n kmers_list = []\n kmers_tuples = itertools.product('ACGT', repeat=k)\n for kmer in kmers_tuples:\n kmers_list.append(''.join(kmer))\n\n return kmers_list", "def kmers(sequence, alphabet, k):\n mers = (''.join(c) for c in windowed(k, sequence))\n return [mer for mer in mers if all(base in set(alphabet) for base in mer)]", "def enumerate_kmers(alphabet: Union[str, List[str]], length: int):\n for value in itertools.product(alphabet, repeat=length):\n yield \"\".join(value)", "def kmers_composition(dna: str, k: int, alphabet: str = \"ACGT\"):\n dna = Counter(string_to_kmers(dna, k))\n for k_mer in enumerate_kmers(alphabet, k):\n yield dna[k_mer]", "def count_kmers(dna: str, k: int, alphabet: str = \"ACGT\"):\n c = Counter(dna[i:i + k] for i in range(len(dna) - k + 1))\n result = []\n for k_mer in enumerate_kmers(alphabet, k):\n result.append(c[k_mer])\n return result", "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def find_kmers(in_fasta, k):\n n= len(in_fasta)-k+1\n kmers=[]\n for i in range(0, n):\n kmers.append(in_fasta[i:i+k])\n return(kmers)", "def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list", "def kmers_from_dna(dna, k):\n assert k >= 1\n assert len(dna) >= k\n\n assert len(dna) >= k\n for i in range(0, len(dna) - k + 1):\n kmer = dna[i:i + k]\n yield kmer", "def all_kmers(k):\n for i in range(0, 4 ** k):\n res = number_to_kmer(i, k)\n yield res", "def generate_all_kmers(k, ignore_N=True):\n alphabet = \"ACGT\"\n if not ignore_N:\n alphabet += \"N\"\n possible_kmers = itertools.product(alphabet, repeat=k)\n retval = collections.OrderedDict()\n for i, kmer in enumerate(possible_kmers):\n retval[''.join(kmer)] = i\n return retval", "def randomKmers(dna, k):\n kmers = []\n for seq in dna:\n n = len(seq)\n i = random.randint(0, n-k)\n kmer = seq[i:i+k]\n kmers.append( kmer)\n return kmers", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer", "def enumerate_kmers(string, k, start=0):\n for i in range(0, len(string) - k + 1):\n yield start + i, string[i:i+k]", "def string_to_kmers(s: str, k: int) -> List[str]:\n for i in range(0, len(s), k):\n yield s[i:i + k]", "def build_kmers(\n sequence, \n ksize):\n\n kmers = list()\n n_kmers = len(sequence) - ksize + 1\n # Loop to store khmers in each sequence\n for i in range(n_kmers):\n kmer = sequence[i:i + ksize]\n kmers.append(kmer)\n \n return kmers, n_kmers\n\n # It is an example that needs to say the size of Kmer you would like.", "def get_all_kmers(pattern, k, ordered=False):\n ordered_kmers = [pattern[i:i + k] for i in range(len(pattern) - k + 1)]\n if ordered:\n return ordered_kmers\n return set(ordered_kmers)", "def clump_forming_kmers(string, k, l, t):\n clumpFormingKmers = set()\n # Initial counts of k-mers within length l window starting from the first\n # chracter of the string.\n counts = Counter([kmer for i, kmer in enumerate_kmers(string[:l], k)])\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n for i in range(1, len(string) - l + 1):\n counts[string[i-1:i-1+k]] -= 1\n counts[string[i+l-k:i+l]] += 1\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n return list(clumpFormingKmers)", "def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]", "def pattern_list(k):\r\n 
p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n return p_list", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def gkm_name(l=4, k=3, rev_comp=False):\n assert k < l\n ungapped_kmers = list(itertools.product(*[\"ACGT\" for _ in range(k)]))\n discard_locs = list(itertools.combinations(range(l), l - k))\n retval = []\n for loc in discard_locs: # Locations of N chars\n for kmer in ungapped_kmers:\n s = ['' for _ in range(l)]\n j = 0\n for i in range(l):\n if i in loc: s[i] = \"N\"\n else:\n s[i] = kmer[j]\n j += 1\n retval.append(''.join(s))\n if rev_comp:\n retval_first = [retval[i] for i in gkm_rc_indices(l=l, k=k)[0, :]]\n retval_second = [retval[i] for i in gkm_rc_indices(l=l, k=k)[1, :]]\n retval = [\"/\".join(pair) for pair in zip(retval_first, retval_second)]\n return retval", "def GenKmers(consensus,MinLen=18,MaxLen=22):\n lengths = [i+MinLen for i in range(MaxLen+1-MinLen)]\n kmers = []\n for length in lengths:\n for i in range(len(consensus)+1 - length):\n kmer = consensus[i:i+length]\n kmers.append((i,kmer))\n return kmers", "def get_kmers_from_sequence(sequence, kmin, kmax):\n limits = range(kmin, kmax + 1)\n seq_range = len(sequence) - kmax + 1\n for i in range(0, seq_range):\n for j in limits:\n yield sequence[i:i + j]", "def kmer_set(s, k):\n kmer = set([])\n n = len(s)\n #n-k+1 is the available range of values or probablities.\n for x in range(0, n - k + 1):\n kmer.add(s[x:x + k])\n return kmer", "def get_combo(starting_letter, length): # Apparently ngrams beyond bigrams only have two letter file names. Still keeping this for generality, but should always be run with length=2 in this context\n alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n combos = list(itertools.combinations(alpha, length - 1))\n combos = [starting_letter + ''.join(item) for item in combos]\n\n return combos", "def combo(N,K):\n assert type(N)==list\n assert type(K)==int\n for k in N:\n assert type(k)==int\n assert K>0 and K<=len(N)\n \n main_combo = []\n #Finds the power list of the inputted list and loops through the power list for lists with length 'K'.\n for l in power_list(N):\n if len(l)==K:\n main_combo.append(l)\n return main_combo #Returns a list of list combinations with length 'K'.", "def generate_alphabet_combinations(length: int = 2) -> List[str]:\n assert length > 0\n alphabets = string.ascii_lowercase\n\n return [\n ''.join(combination)\n for n in range(1, length+1)\n for combination in product(alphabets, repeat=n)\n ]", "def allpermutations(orgset, k):\n return itertools.chain(*[permutation(orgset, i) for i in range(1, k + 1)])" ]
[ "0.7966488", "0.7883599", "0.77914125", "0.7568773", "0.75586224", "0.74191636", "0.7416181", "0.740234", "0.73228323", "0.7060401", "0.7002437", "0.69293416", "0.68837583", "0.68600106", "0.6826388", "0.68097705", "0.680771", "0.6743084", "0.66071045", "0.65487576", "0.65257245", "0.65150416", "0.6418437", "0.63830453", "0.635902", "0.632221", "0.630652", "0.6300755", "0.62670743", "0.61961037" ]
0.81739914
0
Find clumps of repeated kmers in a string. A clump occurs when a kmer appears times or more within a window of size window. A list of (kmer, position, count) tuples is returned.
def get_kmer_clumps(sequence, kmer_list, window, times): kmer_pos = defaultdict(list) k = len(kmer_list[0]) clumps = defaultdict(list) for kmer in kmer_list: kmer_pos[kmer] = kmer_pos.get(kmer, []) + get_pattern_positions(sequence, kmer) for kmer, pos in kmer_pos.items(): clumps[kmer] = clumps.get(kmer, []) for i in range(len(pos) - times): end = i + times - 1 while (pos[end] - pos[i]) <= window - k: end += 1 if end >= len(pos): break if end - i >= times: clumps[kmer].append((pos[i], end - i)) return clumps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clump_forming_kmers(string, k, l, t):\n clumpFormingKmers = set()\n # Initial counts of k-mers within length l window starting from the first\n # chracter of the string.\n counts = Counter([kmer for i, kmer in enumerate_kmers(string[:l], k)])\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n for i in range(1, len(string) - l + 1):\n counts[string[i-1:i-1+k]] -= 1\n counts[string[i+l-k:i+l]] += 1\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n return list(clumpFormingKmers)", "def find_clumps(genome, k, L, t):\n assert (is_dna(genome))\n counts = collections.defaultdict(int)\n\n # compute counts of kmers in first L-length part of genome\n for k_start in range(L - k + 1):\n counts[genome[k_start:k_start + k]] += 1\n kmers = _get_keys(counts, t)\n\n # slide L-length window and update counts\n # remove previous leftmost kmer and add new kmer being rightmost in current window\n for L_start in range(1, len(genome) - L + 1):\n counts[genome[L_start - 1:L_start + k - 1]] -= 1\n new_kmer = genome[L_start + L - k:L_start + L]\n counts[new_kmer] += 1\n if counts[new_kmer] >= t:\n kmers.add(new_kmer)\n return kmers", "def find_clumps(text, k, L, t):\n clumps = []\n k_mers = frequent_words_t(text, k, t)\n for k_mer in k_mers:\n positions = find_position(k_mer, text)\n for position in positions:\n subtext = text[position:position + L]\n count = pattern_count(subtext, k_mer)\n if count >= t and k_mer not in clumps:\n clumps.append(k_mer)\n return clumps", "def find_clumps(DNA, k, L, t):\n assert len(DNA) >= L\n clumps = set()\n\n # Construct the frequency dict for the first region of size L in the DNA\n freq_dict = dictionaries.FrequencyDict(DNA[:L], k)\n\n # For each kmer in the first window, check if frequency >= t and correspondingly\n # add the kmer to the clumps set\n kmers = set()\n for i in range(L - k + 1):\n kmer = DNA[i: i + k]\n if not kmer in kmers:\n kmers.add(kmer)\n _t = freq_dict[kmer]\n if _t >= t:\n clumps.add(kmer)\n\n # Decrease the frequency of the first kmer for the next iteration, as our\n # sliding window will escape it\n first_kmer = DNA[0:k]\n freq_dict[first_kmer] -= 1\n\n # Cool beans -- the initial freqs are set up and the window is in place.\n # Now, we're ready to go through all other regions of length L in the DNA\n for i in range(1, len(DNA) - L + 1):\n\n # If not the first iteration, increase the frequency of the recently added\n # last kmer. 
If that frequency >= t, add the kmer to the set of clumps\n last_kmer = DNA[i+L-k : i+L]\n freq_dict[last_kmer] += 1\n if freq_dict[last_kmer] >= t:\n clumps.add(last_kmer)\n\n # Decrease the frequency of the first kmer in the region, as\n # the sliding window will escape it\n first_kmer = DNA[i:i+k]\n freq_dict[first_kmer] -= 1\n\n return clumps # Victory", "def add_clump_forming_kmers(counts, clumpFormingKmers):\n for kmer in counts:\n if counts[kmer] >= t:\n clumpFormingKmers.add(kmer)\n\n return clumpFormingKmers", "def chunkedClumpFinder(sequence, k, L, t):\n\n frequentPatterns = set([])\n for i in range(len(sequence)):\n window = sequence[i:i + L]\n frequencies = {}\n\n for j in range(len(window)):\n pattern = window[j:j + k]\n if pattern not in frequencies:\n frequencies[pattern] = 1\n else:\n frequencies[pattern] += 1\n for p in frequencies:\n if frequencies[p] >= t:\n frequentPatterns.add(p)\n return frequentPatterns", "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n for i in range(0, len(text) - L + 1):\n subtext = text[i:i + L]\n freq_array = compute_freq(subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def fastClumpFinder(sequence, k, L, t):\n\n # to be implemented ;)\n pass", "def kmers(sequence, alphabet, k):\n mers = (''.join(c) for c in windowed(k, sequence))\n return [mer for mer in mers if all(base in set(alphabet) for base in mer)]", "def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts", "def get_composition(group, kmer_len, species):\n chrom, data = group\n total_triplets = Counter()\n\n for i, row in data.iterrows():\n try:\n seq = 0\n if kmer_len == 5:\n seq = refseq(species, chrom, int(row['s']) - 2, int(row['distance']) + 4)\n elif kmer_len == 7:\n seq = refseq(species, chrom, int(row['s']) - 3, int(row['distance']) + 6)\n elif kmer_len == 3:\n seq = refseq(species, chrom, int(row['s']) - 1, int(row['distance']) + 2)\n\n if len(seq)>0:\n total_triplets = total_triplets + Counter(kmers_generator(seq, kmer_len))\n except:\n continue\n\n return total_triplets", "def find_clumps(text, k, len_win, t):\n\n patterns = []\n len_text 
= len(text)\n for i in range(len_text - len_win + 1):\n window = text[i:i + len_win]\n freq_map = frequency_table(window, k)\n for key in freq_map.keys():\n if freq_map[key] >= t and key not in patterns:\n patterns.append(key)\n return patterns", "def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count", "def kmers_generator(sequence, kmer):\n return list(slicing_window(sequence, kmer))", "def build_kmers(\n sequence, \n ksize):\n\n kmers = list()\n n_kmers = len(sequence) - ksize + 1\n # Loop to store khmers in each sequence\n for i in range(n_kmers):\n kmer = sequence[i:i + ksize]\n kmers.append(kmer)\n \n return kmers, n_kmers\n\n # It is an example that needs to say the size of Kmer you would like.", "def sequence_kmer_pileup(seq, query_kmers):\n assert isinstance(query_kmers, list)\n lengths = set([len(kmer) for kmer in query_kmers])\n retval = np.zeros((len(query_kmers), len(seq))).astype(int)\n for length in lengths:\n assert length <= len(seq), \"Cannoty query a kmer against a seq shorter than that kmer\"\n kmers = [seq[i:i+length] for i in range(len(seq) - length + 1)]\n kmer_to_idx = generate_all_kmers(length)\n # Row vector\n kmers_int = np.array([kmer_to_idx[k] for k in kmers if \"N\" not in k], dtype=int)\n # Column vector\n query_int = np.atleast_2d(np.array([kmer_to_idx[k] for k in query_kmers if len(k) == length and \"N\" not in k], dtype=int)).T\n # Array of where each query is found in the seq, by the first index of occurrence\n hits = np.where(query_int == kmers_int) # Automatically broadcasts\n this_rows = np.zeros((len(query_int), len(seq)))\n for i in range(length):\n this_rows[hits[0], hits[1] + i] += 1\n retval_idx = np.array([i for i, k in enumerate(query_kmers) if len(k) == length], dtype=int)\n retval[retval_idx, ] = this_rows\n return retval", "def divide_and_count(L_windows, k, t):\n\n results = set()\n\n for L_mer in L_windows:\n k_windows = divide_genome(L_mer, k) # We extract in a list all the possible k-mers\n\n # Generate a set of unique elements to avoid multicounts...\n k_windows_set = set(k_windows)\n\n for k_window in k_windows_set:\n if k_windows.count(k_window) == t:\n results.add(k_window)\n\n\n print(\"\\t\".join(results))", "def count_kmers(dna: str, k: int, alphabet: str = \"ACGT\"):\n c = Counter(dna[i:i + k] for i in range(len(dna) - k + 1))\n result = []\n for k_mer in enumerate_kmers(alphabet, k):\n result.append(c[k_mer])\n return result", "def remove_redundant_kmers(\n search_sets: List[SearchSet],\n) -> List[Tuple[int, Optional[int], List[str]]]:\n\n kmer_search_list = []\n for start, stop, kmer_set in search_sets:\n for kmer in kmer_set:\n kmer_search_list.append((kmer, start, stop))\n minimized_search_list = minimize_kmer_search_list(kmer_search_list)\n result_dict = defaultdict(list)\n for kmer, start, stop in minimized_search_list:\n result_dict[(start, stop)].append(kmer)\n return [(start, stop, kmers) for (start, stop), kmers in result_dict.items()]", "def GenKmers(consensus,MinLen=18,MaxLen=22):\n lengths = [i+MinLen for i in range(MaxLen+1-MinLen)]\n kmers = []\n for length in lengths:\n for i in range(len(consensus)+1 - length):\n kmer = consensus[i:i+length]\n kmers.append((i,kmer))\n return kmers", "def getMatches(self, searchString):\n # Code to complete - you are free to define additional functions\n size = len(searchString)\n minimers = set()\n for nuc in range(size - self.w + 1):\n\n # builds the window size per 
iteration\n window = searchString[nuc : nuc + self.w]\n for k_position in range(self.w - self.k + 1):\n\n # looks at all k's in the window\n kmer = window[k_position:k_position + self.k]\n \n # if a new minimer appears, yield it\n if kmer in self.minimizerMap and kmer not in minimers:\n minimers.add(kmer)\n yield (nuc + k_position, self.minimizerMap[kmer])", "def get_kmers(file, size):\n\tkmers = defaultdict(int)\n\tregex = re.compile('[' + string.punctuation + ']')\n\tfor line in open(file):\n\t\tfor word in [regex.sub('', w) for w in line.lower().split()]:\n\t\t\tnkmers = len(word) - size + 1\n\t\t\tfor kmer in [word[i:i+size] for i in range(nkmers)]:\n\t\t\t\tkmers[kmer] += 1\n\treturn kmers", "def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]", "def get_counts_from_list(string_list, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n for string in string_list:\n # check if kmer/string is in the counter\n if string in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[string] += 1\n return counter", "def get_mers(sequence, kmin, kmax):\n for k in range(kmin, kmax + 1):\n return (''.join(mers) for mers in windowed(sequence, k))", "def kmerNeighbors(text,k):\r\n L=set()\r\n for i in range(0,len(text)-k+1):\r\n for d in range(0,k+1):\r\n L.update(Neighbors(kmer(text,i,k),d))\r\n D=dict()\r\n for l in L:\r\n D[l]=minHamm(text,l)\r\n return D", "def assemble_kmer_motifs(seq, kmers, min_len=10, gap_allowed=2):\n try:\n pileup = sequence_kmer_pileup(seq, kmers)\n except AssertionError:\n return []\n pileup_flat = np.clip(np.sum(pileup, axis=0), 0, 1) # Flatten\n pileup_flat = connect_nearby_runs(pileup_flat, gap_allowed) # Connect runs that are separated by 1 gap\n motif_idx = find_long_runs(pileup_flat, l=min_len)\n retval = [seq[i:i+l] for i, l in motif_idx]\n # Sanity check against weird off by 1 indexing errors\n assert all([len(s) == l for s, (_i, l) in zip(retval, motif_idx)])\n return retval", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer", "def find_kmers(in_fasta, k):\n n= len(in_fasta)-k+1\n kmers=[]\n for i in range(0, n):\n kmers.append(in_fasta[i:i+k])\n return(kmers)" ]
[ "0.7578869", "0.725475", "0.7170416", "0.7065859", "0.70088464", "0.6737378", "0.6601845", "0.64199483", "0.62262785", "0.6064274", "0.6063467", "0.5913066", "0.58647", "0.58513254", "0.5823833", "0.56832606", "0.5652791", "0.5648163", "0.563255", "0.5630395", "0.5593445", "0.55928266", "0.5570809", "0.55401045", "0.5514173", "0.5510635", "0.55087304", "0.54938996", "0.5345401", "0.5338373" ]
0.768465
0
Function to insert wildcard characters into a word or string; generally the string must be a palindrome.
def insert_wild_card(word, num_n=1): mid = len(word) // 2 # to insert only one wild card character # with a predefinited condiction if num_n == 1 and is_palindrome(word) and len(word) % 2 != 0: return word[:mid] + 'N' + word[mid + 1:], word # the even words can receive two wild card chars # with a predefinited condiction elif num_n == 2 and is_palindrome(word) and len(word) % 2 == 0: return word[:mid - 1] + 'NN' + word[mid + 1:], word # only odd words can return a word with 3 chars wild cards # with a predefinited condiction elif num_n == 3 and word[:mid - 1] == get_reverse_complement(word[mid + 2:]) and len(word) % 2 != 0 and \ len(word) >= 5: return word[:mid - 1] + 'NNN' + word[mid + 2:], word # if the condictions were not satisfied # return the word else: return word
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EscapeWildcards(string: Text) -> Text:\n precondition.AssertType(string, Text)\n return string.replace(\"%\", r\"\\%\").replace(\"_\", r\"\\_\")", "def insert_special_char(phrase_words):\n SPECIAL = [\n [\"~\", \"!\", \"#\", \"$\", \"%\", \"^\"],\n [\"&\", \"*\", \"(\", \")\", \"-\", \"=\"],\n [\"+\", \"[\", \"]\", \"\\\\\", \"{\", \"}\"],\n [\":\", \";\", \"\\\"\", \"\\'\", \"<\", \">\"],\n [\"?\", \"/\", \"0\", \"1\", \"2\", \"3\"],\n [\"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]]\n n = len(phrase_words)\n\n rand_i = secrets.randbelow(n)\n rand_w = phrase_words[rand_i] # Random word\n rand_w_i = secrets.randbelow(len(rand_w)) # Random character\n\n rand_row = secrets.choice(SPECIAL)\n rand_special = secrets.choice(rand_row)\n\n # Replace char and word\n rand_w = rand_w[rand_w_i] + rand_special + rand_w[:rand_w_i+1]\n phrase_words[rand_i] = rand_w", "def update_word_pattern(word,pattern,letter):\r\n new_pattern = list()\r\n for i in range(len(word)):\r\n if word[i] == letter and pattern[i] == '_':\r\n new_pattern.append(letter)\r\n else:\r\n new_pattern.append(pattern[i])\r\n return_pattern=''.join(new_pattern)\r\n return return_pattern", "def as_you_type_replace(self, word: str) -> None:\n c = self.c\n w = c.frame.body.wrapper\n txt = w.getAllText()\n j = i = w.getInsertPoint()\n i -= 1\n while i and not txt[i].isalpha():\n i -= 1\n xtra = j - i\n j = i + 1\n while i and txt[i].isalpha():\n i -= 1\n if i or (txt and not txt[0].isalpha()):\n i += 1\n txt = txt[:i] + word + txt[j:]\n w.setAllText(txt)\n c.p.b = txt\n w.setInsertPoint(i + len(word) + xtra - 1)\n c.bodyWantsFocusNow()", "def duplicate_encode(word):\n word = word.lower()\n modWord = [letter.replace(letter, ')') if word.count(letter) > 1 else '(' for letter in word]\n return ''.join(modWord)", "def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]", "def banned_word_handler(word):\n return ''.join([settings.CENSOR_BANNED_REPLACEMENT_CHARACTER for x in word])", "def duplicate_encode(word):\r\n return ''.join(['(' if word.lower().count(i) is 1 else ')' for i in word.lower()])", "def insert(self, word):\n pointer = self.tries\n for i in range(len(word)):\n ascii = ord(word[i]) - ord('a')\n if pointer[ascii] == None:\n pointer[ascii] = [None] * 26\n pointer = pointer[ascii]\n pointer.append(word)", "def escape_like(string, escape_char='*'):\n return (\n string\n .replace(escape_char, escape_char * 2)\n .replace('%', escape_char + '%')\n .replace('_', escape_char + '_')\n )", "def compile_word(word):\n \n result = ''\n for i,ltr in enumerate(word):\n result = str(10**(len(word)-i-1)) + '*' + ltr + result\n if i != len(word)-1:\n result = '+' + result\n\n return result", "def censor(text: Optional[str]) -> str:\n char = \"*\"\n text = text if text else \"\"\n return text[0] + (len(text) - 1) * char if text else text", "def latinize_word(word):\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()", "def pack(word, pattern):\n ret = []\n for i, char in enumerate(word):\n if pattern[i]:\n ret.append(char)\n return \"\".join(ret)", "def update_word_pattern(word, pattern, letter):\r\n # make pattern string as list for changing object inside it\r\n pattern_list = list(pattern)\r\n # Go through the pattern and reveal the letters.\r\n for i in range(len(word)):\r\n # Check where the letter exist, and reveal it on the pattern.\r\n if word[i] == letter:\r\n 
pattern_list[i] = letter\r\n # Rejoin the list onto one string\r\n pattern = \"\".join(pattern_list)\r\n return pattern", "def readd(new, old):\n\n new = [x for x in new]\n for i, char in enumerate(old):\n if char not in ALPH:\n try:\n if new[i] != char:\n new.insert(i, char)\n except IndexError:\n new.append(char)\n\n return \"\".join(new)", "def inner(word):\n return word + '!!!'", "def add_punctuation(word):\n punct = \"a\"\n while punct not in punctuation:\n punct = input(\"Punctuation: \")\n return \"{}{}\".format(word, punct)", "def insert_spaces(word):\n new_word = \"\"\n for c in word:\n new_word += c + \" \" \n return new_word", "def replace_char(text):\n\n for ch in ['/', '`', '*', '{', '}', '[', ']', '(', ')', '#', '+', '-', '.', '!', '\\$', ':', '|']:\n text = text.replace(ch, \"_\")\n return text", "def substitute_word(text: str, word: str, substitute_by: str) -> str:\n return re.sub('\\\\b' + word + '\\\\b', substitute_by, text)", "def compile_word(word):\n # Your code here.\n if word.isalpha() and word.islower():\n return word\n if not word.isalpha():\n return word\n result = []\n mul = 1\n word = word[::-1]\n for w in word:\n if w.isalpha and w.isupper():\n result.append(str(mul) + '*' + w + \"+\")\n else:\n result.append(w)\n mul = mul*10\n ans = ''.join(result)\n return ans[:-1]", "def _glob_to_sql(self, string):\n\n # What's with the chr(1) and chr(2) nonsense? It's a trick to\n # hide \\* and \\? from the * and ? substitutions. This trick\n # depends on the substitutiones being done in order. chr(1)\n # and chr(2) were picked because I know those characters\n # almost certainly won't be in the input string\n table = ((r'\\\\', chr(1)), (r'\\*', chr(2)), (r'\\?', chr(3)),\n (r'%', r'\\%'), (r'?', '_'), (r'*', '%'),\n (chr(1), r'\\\\'), (chr(2), r'\\*'), (chr(3), r'\\?'))\n\n for (a, b) in table:\n string = string.replace(a,b)\n\n string = string[1:] if string.startswith(\"^\") else \"%\" + string\n string = string[:-1] if string.endswith(\"$\") else string + \"%\"\n\n return string", "def insert(self, word: str) -> None:\r\n nroot=self.root\r\n for i in word:\r\n \r\n # index=ord(i)-ord('a')\r\n if i not in nroot.children:\r\n nroot.children[i]=self.root\r\n nroot=nroot.children[i] \r\n \r\n nroot.endofword=True", "def insert(self, word: str) -> None:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n curr_chars[c] = {}\n curr_chars = curr_chars[c]\n\n curr_chars[self.end_of_word] = self.end_of_word", "def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word", "def swapCharacters(word):\n l = list(word)\n temp = word[-1]\n l[-1] = l[0]\n l[0] = temp\n return ''.join(l)", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def apply_rule(word):\n return re.sub(search, replace, word)", "def insert(self, word: str) -> None:\n curr = self.root\n for ch in word:\n curr = curr.children[ch]\n curr.is_word = True" ]
[ "0.64717144", "0.63470495", "0.6299257", "0.626429", "0.61380047", "0.6095588", "0.59981585", "0.597494", "0.5928834", "0.5911483", "0.58997667", "0.58580613", "0.58496493", "0.5781776", "0.57784766", "0.5746187", "0.574614", "0.57364184", "0.57253385", "0.5724377", "0.56742114", "0.56443274", "0.5612119", "0.5580861", "0.5580336", "0.5568425", "0.5531157", "0.55100435", "0.54650563", "0.54635394" ]
0.64887744
0
Find the number of occurrences of each value in a sequence.
def counts(sequence): # initialize the countainer count = defaultdict(int) # iterates through sequence elements for item in sequence: # if element not in counts add 0 # else add 1 count[item] = count.get(item, 0) + 1 return dict(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(seq):\n\treturn sum(1 for x in seq)", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def count_for(s, value):\n total = 0\n for elem in s:\n if elem == value:\n total = total + 1\n return total", "def get_count_of_elements_by_condition(sequence):\n elements_and_indexes = {sequence[i]: i + 1\n for i in range(1, len(sequence) - 1)}\n filtered_values = filter(lambda element:\n 2 ** element[1] < element[0] < math.factorial(element[1]),\n elements_and_indexes.items())\n return len(dict(filtered_values))", "def count(self, value):\n self.__validate_value(value)\n counter = 0\n for v in self.__list:\n if v == value:\n counter += 1\n return counter", "def count(s, value):\n total, index = 0, 0\n while index < len(s):\n element = s[index]\n if element == value:\n total += 1\n index += 1\n return total", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def count(array, value):\n count = 0\n for i in range (len(array)):\n if (array[i] == value):\n count += 1\n return count", "def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count", "def sequence_sorted_count(self, x, reverse=False):\n c = 0\n if reverse: it = reversed(self)\n else: it = iter(self)\n for v in it:\n if x == v:\n c += 1\n break\n for v in it:\n if x == v: c += 1\n else: break\n return c", "def frequencies(seq):\n d = dict()\n for item in seq:\n try:\n d[item] += 1\n except KeyError:\n d[item] = 1\n return d", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def count(x):\n return sum(len(y) for y in x)", "def _count_sequence(sequence, regex=None):\n # type: (pyfaidx.Sequence, Pattern[str]) -> int\n\n if regex is None:\n count = len(sequence)\n else:\n count = sum((1 for _ in regex.finditer(str(sequence))))\n\n return count", "def count(seq):\n\n if not seq:\n return 0\n elif isinstance(seq[0], list):\n return count(seq[0]) + count(seq[1:])\n else:\n return 1 + count(seq[1:])", "def countOccurrences(lst, x):\n res = 0\n for i in lst:\n if i == x:\n res += 1\n return res", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def count(iterable):\n\treturn sum(1 for _ in iterable)", "def get_terminals_count(self, sequence: str) -> int:\n\n res = 0\n\n for terminal in self._terminals:\n if terminal != '':\n res += sequence.count(terminal)\n\n return res", "def count(self, i):\n return sum([1 for j in self if i==j])", "def count(self, value: object) -> int:\n count = 0\n for _ in range(self.da.length()):\n if self.da[_] == value:\n count += 1\n return count", "def countby(iteratee, seq):\n return dict(Counter(map(iteratee, seq)))", "def count_runlength_per_character(sequence):\n character_counts = defaultdict(list)\n current_character = None\n\n for character in sequence:\n if character != current_character:\n character_counts[character].append(1)\n else:\n character_counts[character][-1] += 1\n\n current_character = character\n\n return character_counts", "def how_many(e, x):\n return 
count(np.asarray(x) == e)", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count(self, value):\n # Note: objects are never coerced into other types for comparison\n if type(value).__eq__ in _int__eq__s:\n return int(self._contains_int(value))\n # take the slow path, compare every single item\n return sum(1 for self_item in self if self_item == value)", "def number_positives(seq):\n # Convert sequence to upper case\n seq = seq.upper()\n\n # Check for a valid sequence\n for aa in seq:\n if aa not in bootcamp_utils.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n\n return seq.count('R') + seq.count('K') + seq.count('H')" ]
[ "0.81605697", "0.7333692", "0.7333692", "0.7317099", "0.7203902", "0.71936685", "0.71691054", "0.7076678", "0.7049411", "0.69177294", "0.69103754", "0.6907409", "0.682325", "0.6822243", "0.6801181", "0.6785441", "0.67659605", "0.676138", "0.67390096", "0.6727958", "0.66564286", "0.6646175", "0.6642021", "0.6636702", "0.6631621", "0.6631233", "0.66242486", "0.6618641", "0.65764946", "0.6568036" ]
0.7605741
1
Counts the number of overlapping subsequences with lengths between kmin and kmax in an input sequence.
def count_subsequence_in_sliding_window(kmin, kmax, sequence): if isinstance(sequence, str): for n in range(kmin, kmax + 1): for sub in zip(*(deque(itertools.islice(it, i), 0) or it for i, it in enumerate(itertools.tee(sequence, n)))): yield ''.join(sub)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_overlapping(seq, subseq):\n \n pos, count = 0, 0\n while True:\n pos = seq.find(subseq, pos)\n if pos < 0:\n break\n pos += 1 \n count += 1\n return count", "def _get_subsequence_counts(sequences, length, min_support=None):\n # type: (Union[List[str], List], Int, Union[Int, Float]) -> OrderedDict\n subsequences = Counter([''.join(seq.values.tolist()[i:(i + length)])\n for seq in sequences\n for i in range(len(seq) - length + 1)\n if len(seq) > 0])\n\n if min_support is not None:\n if min_support < 1.0 and min_support > 0.0:\n min_support = np.round(np.sum(subsequences.values()) * min_support)\n if min_support < 1 or np.round(min_support) != min_support:\n raise ValueError('Wrong value for min_support parameter!')\n for key, count in dropwhile(lambda key_count: key_count[1] >= min_support,\n subsequences.most_common()):\n del subsequences[key]\n\n return subsequences", "def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs", "def howmany_sequences(listOfTuples):\r\n #initialize number of pairs as 0\r\n pairs = 0\r\n #count pairs\r\n for n in listOfTuples:\r\n pairs += 1\r\n k = 1\r\n #find number of initial sequences \r\n while k*(k-1) != pairs*2:\r\n k += 1\r\n return(k)", "def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1", "def length_of_longest_substring(arr, k):\n window_start = 0\n max_repeat_times = 0\n frequency_map = {0: 0, 1: 0}\n len_longest = 0\n\n for window_end in range(len(arr)):\n right_char = arr[window_end]\n left_char = arr[window_start]\n frequency_map[right_char] += 1\n max_repeat_times = frequency_map[0]\n\n if max_repeat_times > k:\n frequency_map[left_char] -= 1\n window_start += 1\n len_longest = max(len_longest, window_end - window_start + 1)\n\n return len_longest", "def count_mers(sequence, alphabet, kmin, kmax):\n alphabet = 
set(alphabet)\n counts = defaultdict(int)\n for kmer in get_kmers_from_sequence(sequence, kmin, kmax):\n if set(kmer).issubset(alphabet):\n counts[kmer] = counts.get(kmer, 0) + 1\n return counts", "def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count", "def overlappingKmers(s, k=15, overlap=11, includeFinalPeptide=True, returnStartInds=False):\n inds = [i for i in range(0, len(s), k-overlap) if i+k < len(s)]\n\n if includeFinalPeptide and not s[-k:] == s[inds[-1]:inds[-1]+k]:\n inds.append(len(s)-k)\n\n mers = [s[i:i+k] for i in inds]\n\n if returnStartInds:\n return mers, inds\n else:\n return mers", "def _find_max_number_of_grouping(cls, reserved_seats, k):\n # print(reserved_seats)\n n = len(reserved_seats)\n count_groups = 0\n count_empty_contigous_seats = 0\n i = 0\n while i < n:\n if reserved_seats[i] != 0:\n # print('continue', i)\n count_empty_contigous_seats = 0\n i += 1\n continue\n\n count_empty_contigous_seats += 1\n # print('empty', i, count_empty_contigous_seats)\n if count_empty_contigous_seats >= k:\n count_groups += 1\n # print('found', i, count_groups)\n\n if ((i + 1) % len(cls._PLANE_ROW)) == 0:\n # print('new row', i)\n count_empty_contigous_seats = 0\n\n i += 1\n\n return count_groups", "def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n\n if not nums:\n return 0\n\n if k <= 1:\n return 0\n\n count = 0\n lo = 0\n product = 1\n for hi in range(len(nums)):\n product *= nums[hi]\n while product >= k:\n product /= nums[lo]\n lo += 1\n count += hi - lo + 1\n return count", "def find_subarrays(nums, k):\n res = pre_sum = 0\n dic = {0: 1}\n for i in nums:\n pre_sum += i\n res += dic.get(pre_sum - k, 0)\n dic[pre_sum] = dic.get(pre_sum, 0) + 1\n return res", "def real_overlap(from_sequence, to_sequence):\n sequence_overlap = difflib.SequenceMatcher(None, from_sequence, to_sequence)\n start = 0\n start_pos_from = -1\n start_pos_to = -1\n size = -1\n while start_pos_to != 0 or not start_pos_from+size == len(from_sequence):\n start_pos_from, start_pos_to, size = sequence_overlap.find_longest_match(start, \\\n len(from_sequence), 0, len(to_sequence))\n if not start_pos_to == 0 or not start_pos_from+size == len(from_sequence):\n start = start_pos_from+1\n\n return size", "def numKLenSubstrNoRepeats(self, S, K):\n return self.for_loop(S, K)", "def maxSumOfThreeSubarrays(self, nums: List[int], k: int) -> List[int]:\n\n n = len(nums)\n if n < 3 * k or k == 0:\n return 0\n\n prefix_sum = [0]\n for num in nums:\n prefix_sum.append(prefix_sum[-1] + num)\n\n left = [0] * n\n left_i = [0] * n\n right = [0] * (n + 1) # add one to right (for case of k == 1)\n right_i = [0] * (n + 1)\n\n for i in range(k - 1, n):\n window = prefix_sum[i + 1] - prefix_sum[i + 1 - k]\n if window > left[i - 1]: # > cause we prefex left start\n left[i] = window\n left_i[i] = i - (k - 1)\n else:\n left[i] = left[i - 1]\n left_i[i] = left_i[i - 1]\n\n for i in reversed(range(n - k + 1)):\n window = prefix_sum[i + k] - prefix_sum[i]\n if window >= right[i + 1]: # >= cause we prefex left start\n right[i] = window\n right_i[i] = i\n else:\n right[i] = right[i + 1]\n right_i[i] = right_i[i + 1]\n\n max_sum = 0\n a, b, c = 0, 0, 0\n for i in range(k, n - 2 * k + 1):\n curr_sum = prefix_sum[i + k] - prefix_sum[i] + left[i - 1] + right[i + k]\n if curr_sum > max_sum:\n max_sum = curr_sum\n a, b, c = left_i[i - 1], i, right_i[i + k]\n\n return [a, b, c]", "def count_k(n, k):\n if n == 0:\n 
return 1\n elif n < 0:\n return 0\n else:\n total = 0\n i = 1\n while i <= k:\n total += count_k(n - i, k)\n i += 1\n return total", "def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens", "def solveProblem(list):\n return len(findSubArray(list))", "def for_loop(self, S, K):\n if len(S) < K:\n return 0\n i, count = 0, 0\n window_set = set()\n for j in range(len(S)):\n if S[j] in window_set:\n while i < j and S[j] in window_set:\n window_set.remove(S[i])\n i += 1\n window_set.add(S[j])\n if len(window_set) == K and j - i + 1 == K:\n window_set.remove(S[i])\n i, count = i + 1, count + 1\n j += 1\n return count", "def count_sequences(self, size):\n raise NotImplementedError", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd", "def gc_content_sequence_window(sequence, as_overlap=False, k=20):\n # make sequence upper case and getting the length of it\n sequence, seq_len = sequence.upper(), len(sequence)\n # the array-like object to collect the data\n gc_content = []\n # non overlap sequence length\n non_overlap = range(0, len(sequence) - k + 1, k)\n # overlap sequence length\n overlap = range(0, seq_len - k + 1)\n # overlap is needed\n if as_overlap:\n # iterates to the overlap region\n for i in overlap:\n # creates the substring to count the gc_content\n subseq = sequence[i:i + k]\n # count and sum up the Gs and Cs counts\n g_c = subseq.count('C') + subseq.count('G')\n # collect the data in the array container\n gc_content.append(round(g_c / len(subseq), 4) * 100)\n # if non overlap is choosed\n else:\n # iterates to the mon overlap region\n for j in non_overlap:\n # creates the substring to count the gc_content\n subseq = sequence[j:j + k]\n # count and sum up the Gs and Cs counts\n g_c = subseq.count('C') + subseq.count('G')\n # collect the data in the array container\n gc_content.append(round(g_c / len(subseq), 4) * 100)\n return gc_content", "def pick_Maximal_overlap(reads, k):\n reada, readb = None, None\n best_olen = 0\n for a, b in permutations(reads, 2):\n olen = Overlap(a, b, min_length=k)\n if olen > best_olen:\n reada, readb = a, b\n best_olen = olen\n return reada, readb, best_olen", "def find_long_runs(num_sequence, l):\n chunked = [(k, list(g)) for k, g in itertools.groupby(num_sequence)]\n retval = [(i, len(g)) for i, (k, g) in enumerate(chunked) if k and len(g) > l]\n return retval", "def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts", "def overlap_count(haystack, needle):\n count = 0\n index = 0\n while True:\n try:\n i = haystack.index(needle, index)\n except ValueError:\n break\n count += 1\n index = i+1\n return count", "def subsequence_lengths(sequence):\n\n lengths = defaultdict(list)\n\n # Go through the first n-1 elements\n i = 1\n for pre, post in zip(sequence, sequence[1:]):\n if pre == post:\n i += 1\n else:\n 
lengths[pre].append(i)\n i = 1\n\n # Check the nth element\n if sequence[-1] == sequence[-2]:\n lengths[sequence[-1]].append(i)\n else:\n lengths[sequence[-2]].append(i + 1)\n lengths[sequence[-1]].append(1)\n\n return dict(lengths)", "def count(arr, k):\n dp = [[None]*(k+1) for _ in range(len(arr)+1)]\n for i in range(len(dp)):\n dp[i][0] = 1\n for i in range(1, len(dp[0])):\n dp[0][i] = 0\n for a in dp:\n print(a)\n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if arr[i-1] <= j:\n dp[i][j] = dp[i-1][j-arr[i-1]] + dp[i-1][j]\n else:\n dp[i][j] = dp[i-1][j]\n for a in dp:\n print(a)\n return dp[-1][-1]", "def check_through_arr(arr, k):\n i = 0\n j = 1\n count = 0\n while j < len(arr):\n if arr[i] + k > arr[j]:\n j += 1\n elif arr[i] + k == arr[j]:\n count += 1\n i += 1\n j += 1\n else:\n i += 1\n\n return count", "def LCSubSeq(X, Y, m, n):\n LCSuff = [[0 for k in range (n+1)] for l in range(m+1)]\n result = 0\n\n for i in range(m+1):\n for j in range(n+1):\n if (i==0 or j==0):\n LCSuff[i][j] = 0\n elif (X[i-1] == Y[j - 1]):\n LCSuff[i][j] = LCSuff[i-1][j-1] + 1\n result = max(result, LCSuff[i][j])\n else:\n LCSuff[i][j] = 0\n return result" ]
[ "0.6897632", "0.66734475", "0.66401356", "0.61437935", "0.6082373", "0.60758173", "0.6067477", "0.6067079", "0.60625184", "0.60437757", "0.59478396", "0.591973", "0.58829165", "0.5873242", "0.5867987", "0.5842749", "0.5830168", "0.5733634", "0.57134503", "0.5704528", "0.57030535", "0.5683471", "0.5683013", "0.5679447", "0.5642333", "0.5638441", "0.56052667", "0.56014025", "0.56008923", "0.5581779" ]
0.72154963
0
Returns a plot of the genome GC skew.
def plot_gc_skew(x, y): plt.figure(num=None, figsize=(24, 7), dpi=100) yargmax = y.index(max(y)) plt.axvline(oriCStart + oriOffset, color="r", linestyle='--') plt.axvline(x[yargmax], color="g", linestyle='--') plt.plot(x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_gc_skew(x, y, start, offset):\n plt.figure(num=None, figsize=(24, 7), dpi=100)\n yargmax = y.index(max(y))\n plt.axvline(start + offset, color=\"r\", linestyle='--')\n plt.axvline(x[yargmax], color=\"g\", linestyle='--')\n plt.plot(x, y)\n plt.savefig('Skew_genome.pdf', format='pdf', dpi=1200)\n plt.show()", "def gc_skew(sequence):\n seq = sequence.upper()\n half = len(sequence) // 2\n genome = np.frombuffer(seq.encode() + seq.encode(), dtype='uint8')\n g = np.concatenate(([0], np.array(genome == ord('G'), dtype='uint8').cumsum()))\n c = np.concatenate(([0], np.array(genome == ord('C'), dtype='uint8').cumsum()))\n gc = g - c\n skew = gc[half:(half + len(sequence))] \\\n - gc[0:len(sequence)] \\\n + gc[(len(sequence) - half):(2 * len(sequence) - half)] \\\n - gc[len(sequence):(2 * len(sequence))]\n return skew", "def compute_gc_skew(DNA, chart=False):\n running_skew = [0]\n G_C = 0\n min = 0\n min_indexes = []\n\n for i in range(len(DNA)):\n\n if DNA[i] == \"G\":\n G_C += 1\n\n elif DNA[i] == \"C\":\n G_C -= 1\n\n running_skew.append(G_C)\n\n # Compute the min. NOTE: We have to sum one to the indexes because we already\n # start with an extra element in the res (a 0)\n min_skew = np.min(running_skew)\n\n for i in range(len(running_skew)):\n if running_skew[i] == min_skew:\n min_indexes.append(i)\n\n if chart:\n if sys.modules.get('matplotlib', None):\n plt.plot(running_skew)\n plt.ylabel('G - C diff')\n plt.title('Skew diagram')\n plt.savefig('skew.png')\n else:\n print(\"No matplotlib module found -- no skew diagram for you :-(\")\n\n return (running_skew, min_indexes)", "def test_chroma_plot(self):\n plt.xlabel('chromaticity x')\n plt.ylabel('chromaticity y')\n plt.title(\"Standard Gamut\")\n plt.axis([-0.1, 0.8, -0.4, 0.65])\n plt.grid(True)\n mplh.plot_spectrum_locus_76()\n mplh.plot_colorspace_gamut(colorspaces.ACES, lines_color=\"c\",\n upvp_conversion=True)\n mplh.plot_colorspace_gamut(colorspaces.REC709, lines_color=\"m\",\n upvp_conversion=True)\n plt.legend(loc=4)\n if DISPLAY:\n plt.show()\n plt.clf()\n plt.close()", "def plot_phase_diagram(self):\n t_max = np.log(max(self.temperatures))\n d_min = np.log(min(self.distortions))\n y_axis = [np.log(i) - d_min for i in self.distortions]\n x_axis = [t_max - np.log(i) for i in self.temperatures]\n\n plt.figure(figsize=(12, 9))\n plt.plot(x_axis, y_axis)\n\n region = {}\n for i, c in list(enumerate(self.n_eff_clusters)):\n if c not in region:\n region[c] = {}\n region[c]['min'] = x_axis[i]\n region[c]['max'] = x_axis[i]\n for c in region:\n if c == 0:\n continue\n plt.text((region[c]['min'] + region[c]['max']) / 2, 0.2,\n 'K={}'.format(c), rotation=90)\n plt.axvspan(region[c]['min'], region[c]['max'], color='C' + str(c),\n alpha=0.2)\n plt.title('Phases diagram (log)')\n plt.xlabel('Temperature')\n plt.ylabel('Distortion')\n plt.show()", "def show_dbscan():\n\n # simulate normal hourly data\n weekday = ([0.05, 0.95], 0.05) #bath, bed\n weekend = ([0.3, 0.7], 0.1)\n roomperwd, truelabelswd = make_blobs(n_samples=23, centers=weekday[0],\n cluster_std=weekday[1], random_state=0)\n roomperwe, truelabelswe = make_blobs(n_samples=8, centers=weekend[0],\n cluster_std=weekend[1], random_state=0)\n\n # combine modes\n roompers = np.vstack((roomperwd, roomperwe))\n\n # make positive and sum to one to simulate valid distribution\n for i in range(roompers.shape[0]):\n for j in range(roompers.shape[1]):\n if roompers[i, j] < 0:\n roompers[i, j] = 0\n roompersnorm = normalize(roompers, norm='l1')\n\n # simulate anomaly on most recent day 
where don't leave bedroom\n roompersnorm[-1, :] = np.array([0.8, 0.2])\n\n # detect outliers\n roompersdetector = HourlyRoomPercentageAnomalyDetection(roompersnorm, eps=0.3, min_samples=3)\n labels = roompersdetector.scale_and_proximity_cluster(eps=0.3, min_samples=3)\n\n # plot results\n plt.figure()\n seenflag1 = False; seenflag2 = False; seenflag3 = False;\n for i, label in enumerate(labels):\n if label == 0:\n if seenflag1:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro', label='Cluster 1')\n seenflag1 = True\n elif label == 1:\n if seenflag2:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx', label='Cluster 2')\n seenflag2 = True\n elif label == -1:\n if seenflag3:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^', label='Outlier')\n seenflag3 = True\n plt.legend(loc='lower left')\n plt.axis([0, 1, 0, 1])\n plt.show()", "def dendogram(self):\r\n \r\n plt.figure(figsize=(20, 7))\r\n dendrogram = sch.dendrogram(sch.linkage(self.X, method='ward'))\r\n plt.title(\"Dendograms\")\r\n plt.axhline(linestyle='--', y=5) \r\n plt.show()", "def get_skew(genome):\n assert (is_dna(genome))\n skew = [0] * (len(genome) + 1)\n for i, base in enumerate(genome, 1):\n if base == 'C':\n skew[i] = skew[i - 1] - 1\n elif base == 'G':\n skew[i] = skew[i - 1] + 1\n else:\n skew[i] = skew[i - 1]\n return skew", "def quick_plot(solution):\n plt.suptitle('GNLSE solution')\n\n plt.subplot(1, 2, 1)\n plot_wavelength_vs_distance(solution)\n\n plt.subplot(1, 2, 2)\n plot_delay_vs_distance(solution)\n\n plt.show()", "def gc_skew(dna):\n gcount = dna.count('G') + dna.count('g')\n ccount = dna.count('C') + dna.count('c')\n if gcount + ccount == 0:\n return 0.0\n return float(gcount - ccount) / float(gcount + ccount)", "def plot_gheat_g(seed=1):\n fig, ax = plt.subplots(figsize=[2.5*plotdl.latex_width_inch, 3*plotdl.latex_height_inch])\n \n r = Factory_psi1_psiN( \"aapta_of_s_N{number_of_points[0]}.npz\", N=400)\n ckg = r.create_if_missing(dict(model_name= [\"Anderson\",], \n number_of_points=[400,], bandwidth=[1,],\n dis_param=np.linspace(0,1,100),c=[1,], k=[1.57,], seed=np.arange(1,6))) \n color_seq = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])\n for (seed,c) in zip(np.arange(1,6),color_seq):\n ck = ckg[ckg['seed']==seed]\n g, psi_1, psi_N = ck['g'], ck['psi_N'], ck['psi_1']\n\n psi_heat = 2*(abs(psi_1)**2)*(abs(psi_N)**2) / ((abs(psi_1)**2) + (abs(psi_N)**2))\n \n phs = np.nansum(psi_heat,axis=1)\n \n psi1psiN = np.nansum(abs(psi_1*psi_N), axis=1)\n #print(ckg['dis_param'], phs)\n ax.plot(ck['dis_param'], phs,'.', color=c)\n ax.plot(ck['dis_param'], abs(g),'+', color=c)\n ax.plot(ck['dis_param'], psi1psiN,'d', color=c)\n ax.set_xlabel('dis_param')\n mkdir_and_savefig(fig, 'pta_comparison_of_s_N400.png')\n plt.close(fig)\n ## use last ck\n fig1, axes1 = plt.subplots(3,2,figsize=[2*plotdl.latex_width_inch, 3*plotdl.latex_height_inch],\n sharex=True, sharey=True)\n axes1.flat[0].xaxis.set_major_locator(MaxNLocator(4))\n axes1.flat[0].yaxis.set_major_locator(MaxNLocator(4))\n for n, ax1 in zip(range(1,20,3), axes1.flat):\n ax1.plot(abs(ck['psi_1'][n]), abs(ck['psi_N'][n]), '.') \n ax1.set_title(\"W = {:0.2}\".format(ck['dis_param'][n]))\n fig1.savefig('pta_psi_1_psi_2_N400.png')\n \n ax.cla()\n ax.plot(ck['dis_param'], np.real(g), label='real')\n ax.plot(ck['dis_param'], np.imag(g), label='imag')\n 
ax.plot(ck['dis_param'], np.abs(g), label='abs')\n ax.legend(loc = 'upper right')\n ax.set_xlabel('dis_param')\n ax.set_ylabel('g')\n mkdir_and_savefig(fig, 'pta_real_imag_g_s_N400')", "def calculate_skews(genome):\n\n skew = 0\n skew_list = [0]\n\n for base in genome:\n if base == 'G':\n skew = skew + 1\n elif base == 'C':\n skew = skew - 1\n skew_list.append(skew)\n return skew_list", "def plot_bv_swarm(df, xcolname, ycolname, icol=1):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box+kde\n sns.swarmplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()", "def test_plot_cspad(geometry, fname_data, amp_range=(0,0.5)):\n #rad1 = 93\n #rad2 = 146\n rad1 = 655\n rad2 = 670\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 500, 500# None\n\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=None)\n rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n\n ixo, iyo = geometry.point_coord_indexes(xy0_off_pix=xyc, do_tilt=True)\n logger.info('Detector origin indexes ixo:%d iyo:%d' % (ixo, iyo))\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n arr.shape= (4,8,185,388)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n\n arr.shape = rows.shape\n img = img_from_pixel_arrays(rows, cols, W=arr)\n\n rcc_ring = (iyo, ixo)\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.drawCircle(axim, rcc_ring, rad1, linewidth=1, color='w', fill=False)\n gg.drawCircle(axim, rcc_ring, rad2, linewidth=1, color='w', fill=False)\n gg.drawCenter(axim, rcc_ring, rad1, linewidth=1, color='w')\n gg.move(500,10)\n gg.show()", "def inner_CoherentLength():\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n xCorr, yCorr = self.APP_dataprocess.SpatialCorrelation([self.spinBox_PixelX.value(), self.spinBox_PixelY.value()])\r\n ax.plot(xCorr)\r\n ax.set_title(\"G2 @({}, {})\".format(self.spinBox_PixelX.value(), self.spinBox_PixelY.value()))\r\n fig.savefig(\"G2 @({}, {}).png\".format(self.spinBox_PixelX.value(), self.spinBox_PixelY.value()), format=\"png\", dpi = 100)\r\n plt.close()", "def make_four_pdf(args):\n params = make_four_params(args)\n m4_filename = params['m4_filename']\n prefix = params['prefix']\n min_matching_length = params['min_matching_length']\n output_prefix = params['output_prefix']\n\n # if there are fewer than threshold reads then skip it\n threshold = 25 # threshold before plotting.\n if len(open(m4_filename).readlines()) < threshold:\n print('skipping %s because it has %d lines' % (\n m4_filename,\n len(open(m4_filename).readlines()))\n )\n return\n\n plb.rcParams['figure.figsize'] = 30, 30\n plt.clf()\n plt.figure(1)\n\n remove_punctuation = lambda x: ''.join(e for e in x if e.isdigit() or e == '.')\n coords = [int(remove_punctuation(a)) for a in prefix.split('_')[1:3]]\n dist = coords[1] - coords[0]\n\n graph = generate_graph(params)\n preset, postset, spanset, gapset = get_read_classifications(params)\n # Draw Ground Truth\n plt.subplot(2, 3, 1)\n node_colors = node_set_colors(graph.nodes(), spanset, gapset, preset, postset)\n pos = 
nx.spring_layout(graph)\n\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; Ground Truth Colors\\n\\\n Red=Preset, Yellow=Postset, Blue=GapSet, Green=SpanSet\\n\\\n num_edges = {2}\\\n \"\\\n .format(prefix, min_matching_length, nx.number_of_edges(graph))\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n\n # Draw histogram of smith waterman scores and remove bad edges\n\n # squash preset and postset nodes\n graph = nx_helpers.remove_nodes(graph, preset)\n graph = nx_helpers.remove_nodes(graph, postset)\n\n # filter nodes by smith_waterman\n with utils.Timer(\"smith_waterman_filter\"):\n flanking_reads = preset.union(postset)\n # subplots 2 and 3 occur in smith_waterman_filter\n graph = smith_waterman_filter(graph, flanking_reads, params)\n\n # Draw groudn truth with squashed nodes\n plt.subplot(2, 3, 4)\n node_colors = node_set_colors(graph.nodes(), spanset, gapset, preset, postset)\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; Ground Truth Colors \\n\\\n Removed Preset and Postsetnodes; Blue=GapSet, Green=SpanSet\\n\\\n number of edges = {2}\"\\\n .format(prefix, min_matching_length, nx.number_of_edges(graph))\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n\n # Drop Small Communities and Draw\n plt.subplot(2, 3, 5)\n communities = nx_helpers.get_communities(graph)\n graph, communities = drop_small_communities(graph, communities)\n node_colors = node_community_colors(graph, communities)\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; After Removing Small Communities; NumCom={2}\\n\\\n ComQual={3}, MapQual={4}\\n\\\n number of edges = {5}\"\\\n .format(prefix, min_matching_length, len(communities),\n community_quality(communities, spanset, gapset),\n mapping_quality(graph, spanset, gapset),\n nx.number_of_edges(graph))\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n plt.title(title)\n\n # IGV Line Plot\n plt.subplot(2, 3, 6)\n make_line_plot((spanset, gapset, preset, postset), params)\n\n plt.savefig(output_prefix + '_figs/%s-communities.pdf' % (prefix))\n\n ret_string = '%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\tchr%s_slop5000.png\\t%s-communities.pdf' % (\n prefix,\n prefix.split('_')[0],\n coords[0],coords[1],coords[1]-coords[0],\n len(communities),\n community_quality(communities, spanset, gapset),\n mapping_quality(graph, spanset, gapset),\n prefix,prefix\n )\n\n return ret_string", "def _generate_plot(ax, power_data, title, min_db, max_db):\n # only generate plots for the transducers that have data\n if power_data.size <= 0:\n return\n\n ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)\n return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if 
g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def plot_hr_diag(hr_df, x='B_V', y='M_V', cutoff=0.2, bvcutoff=0.05):\n plt.figure(figsize=(11., 10.))\n print \"Plotting background stars..\"\n plt.set_cmap('gray_r')\n plt.hist2d(hr_df[x].tolist(), hr_df[y].tolist(), (200, 200), norm=LogNorm(), cmin=10)\n plt.axis([-0.2, 2.35, -3., 7.])\n plt.gca().invert_yaxis()\n plt.xlabel(r'$BT-VT$ (mag)')\n plt.ylabel(r'$M_{VT}$ (mag)') # Plotting M_{VT}\n plt.title(r'$\\sigma_\\pi / \\pi < %s, \\sigma_{BT-VT}< %s$ mag' % (cutoff, bvcutoff))\n print \"..Done\"\n return", "def plot_fig43b_spreading_yeast():\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n y = np.loadtxt('csvs/ratio_lp50a10ad5hp0.0067379_yeast.txt')\n links35 = np.tile(35, 44)\n Rlinks = np.array([47, 21, 18, 15, 20, 17])\n Llinks = np.array([245, 30, 26, 15, 23, 35])\n #links to right of methylation site (50 in total)\n Rlinks = np.concatenate((Rlinks, links35))\n #links to left of methylation site (50 in total)\n Llinks = np.concatenate((Llinks, links35))\n #cumulative chain length including burried basepairs\n unwrap = 0\n #plot as positive distance from TSS in bp\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp\n #plot as negative distance from TSS in bp\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap) #max WLC chain length in bp\n x = np.concatenate((ldna_Llinks[::-1], ldna_Rlinks))\n ax.plot(x, y, color='k')\n ax.set_xlabel(r'Distance from TSS (bp)')\n ax.set_ylabel('Relative enrichment')\n #plot inset using Crabtree data\n axins = inset_axes(ax, width=\"40%\", height=\"40%\", \n bbox_to_anchor=(.1, .1, .8, .8),\n bbox_transform=ax.transAxes, loc=2)\n xcrabtree = np.array([-10256, -3077, -2241, -1485, -739, -309, -169, 489, 1746, 3087, 4400, 5300])\n REday0 = np.array([0.27, 0.13, 0.46, 0.12, 0.17, 0.33, 0.33, 0.31, 0.32, 0.27, 0.21, 0.33])\n REday5 = np.array([0.19, 0.40, 0.89, 1.55, 0.97, 1.25, 2.25, 3.57, 3.03, 2.09, 1.12, 0.14])\n ycrabtree = REday5/np.mean(REday0)\n axins.plot(xcrabtree, ycrabtree)\n axins.set_xlim([-10000, 10000])\n ax.set_xlim([-10000, 10000])\n #axins.set_ylabel('Relative enrichment', fontsize=8)\n #axins.set_xlabel('Distance from TSS (bp)', fontsize=8)\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.savefig(f'plots/thesis/fig43b_spreading-TSS-yeast.pdf', bbox_inches='tight')", "def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()", "def 
inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def sse_plot(self):\n df_sse = self.df[\"sse\"].sort_values(ascending=False)\n plt.figure(figsize=(self.plot_width, self.plot_height))\n df_sse.plot(\"bar\")\n plt.title(\"SSE por cluster\")\n output_path_sse = os.path.join(self.output_folder, 'sse_plot.png')\n plt.savefig(output_path_sse)", "def plotPersistenceDiagrams(dgm, **args):\n plot_diagrams(dgm, **args)\n jtplot.style(ticks=True, grid=True, gridlines='--') # Ugh", "def plot_stereomatic(name, database):\n fig, ax = plt.subplots()\n fig.set_size_inches(16,12)\n x = [ xx * 0.0005 for xx in range(-10000,10000)]\n y = []\n for xx in x:\n y.append(stereomatic_descriptor(name, xx, database))\n\n ax.scatter(x, y, label='%s'%name, color='k', s=10)\n plt.xlim((0,4))\n plt.ylim((0,5))\n x_new_ticks = np.linspace(0,4,21)\n y_new_ticks = np.linspace(0,5,11)\n plt.xticks(x_new_ticks, fontsize=10)\n plt.yticks(y_new_ticks, fontsize=10)\n plt.xlabel('x', fontsize=10)\n plt.ylabel('y', fontsize=10)\n plt.title('stereometic Function', fontsize=10, y=1.05)\n plt.legend(loc='best', fontsize=10)\n # plt.show()\n plt.savefig('%s.png'%name)\n plt.close(fig)", "def show3(dlist,r=2,c=2,greyscale=False,output=False,samerange=True):\n\n#distrib.show3((d63[:128,:128,0]-1,d0[:128,:128,0]-1,N.log(d63[:128,:128,0]),d63ga[:128,:128,0]),greyscale=True)\n\n M.clf()\n\n fig = M.figure(figsize=(6.4, 6.4), dpi=100) \n axesarr=N.array([[0.01,0.51,0.4,0.4],\n [0.51,0.51,0.4,0.4],\n [0.01,0.01,0.4,0.4],\n [0.51,0.01,0.4,0.4]])\n\n print axesarr\n colorbax = 1.*axesarr\n print colorbax\n colorbax[:,2] = 0.*colorbax[:,2] + 0.03\n colorbax[:,0] += 0.4\n\n print colorbax\n\n if greyscale:\n colorscheme='binary'\n else:\n colorscheme='jet'\n\n # d63, d0, log d63, d63g\n titlearr=[r'$\\delta$',r'$\\delta_{\\rm initial}$',r'$\\log(1+\\delta)$',r'$\\delta_{\\rm Gauss}$']\n\n if (dlist[1] != None):\n min23 = min(min(dlist[2].flatten()),min(dlist[3].flatten()))\n max23 = max(max(dlist[2].flatten()),max(dlist[3].flatten()))\n\n max0 = max(dlist[1].flatten())\n min0 = min(dlist[1].flatten())\n\n initfact = min(max23/max0,min23/min0)\n print min23,max23, initfact\n\n sc = 0\n for d in 
dlist:\n if (d != None):\n M.axes(axesarr[sc])\n M.title(titlearr[sc],fontsize=23)\n if (sc > 1):\n print titlearr[sc]\n if (samerange):\n M.pcolor(d,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n elif (sc == 1):\n #print min(d.flatten()*initfact),max(d.flatten()*initfact)\n if (samerange):\n M.pcolor(d*initfact,cmap=M.get_cmap(colorscheme),vmin = min23,vmax=max23)\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n else:\n M.pcolor(d,cmap=M.get_cmap(colorscheme))\n\n# if (sc == 1):\n# M.colorbar(ticks=[-0.1,-0.05,0,0.05,0.1])\n# else:\n\n M.axis('tight')\n M.axis('equal')\n M.axis('tight')\n M.xticks([])\n M.yticks([])\n\n cax = M.axes(colorbax[sc])\n M.colorbar(cax=cax)\n\n sc += 1\n\n #M.savefig('showdens.eps',dpi=8)\n #M.gcf().set_size_inches((6.4,6.4))\n #M.gcf().set_size_inches((15.,12.))\n if (output):\n if greyscale:\n M.savefig('showdens_grey.png',dpi=100)\n M.savefig('showdens_grey.pdf')\n else:\n fig.savefig('showdens.png',dpi=100)\n M.savefig('showdens.pdf')\n\n #M.show()", "def plot_dereddening():\n extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),\n '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),\n '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}\n cepheids = {'2365-2764-1': np.array([0.959, 2.09]), '4109-638-1': np.array([0.705, 2.385]), '2058-56-1':\n np.array([1.222, 1.333]), '3642-2459-1': np.array([1.088, 2.0518]), '3999-1391-1':\n np.array([1.360, 1.2567]), '2607-1448-1': np.array([1.484, 0.6963])}\n periods = {'2365-2764-1': 1.61, '4109-638-1': 15.31, '2058-56-1': 63.08, '3642-2459-1': 1.86, '3999-1391-1': 24.98,\n '2607-1448-1': 8.54}\n max_periods = max(periods.values())\n\n new_positions_bv_mv = [] # in M_V vs B-V space\n colors = []\n theoretical_position = []\n for obj in extinction_coefficients.keys():\n # new_positions_bv_mv.append(cepheids[obj]-extinction_coefficients[obj])\n new_positions_bv_mv.append(cepheids[obj])\n colors.append(periods[obj]/max_periods)\n theoretical_position.append(-2.78*np.log10(periods[obj])-1.35)\n\n for pos in range(len(new_positions_bv_mv)):\n plt.scatter(new_positions_bv_mv[pos][0], new_positions_bv_mv[pos][1], marker='^', facecolor='w', s=40)\n plt.scatter(new_positions_bv_mv[pos][0], theoretical_position[pos], marker='o', facecolor='r', s=50)\n return new_positions_bv_mv, colors", "def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()", "def main():\n strikes, dips, normals, slip = generate_normal_ss_data(330, 60, n=500, porp=1)\n #strikes, dips, normals, slip = generate_normal_data(330, 60, n=500, porp=10)\n sigma = invert_plane_stress(normals, slip)\n plot(sigma, strikes, dips)\n plt.show()", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n 
population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)" ]
[ "0.70905507", "0.5991147", "0.58976996", "0.5865311", "0.57162565", "0.55291164", "0.55249155", "0.55096096", "0.54807127", "0.54696524", "0.54455096", "0.54164976", "0.5409248", "0.5373216", "0.53698695", "0.535764", "0.53407437", "0.5330776", "0.53299135", "0.53097653", "0.52756274", "0.52735007", "0.52558535", "0.52492267", "0.5249155", "0.52469105", "0.52423286", "0.52271885", "0.5209037", "0.52024865" ]
0.6686433
1
When passed a string representing a nucleotide sequence, treats it as a short inverted repeat and returns the number of mismatches between the first half of the sequence and the reverse complement of the second half.
def count_sequence_mismatches(seq): trans_table = str.maketrans('ACGT', 'TGCA') half_len = len(seq) // 2 second_half = seq[-half_len:].translate(trans_table) mismatches = 0 for i in range(half_len): if seq[i] != second_half[-i - 1]: mismatches += 1 return mismatches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repeat_again(s: str) -> int:\n string_dict = dict()\n max_length = 0\n met_repeat = False\n for index, v in enumerate(s):\n if v in string_dict:\n m = index - string_dict[v]\n if m > max_length:\n max_length = m\n met_repeat = True\n string_dict[v] = index\n\n if met_repeat is True:\n return max_length\n else:\n return len(s)", "def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]", "def reverse(s):\n flag=0\n n=len(s)\n for i in range(len(s)):\n if s[i]!=s[n-i-1]:\n flag=1\n return -1\n return 1", "def determineIdenticalBases(string1, string2):\n S = 0\n D = 0\n if len(string1) != len(string2):\n return -1\n for i in range(len(string1)):\n if checkForNOrGap(string1[i]) and checkForNOrGap(string2[i]) :\n if string1[i] == string2[i]:\n S += 1\n else:\n D += 1\n return S, D", "def determineIdenticalBases(string1, string2):\n S = 0\n D = 0\n if len(string1) != len(string2):\n return -1\n for i in range(len(string1)):\n if checkForNOrGap(string1[i]) and checkForNOrGap(string2[i]) :\n if string1[i] == string2[i]:\n S += 1\n else:\n D += 1\n return S, D", "def non_repeating_substring(str1: str) -> int:\n max_length = 0\n seen = {}\n window_start = 0\n for window_end in range(len(str1)):\n right_char = str1[window_end]\n if right_char in seen:\n window_start = max(window_start, seen[right_char] + 1)\n seen[right_char] = window_end\n max_length = max(max_length, window_end - window_start + 1)\n return max_length", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def get_num_mismatches(sequence, ref_genome, position):\n characters = list(sequence)\n num_mismatches = 0\n for i in range(0, len(characters)):\n if position + i >= len(ref_genome):\n break\n if characters[i] != ref_genome[position + i]:\n num_mismatches += 1\n\n return num_mismatches", "def _matched_len(self, string, idx1, idx2):\n\n str_len = len(string)\n if idx1 == idx2:\n return str_len - idx1\n counter = 0\n while idx1 < str_len and idx2 < str_len and string[idx1] == string[idx2]:\n counter += 1\n idx1 += 1\n idx2 += 1\n\n return counter", "def sticky_count_wrapper(fwd_str):\n length = len(fwd_str)\n count = 0\n rev_index = length-1\n for index in range(length/2):\n # print fwd_str[index], \" \", fwd_str[rev_index - index]\n if get_opposite_character(fwd_str, index) is not fwd_str[rev_index-index]:\n # print \"Breaking the Code :\",get_opposite_character(fwd_str, index), \" is not equal to \",\n # fwd_str[rev_index - index]\n break\n count += 1\n return count", "def findRepeatedDnaSequences(s):\n\n # empty string or string < 10 chars\n if s == '' or len(s) < 10:\n raise ValueError(\"String must be of length 10+. 
String: '{}' is too small.\".format(s))\n\n ten_letter_counts = {}\n two_or_more = set()\n\n i = 0\n while (i + 10) <= len(s):\n # get 10 letter substring\n sub = s[i:i+10]\n\n # get current count, if not seen before, set count to 1\n ten_letter_counts[sub] = ten_letter_counts.get(sub, 0) + 1\n\n # check if occures >= 2\n if ten_letter_counts[sub] >= 2:\n two_or_more.append(sub)\n\n\n i += 1\n\n return two_or_more", "def find_non_repeat(a_string: str) -> str:\n # TODO: Implement this function\n ...", "def common_prefix_length(s, u):\n length = 0\n for cs, cu in zip(s, u):\n if cs != cu:\n break\n length += 1\n return length", "def has_n_same(string, n):\n all_chars = {}\n for char in string: # sum up count of each char\n all_chars.setdefault(char, 0)\n all_chars[char] += 1\n for char, count in all_chars.items(): # check how many appeared n times\n if count == n:\n return True\n return False", "def count_unanimous_answer(g):\n chars = set(g.replace('\\n', ''))\n ppl = g.splitlines()\n unanimous = 0\n for c in chars:\n if all([c in p for p in ppl]):\n unanimous += 1\n return unanimous", "def strings_differ(string1, string2):\n if len(string1) != len(string2):\n return True\n invalid_bits = 0\n for a, b in zip(string1, string2):\n invalid_bits += a != b\n return invalid_bits != 0", "def substrCount(n, s):\r\n lst = []\r\n character = s[0]\r\n count = 1\r\n result = 0\r\n for i in range(1, n):\r\n if s[i] == character:\r\n count += 1\r\n else:\r\n lst.append((character, count))\r\n character = s[i]\r\n count = 1\r\n lst.append((character, count))\r\n\r\n for tpl in lst:\r\n \"\"\"calculate all possible palindromes created from same characters that are close to each other\r\n E.g: aaa => 6 possibles (3*4//2 = 6)\r\n \"\"\"\r\n result += tpl[1] * (tpl[1] + 1) // 2\r\n\r\n for i in range(1, len(lst) - 1):\r\n if lst[i - 1][0] == lst[i + 1][0] and lst[i][1] == 1:\r\n \"\"\"\r\n check palindromes created from 3 tuples with a different character in between\r\n \"\"\"\r\n result += min(lst[i - 1][1], lst[i + 1][1])\r\n\r\n return result", "def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += up_seq.count(vchar)\r\n return len(curr_seq) - total", "def is_unique_n_2(string: str) -> bool:\n\n for idx, letter in enumerate(string):\n for next_letter in string[idx + 1:]:\n if letter == next_letter:\n return False\n return True", "def is_unique_n_lg(string: str) -> bool:\n\n start = 0\n sorted_string = sorted(string)\n\n while start + 1 < len(sorted_string):\n if string[start] == string[start + 1]:\n return False\n\n start += 1\n\n return True", "def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False", "def theLoveLetterMystery(s):\n mincount = 0\n for i in range(len(s) // 2):\n mincount += abs(ord(s[i]) - ord(s[-1 - i]))\n\n return mincount", "def get_nmismatches(bases_mask):\n # Check mask is valid\n if not bases_mask_is_valid(bases_mask):\n raise Exception(\"'%s': not a valid bases mask\" % bases_mask)\n # Total the length of all index reads\n index_length = 0\n for read in bases_mask.upper().split(','):\n if read.startswith('I'):\n try:\n i = read.index('N')\n read = read[:i]\n except ValueError:\n pass\n try:\n index_length += int(read[1:])\n except ValueError:\n index_length += len(read)\n # Return number of mismatches\n if index_length >= 6:\n return 1\n else:\n return 0", "def is_palindrome_permutation(string):\n\n 
letter_to_count = dict()\n\n for letter in string:\n letter_to_count[letter] = letter_to_count.get(letter, 0) + 1\n\n residual = 0\n for count in letter_to_count.values():\n residual += count % 2\n\n # there are can be a single letter with an odd character count when the palindrome is of odd length\n return residual <= 1", "def count_decodings(s):\n\n if len(s) == 1:\n return 1\n if len(s) == 2:\n return 2\n including_last_digit = 0\n including_last_two_digit = 0\n if int(s[-1]) > 0:\n including_last_digit = count_decodings(s[:-1])\n if int(s[-2:]) < 28:\n including_last_two_digit = count_decodings(s[:-2])\n return including_last_digit + including_last_two_digit", "def determine_unknown_indel_length(seq1, seq2):\n \n bp1 = seq1[0]\n bp2 = seq2[0]\n skip = 0 \n\n if len(seq1) != len(seq2):\n return 'error - cannot compare', 0, 0\n\n if bp1 != '*' and bp2 != '*':\n return 'no diff', 0, 0\n\n if bp1 != '*' and bp2 == '*':\n five_prime_diff = 'deletion'\n\n elif bp1 == '*' and bp2 != '*':\n five_prime_diff = 'insertion'\n\n else:\n five_prime_diff = 'tbd'\n\n variant_len = 0\n for i in range(len(seq1)):\n\n bp1 = seq1[i]\n bp2 = seq2[i]\n\n if five_prime_diff == 'deletion':\n if bp1 != '*' and bp2 == '*':\n variant_len +=1 \n\n elif bp2 != '*':\n break \n\n elif five_prime_diff == 'insertion':\n if bp1 == '*' and bp2 != '*':\n variant_len +=1 \n\n elif bp1 != '*':\n break\n\n elif five_prime_diff == 'tbd':\n\n if bp1 == '*' and bp2 != '*':\n five_prime_diff = 'insertion'\n variant_len += 1 \n\n elif bp1 != '*' and bp2 == '*':\n five_prime_diff = 'deletion'\n variant_len += 1 \n\n elif bp1 != '*' and bp2 != '*':\n five_prime_diff = 'no diff'\n\n else:\n skip += 1\n \n return five_prime_diff, variant_len, skip", "def longestAwesome(self, s: str) -> int:\n\n # So we are moving right, and reducing length by 1\n # for every time we move right - we start from the longest substring that can be formed to lowest one\n # So the moment, we find something we can instantly breal\n\n max_length = 0\n\n if s == s[::-1]:\n return len(s)\n\n for i in range(0, len(s)):\n left = i\n right = len(s)\n\n if right - left > max_length:\n\n while right > left:\n\n candidate = s[left:right]\n # print(f\"The candidate is: {candidate}\")\n ctr = Counter(candidate)\n\n # initial base check\n odd_cnt = 0\n fl = False\n for k, v in ctr.items():\n if v & 1:\n odd_cnt += 1\n if odd_cnt > 1:\n fl = True\n break\n\n if not fl:\n if max_length < (right - left):\n max_length = right - left\n # max_length = max(max_length, len(candidate))\n\n right -= 1\n\n return max_length", "def get_longest_valid_repeated_nonoverlapping_substring(string, validity_function):\n string_length=len(string) \n table=[[0 for i2 in range(string_length+1)] for i in range(string_length+1)] \n\n repeated_substring_length=0\n repeated_substring=[]\n for i in range(1, string_length+1): \n for i2 in range(i+1, string_length+1): \n if (string[i-1]==string[i2-1] and table[i-1][i2-1]<(i2-i)): \n table[i][i2]=table[i-1][i2-1]+1\n if table[i][i2]>repeated_substring_length:\n candidate_string=string[i-table[i][i2]:i]\n if validity_function(candidate_string):\n repeated_substring_length=table[i][i2]\n repeated_substring=candidate_string\n else: \n table[i][i2]=0\n\n return repeated_substring", "def isAlternativeString(s):\n L = list(set(s))\n if len(L) != 2:\n return False\n for i in range(len(s) - 1):\n if s[i] == s[i + 1]:\n return False\n return True", "def contig_complementary_score(s1, s2) -> int:\n matcher = SequenceMatcher()\n matcher.set_seq1(str(s1))\n 
matcher.set_seq2(str(s2.complement()))\n _, _, size = matcher.find_longest_match(0, len(s1), 0, len(s2))\n return size" ]
[ "0.6090975", "0.6080663", "0.6072563", "0.6008404", "0.6008404", "0.6002933", "0.5976948", "0.5958161", "0.5908225", "0.58983845", "0.5886952", "0.5879738", "0.58791685", "0.5874548", "0.5846228", "0.5821024", "0.57775706", "0.5776557", "0.5747761", "0.5744451", "0.5737736", "0.5728471", "0.5728071", "0.5726042", "0.5715921", "0.5713786", "0.5713018", "0.5680885", "0.56772983", "0.56766933" ]
0.6970629
0
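As a hedged usage sketch for the record above (not itself a dataset record), the snippet below restates the positive document's count_sequence_mismatches and exercises it on two invented sequences: a perfect inverted repeat, which should report zero mismatches, and a variant carrying a single internal mismatch.

# Illustrative sketch only; mirrors the positive document above, test sequences invented.
def count_sequence_mismatches(seq):
    # complement table for the four DNA bases
    trans_table = str.maketrans('ACGT', 'TGCA')
    half_len = len(seq) // 2
    # complement of the second half; reading it back-to-front gives its reverse complement
    second_half = seq[-half_len:].translate(trans_table)
    mismatches = 0
    for i in range(half_len):
        if seq[i] != second_half[-i - 1]:
            mismatches += 1
    return mismatches

print(count_sequence_mismatches('ACGTACGT'))  # perfect inverted repeat -> 0
print(count_sequence_mismatches('ACGTAAGT'))  # one internal mismatch   -> 1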
Function to save the palindrome search results as a CSV file.
def write_palindromes_to_file(path, csv_name, results): if not os.path.exists(path): os.makedirs(path) df = pd.DataFrame(results, columns=['length', 'start', 'probability', 'mismatches', 'count', 'sequence']).sort_values(by='probability').reset_index(drop=True) csv_name = f'{csv_name}_pal_search.csv' df.to_csv(f'{path}/{csv_name}.gz', index=False, compression='gzip')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindrome(self):\n vas = []\n file = self.read1()\n print(file[0])\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n s_ii = s_i[::-1]\n if s_ii == s_i and s_i!= \"\":\n vas.append(s_i)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas", "def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)", "def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)", "def find_palindromes(self, start_file: str, result_file: str) -> list:\n input_words = self.read_file(start_file)\n result_words = []\n stack = ArrayStack()\n\n for word in input_words:\n for letter in word:\n stack.push(letter)\n\n reversed_line = ''\n\n while not stack.isEmpty():\n reversed_line += stack.pop()\n\n if word == reversed_line:\n result_words.append(word)\n\n if len(result_words) != 0:\n self.write_file(result_words, result_file)\n return result_words", "def write_file(self, lst_of_palidroms: list, result_file: str):\n with open(result_file, 'w', encoding='utf-8', errors='ignore') as result:\n for word in lst_of_palidroms:\n result.write(word + '\\n')", "def write_relevance_tocsv(relevance, corpus):\n csv_filename = config.CORPUS[corpus]['relevance_file']\n print('writing relevance')\n print(relevance)\n with open(csv_filename, 'w') as file:\n csv.writer(file).writerows((k,) + v for k, v in relevance.items())", "def write_results(self, results):\n predictions = open('hmm_results.csv', 'w')\n predictions.write(\"Type,Prediction\")\n for type in results:\n if type == 'O':\n continue\n predictions.write(\"\\n\" + str(type) + \",\")\n for interval in results[type]:\n predictions.write(str(interval) + \" \")\n predictions.close()", "def _split_palindrome(self):\n if not op.exists(self.sdp_out_file) or self.force_redo is True:\n self._self_align()\n\n logging.debug(\"Parsing sdp and detect plindrome reads\")\n split_table = {}\n with SDPReader(self.sdp_out_file) as reader:\n for sdp in reader:\n if sdp.score <= self.palindrome_score_cutoff:\n split_table[str(sdp.qID)] = sdp\n\n logging.debug(\"Splitting palindrom reads.\")\n with FastaReader(self.ori_all_reads_fasta) as reader, \\\n FastaWriter(self.tmp_all_reads_fasta) as writer, \\\n FastaWriter(self.palindrome_reads_fasta) as palindrome_writer:\n for r in reader:\n if r.name in split_table:\n # found a palindrome\n sdp = split_table[r.name]\n # Write palindrome subreads to palindrome_subreads.fasta\n palindrome_writer.writeRecord(r.name, r.sequence)\n#\n# # split this read in the middle\n# split_point = int(sdp.qstart +\n# (sdp.alnqstart + sdp.alnqend)/2)\n# # Write the first half\n# rname_1 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw, s=sdp.qstart,\n# e=split_point)\n# writer.writeRecord(rname_1,\n# r.sequence[0:(split_point-sdp.qstart)])\n#\n# # Write the second half\n# rname_2 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw,\n# s=(split_point+1), e=sdp.qend)\n# writer.writeRecord(rname_2,\n# 
r.sequence[(split_point-sdp.qstart):])\n else:\n writer.writeRecord(r.name, r.sequence)\n\n logging.debug(\"Moving {i} to {o}.\".format(i=self.tmp_all_reads_fasta,\n o=self.all_reads_fasta))\n shutil.move(self.tmp_all_reads_fasta, self.all_reads_fasta)", "def write_matches(matches: List[Result],out: str):\n data = pd.DataFrame(matches)\n data.to_csv(out,sep=\"\\t\",index=False)", "def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])", "def write_all_correct_answers_for_wrong_qs_in_csv(self):\n # from nose.tools import set_trace; set_trace()\n my_csv_file = open(\"output_csv\", \"w\")\n for form_card_ind in range(2, 5):\n for div_ind in range(2, 4):\n question_title = self.score_page.find_all_grading_questions_title(form_card_ind, div_ind)\n print question_title\n answers_list = self.score_page.find_all_correct_answers_from_each_section(form_card_ind, div_ind)\n print answers_list\n if len(answers_list):\n my_csv_file.write('Q: ' + str(question_title[0]) + \"\\n\")\n for each in answers_list:\n print each\n my_csv_file.write(str(each) + \"\\n\")", "def export_to_pairwise_csv(request, token, project):\n from appraise.local_settings import EXPORT_TOKEN\n if not token == EXPORT_TOKEN:\n return HttpResponseForbidden()\n \n annotation_project = get_object_or_404(Project, name=project)\n \n queryset = RankingResult.objects.filter(item__hit__completed=True)\n\n results = [u'srclang,trglang,srcIndex,segmentId,judgeId,' \\\n 'system1Id,system1rank,system2Id,system2rank,rankingID']\n \n for result in queryset:\n if isinstance(result, RankingResult):\n if result.item.hit.project_set.filter(id=annotation_project.id):\n current_csv = result.export_to_pairwise_csv()\n if current_csv is None:\n continue\n results.append(current_csv)\n \n export_csv = u\"\\n\".join(results)\n export_csv = export_csv + u\"\\n\"\n return HttpResponse(export_csv, mimetype='text/plain')", "def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n csvwriter.writerows(results)", "def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))", "def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def check_palindrome():", "def saveMatches(matches,personDict,fileOutName1,fileOutName2):\n sorted_x = sorted(matches.items(), key=operator.itemgetter(1),reverse=True)\n sorted_matches = []\n for i in sorted_x:\n sorted_matches.append(i[0])\n with open(fileOutName1, 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',')\n spamwriter.writerow(['EnterpriseID1','EnterpriseID2','MATCH_SCORE'])\n for p in sorted_matches:\n spamwriter.writerow([p[0],p[1],str(matches[p])])\n \n with 
open(fileOutName2, 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',')\n spamwriter.writerow(['EnterpriseID','LAST','FIRST','MIDDLE','SUFFIX','DOB','GENDER','SSN','ADDRESS1','ADDRESS2','ZIP','MOTHERS_MAIDEN_NAME','MRN','CITY','STATE','PHONE','PHONE2','EMAIL','ALIAS'])\n for p in sorted_matches:\n spamwriter.writerow(list(personDict['EnterpriseID'][p[0]]))\n spamwriter.writerow(list(personDict['EnterpriseID'][p[1]]))\n spamwriter.writerow([])", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def convert_to_csv(self, branch):\n names = [\"CSE_results.csv\", \"IT_results.csv\"]\n self.results = {\"ROLL_NO\": self.roll_nos, \"Name\": self.names, \"SGPA\": self.sgpa}\n print(self.results)\n df = DataFrame.from_dict(self.results)\n df.to_csv(names[branch], index=False)", "def export_csv_search(cart, tag = None):\n # Reads all the tweets in folder\n try:\n tweets = tbf.load_stream(cart, tag = tag)\n\n if tag is None:\n ii = cart.index('/#')\n tag = cart[ii+1:-1]\n\n nodes = np.unique(np.array([twe.user_name for twe in tweets]))\n #links_A = [lin.name_A for lin in twe.link_to]\n\n links_A = []\n links_B = []\n for twe in tweets:\n links_A += [lin.name_A for lin in twe.link_to]\n links_B += [lin.name_B for lin in twe.link_to]\n\n #tbf.export_csv(links_A, links_B)\n fileo = open(cart + tag + '_links.csv', 'w')\n filecsv = csv.writer(fileo,delimiter='\\t')\n\n for A, B in zip(links_A, links_B):\n filecsv.writerow([A,B])\n\n fileo.close()\n status = True\n cazzillo = None\n\n except Exception as cazzillo:\n print(cazzillo)\n status = False\n\n return status, cazzillo", "def write_to_csv(self, output_dir, delimiter, include_language, filename=None):\n\n if filename is not None:\n self.filename = filename\n\n if len(self.values) == 0:\n logger.info(\"Nothing to export.\")\n return\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n file_path = os.path.join(output_dir, self.filename)\n\n # write search results to UTF8-encoded CSV file (see also http://stackoverflow.com/a/844443)\n with codecs.open(file_path, 'w', encoding='utf8') as fp:\n logger.info('Exporting search results to ' + file_path + '...')\n writer = csv.writer(fp, delimiter=delimiter)\n\n column_names = SearchResult.get_column_names(include_language)\n\n # write header of CSV file\n writer.writerow(column_names)\n\n count = 0\n try:\n for row in self.get_rows(include_language):\n if len(row) == len(column_names):\n writer.writerow(row)\n count = count + 1\n else:\n raise IllegalArgumentError(\n str(abs(len(column_names) - len(row))) + ' parameter(s) is/are missing for \"'\n + str(row) + '\"')\n\n except UnicodeEncodeError:\n logger.error('Encoding error while writing data for: ' + str(row))\n\n logger.info(str(count) + ' search results have been exported.')", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()", "def write_results(results):\n fields = results[0].keys()\n with open('results.csv', 'w') as f:\n dw = csv.DictWriter(f, fieldnames=fields, delimiter='|')\n 
dw.writer.writerow(list(dw.fieldnames))\n dw.writerows(results)", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def to_csv(self, path):\n results = self.all()\n if self.stop_check is not None and self.stop_check():\n return\n results.to_csv(path)", "def save_output(pris):\n pris.to_csv('reactors_pris_2016.csv',\n index=False,\n sep=',',\n )", "def save_to_csv(info):\n all_ranks = sorted(output_data(info)[0], key=lambda x: x[1])\n with open('page_ranks.csv', 'w') as file:\n file.write('\\t\\t' + \"Contains ranks of all pages\" + '\\n')\n for rank in all_ranks:\n file.write('Page number: ' + str(rank[1]) + ', page rank:' + str(rank[0]) + '\\n')", "def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)", "def download_to_csv(self, search_results, filename):\n\n current_page = search_results\n\n with open(filename, \"w\") as csvfile:\n fieldnames = [\"id\", \"name\", \"name_abbreviation\", \"decision_date\", \"court_id\", \"court_name\", \"court_slug\",\n \"judges\", \"attorneys\", \"citations\", \"url\", \"head\", \"body\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n while True:\n for case in current_page[\"results\"]:\n case_data = {\n \"id\": case[\"id\"],\n \"name\": case[\"name\"],\n \"name_abbreviation\": case[\"name_abbreviation\"],\n \"decision_date\": case[\"decision_date\"],\n \"court_id\": case[\"court\"][\"id\"],\n \"court_name\": case[\"court\"][\"name\"],\n \"court_slug\": case[\"court\"][\"slug\"],\n \"judges\": str(case[\"casebody\"][\"data\"][\"judges\"]),\n \"attorneys\": str(case[\"casebody\"][\"data\"][\"attorneys\"]),\n \"citations\": str(case[\"citations\"]),\n \"url\": case[\"url\"],\n \"head\": case[\"casebody\"][\"data\"][\"head_matter\"],\n \"body\": case[\"casebody\"][\"data\"][\"opinions\"][0][\"text\"]\n }\n writer.writerow(case_data)\n\n try:\n next_result = self._request(current_page[\"next\"])\n current_page = next_result.json()\n\n except:\n break\n\n print(\"Downloaded \" + str(search_results[\"count\"]) + \" court cases to file \" + filename + \".\")" ]
[ "0.6158536", "0.5577743", "0.5539694", "0.553265", "0.55125475", "0.5374812", "0.5370445", "0.53692037", "0.5338849", "0.53149337", "0.52733535", "0.5226261", "0.5215528", "0.5195561", "0.51878613", "0.5160146", "0.5150842", "0.5132658", "0.5131876", "0.5128219", "0.5125109", "0.5122458", "0.5116616", "0.51125383", "0.51125383", "0.5108728", "0.5100285", "0.5094004", "0.5049363", "0.5042197" ]
0.7444958
0
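As a hedged usage sketch for the record above (not itself a dataset record), the snippet below restates write_palindromes_to_file and calls it on two invented result rows; it assumes pandas is installed and that each row follows the column order used by the positive document: length, start, probability, mismatches, count, sequence. The output directory and file stem are hypothetical names chosen for the demonstration.

# Illustrative sketch only; the function mirrors the positive document above.
import os
import pandas as pd

def write_palindromes_to_file(path, csv_name, results):
    if not os.path.exists(path):
        os.makedirs(path)
    # build the table, order rows by probability, and drop the old index
    df = (pd.DataFrame(results,
                       columns=['length', 'start', 'probability',
                                'mismatches', 'count', 'sequence'])
            .sort_values(by='probability')
            .reset_index(drop=True))
    csv_name = f'{csv_name}_pal_search.csv'
    # write a gzip-compressed CSV without the index column
    df.to_csv(f'{path}/{csv_name}.gz', index=False, compression='gzip')

# Hypothetical rows and output directory, purely for demonstration.
rows = [
    [8, 12, 1.2e-4, 0, 2, 'ACGTACGT'],
    [6, 40, 3.5e-3, 1, 1, 'ACGCGT'],
]
write_palindromes_to_file('pal_output', 'sample_genome', rows)  # -> pal_output/sample_genome_pal_search.csv.gz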
Function to search through a data set (a list of lists) to check whether any palindrome sequence has another palindrome inserted within it. For example, if m is the longer palindrome sequence and p start > m start and p end < m end, then p is inside m. The output should be a new data set containing only the unique palindromes derived from the content of the input file.
def repeated_palindrome(palindromes_list): # the list is ordered in the reversed form (long to short) ordered_palindrome = sorted(palindromes_list) longest_first = ordered_palindrome[::-1] # initialize a new list to receive unique plaindromes data pal_list = [longest_first[0]] # the longest palindrome cannot fit in any other sequence # iterates over the longest_first original palindromes # get the start and end positions for data in longest_first: start = data[1] end = start + data[0] # iterate through the pal_list and # compare the start and end of the potential and palindromes # to check if the potential palindrome is unique. unique_palindrome = None for dat in pal_list: start_unique = dat[1] end_unique = start_unique + dat[0] # statement should test to check if the test palindrome fits # inside any of the identified 'real/unique' palindromes. if start >= start_unique and end <= end_unique: # if the palindrome tested fits inside unique_palindrome = False break else: # other wise it is unique unique_palindrome = True if unique_palindrome: # check if if it is not in the list if data not in pal_list: pal_list += [data] return pal_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_palindromes(self, start_file: str, result_file: str) -> list:\n input_words = self.read_file(start_file)\n result_words = []\n stack = ArrayStack()\n\n for word in input_words:\n for letter in word:\n stack.push(letter)\n\n reversed_line = ''\n\n while not stack.isEmpty():\n reversed_line += stack.pop()\n\n if word == reversed_line:\n result_words.append(word)\n\n if len(result_words) != 0:\n self.write_file(result_words, result_file)\n return result_words", "def search_palindromes(src_file, min_len):\n #Get digit source\n source = NumReader(src_file)\n #Old digits. Should always be length 100-200, unless there aren't enough digits.\n old_d = []\n #Current digit (possibly None)\n cur_d = source.read(1)[0]\n #Future digits. Should always be length 100-200, unless there aren't enough digits.\n next_d = source.read(100)\n #List of accumulated palindromes as strings\n pals = []\n\n #Keep running until out of digits\n while source.has_digits:\n #Look for palindrome centered at current digit\n branch_len = pal_length(old_d, next_d)\n cur_length = 1 + 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Look for \"even\" palindrome centered at current digit\n #Shift current digit into old buffer\n old_d.insert(0, cur_d)\n cur_d = None\n branch_len = pal_length(old_d, next_d)\n cur_length = 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Pull next digit\n cur_d = next_d.pop(0)\n\n #Maintain buffers\n if len(old_d) > 50:\n old_d = old_d[:50]\n if len(next_d) < 50:\n next_d += source.read(50)\n return pals", "def get_palindromes(kmer_list):\n rev_kmers = [get_reverse_complement(kmer) for kmer in kmer_list]\n palindromes = set()\n for mer1, mer2 in zip(kmer_list, rev_kmers):\n if check_is_palindrome(mer1, mer2):\n palindromes.add(mer1)\n return palindromes", "def palindrome(self):\n vas = []\n file = self.read1()\n print(file[0])\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n s_ii = s_i[::-1]\n if s_ii == s_i and s_i!= \"\":\n vas.append(s_i)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas", "def find_palindromes(self):\n\t\tself.square_palindromes = [x for x in self.squares if self.is_palindrome(x)]", "def test_palendrome_long_list_true():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('c')\n test_ll.push('d')\n test_ll.push('c')\n test_ll.push('b')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is True", "def find_reversed(word_list):\n reversed_list = []\n word_set = set(word_list)\n for word in word_list:\n if word[::-1] in word_set and not check_palindrome(word):\n reversed_list.append(word)\n return reversed_list", "def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True", "def check_the_palindromes_starts(palindromes_list):\n # to get all the start positions\n starts = [(s[1]) for s in palindromes_list]\n # all the data sorted\n palin = palindromes_list[1:]\n 
sorted(palin)\n # the sorted function orders the list for low to high\n longest_ordered = sorted(starts, reverse=True)\n # empty list to append the results to\n data = []\n # iteration through the start positions to return the results of the start\n # position minus the next start position in the list\n for i in range(len(longest_ordered) - 1):\n j = i + 1\n value = longest_ordered[i] - longest_ordered[j]\n data.append(value)\n return data", "def _split_palindrome(self):\n if not op.exists(self.sdp_out_file) or self.force_redo is True:\n self._self_align()\n\n logging.debug(\"Parsing sdp and detect plindrome reads\")\n split_table = {}\n with SDPReader(self.sdp_out_file) as reader:\n for sdp in reader:\n if sdp.score <= self.palindrome_score_cutoff:\n split_table[str(sdp.qID)] = sdp\n\n logging.debug(\"Splitting palindrom reads.\")\n with FastaReader(self.ori_all_reads_fasta) as reader, \\\n FastaWriter(self.tmp_all_reads_fasta) as writer, \\\n FastaWriter(self.palindrome_reads_fasta) as palindrome_writer:\n for r in reader:\n if r.name in split_table:\n # found a palindrome\n sdp = split_table[r.name]\n # Write palindrome subreads to palindrome_subreads.fasta\n palindrome_writer.writeRecord(r.name, r.sequence)\n#\n# # split this read in the middle\n# split_point = int(sdp.qstart +\n# (sdp.alnqstart + sdp.alnqend)/2)\n# # Write the first half\n# rname_1 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw, s=sdp.qstart,\n# e=split_point)\n# writer.writeRecord(rname_1,\n# r.sequence[0:(split_point-sdp.qstart)])\n#\n# # Write the second half\n# rname_2 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw,\n# s=(split_point+1), e=sdp.qend)\n# writer.writeRecord(rname_2,\n# r.sequence[(split_point-sdp.qstart):])\n else:\n writer.writeRecord(r.name, r.sequence)\n\n logging.debug(\"Moving {i} to {o}.\".format(i=self.tmp_all_reads_fasta,\n o=self.all_reads_fasta))\n shutil.move(self.tmp_all_reads_fasta, self.all_reads_fasta)", "def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal", "def check_palindrome_using_reverse(self):\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res", "def check_palindrome():", "def is_palindrome_v3(s):\n i = 0\n j = len(s)-1\n\n while i < j and s[i] == s[j]:\n i = i + 1\n j = j -1\n\n return j <= i", "def main():\n for l in range(999,890,-1):\n for r in range(999,890,-1):\n num= l*r\n ans= palindrome_check(num)\n if ans:\n print l,r,num\n return\n print l,r,num\n print \"No palindrome found.\"\n return", "def palindrome(sll):\n\n node = sll.head\n counter = 1\n half_len = ceil(len(sll) / 2)\n\n while node is not None:\n if counter >= half_len:\n break\n elif node.data != k_to_last(sll, counter):\n return False\n else:\n counter += 1\n node = node.next\n return True", "def find_palindromes(word_list):\n palindrome_list = []\n\n for word in 
word_list:\n if check_palindrome(word):\n palindrome_list.append(word)\n\n return palindrome_list", "def isPalendrome(number):\n\t\n\tnum = str(number)\n\ti \t= 0\n\tj \t= len(num) - 1\n\tmid = len(num) // 2\n\n\t#print(mid)\n\t\n\t# While i and j are not in the middle\n\twhile( i != mid):\n\t\t#print(i,j,sep=\"\\t\")\n\t\t#print(num[i],num[j], sep=\"\\t\")\n\t\tif(num[i] != num[j]):\n\t\t\treturn(False)\n\t\telse:\n\t\t\ti = i + 1\n\t\t\tj = j - 1\n\n\treturn(True)", "def palindromePairs(self, words: List[str]) -> List[List[int]]:\n d = {w : i for i, w in enumerate(words)}\n \n res = []\n for idx, word in enumerate(words):\n for i in range(len(word)+1):\n str1 = word[:i]\n str2 = word[i:]\n # first part should be palindrome, second part (reverse) should be in w\n if str1 == str1[::-1]:\n back = str2[::-1]\n if back in d and back != word:\n res.append([d[str2[::-1]], idx])\n # second part should be palindrome, first part (reverse) should be in w\n if str2 and str2 == str2[::-1]: # if the last part is empty, it is calculated before \n back = str1[::-1]\n if back in d and back != word: \n res.append([idx, d[str1[::-1]]])\n # print(res)\n return res", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def palindrome_search(sequence, min_len, max_len, alphabet, prob_cutoff=None):\n # get the sequence complement\n trans_table = str.maketrans('ACGT', 'TGCA')\n seq_complement = sequence.translate(trans_table)\n # gets the base composition\n nucs = base_stats(sequence, alphabet, False, True)\n # define maches bases\n matches = ['AT', 'TA', 'GC', 'CG']\n # probability of a match according tho the background\n p_match = 0\n # iterates tohrough the bases matches\n for b in matches:\n # calculate the probabilities\n p_match += nucs[b[0]] * nucs[b[1]]\n # checks if the results matches\n assert p_match == sum([nucs[b[0]] * nucs[b[1]] for b in matches])\n # initialize the container of possible probability using length and mismatches\n # as the indexes\n prob_dict = defaultdict(float)\n # iterates through the range of lengths\n for length in range(min_len, max_len):\n # iterates throught the half of the sequence\n for mismatches in range(0, (length // 2) + 1):\n # get the probabilities and number the mismatches\n p = probability(length, mismatches, p_match)\n prob_dict[(length, mismatches)] = prob_dict.get((length, mismatches), 0.0) + p\n # create an container for the results\n palindromes = []\n # iterates through the range of lengths\n for length in range(min_len, max_len):\n # defined mismatch threshold\n half_l = length // 2\n mismatches_cutoff = 0.5 * half_l\n half_list = range(half_l)\n # iterates throught to find the starts\n for start in range(0, (len(sequence) - length + 1)):\n # gets the putative palindromes\n seq = sequence[start:start + length]\n # gets the complement\n seq_comp = seq_complement[start:start + length]\n mismatches = 0\n # iterates throught the half lengths\n for i in half_list:\n # check for mismatches and increment the counts\n if seq[i] != seq_comp[-i - 1]:\n mismatches += 1\n # check if the number of mismatches is allowed\n if mismatches <= mismatches_cutoff:\n # look up the probability,\n pr = prob_dict[(length, mismatches)]\n # if it passes the cutoff\n if pr <= prob_cutoff:\n # add the results into the container\n # count the number of the palindrome in the 
sequence\n cnt_pal = get_pattern_count(sequence, seq)\n palindromes += [[length, start, pr, mismatches, cnt_pal, seq]]\n return palindromes", "def palindromePairs(lst):\n results = []\n for i, e1 in enumerate(lst):\n for j, e2 in enumerate(lst):\n if i != j:\n if isPalindrome(e1+e2):\n results.append((i, j))\n return results", "def match(list_string):\n assert type(list_string)==list\n for i in list_string:\n assert type(i)==str\n assert i.isalpha()\n #Loops through all the possible substrings of the list of words to find the word pairs that are palindromes.\n my_match = []\n for i in range(0,len(list_string)):\n for j in range(0,len(list_string)):\n if i!=j:\n a = list_string[i]\n b = list_string[j]\n c = a+b\n d = b+a\n if c==c[::-1]:\n if (i,j) not in my_match:\n my_match.append((i,j))\n elif d==d[::-1]:\n if (j,i) not in my_match:\n my_match.append((j,i))\n return my_match", "def palindrome_reads_fasta(self):\n return op.join(self.out_dir, \"palindrome_subreads.fasta\")", "def check_is_palindrome(mer1, mer2):\n return mer1.find(mer2[::-1]) == 0", "def num_palindrome():\n nums = map(str, range(1000000))\n odo = []\n for i in range(len(nums)):\n if len(nums[i]) < 6:\n odo.append('0'*(6-len(nums[i])) + nums[i])\n elif len(nums[i]) == 6:\n odo.append(nums[i])\n \n for i in range(len(odo)-3): \n first = odo[i][2:] == odo[i][:1:-1]\n second = odo[i+1][1:] == odo[i+1][:0:-1]\n third = odo[i+2][1:5] == odo[i+2][4:0:-1]\n fourth = odo[i+3][:] == odo[i+3][::-1]\n if first & second & third & fourth:\n print 'A possible odometer reading is '+odo[i]", "def is_palindromic(lst):\n return all( lst[i] == lst[-(i+1)] for i in range(len(lst)) )", "def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n", "def palCheck(input_string):\n\n # ADD NECESSARY LINES OF CODE SO THAT ALL UNITTESTS PASS\n\n d = Deque()\n for char in input_string:\n d.addFront(char)\n\n while d.size() > 1:\n firstChar = d.removeRear()\n lastChar = d.removeFront()\n if firstChar != lastChar:\n print(\"No, '\" + input_string + \"', is not a palindrom\")\n return False\n\n print(\"Yes, '\" + input_string + \"', is a palindrom!!\")\n return True", "def is_palindrome(sub):\n for i in range(len(sub)):\n if sub[i] != sub[len(sub) - i - 1]:\n return False\n return True" ]
[ "0.69024855", "0.6746088", "0.64815366", "0.6367615", "0.6211623", "0.6194972", "0.61857194", "0.61326253", "0.61166674", "0.6096614", "0.6090969", "0.60547453", "0.60349464", "0.6011474", "0.59989303", "0.59753597", "0.5967636", "0.5963775", "0.5936155", "0.5925398", "0.59026027", "0.58816445", "0.58208245", "0.5789174", "0.5752623", "0.57352734", "0.56930906", "0.568839", "0.5672578", "0.56386244" ]
0.73361176
0
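As a hedged usage sketch for the record above (not itself a dataset record), the snippet below restates repeated_palindrome in a slightly condensed but logically equivalent form and applies it to three invented [length, start, sequence] entries, one of which is nested inside a longer palindrome and is therefore dropped from the output.

# Illustrative sketch only; the logic mirrors the positive document above.
def repeated_palindrome(palindromes_list):
    # order long-to-short so every candidate is checked against longer ones first
    longest_first = sorted(palindromes_list)[::-1]
    pal_list = [longest_first[0]]
    for data in longest_first:
        start = data[1]
        end = start + data[0]
        unique_palindrome = None
        for dat in pal_list:
            start_unique = dat[1]
            end_unique = start_unique + dat[0]
            # candidate sits entirely inside an already-kept palindrome -> not unique
            if start >= start_unique and end <= end_unique:
                unique_palindrome = False
                break
            unique_palindrome = True
        if unique_palindrome and data not in pal_list:
            pal_list += [data]
    return pal_list

candidates = [
    [10, 5, 'AAAAATTTTT'],  # spans positions 5-15
    [4, 8, 'AATT'],         # nested inside the entry above
    [6, 20, 'ACGCGT'],      # a separate site
]
print(repeated_palindrome(candidates))
# -> [[10, 5, 'AAAAATTTTT'], [6, 20, 'ACGCGT']]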
This is a function that returns each start position minus the next start position in an ordered data set of identified palindromes.
def check_the_palindromes_starts(palindromes_list): # to get all the start positions starts = [(s[1]) for s in palindromes_list] # all the data sorted palin = palindromes_list[1:] sorted(palin) # the sorted function orders the list for low to high longest_ordered = sorted(starts, reverse=True) # empty list to append the results to data = [] # iteration through the start positions to return the results of the start # position minus the next start position in the list for i in range(len(longest_ordered) - 1): j = i + 1 value = longest_ordered[i] - longest_ordered[j] data.append(value) return data
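As a hedged usage sketch for the record above (not itself a dataset record), the snippet below keeps only the lines of check_the_palindromes_starts that affect its return value (the original's intermediate sorted(palin) call does not feed into the result) and runs it on three invented [length, start, sequence] entries.

# Illustrative sketch only; condensed from the positive document above.
def check_the_palindromes_starts(palindromes_list):
    starts = [s[1] for s in palindromes_list]       # collect the start positions
    longest_ordered = sorted(starts, reverse=True)  # order them high to low
    data = []
    for i in range(len(longest_ordered) - 1):
        # gap between one start and the next lower one
        data.append(longest_ordered[i] - longest_ordered[i + 1])
    return data

palindromes = [[10, 5, 'AAAAATTTTT'], [6, 20, 'ACGCGT'], [4, 42, 'AATT']]
print(check_the_palindromes_starts(palindromes))  # starts 42, 20, 5 -> [22, 15]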
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True", "def find_reverse_palindromes(dna: str, min_len: int=4, max_len: int=12, zero_based: bool=True):\n def helper_for_non_zero_based(indexes: List[Tuple[int, int]]):\n if not zero_based:\n return [(i + 1, l) for i, l in indexes]\n else:\n return indexes\n\n length = len(dna)\n result = []\n for i in range(length):\n for l in range(min(min_len, length - i), min(max_len + 1, length - i + 1)):\n if l > max_len or l < min_len:\n continue\n sub_dna = dna[i: i + l]\n if sub_dna == reverse_complement(sub_dna):\n result.append((i, l))\n return helper_for_non_zero_based(result)", "def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal", "def shortestPalindrome(self, string):\n\t\tif not string:\n\t\t\treturn ''\n\t\tright = 0\n\t\tcenter = 0\n\t\tdataString = string\n\t\tstring = self.interleave(string)\n\t\tdps = [0] * len(string)\n\t\t\n\t\tfor i in range(1, len(string)):\n\t\t\tmirror = 2*center - i\n\t\t\tif i + dps[mirror] < right:\n\t\t\t\tdps[i] = dps[mirror]\n\t\t\telse:\n\t\t\t\tcenter = i\n\t\t\t\tmirror = 2 * center - right - 1\n\t\t\t\tridx = right + 1\n\t\t\t\t# print (i, center, right, mirror)\n\t\t\t\twhile ridx < len(string):\n\t\t\t\t\tif mirror >= 0 and string[mirror] == string[ridx]:\n\t\t\t\t\t\tmirror -= 1\n\t\t\t\t\t\tridx += 1\n\t\t\t\t\telse :\n\t\t\t\t\t\tbreak\n\t\t\t\t# print (i, center, ridx, mirror)\n\t\t\t\tright = ridx - 1\n\t\t\t\tdps[i] = right - i\n\n\t\t# print (string)\n\t\tidx = len(dps) - 1\n\t\twhile idx > 0:\n\t\t\tif idx == dps[idx]:\n\t\t\t\tbreak\n\t\t\tidx -= 1\n\t\t# print (idx, 'idx')\n\t\treturn dataString[:idx - 1 - len(dataString): -1] + dataString", "def ascents(self):\n a = self.array_form\n pos = [i for i in xrange(len(a)-1) if a[i] < a[i+1]]\n return pos", "def compute_revoffset_pos(seq, pos):\n\n cnt = 0 \n for c in seq:\n if c in msa_characters:\n cnt += 1\n return pos - cnt", "def listPosition(word):\n if len(word) == 1: return 1\n pos = 0\n for c in set(word):\n if c < word[0]:\n letters = list(word)\n letters.remove(c)\n pos += arrangements(letters)\n pos += listPosition(word[1:])\n return pos", "def find_rpt_coords(self) -> (int, int):\n start_size = self.size\n end_size = self.size + len(self.allele)\n coord = self.coord\n fasta_alt = self.fasta_alt\n while self.allele == fasta_alt:\n coord += len(self.allele)\n start_size += len(self.allele)\n end_size += len(self.allele)\n fasta_alt = self.seq[start_size:end_size]\n new_start = coord - len(self.allele)\n new_end = new_start + len(self.allele) - 1\n return new_start, new_end", "def get_start(i,v):\n return i-v[i]-1", "def longest_palindromic_substring(s):\n longest = s[0] if len(s) > 0 else \"\"\n for i in range(len(s)):\n j = len(s)\n while s[i] in s[i+1:j] and j <= len(s):\n j = s[i + 1:j].rfind(s[i]) + i + 2\n print(i, j)\n if is_palindrome(s[i:j]) and len(longest) < len(s[i:j]):\n longest = s[i:j]\n j = len(s) + 1\n else:\n j -= 1\n if len(s) - len(longest) <= i:\n break\n return longest", "def search_palindromes(src_file, min_len):\n #Get digit source\n source = NumReader(src_file)\n #Old digits. 
Should always be length 100-200, unless there aren't enough digits.\n old_d = []\n #Current digit (possibly None)\n cur_d = source.read(1)[0]\n #Future digits. Should always be length 100-200, unless there aren't enough digits.\n next_d = source.read(100)\n #List of accumulated palindromes as strings\n pals = []\n\n #Keep running until out of digits\n while source.has_digits:\n #Look for palindrome centered at current digit\n branch_len = pal_length(old_d, next_d)\n cur_length = 1 + 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Look for \"even\" palindrome centered at current digit\n #Shift current digit into old buffer\n old_d.insert(0, cur_d)\n cur_d = None\n branch_len = pal_length(old_d, next_d)\n cur_length = 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Pull next digit\n cur_d = next_d.pop(0)\n\n #Maintain buffers\n if len(old_d) > 50:\n old_d = old_d[:50]\n if len(next_d) < 50:\n next_d += source.read(50)\n return pals", "def palindromePairs(self, words: List[str]) -> List[List[int]]:\n d = {w : i for i, w in enumerate(words)}\n \n res = []\n for idx, word in enumerate(words):\n for i in range(len(word)+1):\n str1 = word[:i]\n str2 = word[i:]\n # first part should be palindrome, second part (reverse) should be in w\n if str1 == str1[::-1]:\n back = str2[::-1]\n if back in d and back != word:\n res.append([d[str2[::-1]], idx])\n # second part should be palindrome, first part (reverse) should be in w\n if str2 and str2 == str2[::-1]: # if the last part is empty, it is calculated before \n back = str1[::-1]\n if back in d and back != word: \n res.append([idx, d[str1[::-1]]])\n # print(res)\n return res", "def predecessor_pair(basepair, start, stop):\n\tx , y = basepair\n\tif (x - 1 < start) or (y + 1 > stop):\n\t\treturn (-1,-1)\n\telse:\n\t\treturn ( x - 1 , y + 1 )", "def descents(self):\n a = self.array_form\n pos = [i for i in xrange(len(a)-1) if a[i] > a[i+1]]\n return pos", "def num_palindrome():\n nums = map(str, range(1000000))\n odo = []\n for i in range(len(nums)):\n if len(nums[i]) < 6:\n odo.append('0'*(6-len(nums[i])) + nums[i])\n elif len(nums[i]) == 6:\n odo.append(nums[i])\n \n for i in range(len(odo)-3): \n first = odo[i][2:] == odo[i][:1:-1]\n second = odo[i+1][1:] == odo[i+1][:0:-1]\n third = odo[i+2][1:5] == odo[i+2][4:0:-1]\n fourth = odo[i+3][:] == odo[i+3][::-1]\n if first & second & third & fourth:\n print 'A possible odometer reading is '+odo[i]", "def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]", "def reverse_difference():", "def rle(inarray):\n ia = np.asarray(inarray) # force numpy\n n = len(ia)\n if n == 0: \n return (None, None, None)\n else:\n y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)\n i = np.append(np.where(y), n - 1) # must include last element posi\n z = np.diff(np.append(-1, i)) # run lengths\n p = 
np.cumsum(np.append(0, z))[:-1] # positions\n return(z, p, ia[i])", "def palindromePairs(lst):\n results = []\n for i, e1 in enumerate(lst):\n for j, e2 in enumerate(lst):\n if i != j:\n if isPalindrome(e1+e2):\n results.append((i, j))\n return results", "def find_palindromes(self, start_file: str, result_file: str) -> list:\n input_words = self.read_file(start_file)\n result_words = []\n stack = ArrayStack()\n\n for word in input_words:\n for letter in word:\n stack.push(letter)\n\n reversed_line = ''\n\n while not stack.isEmpty():\n reversed_line += stack.pop()\n\n if word == reversed_line:\n result_words.append(word)\n\n if len(result_words) != 0:\n self.write_file(result_words, result_file)\n return result_words", "def seq_numbers_diff(start_seq: int, end_seq: int) -> int:\n if start_seq < 0 or end_seq < 0:\n return None\n if start_seq > end_seq:\n return end_seq + (SEQ_NUM_MOD_CONST - start_seq)\n else:\n return end_seq - start_seq", "def longestPalindromeSubseq(self, s: str) -> int:\n n = len(s)\n dp = [[1] * n for _ in range(n)]\n for length in range(1, n + 1):\n for i in range(n - length + 1):\n j = i + length - 1\n print(i, j)\n if length == 1:\n dp[i][j] = 1\n elif s[i] == s[j]:\n dp[i][j] = dp[i + 1][j - 1] + 2\n else:\n dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])\n return dp[0][n - 1]", "def get_longest_palindrome(v,s):\n m,j = max( (x,i) for i,x in enumerate(v) )\n start = j//2 - m//2\n return s[start:start+m]", "def get_rel_pos(gRNA, min_anchor_length):\n if gRNA['cassette_label'] == 'Orphan':\n return -gRNA['gene_rel_start']\n\n if gRNA['strand'] == 'coding':\n rel_pos = gRNA['circle_start']-gRNA['forward_end']-gRNA['gene_rel_start']\n else:\n rel_pos = gRNA['reverse_start']-gRNA['gene_rel_start']-gRNA['circle_end']\n if rel_pos is pd.NA:\n rel_pos = 0\n\n if rel_pos < 0:\n # find position of first non-WC in pairing\n # If the resulting shortening of the alignment causes the anchor to be\n # less than the min anchor length, make rel_pos just past the non-WC bp\n match = mm_regex.search(gRNA['pairing'][::-1])\n mm_dist = match.start(0)\n if mm_dist + rel_pos < min_anchor_length:\n rel_pos = -(mm_dist+1)\n return rel_pos", "def longestPalindrome(self, s):\n if not s:\n return 0\n #init i and list\n i = 0\n singles = []\n while i < len(s):\n count_in_singles = singles.count(s[i])\n if count_in_singles > 0:\n singles.pop(singles.index(s[i]))\n else:\n singles.append(s[i])\n i += 1\n if len(singles) > 0:\n len_longest_palindrome = len(s) - len(singles) + 1\n else:\n len_longest_palindrome = len(s)\n return len_longest_palindrome", "def palindrome():\n c = 0\n d = ''\n e = 0\n f = 0\n g = 0\n for a in range(100, 1000):\n for b in range(100, 1000):\n c = a * b\n d = str(c)\n if d == d[::-1] and c > e:\n e = c\n f = a\n g = b\n return e", "def propagate(v,s):\n # if the palindrome at the current center expends until the\n # end of s, we have a certain length for all subpalindromes\n # to the right of this center\n suffix = is_suffix(v,s)\n\n # consider the length of the palindrome\n # centered at v's tail in order to further populate\n # v with what we can for sure predict\n l = v[-1]\n c = len(v)-1\n for j in reversed(range(c-l+1, c)):\n pre = is_prefix(c,j,v)\n if not pre or suffix: # we have a definite answer\n v.append(v[j])\n else: # pre and no suffix\n break\n # if it's a prefix, we can only give lower\n # bounds, but we'd have to check the palindrome's\n # length anyway, so we don't get much.", "def palindrome(x):\n pass", "def palindromes():\n for n in count(1):\n if str(n) 
== str(n)[::-1]:\n yield n", "def search_start_end_index_in_sentence(sent, np):\n\n nps = [x for x in np.split() if x]\n if len(nps) == 0:\n return (-1, -1)\n elif len(nps) == 1:\n indices = search_one_token_reducing_suffix(sent, np)\n if len(indices) > 0:\n return (indices[0], search_next_whitespace(sent, indices[0]))\n else:\n return (-1, -1)\n else:\n # search start:\n start = search_correct_position(sent, nps)\n end = search_correct_position(sent, nps, True)\n if end != -1:\n end = search_next_whitespace(sent, end)\n return (start,end)" ]
[ "0.6057061", "0.6030277", "0.59521663", "0.59443873", "0.5820138", "0.5816679", "0.5804056", "0.57669723", "0.5748433", "0.57475626", "0.5710381", "0.5705939", "0.5700604", "0.56801945", "0.5648704", "0.56265986", "0.5622517", "0.56057376", "0.5605318", "0.5587142", "0.5563741", "0.5561266", "0.55423135", "0.552865", "0.54640645", "0.5463761", "0.5452293", "0.5445537", "0.54226565", "0.5410516" ]
0.72034436
0
Function to clean up all ambiguous bases in a sequence. Ambiguous bases are bases that are not in the sequence alphabet, i.e. 'ACGT' for DNA sequences.
def cleaning_ambiguous_bases(seq): # compile the regex with all ambiguous bases pat = re.compile(r'[NRYWXSKM]') # look for the ambiguous bases and replace by # nothing return re.sub(pat, '', seq)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sequence_cleaner(sequence, alphabet):\n seq = sequence.upper()\n sequence = [base for base in seq if base in alphabet]\n return ''.join(sequence)", "def check_and_clean_sequence(sequence, alphabet):\n if set(sequence).issubset(alphabet):\n return sequence\n else:\n return cleaning_ambiguous_bases(sequence)", "def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())", "def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)", "def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)", "def test_preprocess_ambi_trunc(self):\r\n\r\n # Will truncate sequences at the first N character, remove one seq\r\n # due to being less than 30 nucleotides following truncation\r\n # counting the sequence length of the barcodes + primer\r\n\r\n fasta_files = [self.sample_fasta_ambi_chars_f]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 30\r\n max_seq_len = 1000\r\n min_qual_score = 22\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 0\r\n trim_seq_len = False\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = False\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n reverse_primer_mismatches = 0\r\n truncate_ambi_bases = True\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n reverse_primer_mismatches,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field,\r\n truncate_ambi_bases)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = [\r\n '>s1_1 a orig_bc=ACACATGTCTAC new_bc=ACACATGTCTAC bc_diffs=0\\n',\r\n 'CCCTTATATATAT\\n',\r\n '>s3_2 c orig_bc=AACTGTGCGTAC new_bc=AACTGTGCGTAC bc_diffs=0\\n',\r\n 'AACCGGCCGGTT\\n',\r\n '>s1_3 d orig_bc=ACTCATGTCTAC new_bc=ACACATGTCTAC bc_diffs=1\\n',\r\n 'CCCTTACTACCGA\\n']\r\n expected_log = [\r\n 'Number raw 
input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 30 and 1000\\t2\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 22\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 0: 1\\n',\r\n '\\n',\r\n 'Truncation at first ambiguous \"N\" character enabled.\\n',\r\n 'Sequences discarded after truncation due to sequence length below the minimum 30: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'Raw len min/max/avg\\t32.0/39.0/35.7\\n',\r\n 'Wrote len min/max/avg\\t12.0/13.0/12.7\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t1/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Num Samples\\t2\\n',\r\n 'Sample ct min/max/mean: 1 / 2 / 1.50\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's1\\t2\\tACACATGTCTAC\\n',\r\n 's3\\t1\\tAACTGTGCGTAC\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t3']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '10\\t0\\t0\\t3\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t3\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)", "def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s", "def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)", "def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )", "def complement_base(base):\n\n if base == 'A' or base == 'a':\n return 'T'\n elif base == 'T' or base == 't':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'", "def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")", "def strip_barcodes(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_adapters_removed.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fasta\"):\n match = re.search(r'\\S*:', record.id)\n if match:\n correct = match.group().rstrip(\":\")\n else:\n correct = str(record.id)\n SEQ = str(record.seq)\n if correct in wanted_set:\n out.write(\">\" + correct + \"\\n\" + SEQ + \"\\n\")", "def count_all_bases(sequence):\n # create a set of 
bases\n bases = set(sequence)\n all_bases = defaultdict(int)\n # iterates in the base set\n for base in bases:\n # count the bases in the sequence\n all_bases[base] = sequence.count(base)\n return all_bases", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def reverse_complement(base):\n try:\n assert isinstance(base, str)\n assert len(base) is 1\n rc = str.maketrans('ACGT', 'TGCA') # Traslation table for reverse complentary sequences\n return base.translate(rc)\n except AssertionError:\n raise NotABaseError", "def complement_base(base, material='DNA'):\n\n if base == 'A' or base == 'a':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base == 'T' or base == 't' or base == 'U' or base == 'u':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'", "def complement_base(base,material='DNA'):\n if base in 'Aa':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base in 'TtUu':\n return 'A'\n elif base in 'Gg':\n return 'C'\n else:\n return 'G'", "def test_trim_fasta(self):\r\n expected = [\"\"\">HWUSI-EAS552R_0357:8:1:10040:6364#0/1\r\nGACGAG\r\n\"\"\",\r\n \"\"\">HWUSI-EAS552R_0357:8:1:10184:6365#0/1\r\nGTCTGA\r\n\"\"\"]\r\n\r\n self.assertEqual(list(trim_fasta(self.fasta_barcodes, 6)), expected)", "def remove_guff(seqs):\n new_seqs = {}\n stop_codons = [\"TGA\", \"TAA\", \"TAG\"]\n for key, value in seqs.items():\n new_seq = \"\"\n for i in range(len(value)-2):\n if value[i:i+3] == \"ATG\":\n break\n\n for j in range(i, len(value)-2, 3):\n if value[j:j+3] in stop_codons:\n new_seqs[key] = value[i:j+3]\n break\n\n return new_seqs", "def reverse_rna_complement(seq):\n\n seq_upper = seq.isupper()\n\n seq = seq[::-1]\n\n seq = seq.upper()\n\n #compute complement\n seq = seq.replace('A','u')\n seq = seq.replace('T','a')\n seq = seq.replace('G','c')\n seq = seq.replace('C','g')\n\n if seq_upper:\n return seq.upper()\n else:\n return seq", "def prepare_fasta_for_blastclust(in_fasta, out_fasta):\n with open(out_fasta, 'w') as out:\n i = 0\n for seq_record in SeqIO.parse(in_fasta, \"fasta\"):\n if len(seq_record.seq) > 5 and 'XXXXX' not in seq_record.seq and 'UUUUU' not in seq_record.seq:\n out.write(\n '>' + seq_record.id.split('|')[0] + '_' + str(i) + '\\n' + str(seq_record.seq) + '\\n')\n i += 1", "def generatebasepairs(self, x):\n currentbases = \"\"\n for u, v in zip(x, range(len(x))):\n if u == 0:\n currentbases += '_'\n else:\n currentbases += self.sequences[v][u-1]\n\n return currentbases", "def base_codes(self):\n bases = []\n\n if self.is_gas_giant:\n bases.append(\"G\")\n if self.is_naval_base:\n bases.append(\"N\")\n if self.is_scout_base:\n bases.append(\"S\")\n if self.is_research_base:\n bases.append(\"R\")\n if self.is_tas:\n bases.append(\"T\")\n if self.is_consulate:\n bases.append(\"I\")\n if self.is_pirate_base:\n bases.append(\"P\")\n\n return \" \".join(bases)", "def back_translate(self):\n base = Bio.Alphabet._get_base_alphabet(self.alphabet)\n if not isinstance(base, Bio.Alphabet.ProteinAlphabet):\n raise ValueError(\"Nucleic acids cannot be back translated!\")\n\n # right now this just uses the 
most-prevalent codon for each AA\n # TODO: select codons with a weighted average using random.choice\n return Seq(\n \"\".join([CodonUsage.SynonymousCodons[seq3(AA).upper()][0] for AA in str(self)]),\n IUPAC.unambiguous_dna,\n )", "def _reduced_alphabet(cls):\n base_alphabet = frozenset(cls.ALPHABET)\n # 1 = l, L, I\n # 2 = Z; 5 = S; 8 = B; 0 = O\n numbers = frozenset('12580')\n # also remove the letters that could be confused for numbers.\n letters_numbers = frozenset('lL I Zz S B O')\n # these may all sound like \"ee\"\n ee_letters = frozenset('bcdegptvz BCDEGPTVZ')\n # these may sound like \"a\"\n aa_letters = frozenset('ajk AJK')\n # these sound like \"m\"\n m_letters = frozenset('mn MN')\n\n alphabet = (base_alphabet - letters_numbers - numbers -\n ee_letters - aa_letters - m_letters)\n logger.debug(\"available alphabet is: {0!r}\".format(alphabet))\n return ''.join(sorted(alphabet))", "def degenerate2(s):\n from lasagna.utils import base_repr\n\n n = s.count('N')\n seed = hash(s) % (2**32 - 1)\n rng = random.Random(seed)\n random_base_ix = lambda: base_repr(rng.randint(0, 4**(n + 1) - 1), 4, n + 1)[::-1]\n while True:\n bases = ['ACTG'[int(j)] for j in random_base_ix()]\n s2 = s\n for b in bases:\n s2 = s2.replace('N', b, 1)\n yield s2", "def fasta2bases(fastafn, ref, start, end, strands=\"+-\", n=3):\n fasta = pysam.FastaFile(fastafn)\n ref2len = {r: l for r, l in zip(fasta.references, fasta.lengths)}\n if ref not in ref2len: #fasta.references:\n raise StopIteration\n for pos, refbase in enumerate(fasta.fetch(ref, start, end), start+1):\n refbase = refbase.upper()\n # combine before start NNN (if needed) sequence from ref and after start NNN (if needed)\n mer = \"N\"*(n-pos+1) + \"\".join(fasta.fetch(ref, pos-n-1 if pos>n+1 else 0, pos+n)) + \"N\"*(pos-ref2len[ref]+n)\n mer = mer.upper() # need to be upper case\n for si, strand in enumerate(strands):\n if si:\n refbase = base2complement[refbase]\n mer = get_revcomp(mer)\n yield pos, si, strand, refbase, mer", "def test_strip_degenerate(self):\n self.assertEqual(self.RNA(\"UCAG-\").strip_degenerate(), \"UCAG-\")\n self.assertEqual(self.RNA(\"NRYSW\").strip_degenerate(), \"\")\n self.assertEqual(self.RNA(\"USNG\").strip_degenerate(), \"UG\")", "def autodoc_process_bases(app, name, obj, options, bases):\n # Determine the bases to be removed\n remove_bases = []\n for base in bases:\n if base.__name__[0] == \"_\" or \"Mixin\" in base.__name__:\n remove_bases.append(base)\n\n # Remove from the bases list in-place\n for base in remove_bases:\n bases.remove(base)", "def normalize_alleles_by_strand(snp_string):\n # get alleles as tuple\n allele1, allele2 = extract_alleles_from_snp_string(snp_string)\n # get reverse compliment of bases and return\n return REVERSE_COMPLIMENT[allele1], REVERSE_COMPLIMENT[allele2]" ]
[ "0.61357856", "0.60041034", "0.5838772", "0.55369747", "0.5463635", "0.5433621", "0.534109", "0.5335156", "0.5331861", "0.5317939", "0.52726936", "0.52646846", "0.5252673", "0.51944333", "0.5191811", "0.51877165", "0.5157462", "0.5120989", "0.5086719", "0.50287366", "0.5014518", "0.5011419", "0.49926537", "0.4963334", "0.4963305", "0.4962518", "0.4954538", "0.49176693", "0.49012512", "0.48975646" ]
0.72724897
0
Function to check and clean up all ambiguous bases in a sequence. Ambiguous bases are bases that are not in the sequence alphabet, i.e. 'ACGT' for DNA sequences.
def check_and_clean_sequence(sequence, alphabet): if set(sequence).issubset(alphabet): return sequence else: return cleaning_ambiguous_bases(sequence)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleaning_ambiguous_bases(seq):\n # compile the regex with all ambiguous bases\n pat = re.compile(r'[NRYWXSKM]')\n # look for the ambiguous bases and replace by\n # nothing\n return re.sub(pat, '', seq)", "def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())", "def sequence_cleaner(sequence, alphabet):\n seq = sequence.upper()\n sequence = [base for base in seq if base in alphabet]\n return ''.join(sequence)", "def count_all_bases(sequence):\n # create a set of bases\n bases = set(sequence)\n all_bases = defaultdict(int)\n # iterates in the base set\n for base in bases:\n # count the bases in the sequence\n all_bases[base] = sequence.count(base)\n return all_bases", "def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)", "def test_preprocess_ambi_trunc(self):\r\n\r\n # Will truncate sequences at the first N character, remove one seq\r\n # due to being less than 30 nucleotides following truncation\r\n # counting the sequence length of the barcodes + primer\r\n\r\n fasta_files = [self.sample_fasta_ambi_chars_f]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_file\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 30\r\n max_seq_len = 1000\r\n min_qual_score = 22\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 0\r\n trim_seq_len = False\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = False\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n reverse_primer_mismatches = 0\r\n truncate_ambi_bases = True\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n reverse_primer_mismatches,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field,\r\n truncate_ambi_bases)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = [\r\n '>s1_1 a orig_bc=ACACATGTCTAC new_bc=ACACATGTCTAC bc_diffs=0\\n',\r\n 'CCCTTATATATAT\\n',\r\n '>s3_2 c orig_bc=AACTGTGCGTAC new_bc=AACTGTGCGTAC bc_diffs=0\\n',\r\n 'AACCGGCCGGTT\\n',\r\n 
'>s1_3 d orig_bc=ACTCATGTCTAC new_bc=ACACATGTCTAC bc_diffs=1\\n',\r\n 'CCCTTACTACCGA\\n']\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 30 and 1000\\t2\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 22\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 0: 1\\n',\r\n '\\n',\r\n 'Truncation at first ambiguous \"N\" character enabled.\\n',\r\n 'Sequences discarded after truncation due to sequence length below the minimum 30: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'Raw len min/max/avg\\t32.0/39.0/35.7\\n',\r\n 'Wrote len min/max/avg\\t12.0/13.0/12.7\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t1/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Num Samples\\t2\\n',\r\n 'Sample ct min/max/mean: 1 / 2 / 1.50\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's1\\t2\\tACACATGTCTAC\\n',\r\n 's3\\t1\\tAACTGTGCGTAC\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t3']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '10\\t0\\t0\\t3\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t3\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)", "def test_check_dna_chars_bcs(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_bcs(header, mapping_data, errors)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find no errors\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'C1GTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_bcs(header, mapping_data, errors,\r\n has_barcodes=False)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find errors with has_barcodes=True\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'CNGTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_bcs(header, mapping_data, errors,\r\n has_barcodes=True)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: CNGTA\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += 
up_seq.count(vchar)\r\n return len(curr_seq) - total", "def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )", "def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()", "def check_random_bc(seq):\n if seq.startswith('TGATC'):\n return seq[5:]\n else:\n return seq[:16]", "def test_attempt_bc_correction_no_barcode(self):\r\n\r\n curr_bc = \"\"\r\n all_bcs = [\"\"]\r\n barcode_type = 0\r\n actual_bc, actual_errs = attempt_bc_correction(curr_bc,\r\n all_bcs, barcode_type)\r\n\r\n expected_bc = \"\"\r\n expected_errs = 0\r\n self.assertEqual(actual_bc, expected_bc)\r\n self.assertEqual(actual_errs, expected_errs)", "def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")", "def test_attempt_bc_correction_hamming8(self):\r\n\r\n curr_bc = \"AGCAGCAC\"\r\n all_bcs = [\"AGCAGAAC\", \"TGCAGTAC\", \"ACAGAGTC\"]\r\n barcode_type = \"hamming_8\"\r\n actual_bc, actual_errs = attempt_bc_correction(curr_bc,\r\n all_bcs, barcode_type)\r\n\r\n expected_bc = \"AGCAGAAC\"\r\n expected_errs = 0.5\r\n self.assertEqual(actual_bc, expected_bc)\r\n self.assertEqual(actual_errs, expected_errs)", "def test_attempt_bc_correction_generic(self):\r\n\r\n curr_bc = \"GGCAGCACTA\"\r\n all_bcs = [\"AACTCGTCGA\", \"AGCAGCACTT\", \"ACAGAGTCGG\"]\r\n barcode_type = 10\r\n actual_bc, actual_errs = attempt_bc_correction(curr_bc,\r\n all_bcs, barcode_type)\r\n\r\n expected_bc = \"AGCAGCACTT\"\r\n expected_errs = 2\r\n self.assertEqual(actual_bc, expected_bc)\r\n self.assertEqual(actual_errs, expected_errs)", "def check_dna_chars_bcs(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True):\r\n\r\n valid_dna_chars = DNASequence.iupac_standard_characters()\r\n # Detect fields directly, in case user does not have fields in proper\r\n # order in the mapping file (this will generate error separately)\r\n header_fields_to_check = []\r\n if has_barcodes:\r\n header_fields_to_check.append(\"BarcodeSequence\")\r\n\r\n check_indices = []\r\n\r\n for curr_field in range(len(header)):\r\n if header[curr_field] in header_fields_to_check:\r\n check_indices.append(curr_field)\r\n\r\n # Correction factor for header being the first line\r\n correction_ix = 1\r\n # Check for missing data\r\n for curr_data in range(len(mapping_data)):\r\n for curr_ix in check_indices:\r\n if len(mapping_data[curr_data][curr_ix]) == 0:\r\n errors.append(\"Missing expected DNA sequence\\t%d,%d\" %\r\n (curr_data + correction_ix, curr_ix))\r\n continue\r\n for curr_nt in mapping_data[curr_data][curr_ix]:\r\n if curr_nt not in valid_dna_chars:\r\n errors.append(\"Invalid DNA sequence detected: %s\\t%d,%d\" %\r\n (mapping_data[curr_data][curr_ix],\r\n curr_data + 
correction_ix, curr_ix))\r\n continue\r\n\r\n return errors", "def complement_base(base):\n\n if base == 'A' or base == 'a':\n return 'T'\n elif base == 'T' or base == 't':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'", "def checkBC(bc):\n if isinstance(bc, string_types):\n bc = [bc, bc]\n assert isinstance(bc, list), 'bc must be a list'\n assert len(bc) == 2, 'bc must have two elements'\n\n for bc_i in bc:\n assert isinstance(bc_i, string_types), \"each bc must be a string\"\n assert bc_i in ['dirichlet', 'neumann'], (\"each bc must be either,\"\n \"'dirichlet' or 'neumann'\")\n return bc", "def test_preprocess_bad_chars_in_mapping(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n # But should not halt due to bad characters in a data field\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_datafield_f\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping 
file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTAC\\n',\r\n 's3\\t0\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)\r\n\r\n '''# With invalid character in a SampleID, should raise ValueError\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_sampleid_f\r\n barcode_type=\"golay_12\"\r\n min_seq_len=200\r\n max_seq_len=1000\r\n min_qual_score=25\r\n starting_ix=1\r\n keep_primer=False\r\n max_ambig=0\r\n max_primer_mm=1\r\n trim_seq_len=True\r\n dir_prefix=self.output_dir\r\n max_bc_errors=2\r\n max_homopolymer=4\r\n retain_unassigned_reads=False\r\n keep_barcode=False\r\n attempt_bc_correction=True\r\n qual_score_window=0\r\n disable_primer_check=False\r\n reverse_primers='disable'\r\n record_qual_scores=False\r\n discard_bad_windows=False\r\n median_length_filtering=None\r\n added_demultiplex_field=None\r\n\r\n\r\n self.assertRaises(ValueError, preprocess, fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)'''", "def test_check_seqs_fixed_len_bc(self):\r\n\r\n # Third test, fixed length barcodes, fixed length primers + one\r\n # degenerate test. 
Should correct one of the passed barcodes\r\n in_seqs = self.in_seqs_fixed_len_bc1\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_fasta_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='disable',\r\n rev_primers={},\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Fourth test-set max_bc_errors to 0, and allow some primer mismatches\r\n in_seqs = self.in_seqs_fixed_len_bc2\r\n bc_map = self.bc_map_fixed_len_bc2\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc2\r\n all_primers = self.all_primers_fixed_len_bc2\r\n expected = self.expected_fasta_fixed_len_bc2\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=0.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=1,\r\n disable_primer_check=False,\r\n reverse_primers='disable',\r\n rev_primers={},\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)", "def guess_seq(seq):\n dna = \"ACTG-N\"\n \n chars = util.unique(seq.upper())\n \n for char in chars:\n if char not in dna:\n return \"pep\"\n return \"dna\"", "def complement_base(base, material='DNA'):\n\n if base == 'A' or base == 'a':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base == 'T' or base == 't' or base == 'U' or base == 'u':\n return 'A'\n elif base == 'G' or base == 'g':\n return 'C'\n else:\n return 'G'", "def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)", "def test_trim_fasta(self):\r\n expected = [\"\"\">HWUSI-EAS552R_0357:8:1:10040:6364#0/1\r\nGACGAG\r\n\"\"\",\r\n \"\"\">HWUSI-EAS552R_0357:8:1:10184:6365#0/1\r\nGTCTGA\r\n\"\"\"]\r\n\r\n self.assertEqual(list(trim_fasta(self.fasta_barcodes, 6)), expected)", "def test_to_from_iupac(self, molecule):\n from openforcefield.utils.toolkits import UndefinedStereochemistryError\n\n # All the molecules that raise UndefinedStereochemistryError in Molecule.from_iupac()\n # (This is a larger list than the normal group of 
undefined stereo mols, probably has\n # something to do with IUPAC information content)\n iupac_problem_mols = {\n \"DrugBank_977\",\n \"DrugBank_1634\",\n \"DrugBank_1700\",\n \"DrugBank_1962\",\n \"DrugBank_2148\",\n \"DrugBank_2178\",\n \"DrugBank_2186\",\n \"DrugBank_2208\",\n \"DrugBank_2519\",\n \"DrugBank_2538\",\n \"DrugBank_2592\",\n \"DrugBank_2651\",\n \"DrugBank_2987\",\n \"DrugBank_3332\",\n \"DrugBank_3502\",\n \"DrugBank_3622\",\n \"DrugBank_3726\",\n \"DrugBank_3844\",\n \"DrugBank_3930\",\n \"DrugBank_4161\",\n \"DrugBank_4162\",\n \"DrugBank_4778\",\n \"DrugBank_4593\",\n \"DrugBank_4959\",\n \"DrugBank_5043\",\n \"DrugBank_5076\",\n \"DrugBank_5176\",\n \"DrugBank_5418\",\n \"DrugBank_5737\",\n \"DrugBank_5902\",\n \"DrugBank_6295\",\n \"DrugBank_6304\",\n \"DrugBank_6305\",\n \"DrugBank_6329\",\n \"DrugBank_6355\",\n \"DrugBank_6401\",\n \"DrugBank_6509\",\n \"DrugBank_6531\",\n \"DrugBank_6647\",\n # These test cases are allowed to fail.\n \"DrugBank_390\",\n \"DrugBank_810\",\n \"DrugBank_4316\",\n \"DrugBank_4346\",\n \"DrugBank_7124\",\n }\n undefined_stereo = molecule.name in iupac_problem_mols\n\n iupac = molecule.to_iupac()\n\n if undefined_stereo:\n with pytest.raises(UndefinedStereochemistryError):\n Molecule.from_iupac(iupac)\n\n molecule_copy = Molecule.from_iupac(\n iupac, allow_undefined_stereo=undefined_stereo\n )\n assert molecule.is_isomorphic_with(\n molecule_copy, atom_stereochemistry_matching=not undefined_stereo\n )", "def base_codes(self):\n bases = []\n\n if self.is_gas_giant:\n bases.append(\"G\")\n if self.is_naval_base:\n bases.append(\"N\")\n if self.is_scout_base:\n bases.append(\"S\")\n if self.is_research_base:\n bases.append(\"R\")\n if self.is_tas:\n bases.append(\"T\")\n if self.is_consulate:\n bases.append(\"I\")\n if self.is_pirate_base:\n bases.append(\"P\")\n\n return \" \".join(bases)", "def complement_base(base,material='DNA'):\n if base in 'Aa':\n if material == 'DNA':\n return 'T'\n elif material == 'RNA':\n return 'U'\n elif base in 'TtUu':\n return 'A'\n elif base in 'Gg':\n return 'C'\n else:\n return 'G'", "def test_all_garbage_naive(test_input):\n tokens = list(sp.tokenize(test_input))\n assert tokens[0] is tk.START_GARBAGE\n assert tokens[-1] is tk.END_GARBAGE", "def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")" ]
[ "0.6972512", "0.6083541", "0.5664274", "0.55122876", "0.5495145", "0.5406091", "0.5340601", "0.53405964", "0.53322184", "0.5290064", "0.5243587", "0.52224714", "0.51917106", "0.5164841", "0.5138689", "0.50656164", "0.5061921", "0.5047701", "0.5016211", "0.50158536", "0.5006811", "0.4992301", "0.49904066", "0.49886358", "0.49796617", "0.4967366", "0.49515408", "0.49450073", "0.49320835", "0.4901832" ]
0.6441113
1
Wraps the check fragment in the outer parts of the SQL query
def _full_check_sql(self, sql: str) -> str: return f"SELECT col_name, check_type, check_result FROM ({sql}) AS check_columns"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _valid_filter_query(self):\n if self._output_invalid_imeis:\n valid_filter_sql = sql.SQL('TRUE')\n else:\n valid_filter_sql = sql.SQL('is_valid IS TRUE')\n return valid_filter_sql", "def test_if_paren_statement():\n r = convert_code(\n \"{if (foo and bar) or foo and (bar or (foo and bar))}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if (foo and bar) or foo and (bar or (foo and bar)) %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def _validate_select_where(self):", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def query(mdx_stmt):", "def checkRowInTable(table, conditions):\n select = \"SELECT COUNT(*) FROM {0}\".format(table)\n if conditions is None:\n return select\n else:\n select += \" WHERE \"\n for c in conditions:\n select += \"{0}=\\'{1}\\' AND \".format(c[0], c[1])\n return select[:-5] + \" ALLOW FILTERING\"", "def test_if_statement_multiple():\n r = convert_code(\n \"{if !foo or foo.bar or foo|bar:foo['hello']}\\nfoo\\n{/if}\")\n assert r == \"{% if not foo or foo.bar or foo|bar(foo['hello']) %}\\nfoo\\n{% endif %}\"", "def render_insertion_check(self, r, method, row):\n \n opt_bits = self.get_local_opt_bits(row)\n if opt_bits:\n r.line('if(%s)' % opt_bits)\n \n key_ex = self.project_key_local_to_foreign(row)\n if self.foreign_index.is_complex:\n key_ex = '%s(%s)' % (self.foreign_index.get_key_type(), key_ex)\n \n r.lines(\"\"\"\n {\n %(find_type)s::const_iterator it(\n %(find_get)s.find(\n %(key_ex)s));\n \n if(it == %(find_get)s.end())\n {\n throw std::logic_error(\"%(method)s(): \"\n \"insertion would violate foreign constraint \"\n \"%(debug_name)s\");\n }\n }\n \"\"\", 8,\n find_type = self.foreign_index.get_index_type(),\n find_get = self.foreign_index.get_index(),\n key_ex = key_ex,\n method = method,\n debug_name = self.get_debug_name(),\n )\n return", "def conditional(self) -> global___Statement.Conditional:", "def get_basic_query_cond(column: str, val: str, query_params: dict):\n if val is not None:\n query_params[column] = val\n return 'AHJ.' + column + '=%(' + column + ')s AND '\n return ''", "def gen_v_stmt(q1n, q2n):\n return \"verify {} {};\\n\".format(q1n, q2n)", "def where_select_query(temp_table, all_columns, where):\n\tif len(where.tokens) >= 7:\t\t\t\t\t\t\t\t# AND or OR are present\n\t\tif str(where.tokens[4]) == \"AND\":\n\t\t\ttemp_table = where_helper(temp_table, all_columns, where)\n\t\t\t\n\t\t\tcomparison = where.tokens[6]\t\t\t\t\t# comparison = \"A=8\";\n\t\t\tcomparison.tokens = [x for x in comparison.tokens if not x.is_whitespace()]\t\t# No more white spaces\t\t\t\n\t\t\tkey = str(comparison.tokens[0])\t\t\t\t\t# key = \"A\"\n\t\t\t\n\t\t\tif '.' not in key:\n\t\t\t\tkey = check_overlapping_fields(all_columns, key)\n\t\t\ttry:\n\t\t\t\tvalue = int(str(comparison.tokens[2]))\t\t# whether it is an int value on RHS of comparison or some column\n\t\t\t\ttemp_table.delete_rows_by_int(key, value, str(comparison.tokens[1]))\n\t\t\texcept:\n\t\t\t\tvalue = str(comparison.tokens[2])\n\t\t\t\tif '.' not in value:\n\t\t\t\t\tvalue = check_overlapping_fields(all_columns, value)\n\t\t\t\ttemp_table.delete_rows_by_col(key, value, str(comparison.tokens[1]))\n\n\t\telif str(where.tokens[4]) == \"OR\":\n\t\t\t\n\t\t\tcomparison1 = where.tokens[2]\t\t\t\t\t\t# comparison = \"A=8\";\n\t\t\tcomparison1.tokens = [x for x in comparison1.tokens if not x.is_whitespace()]\t\t# No more white spaces\t\t\t\n\t\t\tkey1 = str(comparison1.tokens[0])\t\t\t\t\t\t# key = \"A\"\n\t\t\t\n\t\t\tif '.' 
not in key1:\n\t\t\t\tkey1 = check_overlapping_fields(all_columns, key1)\n\t\t\ttry:\n\t\t\t\tvalue1 = int(str(comparison1.tokens[2]))\n\t\t\texcept:\n\t\t\t\tvalue1 = str(comparison1.tokens[2])\n\t\t\t\tif '.' not in value1:\n\t\t\t\t\tvalue1 = check_overlapping_fields(all_columns, value1)\n\t\t\t\n\t\t\tcomparison2 = where.tokens[6]\t\t\t\t\t\t# comparison = \"A=8\";\n\t\t\tcomparison2.tokens = [x for x in comparison2.tokens if not x.is_whitespace()]\t\t# No more white spaces\t\t\t\n\t\t\tkey2 = str(comparison2.tokens[0])\t\t\t\t\t\t# key = \"A\"\n\t\n\t\t\tif '.' not in key2:\n\t\t\t\tkey2 = check_overlapping_fields(all_columns, key2)\n\t\t\ttry:\n\t\t\t\tvalue2 = int(str(comparison2.tokens[2]))\n\t\t\texcept:\n\t\t\t\tvalue2 = str(comparison2.tokens[2])\n\t\t\t\tif '.' not in value2:\n\t\t\t\t\tvalue2 = check_overlapping_fields(all_columns, value2)\n\n\t\t\tif type(value1) == int and type(value2) == int:\n\t\t\t\ttemp_table.delete_rows_by_both_ints(key1, value1, str(comparison1.tokens[1]), key2, value2, str(comparison2.tokens[1]))\n\t\t\telif type(value1) == str and type(value2) == str:\n\t\t\t\ttemp_table.delete_rows_by_both_cols(key1, value1, str(comparison1.tokens[1]), key2, value2, str(comparison2.tokens[1]))\n\t\t\telse:\n\t\t\t\traise SqlException(\"Only OR on joins with either comparisons with int or columns in both conditions supported.\")\n\t\telse:\n\t\t\traise SqlException(\"Invalid where condition\")\n\telif len(where.tokens) <= 5:\t\t\t\t\t\t\t\t\t\t\t\t\t# Only where is present\n\t\ttemp_table = where_helper(temp_table, all_columns, where)\n\telse:\n\t\traise SqlException(\"Invalid where syntax\")\n\treturn temp_table", "def make_query_where_1(self):\r\n\r\n # s = select([self.cookies]).where(self.cookies.c.cookie_name == 'peanut butter')\r\n # s = select([pos_incidents_related_ts],pos_incidents_related_ts.c.incident_id.in_ )\r\n s = select([self.pos_incidents_related_ts]).where(\r\n self.pos_incidents_related_ts.c.incident_id.in_(\r\n select([self.pos_incidents.c.incident_id]).where(\r\n self.pos_incidents.c.incident_type_id == 'id_imitating_barcode_scanning')))\r\n s = s.order_by(self.pos_incidents_related_ts.c.incident_related_ts)\r\n\r\n rp = self.engine.execute(s)\r\n record = rp.fetchall()\r\n print(len(record))", "def _cond_where_sql(cursor, conds, tables, prefix=None, aggregate=False):\n isa = isinstance\n pieces = []\n for c in conds:\n if isa(c, Query) or (isa(c, Comparison) and c._table in tables):\n sql = c._sql_where(cursor, tables, prefix=prefix,\n aggregate=aggregate)\n if len(sql) > 0:\n pieces.append(sql)\n return pieces", "def check_query(report):\n\n if string.find(report.db_query, \"count(\") != -1:\n\tif report.db_query[len(report.db_query) - 1] == \")\" or report.db_query[len(report.db_query) - 1] == \";\":\n\t db_query = report.db_query\n\telse:\n\t print \"error - unsupported query: report title: %s, id: %s\" % (report.title, report.id)\n\t return (False, '')\n elif string.find(report.db_query, \"group(\") != -1 or string.find(report.db_query, \"mapReduce(\") != -1:\n\tif report.db_query[len(report.db_query) - 1] == \")\":\n\t if string.find(report.db_query, \"forEach(printjson)\") == -1:\n\t\tdb_query = report.db_query+\".forEach(printjson)\"\n\t else:\n\t\tdb_query = report.db_query\n\telif report.db_query[len(report.db_query) - 1] == \";\":\n\t if string.find(report.db_query, \"forEach(printjson)\") == -1:\n\t\tdb_query = report.db_query[0:len(report.db_query) - 1]+\".forEach(printjson)\"\n\t else:\n\t\tdb_query = report.db_query\n\telse:\n\t 
print \"error - unsupported query: report title: %s, id: %s\" % (report.title, report.id)\n\t return (False, '')\n else:\n\tprint \"error - unsupported query: report title: %s, id: %s\" % (report.title, report.id)\n\treturn (False, '')\n\n return (True, db_query)", "def _build_statement(self, query, query_key, beets_key):\n statement = \"\"\n if query_key in query:\n for query_string in query[query_key]:\n if '\"' in query_string:\n statement += \" and %s = \\'%s\\' \" % (beets_key,\n query_string)\n else:\n statement += ' and %s = \\\"%s\\\" ' % (beets_key,\n query_string)\n return statement", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def _blacklisted_pairings_filter_query(self):\n if self._restrict_exceptions_list:\n blacklisted_filter_sql = sql.SQL('is_blacklisted IS TRUE')\n else:\n blacklisted_filter_sql = sql.SQL('TRUE')\n return blacklisted_filter_sql", "def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):\n disjunctions = []\n andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,\n aggregate=aggregate)\n andsql = ' AND '.join(andsql)\n\n if len(andsql) > 0:\n andsql = '(%s)' % andsql\n disjunctions.append(andsql)\n disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,\n aggregate=aggregate)\n\n if len(disjunctions) == 0:\n return ''\n return '(%s)' % (' OR '.join(disjunctions))", "def test_add_dummy_where_with_where_present_and_not_added(self):\n updated_sql = add_dummy_where(self.SQL_WITH_WHERE)\n self.assertEqual(updated_sql, self.SQL_WITH_WHERE)", "def test_add_dummy_where_with_where_not_present_and_added(self):\n updated_sql = add_dummy_where(self.SQL_WITHOUT_WHERE)\n self.assertEqual(updated_sql, self.SQL_WITH_WHERE)", "def check(self):\n self.conn = psycopg2.connect(self.conn_string)\n self.cur = self.conn.cursor(\"rifflecursor\")\n self.cur.execute(\"\"\"\n SELECT * FROM yelp_stored WHERE business_id = %s;\n \"\"\", (self.bus_id,))\n sql_tup = self.cur.fetchall()\n self.conn.close()\n if sql_tup == []:\n return False\n else:\n return sql_tup", "def _sql_where(self, cur, tables, prefix=None, aggregate=False):\n if aggregate:\n return _sql_where(cur, tables, self._agg_andalso, self._agg_orelse,\n prefix=prefix, aggregate=aggregate)\n else:\n return _sql_where(cur, tables, self._andalso, self._orelse,\n prefix=prefix, aggregate=aggregate)", "def build_sql(start, stop):\n # logging.critical(f\"\"\"bld step {step} start = {start} stop ={stop}\"\"\")\n st = f\"\"\"select server1_qry,server2_qry from\n {self.schemaRepo}.tablediff where step = 0 and server1_qry is not\n null and server1_rows<>0 and server2_rows<>0 and lower(table_name)=\n lower('{table1.tableName}') order by id\"\"\"\n conn = self.connect(self.cxRepo)\n sql = None\n with conn.cursor() as curs:\n if st != \"\":\n curs.execute(st)\n qryrow = curs.fetchone()\n if qryrow is not None:\n qry1st = qryrow[0].replace(\"'\", \"''\")\n qry2st = qryrow[1].replace(\"'\", \"''\")\n qry1_select = table1.select.replace(\"'\", 
\"''\")\n qry2_select = table2.select.replace(\"'\", \"''\")\n sql = f\"\"\"INSERT INTO {self.schemaRepo}.tablediff\n (cxstring1, schema1, cxstring2, schema2,table_name,\n server1_select,server2_select,step, start, stop,\n server1_qry,server2_qry,result)\n SELECT '{table1.cxString}',\n '{table1.schema}',\n '{table2.cxString}',\n '{table2.schema}',\n '{table1.tableName}',\n '{qry1_select}',\n '{qry2_select}',\n {step},{start},{stop},\n '{qry1st}','{qry2st}','ready'\n WHERE NOT EXISTS\n (SELECT 1 FROM {self.schemaRepo}.tablediff WHERE\n (table_name,\n start,stop,step)=\n ('{table1.tableName}' ,\n {start},\n {stop},\n {step}))\"\"\"\n conn = self.connect(self.cxRepo)\n# logging.debug(f\"\"\"splitting qry:\n# {sql}\"\"\")\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(\n f\"\"\"error executing {sql} : {error}\"\"\")\n conn.commit()\n return None", "def _get_union_shared_collection_cond(self, tree_sql, sql_param):\n trees_sql = \"(%s)\" % (str(tree_sql))\n # print \"self.sub_scheme_version :: \", self.sub_scheme_version\n # if self.sub_scheme_version >= CAT_SHARECOLL_VERSION:\n # print \"sql_param 1> : \", sql_param\n shared_coll_cond = SQLBinaryExpr(COL_NANE_SHARECOLL_PARENT_COLLID, OP_IN, (trees_sql))\n shared_coll_query = GenericSQLSelect([COL_NANE_SHARECOLL_CHILD_COLLID], True,\n [TABLE_NAME_CAT_SHAREDCOLLECTIONMAP], shared_coll_cond)\n shared_coll = [i[0] for i in self.execute(str(shared_coll_query), **sql_param)]\n # print \"sql_param 2> : \", sql_param\n for i in range(len(shared_coll)):\n shared_coll = shared_coll + self.get_collections(shared_coll[i])\n shared_coll = list(set(shared_coll))\n shared_coll = [shared_coll[i:i + 999] for i in range(0, len(shared_coll), 999)]\n\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_IN, trees_sql)\n\n label_cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_EQ, \":%s\" % (len(sql_param)))\n # label = self.select_generic_data([COL_NAME_COLL_CP_LABEL], [TABLE_NAME_COLL],\n # label_cond, sqlparams=sql_param)[0][COL_NAME_COLL_CP_LABEL]\n label_qry = GenericSQLSelect([COL_NAME_COLL_CP_LABEL], False,\n [TABLE_NAME_COLL], label_cond)\n # print \"str(label_qry) :\", str(label_qry), sql_param\n label = self.execute(str(label_qry), **sql_param)[0][0]\n # print \"label : \", label\n if label == None: label = 'NULL'\n label_sel = ''' AND NVL(%s,'NULL') = '%s' ''' % (COL_NAME_COLL_CP_LABEL, label)\n if self.db_type[0] == 0:\n label_sel = label_sel.replace('NVL', 'IFNULL')\n trees_sql = \"(%s%s\" % (str(GenericSQLSelect([COL_NAME_COLL_COLLID], False, [TABLE_NAME_COLL], cond)), label_sel)\n else:\n trees_sql = \"(%s%s)\" % (str(GenericSQLSelect([COL_NAME_COLL_COLLID], False, [TABLE_NAME_COLL], cond)), label_sel)\n for entry in shared_coll:\n #Modified for one entry - Tanveer\n if len(entry)==1:\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_EQ, entry[0])\n else:\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_IN, str(tuple(entry)))\n if self.db_type[0] == 0:\n trees_sql += \" UNION %s\" % (str(GenericSQLSelect([COL_NAME_COLL_COLLID],\n False, [TABLE_NAME_COLL], cond)))\n else:\n trees_sql += \" UNION (%s)\" % (str(GenericSQLSelect([COL_NAME_COLL_COLLID],\n False, [TABLE_NAME_COLL], cond)))\n #Add Paranthesis else it will consider collection IDs insted of MeasID. 
Informed Zaheer.\n if self.db_type[0] != 0:\n trees_sql = \"(\" + trees_sql + \")\"\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_IN, trees_sql)\n\n return cond", "def compile_else(self):\n\n\t\txml = self.tokenizer.keyword() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)", "def _getSQLWhere(self, inputTable, queryMeta):\n\t\tsqlPars = {}\n\t\tinputPars = dict((p.name, p.value) for p in inputTable.iterParams())\n\t\treturn base.joinOperatorExpr(\"AND\",\n\t\t\t[cd.asSQL(inputPars, sqlPars, queryMeta)\n\t\t\t\tfor cd in self.condDescs]), sqlPars" ]
[ "0.5628021", "0.5415898", "0.5397555", "0.5312858", "0.5302598", "0.5286318", "0.52754784", "0.5252579", "0.52508163", "0.520744", "0.51931447", "0.5180745", "0.51778483", "0.51662195", "0.5128626", "0.5119018", "0.5098157", "0.5095183", "0.5057997", "0.5057997", "0.50473386", "0.50050676", "0.4987436", "0.49627712", "0.49548954", "0.4931533", "0.48995492", "0.48990557", "0.48972124", "0.48784447" ]
0.6226931
0
Checks that the generated sql respects a templated partition clause
def test_sql_check_partition_clause_templating(self, conn_id): operator = SQLTableCheckOperator( task_id="test_task", table="employees", checks={"row_count_check": {"check_statement": "COUNT(*) = 5"}}, conn_id=conn_id, partition_clause="employment_year = {{ params.year }}", ) hook = operator.get_db_hook() hook.run( [ """ CREATE TABLE IF NOT EXISTS employees ( employee_name VARCHAR(50) NOT NULL, employment_year INT NOT NULL ); """, "INSERT INTO employees VALUES ('Adam', 2021)", "INSERT INTO employees VALUES ('Chris', 2021)", "INSERT INTO employees VALUES ('Frank', 2021)", "INSERT INTO employees VALUES ('Fritz', 2021)", "INSERT INTO employees VALUES ('Magda', 2022)", "INSERT INTO employees VALUES ('Phil', 2021)", ] ) try: operator.render_template_fields({"params": {"year": 2021}}) operator.execute({}) finally: hook.run(["DROP TABLE employees"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _random_sample_for_partitioned_tables(self) -> Query:\n partition_field = self._partition_details[\"partition_field\"]\n col = self.table.__table__.c.get(partition_field.lower())\n col_type = None\n if col is not None:\n col_type = col.type\n if partition_field == \"_PARTITIONDATE\":\n col_type = sqlalchemy.DATE\n if partition_field == \"_PARTITIONTIME\":\n col_type = sqlalchemy.DATETIME()\n\n if not self._partition_details.get(\"partition_values\"):\n sample = (\n self.session.query(self.table)\n .filter(\n format_partition_datetime(\n partition_field,\n self._partition_details[\"partition_interval\"],\n self._partition_details[\"partition_interval_unit\"],\n col_type,\n )\n )\n .subquery()\n )\n return aliased(self.table, sample)\n sample = (\n self.session.query(self.table)\n .filter(\n column(partition_field).in_(self._partition_details[\"partition_values\"])\n )\n .subquery()\n )\n return aliased(self.table, sample)", "def test_partition_tables_types(sdc_builder, sdc_executor, gcp, partition_type, file_format):\n\n if Version(sdc_builder.version) < Version('5.5.0') and file_format == 'JSON':\n pytest.skip('JSON staging introduced in 5.5.0')\n\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n records_count = 20\n\n partition = {\"dataset\": dataset_name,\n \"table\": table_name,\n \"partitionType\": partition_type,\n \"timePartitionExpiration\": 0}\n\n if partition_type == 'INGESTION':\n # it could be whatever, we do not partition on any column here\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = 'STRING'\n elif partition_type == 'TIMESTAMP':\n partition[\"columnName\"] = \"partition_column\"\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = 'DATETIME'\n elif partition_type in ['DATE', 'DATETIME']:\n partition[\"columnName\"] = \"partition_column\"\n partition[\"timePartitionType\"] = \"MONTH\"\n data_type = partition_type\n elif partition_type == 'INTEGER':\n partition[\"columnName\"] = \"partition_column\"\n partition[\"integerPartitionStart\"] = -1000\n partition[\"integerPartitionStep\"] = 100\n partition[\"integerPartitionEnd\"] = 1000\n data_type = partition_type\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[\n {\"type\": data_type, \"field\": \"partition_column\"},\n {\"type\": \"POKEMON\", \"field\": \"name\"}\n ])\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n staging_file_format=file_format,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[partition])\n\n dev_data_generator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n 
sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n # Assert table is partitioned as well\n if partition_type == 'INTEGER':\n assert table.range_partitioning.field == 'partition_column'\n assert table.range_partitioning.range_.start == -1000\n assert table.range_partitioning.range_.interval == 100\n assert table.range_partitioning.range_.end == 1000\n elif partition_type == 'INGESTION':\n assert table.time_partitioning.type_ == 'MONTH'\n else:\n assert table.time_partitioning.field == 'partition_column'\n assert table.time_partitioning.type_ == 'MONTH'\n # And that we have records in the table\n assert len(data_from_bigquery) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def test_partition_tables_no_partition(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_el_var = \"${record:attribute('sdc.dataset.name')}\"\n table_el_var = \"${record:attribute('sdc.table.name')}\"\n records_count = 20\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[\n {\"type\": \"POKEMON\", \"field\": \"name\"}\n ])\n\n # Build Expression Evaluator\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name}]\n )\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_el_var,\n table=table_el_var,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[\n {\"dataset\": \"wrong_dataset\",\n \"table\": \"wrong_table\",\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"MONTH\",\n \"timePartitionExpiration\": 0}\n ])\n\n dev_data_generator >> expression_evaluator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n # Assert table is not partitioned\n assert not table.time_partitioning\n # And that we have records in the table\n assert 
len(data_from_bigquery) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")", "def test_partition_T(self):\n Z = Partition(size=1000)\n for p in chain(Z, [{'k':-1, 'r': 0}, {'k': 1, 'r': -1},\n {'k': -1, 'r': -1}]):\n rows = Z.T(**p)\n self.assertEqual(rows, legacy_T(Z, **p))\n\n out = Z.V(**p)\n self.assertEqual(out, legacy_V(Z, **p))\n\n out = Z.S(**p)\n self.assertEqual(out, legacy_S(Z, **p))\n\n out = Z.S(ignore_samplesize=True, **p)\n self.assertEqual(out, legacy_S(Z, ignore_samplesize=True, **p))\n\n out = Z.A(**p)\n self.assertEqual(out, legacy_A(Z, **p))", "def renderPartition(*args, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def gen_partition_statement(partition_tuples, target_root, run_id=None):\n if run_id is not None:\n partition_tuples = [('run_id', run_id)] + partition_tuples\n # todo: part_a1, part_a2, part_b, part_c, part_what? you lost me.\n part_a1 = \", \".join(\n [\"{label}='{value}'\".format(label=i[0], value=i[1]) for i in partition_tuples]\n )\n part_a2 = \"/\".join(\n [\"{label}={value}\".format(label=i[0], value=i[1]) for i in partition_tuples]\n )\n part_b = \"partition ({partitions_str})\".format(partitions_str=part_a1)\n part_c = \"location '{location}'\".format(location=os.path.join(target_root, part_a2))\n return part_b + ' ' + part_c", "def postgres_auto_partition(\n model: PostgresPartitionedModel,\n count: int,\n interval_unit: PostgresAutoPartitioningIntervalUnit,\n interval: int,\n start_from: Optional[date] = None,\n using=\"default\",\n):\n\n connection = connections[using]\n\n with connection.cursor() as cursor:\n table = connection.introspection.get_partitioned_table(\n cursor, model._meta.db_table\n )\n\n if not table:\n raise PostgresAutoPartitioningError(\n f\"Model {model.__name__}, with table {model._meta.db_table} \"\n \"does not exists in the database. Did you run \"\n \"`python manage.py migrate`?\"\n )\n\n if table.method != PostgresPartitioningMethod.RANGE:\n raise PostgresAutoPartitioningError(\n f\"Table {table.name} is not partitioned by a range. 
Auto partitioning \"\n \"only supports partitioning by range.\"\n )\n\n schema_editor = connection.schema_editor()\n\n start_datetime = datetime.now()\n if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:\n start_datetime = start_datetime.replace(day=1)\n elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:\n start_datetime = start_datetime - relativedelta(\n days=start_datetime.weekday()\n )\n\n for _ in range(count):\n if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:\n end_datetime = start_datetime + relativedelta(months=+interval)\n partition_name = start_datetime.strftime(\"%Y_%b\").lower()\n elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:\n end_datetime = start_datetime + relativedelta(weeks=+interval)\n partition_name = start_datetime.strftime(\"%Y_week_%W\").lower()\n\n from_values = start_datetime.strftime(\"%Y-%m-%d\")\n to_values = end_datetime.strftime(\"%Y-%m-%d\")\n\n logger = LOGGER.bind(\n model_name=model.__name__,\n name=partition_name,\n from_values=from_values,\n to_values=to_values,\n )\n\n if start_from and start_datetime.date() < start_from:\n start_datetime = end_datetime\n logger.info(\n \"Skipping creation of partition, before specified start date\",\n start_from=start_from,\n )\n continue\n\n partition_table_name = schema_editor.create_partition_table_name(\n model, partition_name\n )\n\n existing_partition = next(\n (\n table_partition\n for table_partition in table.partitions\n if table_partition.name == partition_table_name\n ),\n None,\n )\n\n if existing_partition:\n start_datetime = end_datetime\n logger.info(\"Skipping creation of partition, already exists\")\n continue\n\n schema_editor.add_range_partition(\n model=model,\n name=partition_name,\n from_values=from_values,\n to_values=to_values,\n )\n\n logger.info(\"Created partition\")\n\n start_datetime = end_datetime", "def test_partition_tables_default_partition(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name_1 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name_1 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name_2 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name_2 = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_el_var = \"${record:attribute('sdc.dataset.name')}\"\n table_el_var = \"${record:attribute('sdc.table.name')}\"\n records_count = 20\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[{\"type\": \"POKEMON\", \"field\": \"name\"},\n {\"type\": \"INTEGER\", \"field\": \"id\"}])\n\n # Build Stream Selector\n selector = pipeline_builder.add_stage('Stream Selector')\n\n # Build Expression Evaluators\n expression_evaluator_1 = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator_1.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name_1},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name_1}]\n )\n\n expression_evaluator_2 = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator_2.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name_2},\n {'attributeToSet': 'sdc.table.name',\n 
'headerAttributeExpression': table_name_2}]\n )\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_el_var,\n table=table_el_var,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[\n {\"dataset\": dataset_name_1,\n \"table\": table_name_1,\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"MONTH\",\n \"timePartitionExpiration\": 0},\n {\"defaultPartition\": True,\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"YEAR\",\n \"timePartitionExpiration\": 0}\n ])\n\n dev_data_generator >> selector >> expression_evaluator_1 >> bigquery\n selector >> expression_evaluator_2 >> bigquery\n\n selector.condition = [dict(outputLane=selector.output_lanes[0], predicate='${record:value(\\'/id\\')%2==0}'),\n dict(outputLane=selector.output_lanes[1], predicate='default')]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref_1 = DatasetReference(gcp.project_id, dataset_name_1)\n dataset_ref_2 = DatasetReference(gcp.project_id, dataset_name_2)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table_1 = bigquery_client.get_table(f'{dataset_name_1}.{table_name_1}')\n data_from_bigquery_1 = [tuple(row.values()) for row in bigquery_client.list_rows(table_1)]\n data_from_bigquery_1.sort()\n\n table_2 = bigquery_client.get_table(f'{dataset_name_2}.{table_name_2}')\n data_from_bigquery_2 = [tuple(row.values()) for row in bigquery_client.list_rows(table_2)]\n data_from_bigquery_2.sort()\n\n # Assert table is partitioned as well\n assert table_1.time_partitioning.type_ == 'MONTH'\n assert table_2.time_partitioning.type_ == 'YEAR'\n assert len(data_from_bigquery_1) + len(data_from_bigquery_2) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref_1)\n _clean_up_bigquery(bigquery_client, dataset_ref_2)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def test_partition_keys(self):\r\n class ModelWithPartitionKeys(cqlengine.Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n c1 = cqlengine.Text(primary_key=True)\r\n p1 = cqlengine.Text(partition_key=True)\r\n p2 = cqlengine.Text(partition_key=True)\r\n\r\n cols = ModelWithPartitionKeys._columns\r\n\r\n self.assertTrue(cols['c1'].primary_key)\r\n self.assertFalse(cols['c1'].partition_key)\r\n\r\n self.assertTrue(cols['p1'].primary_key)\r\n self.assertTrue(cols['p1'].partition_key)\r\n self.assertTrue(cols['p2'].primary_key)\r\n self.assertTrue(cols['p2'].partition_key)\r\n\r\n obj = ModelWithPartitionKeys(p1='a', p2='b')\r\n self.assertEquals(obj.pk, ('a', 'b'))", "def require_partition_filter(self) -> bool:\n return pulumi.get(self, \"require_partition_filter\")", "def partition(self, dimension, processes=None):\n if processes:\n q = (self._table.source.isin(processes) |\n self._table.target.isin(processes))\n values = self._table.loc[q, dimension].unique()\n else:\n values = self._table[dimension].unique()\n return Partition.Simple(dimension, values)", "def _partitionize(df, settings, grids, frag):\n column = 
settings['feature']\n if len(df) > 0:\n init, end, end2 = grids\n tmp = df.apply(lambda row: _inblock(row, column, init, end), axis=1)\n tmp = df.loc[tmp]\n\n if len(frag) > 0:\n frag = pd.concat([frag, tmp])\n else:\n frag = tmp\n return frag", "def test_ingest_dataframe_partition(\n self, mocked_client, mocker, partitioned_df, tmp_path\n ):\n mocked_client._core_service_stub = Core.CoreServiceStub(\n grpc.insecure_channel(\"\")\n )\n\n mocker.patch.object(\n mocked_client._core_service_stub,\n \"GetFeatureTable\",\n return_value=_ingest_test_getfeaturetable_mocked_resp(\n f\"file://{tmp_path}\", \"date\"\n ),\n )\n\n mocked_client.set_project(\"my_project\")\n ft = mocked_client.get_feature_table(\"ingest_featuretable\")\n mocked_client.ingest(ft, partitioned_df, timeout=600)\n\n pq_df = pq.read_table(tmp_path).to_pandas().drop(columns=[\"date\"])\n\n partitioned_df, pq_df = _ingest_test_format_dataframes(\n partitioned_df, pq_df, True\n )\n\n assert_frame_equal(partitioned_df, pq_df)", "def partition_is_raw(self):\n is_raw_partition = self.get_is_raw_partition()\n df = self.get_df()\n st_data_dt = self.get_st_data_dt()\n end_data_dt = self.get_end_data_dt()\n date_series = pd.date_range(*(pd.to_datetime([st_data_dt, end_data_dt]) + pd.offsets.MonthEnd()), freq='M', name='ft_data_dt')\n date_series = date_series.to_list()\n for d in date_series:\n is_raw_partition[d.strftime(\"%Y-%m-%d\")] = df[df['ft_data_dt'] == d]\n self.set_is_raw_partition(is_raw_partition)\n self.set_df(df)", "def _paginate_query_across_partitioned_databases(model_class, q_expression, load_source):\n from corehq.messaging.scheduling.scheduling_partitioned.models import (\n CaseAlertScheduleInstance,\n CaseTimedScheduleInstance,\n )\n\n if model_class not in (CaseAlertScheduleInstance, CaseTimedScheduleInstance):\n raise TypeError(\"Expected CaseAlertScheduleInstance or CaseTimedScheduleInstance\")\n\n db_names = get_db_aliases_for_partitioned_query()\n for db_name in db_names:\n for row in _paginate_query(db_name, model_class, q_expression, load_source):\n yield row", "def generate_pk_clause(catalog_entry, state):\n key_properties = common.get_key_properties(catalog_entry)\n escaped_columns = [common.escape(c) for c in key_properties]\n\n max_pk_values = singer.get_bookmark(state,\n catalog_entry.tap_stream_id,\n 'max_pk_values')\n\n last_pk_fetched = singer.get_bookmark(state,\n catalog_entry.tap_stream_id,\n 'last_pk_fetched')\n\n if last_pk_fetched:\n pk_comparisons = ['({} > {} AND {} <= {})'.format(common.escape(pk),\n last_pk_fetched[pk],\n common.escape(pk),\n max_pk_values[pk])\n for pk in key_properties]\n else:\n pk_comparisons = [f'{common.escape(pk)} <= {max_pk_values[pk]}' for pk in key_properties]\n\n sql = ' WHERE {} ORDER BY {} ASC'.format(' AND '.join(pk_comparisons),\n ', '.join(escaped_columns))\n\n return sql", "def _ingest_test_format_dataframes(\n partitioned_df: pd.DataFrame, pq_df: pd.DataFrame, with_partitions: bool = False\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n partitioned_df.sort_values(by=[\"dev_feature_float\"], inplace=True)\n pq_df.sort_values(by=[\"dev_feature_float\"], inplace=True)\n pq_df = pq_df.reindex(sorted(pq_df.columns), axis=1)\n partitioned_df = partitioned_df.reindex(sorted(partitioned_df.columns), axis=1)\n partitioned_df.reset_index(drop=True, inplace=True)\n pq_df.reset_index(drop=True, inplace=True)\n\n if with_partitions:\n partitioned_df[\"datetime_col\"] = pd.to_datetime(\n partitioned_df.datetime_col\n ).dt.tz_convert(\"UTC\")\n pq_df[\"datetime_col\"] = 
pd.to_datetime(pq_df.datetime_col).dt.tz_convert(\"UTC\")\n\n return partitioned_df, pq_df", "def _partitioner(shape, dtype):\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f\"shape is not a TensorShape: {shape}\")\n if not shape.is_fully_defined():\n raise ValueError(f\"shape is not fully defined: {shape}\")\n if not isinstance(dtype, dtypes.DType):\n raise ValueError(f\"dtype is not a DType: {dtype}\")\n\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (\n shape.num_elements() / shape.dims[axis].value) * element_size\n # How many slices can we fit on one shard of size at most max_shard_bytes?\n # At least one slice is required.\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n # How many shards do we need for axis given that each shard fits\n # slices_per_shard slices from a total of shape[axis] slices?\n axis_shards = int(math.ceil(\n 1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n\n partitions[axis] = axis_shards\n\n return partitions", "def _make_aggregation_query(assets: List[str], search_by: str, partitioned_cols: Dict[str, Set[str]], date_col: str,\n start: pd.Timestamp = None, end: pd.Timestamp = None) -> Tuple[\n List[Dict[str, any]], List[str]]:\n\n # this always needs to be made no matter if there is no static wanted\n static_projection = {field: 1 for field in (partitioned_cols['static'] if partitioned_cols['static'] else [])}\n static_projection['_id'] = 0\n static_projection[search_by] = 1\n\n if partitioned_cols['timeseries']:\n # making the timeseries projection dict\n timeseries_projection = {field: '$timeseries.' 
+ field for field in partitioned_cols['timeseries']}\n timeseries_projection['date'] = f'$timeseries.{date_col}'\n\n aggregation_query = [\n {'$match': {search_by: {'$in': assets}}},\n {'$unwind': '$timeseries'},\n {'$match': {f'timeseries.{date_col}': {'$gte': start, '$lt': end}}},\n {'$project': {**static_projection, **timeseries_projection}}\n ]\n primary_key = ['date', search_by]\n\n else:\n aggregation_query = [{'$match': {search_by: {'$in': assets}}},\n {'$project': static_projection}]\n primary_key = [search_by]\n\n return aggregation_query, primary_key", "def __create_partition(self,partition_dt):\n\n p_array = self.__partition_date_to_path_array(partition_dt)\n \n # For each component, fetch the group or create it\n # Year\n try:\n y_group = self.root_group._f_get_child(p_array[0])\n except tables.NoSuchNodeError:\n y_group = self.file.create_group(self.root_group,p_array[0])\n\n # Month\n try:\n m_group = y_group._f_get_child(p_array[1])\n except tables.NoSuchNodeError:\n m_group = self.file.create_group(y_group,p_array[1])\n\n # Day\n try:\n d_group = m_group._f_get_child(p_array[2])\n except tables.NoSuchNodeError:\n d_group = self.file.create_group(m_group,p_array[2])\n\n # We need to create the table in the day group\n ts_data = self.file.create_table(d_group,'ts_data',self.table_description,self.table_title,\n self.table_filters, self.table_expectedrows, self.table_chunkshape, self.table_byteorder)\n\n # Need to save this as an attribute because it doesn't seem to be saved anywhere\n ts_data.attrs._TS_TABLES_EXPECTEDROWS_PER_PARTITION = self.table_expectedrows\n\n return ts_data", "def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n ...", "def test_greedy_partition(self):\r\n\r\n #(non) partition into one bucket\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 1)\r\n self.assertEquals(obs_levels, [6])\r\n self.assertEquals(obs_part, [['3', '1', '2']])\r\n\r\n # two buckets\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 2)\r\n\r\n self.assertEquals(obs_levels, [3, 3])\r\n self.assertEquals(obs_part, [['3'], ['1', '2']])\r\n\r\n # larger input\r\n obs_part, obs_levels = greedy_partition({'1': 1, '2': 2, '3': 3,\r\n '4': 4, '5': 5, '6': 6}, 2)\r\n self.assertEquals(obs_levels, [11, 10])\r\n self.assertEquals(obs_part, [['6', '3', '2'], ['5', '4', '1']])", "def test_subquery_no_order(self):\n with self.patch_schema({}):\n sql = (\n \"SELECT COUNT(*) FROM (SELECT DISTINCT id FROM a)\"\n )\n stmt = sqlparse.parse(sql)[0]\n assert False == self.has_order_by_count(stmt)", "def create_query(window,con,input_table_name,output_table_name,input_columns, stat_columns):\n sql = \"CREATE TABLE {} AS \".format(output_table_name)\n sql = sql + \"SELECT\" \n for input_column in input_columns:\n sql = sql + \" {},\".format(input_column)\n for stat_column in stat_columns:\n sql = sql + \" {},\".format(stat_column)\n for stat_column in stat_columns:\n sql = sql + \" AVG({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS ma{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" MIN({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS min{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" MAX({}) OVER(PARTITION 
BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS max{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" regr_slope({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS slope{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" regr_intercept({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS intercept{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + (\" regr_slope({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) * year \"\n \"+ regr_intercept({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS ols{:02.0f}_{},\".format(stat_column,window-1,stat_column,window-1,window,stat_column))\n \n \n sql = sql[:-1]\n sql = sql + \" FROM {}\".format(input_table_name)\n return sql", "def createPartitions(self, databaseCursor, iterator):\n self.logger.debug(\"%s - in createPartitions\", threading.currentThread().getName())\n partitionTableClasses = getOrderedPartitionList([self.__class__])\n #self.logger.debug(\"DEBUG - Classes are %s\",partitionTableClasses)\n uniqueItems = [x for x in iterator]\n for tableClass in partitionTableClasses:\n tableObject = self\n if not self.__class__ == tableClass:\n tableObject = tableClass(logger = self.logger)\n #self.logger.debug(\"DEBUG - Handling %s /w/ sql %s\",tableObject.name,tableObject.partitionCreationSqlTemplate)\n tableObject._createOwnPartition(databaseCursor,uniqueItems)", "def test_get_id_range_for_partition_with_empty_partitions():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n record_ids = {1, 5, 7, 15, 19, 20, 41, 100}\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = len(record_ids)\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n assert _remove_seen_ids(ctrl, record_ids) == set({})", "def _record_specific_partition(r_d, numnodes, cur):\n # No partitioning has been specified. Create the appropriate entries.\n if r_d['partmtd'] == 0:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partmtd = 0 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler, (i, r_d['tname']))\n\n # Range partitioning has been specified. 
Create the appropriate entries.\n elif r_d['partmtd'] == 1:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, '\n 'partparam2 = ?, partmtd = 1 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'][i - 1], r_d['param2'][i - 1], i,\n r_d['tname']))\n\n # Hash partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 2:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, partmtd = 2 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'], i, r_d['tname']))", "def partition(ary, predicate, extra_args=[], preamble=\"\", queue=None, wait_for=None):\n if len(ary) > np.iinfo(np.uint32).max:\n scan_dtype = np.uint64\n else:\n scan_dtype = np.uint32\n\n extra_args_types, extra_args_values = extract_extra_args_types_values(extra_args)\n\n knl = _partition_template.build(\n ary.context,\n type_aliases=((\"item_t\", ary.dtype), (\"scan_t\", scan_dtype)),\n var_values=((\"predicate\", predicate),),\n more_preamble=preamble, more_arguments=extra_args_types)\n\n out_true = cl.array.empty_like(ary)\n out_false = cl.array.empty_like(ary)\n count = ary._new_with_changes(data=None, offset=0,\n shape=(), strides=(), dtype=scan_dtype)\n\n # **dict is a Py2.5 workaround\n evt = knl(ary, out_true, out_false, count, *extra_args_values,\n **dict(queue=queue, wait_for=wait_for))\n\n return out_true, out_false, count, evt", "def generate_idrac_sql(metric: str, \n fqdd: str,\n start: str, \n end: str, \n interval: str, \n aggregate: str,\n schema: str = 'idrac'):\n sql = f\"SELECT time_bucket_gapfill('{interval}', timestamp) AS time, \\\n nodeid, fqdd AS label, {aggregate}(value) AS value \\\n FROM {schema}.{metric} \\\n WHERE timestamp >= '{start}' \\\n AND timestamp < '{end}' \\\n AND fqdd = '{fqdd}' \\\n GROUP BY time, nodeid, label \\\n ORDER BY time;\"\n return sql" ]
[ "0.65572757", "0.5455361", "0.5396073", "0.535287", "0.5346023", "0.52282643", "0.5199034", "0.51658976", "0.5140471", "0.5028549", "0.49780765", "0.4906591", "0.48571414", "0.47534937", "0.47362173", "0.4730835", "0.4704191", "0.47004652", "0.4684612", "0.46640965", "0.46412754", "0.46247536", "0.4617206", "0.4587321", "0.45373014", "0.45300275", "0.45193416", "0.45040867", "0.45008639", "0.44976535" ]
0.73940617
0
Check if BranchSQLOperator throws an exception for unsupported connection type
def test_unsupported_conn_type(self): op = BranchSQLOperator( task_id="make_choice", conn_id="redis_default", sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES", follow_task_ids_if_true="branch_1", follow_task_ids_if_false="branch_2", dag=self.dag, ) with pytest.raises(AirflowException): op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_conn(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def _is_db_connection_error(args):\n # NOTE(adam_g): This is currently MySQL specific and needs to be extended\n # to support Postgres and others.\n # For the db2, the error code is -30081 since the db2 is still not ready\n conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')\n for err_code in conn_err_codes:\n if args.find(err_code) != -1:\n return True\n return False", "def is_db_connection_error(args):\n # NOTE(adam_g): This is currently MySQL specific and needs to be extended\n # to support Postgres and others.\n conn_err_codes = ('2002', '2003', '2006')\n for err_code in conn_err_codes:\n if args.find(err_code) != -1:\n return True\n return False", "def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)", "def check_connection(self):\n pass", "def test_postgresql_connect_fail(self):\n if _is_backend_avail('postgresql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_connect_invalid_string(self):\n with pytest.raises(ValueError):\n DatabaseDriver.connect('not a valid connect string')", "def test_postgresql_connect_fail(self):\n if test_migrations._is_backend_avail(\n 'postgres', 'kickstand_cifail', self.PASSWD, self.DATABASE):\n self.fail(\"Shouldn't have connected\")", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_mysql_connect_fail(self):\n if test_migrations._is_backend_avail(\n 'mysql', 'kickstand_cifail', self.PASSWD, self.DATABASE):\n self.fail(\"Shouldn't have connected\")", "def test_mysql_connect_fail(self):\n if _is_backend_avail('mysql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")", "def _check_sql_mode(self, **kwargs):\n return []", "def _retry_on_connection_error(exc):\n\n if isinstance(exc, db_exception.DBConnectionError):\n LOG.warning(\"Connection error detected. 
Retrying...\")\n return True\n return False", "def test_sql_branch_operator_mysql(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_sql_branch_operator_postgres(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"postgres_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def check_connection(self):\n return False", "def test_wrong_args(self, bad_context):\n with pytest.raises(TypeError):\n Connection(bad_context)", "def test_unsupported_sql(self):\n user = getuser()\n impala_client = self.create_impala_client()\n error_msg = \"UnsupportedFeatureException: {0} is not supported by Sentry.\"\n statements = [(\"grant select on database functional to user foo\",\n error_msg.format(\"GRANT <privilege> TO USER\")),\n (\"grant select on database functional to group foo\",\n error_msg.format(\"GRANT <privilege> TO GROUP\")),\n (\"revoke select on database functional from user foo\",\n error_msg.format(\"REVOKE <privilege> FROM USER\")),\n (\"revoke select on database functional from group foo\",\n error_msg.format(\"REVOKE <privilege> FROM GROUP\")),\n (\"show grant group foo\", error_msg.format(\"SHOW GRANT GROUP\"))]\n for statement in statements:\n result = self.execute_query_expect_failure(impala_client, statement[0], user=user)\n assert statement[1] in str(result)", "def meets_condition(db_type: str):\n\t\t...", "def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)", "def _connect_ping_listener(connection, branch):\n if branch:\n return\n\n save_should_close_with_result = connection.should_close_with_result\n connection.should_close_with_result = False\n try:\n connection.scalar(select([1]))\n except Exception as ex:\n connection.scalar(select([1]))\n finally:\n connection.should_close_with_result = save_should_close_with_result", "def test_unsupported_syntax(self):\n\n self.assertRaises(\n (TypeError, ValueError), self.table.where, 'c_bool[0]'\n )\n self.assertRaises(TypeError, self.table.where, 'c_bool()')\n self.assertRaises(NameError, self.table.where, 'c_bool.__init__')", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def check(self, connection):\n return True", "def check_connection():\n qry = QtSql.QSqlQuery()\n try:\n if qry.exec_(\"Select name from user\"):\n return True, None\n else:\n print(\"Connection Checked - Error\")\n return False, qry.lastError().text()\n except Exception:\n print(\"Connection Checked - Failed\")\n return 
False, qry.lastError().text()", "def _identify_connection(self):\n pass #nothing to identify...\n #raise NotImplementedError(\"Implement!\")", "def test_type(self):\n ctx = Context(SSLv23_METHOD)\n assert is_consistent_type(Connection, \"Connection\", ctx, None)", "def ConnectByNameError(self) -> _n_0_t_14:", "def test_loqusdb_wrong_version(loqus_exe):\n # GIVEN a loqusdb version < 2.5\n loqus_extension = LoqusDB(loqusdb_binary=loqus_exe, version=1.0)\n # WHEN instantiating an adapter\n with pytest.raises(SyntaxError):\n # THEN assert a syntax error is raised since version is wrong\n loqus_extension.version_check()" ]
[ "0.6958899", "0.6256558", "0.6233108", "0.5780643", "0.5682214", "0.56752306", "0.5596914", "0.5594159", "0.5581828", "0.55458724", "0.5538639", "0.55344284", "0.5532713", "0.54795086", "0.5462565", "0.5447765", "0.54015386", "0.5363145", "0.5348738", "0.5261789", "0.52582824", "0.5257969", "0.52335083", "0.52234256", "0.5216062", "0.5186144", "0.5166108", "0.5152996", "0.5143823", "0.5141222" ]
0.8031548
0
Check if BranchSQLOperator throws an exception for invalid connection
def test_invalid_conn(self): op = BranchSQLOperator( task_id="make_choice", conn_id="invalid_connection", sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES", follow_task_ids_if_true="branch_1", follow_task_ids_if_false="branch_2", dag=self.dag, ) with pytest.raises(AirflowException): op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unsupported_conn_type(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"redis_default\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def _is_db_connection_error(args):\n # NOTE(adam_g): This is currently MySQL specific and needs to be extended\n # to support Postgres and others.\n # For the db2, the error code is -30081 since the db2 is still not ready\n conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')\n for err_code in conn_err_codes:\n if args.find(err_code) != -1:\n return True\n return False", "def is_db_connection_error(args):\n # NOTE(adam_g): This is currently MySQL specific and needs to be extended\n # to support Postgres and others.\n conn_err_codes = ('2002', '2003', '2006')\n for err_code in conn_err_codes:\n if args.find(err_code) != -1:\n return True\n return False", "def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def check_connection(self):\n pass", "def _retry_on_connection_error(exc):\n\n if isinstance(exc, db_exception.DBConnectionError):\n LOG.warning(\"Connection error detected. Retrying...\")\n return True\n return False", "def test_mysql_connect_fail(self):\n if test_migrations._is_backend_avail(\n 'mysql', 'kickstand_cifail', self.PASSWD, self.DATABASE):\n self.fail(\"Shouldn't have connected\")", "def test_connect_invalid_string(self):\n with pytest.raises(ValueError):\n DatabaseDriver.connect('not a valid connect string')", "def test_mysql_connect_fail(self):\n if _is_backend_avail('mysql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")", "def test_postgresql_connect_fail(self):\n if _is_backend_avail('postgresql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")", "def check_connection(self):\n return False", "def test_postgresql_connect_fail(self):\n if test_migrations._is_backend_avail(\n 'postgres', 'kickstand_cifail', self.PASSWD, self.DATABASE):\n self.fail(\"Shouldn't have connected\")", "def connection_failed(self, connection, error):\n assert False", "def _check_connection(self, check_db=True) -> None:\n if not self._connected:\n raise InterfaceError(\"Client is not connected to a TerminusDB server.\")\n if check_db and self._db is None:\n raise InterfaceError(\n \"No database is connected. 
Please either connect to a database or create a new database.\"\n )", "def _check_closed(self):\n if self.closed:\n raise Error(\"cursor is closed\")\n if self.session.closed:\n raise Error(\"connection is closed\")", "def check_connection():\n qry = QtSql.QSqlQuery()\n try:\n if qry.exec_(\"Select name from user\"):\n return True, None\n else:\n print(\"Connection Checked - Error\")\n return False, qry.lastError().text()\n except Exception:\n print(\"Connection Checked - Failed\")\n return False, qry.lastError().text()", "def connection_lost(self, exc):\n pass", "def test_connection_error(self, bad_mock):\n bad_mock.side_effect = ConnectionError()\n self.assertRaises(DatabaseConnectionError, Promotion.init_db, 'test')", "def check(self, connection):\n return True", "def test_get_conn_uri_non_existent_key(self):\n conn_id = \"test_mysql\"\n param = {\n 'Name': '/airflow/connections/test_postgres',\n 'Type': 'String',\n 'Value': 'postgresql://airflow:airflow@host:5432/airflow',\n }\n\n ssm_backend = SystemsManagerParameterStoreBackend()\n ssm_backend.client.put_parameter(**param)\n\n assert ssm_backend.get_conn_uri(conn_id=conn_id) is None\n assert [] == ssm_backend.get_connections(conn_id=conn_id)", "def test_operational_error_asis(self):\n\n matched = self._run_test(\n \"mysql\", \"select some_operational_error\",\n self.OperationalError(\"some op error\"),\n sqla.exc.OperationalError\n )\n self.assertSQLAException(\n matched,\n \"OperationalError\", \"some op error\"\n )", "def ConnectByNameError(self) -> _n_0_t_14:", "def test_db_connection_bad_host():\n\n with pytest.raises(AddressError, match=r\"Cannot resolve address .*\"):\n\n from cwf2neo.neo4j import Neo4j\n\n db = Neo4j(host=\"invalidneo4jhostname\")\n\n db.graph.database.name", "def db_connection_error(error):\n return internal_server_error(error)", "def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)", "def _check_connect(self) -> bool:\n\n if (self._conn is None):\n if (self._exception):\n raise base_connection.ConnectException(\n \"No connection established\")\n\n else:\n return False\n\n return True", "def connection_failed(self):\n return self.conn_status == self.CONN_FAILED", "def handle_connection_lost(self, exc: Optional[Exception]) -> None:" ]
[ "0.7339684", "0.67935187", "0.672039", "0.658468", "0.63629395", "0.6320076", "0.6312238", "0.6206086", "0.61220324", "0.61099696", "0.60807556", "0.60751575", "0.60306495", "0.60255706", "0.58591485", "0.58430076", "0.57226694", "0.57224053", "0.5707363", "0.56728756", "0.563901", "0.56172514", "0.561417", "0.56141526", "0.56109166", "0.55957186", "0.5590059", "0.5576567", "0.5556868", "0.55550975" ]
0.79077697
0
Check if BranchSQLOperator works with backend
def test_sql_branch_operator_mysql(self): branch_op = BranchSQLOperator( task_id="make_choice", conn_id="mysql_default", sql="SELECT 1", follow_task_ids_if_true="branch_1", follow_task_ids_if_false="branch_2", dag=self.dag, ) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sql_branch_operator_postgres(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"postgres_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_unsupported_conn_type(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"redis_default\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_invalid_conn(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_branch_true_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = true_value\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_false_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = false_value\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_list_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=[\"branch_1\", \"branch_2\"],\n follow_task_ids_if_false=\"branch_3\",\n 
dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.branch_3 = EmptyOperator(task_id=\"branch_3\", dag=self.dag)\n self.branch_3.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n mock_get_records.return_value = [[\"1\"]]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_3\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_single_value_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = 1\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def check_for_branch_op(op_info: ModuleIdentifierOpInfo):\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool", "def validate_branch_ops(conn_graph: ConnectedGraph):\n\n def check_for_branch_op(op_info: 
ModuleIdentifierOpInfo):\n \"\"\"\n Look inside conn_graph ops and products for branch ops, and validate connections to parent and child ops\n \"\"\"\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool\n\n # pylint: disable=protected-access\n module_identifier = StructureModuleIdentifier(conn_graph.graph, conn_graph._starting_op_names,\n conn_graph._valid_ops)\n num_branches_found = 0\n for tf_op in conn_graph.graph.get_operations():\n # Ignore ops which were not found in the initial depth first search\n if tf_op not in module_identifier.processed_ops:\n continue\n\n found_branch = False\n for output_tensor in tf_op.outputs:\n if len(output_tensor.consumers()) > 1:\n # Potential branch op. Check if children go to separate modules\n child_module_set = set()\n for consumer_op in output_tensor.consumers():\n if consumer_op in module_identifier._valid_ops:\n child_module_info = module_identifier.get_op_info(consumer_op)\n child_module_set.add(child_module_info.module_name)\n\n # If children go to separate modules, this should be a branch op\n if len(child_module_set) > 1:\n found_branch = True\n break\n\n if found_branch:\n num_branches_found += 1\n tf_op_info = module_identifier.get_op_info(tf_op)\n if not check_for_branch_op(tf_op_info):\n return False\n\n logger.info(\"Found %s branches\", num_branches_found)\n return True", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_with_skip_in_branch_downstream_dependencies(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n branch_op >> self.branch_1 >> self.branch_2\n branch_op >> self.branch_2\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = [true_value]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == 
\"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def _check_sql_mode(self, **kwargs):\n return []", "def test_invalid_query_result_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = [\"Invalid Value\"]\n\n with pytest.raises(AirflowException):\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)", "def splitflow(self):\n if self.name in conditional_branch:\n return True\n return False", "def check(self):\n self.conn = psycopg2.connect(self.conn_string)\n self.cur = self.conn.cursor(\"rifflecursor\")\n self.cur.execute(\"\"\"\n SELECT * FROM yelp_stored WHERE business_id = %s;\n \"\"\", (self.bus_id,))\n sql_tup = self.cur.fetchall()\n self.conn.close()\n if sql_tup == []:\n return False\n else:\n return sql_tup", "def validate_backend_version(self):\n pass", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def is_local_backend(backend):\n return backend.configuration().local", "def test_with_skip_in_branch_downstream_dependencies2(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n branch_op >> self.branch_1 >> self.branch_2\n branch_op >> self.branch_2\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = [false_value]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def backend_name(self) -> str:\n return self._db_data.backend", "def is_thrift(self):\r\n return False", "def backend_info(self):\n\t\treturn {'valid': False}", "def is_ibmq_provider(backend):\n if has_ibmq():\n return isinstance(backend.provider(), IBMQProvider)\n else:\n return False", "def 
is_dask_backend(backend: Optional['Backend']) ->bool:\n return backend is not None and is_dask_lib(backend.df_engine.df_lib)", "def is_statevector_backend(backend):\n return backend.name().startswith('statevector') if backend is not None else False", "def isMSSQL(self):\n if self._engine == \"ODBCMSSQL\":\n return True\n return False", "def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False", "def tob_connection_synced():\n global app_config\n\n return (\"TOB_CONNECTION\" in app_config) and (app_config[\"TOB_CONNECTION\"] in synced) and (synced[app_config[\"TOB_CONNECTION\"]])", "def backend_protocol(self) -> Optional[pulumi.Input[Union[str, 'BackendProtocol']]]:\n return pulumi.get(self, \"backend_protocol\")" ]
[ "0.709817", "0.63938904", "0.58970475", "0.58533704", "0.57342464", "0.5657157", "0.56091696", "0.55608296", "0.55274713", "0.5468663", "0.5457718", "0.538129", "0.53299373", "0.5242461", "0.5170583", "0.51125485", "0.5051785", "0.5049264", "0.50373596", "0.5036543", "0.49722496", "0.4971945", "0.49387047", "0.49339116", "0.49169272", "0.48994127", "0.48695636", "0.48693642", "0.48620814", "0.48511934" ]
0.6937743
1
Check if BranchSQLOperator works with backend
def test_sql_branch_operator_postgres(self):
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="postgres_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sql_branch_operator_mysql(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_unsupported_conn_type(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"redis_default\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_invalid_conn(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_branch_true_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = true_value\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_false_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = false_value\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_list_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=[\"branch_1\", \"branch_2\"],\n follow_task_ids_if_false=\"branch_3\",\n 
dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.branch_3 = EmptyOperator(task_id=\"branch_3\", dag=self.dag)\n self.branch_3.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n mock_get_records.return_value = [[\"1\"]]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_3\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_single_value_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = 1\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def check_for_branch_op(op_info: ModuleIdentifierOpInfo):\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool", "def validate_branch_ops(conn_graph: ConnectedGraph):\n\n def check_for_branch_op(op_info: 
ModuleIdentifierOpInfo):\n \"\"\"\n Look inside conn_graph ops and products for branch ops, and validate connections to parent and child ops\n \"\"\"\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool\n\n # pylint: disable=protected-access\n module_identifier = StructureModuleIdentifier(conn_graph.graph, conn_graph._starting_op_names,\n conn_graph._valid_ops)\n num_branches_found = 0\n for tf_op in conn_graph.graph.get_operations():\n # Ignore ops which were not found in the initial depth first search\n if tf_op not in module_identifier.processed_ops:\n continue\n\n found_branch = False\n for output_tensor in tf_op.outputs:\n if len(output_tensor.consumers()) > 1:\n # Potential branch op. Check if children go to separate modules\n child_module_set = set()\n for consumer_op in output_tensor.consumers():\n if consumer_op in module_identifier._valid_ops:\n child_module_info = module_identifier.get_op_info(consumer_op)\n child_module_set.add(child_module_info.module_name)\n\n # If children go to separate modules, this should be a branch op\n if len(child_module_set) > 1:\n found_branch = True\n break\n\n if found_branch:\n num_branches_found += 1\n tf_op_info = module_identifier.get_op_info(tf_op)\n if not check_for_branch_op(tf_op_info):\n return False\n\n logger.info(\"Found %s branches\", num_branches_found)\n return True", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_with_skip_in_branch_downstream_dependencies(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n branch_op >> self.branch_1 >> self.branch_2\n branch_op >> self.branch_2\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = [true_value]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == 
\"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def _check_sql_mode(self, **kwargs):\n return []", "def test_invalid_query_result_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = [\"Invalid Value\"]\n\n with pytest.raises(AirflowException):\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)", "def splitflow(self):\n if self.name in conditional_branch:\n return True\n return False", "def check(self):\n self.conn = psycopg2.connect(self.conn_string)\n self.cur = self.conn.cursor(\"rifflecursor\")\n self.cur.execute(\"\"\"\n SELECT * FROM yelp_stored WHERE business_id = %s;\n \"\"\", (self.bus_id,))\n sql_tup = self.cur.fetchall()\n self.conn.close()\n if sql_tup == []:\n return False\n else:\n return sql_tup", "def validate_backend_version(self):\n pass", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def is_local_backend(backend):\n return backend.configuration().local", "def test_with_skip_in_branch_downstream_dependencies2(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n branch_op >> self.branch_1 >> self.branch_2\n branch_op >> self.branch_2\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = [false_value]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def backend_name(self) -> str:\n return self._db_data.backend", "def is_thrift(self):\r\n return False", "def backend_info(self):\n\t\treturn {'valid': False}", "def is_ibmq_provider(backend):\n if has_ibmq():\n return isinstance(backend.provider(), IBMQProvider)\n else:\n return False", "def 
is_dask_backend(backend: Optional['Backend']) ->bool:\n return backend is not None and is_dask_lib(backend.df_engine.df_lib)", "def is_statevector_backend(backend):\n return backend.name().startswith('statevector') if backend is not None else False", "def isMSSQL(self):\n if self._engine == \"ODBCMSSQL\":\n return True\n return False", "def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False", "def tob_connection_synced():\n global app_config\n\n return (\"TOB_CONNECTION\" in app_config) and (app_config[\"TOB_CONNECTION\"] in synced) and (synced[app_config[\"TOB_CONNECTION\"]])", "def backend_protocol(self) -> Optional[pulumi.Input[Union[str, 'BackendProtocol']]]:\n return pulumi.get(self, \"backend_protocol\")" ]
[ "0.6937743", "0.63938904", "0.58970475", "0.58533704", "0.57342464", "0.5657157", "0.56091696", "0.55608296", "0.55274713", "0.5468663", "0.5457718", "0.538129", "0.53299373", "0.5242461", "0.5170583", "0.51125485", "0.5051785", "0.5049264", "0.50373596", "0.5036543", "0.49722496", "0.4971945", "0.49387047", "0.49339116", "0.49169272", "0.48994127", "0.48695636", "0.48693642", "0.48620814", "0.48511934" ]
0.709817
0
Test SQL Branch with skipping all downstream dependencies
def test_with_skip_in_branch_downstream_dependencies(self, mock_get_db_hook):
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )

        branch_op >> self.branch_1 >> self.branch_2
        branch_op >> self.branch_2
        self.dag.clear()

        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )

        mock_get_records = mock_get_db_hook.return_value.get_first

        for true_value in SUPPORTED_TRUE_VALUES:
            mock_get_records.return_value = [true_value]

            branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

            tis = dr.get_task_instances()
            for ti in tis:
                if ti.task_id == "make_choice":
                    assert ti.state == State.SUCCESS
                elif ti.task_id == "branch_1":
                    assert ti.state == State.NONE
                elif ti.task_id == "branch_2":
                    assert ti.state == State.NONE
                else:
                    raise ValueError(f"Invalid task id {ti.task_id} found!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_skip_in_branch_downstream_dependencies2(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n branch_op >> self.branch_1 >> self.branch_2\n branch_op >> self.branch_2\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = [false_value]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_false_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for false_value in SUPPORTED_FALSE_VALUES:\n mock_get_records.return_value = false_value\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.SKIPPED\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_sql_branch_operator_postgres(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"postgres_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_parent_skip_branch():\n with create_session() as session:\n session.query(DagRun).delete()\n session.query(TaskInstance).delete()\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_parent_skip_branch_dag\", schedule_interval=None, start_date=start_date)\n dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=start_date)\n op1 = BranchPythonOperator(task_id=\"op1\", python_callable=lambda: \"op3\", dag=dag)\n op2 = DummyOperator(task_id=\"op2\", dag=dag)\n op3 = DummyOperator(task_id=\"op3\", dag=dag)\n op1 >> [op2, op3]\n TaskInstance(op1, start_date).run()\n ti2 = TaskInstance(op2, start_date)\n dep = NotPreviouslySkippedDep()\n\n assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 1\n session.commit()\n assert not dep.is_met(ti2, session)\n assert ti2.state == State.SKIPPED", "def test_branch_true_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n 
follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n for true_value in SUPPORTED_TRUE_VALUES:\n mock_get_records.return_value = true_value\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_branch_list_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=[\"branch_1\", \"branch_2\"],\n follow_task_ids_if_false=\"branch_3\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.branch_3 = EmptyOperator(task_id=\"branch_3\", dag=self.dag)\n self.branch_3.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n mock_get_records.return_value = [[\"1\"]]\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_3\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_sql_branch_operator_mysql(self):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_parent_not_executed():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_parent_not_executed_dag\", schedule_interval=None, start_date=start_date)\n op1 = BranchPythonOperator(task_id=\"op1\", python_callable=lambda: \"op3\", dag=dag)\n op2 = DummyOperator(task_id=\"op2\", dag=dag)\n op3 = DummyOperator(task_id=\"op3\", dag=dag)\n op1 >> [op2, op3]\n\n ti2 = TaskInstance(op2, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0\n assert dep.is_met(ti2, session)\n assert ti2.state == State.NONE", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n 
task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_branch_single_value_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n dr = self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = 1\n\n branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n\n tis = dr.get_task_instances()\n for ti in tis:\n if ti.task_id == \"make_choice\":\n assert ti.state == State.SUCCESS\n elif ti.task_id == \"branch_1\":\n assert ti.state == State.NONE\n elif ti.task_id == \"branch_2\":\n assert ti.state == State.SKIPPED\n else:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")", "def test_parent_follow_branch():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_parent_follow_branch_dag\", schedule_interval=None, start_date=start_date)\n dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=start_date)\n op1 = BranchPythonOperator(task_id=\"op1\", python_callable=lambda: \"op2\", dag=dag)\n op2 = DummyOperator(task_id=\"op2\", dag=dag)\n op1 >> op2\n TaskInstance(op1, start_date).run()\n ti2 = TaskInstance(op2, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0\n assert dep.is_met(ti2, session)\n assert ti2.state != State.SKIPPED", "def skip_or_run_sql_test(func):\n\n return skip_or_run_test_tarantool(func, '2.0.0', 'does not support SQL')", "def test_idem_make_branch_new_case(self):\n # set up\n new_deployment_pipeline_patcher = patch(\n 'factories.new_deployment_pipeline')\n mock_new_deployment_pipeline = new_deployment_pipeline_patcher.start()\n mock_rowcount = PropertyMock(return_value=0)\n type(self.mock_get_cur.return_value).rowcount = mock_rowcount\n self.mock_get_cur.return_value.fetchone.return_value = (199,)\n\n # run SUT\n branch_id = idem_make_branch('mock-branch-name', 1)\n\n # confirm that reasonable sql was executed\n self.mock_get_cur.return_value.execute.assert_any_call(\n \"SELECT branch_id FROM branch \" + \\\n \"WHERE branch_name=%s AND feature_id=%s\",\n ('mock-branch-name', 1),\n )\n self.mock_get_cur.return_value.execute.assert_any_call(\n \"INSERT INTO branch (branch_name, feature_id) \" + \\\n \"VALUES (%s, %s) \" + \\\n \"RETURNING branch_id\",\n ('mock-branch-name', 1),\n )\n\n self.mock_get_cur.return_value.execute.assert_any_call(\n \"INSERT INTO config (key_value_pairs) \" + \\\n \"VALUES (%s) \" + \\\n \"RETURNING config_id\",\n ('',),\n )\n self.mock_get_cur.return_value.execute.assert_any_call(\n \"INSERT INTO environment\\n\" + \\\n \"(settings, infrastructure_backend, environment_name)\\n\" + \\\n \"VALUES (%s, %s, %s)\\n\" + \\\n \"RETURNING environment_id\",\n ('', 'mockdib', 'qa-sandbox'),\n )\n self.mock_get_cur.return_value.execute.assert_any_call(\n 
\"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (199, 199, 199, True),\n )\n\n # confirm that we got back a good id\n self.assertEqual(type(branch_id), type(0))\n\n # make sure we closed the cursor\n self.assertEqual(self.mock_get_cur.return_value.close.call_count, 4)", "def test_merging_diamond_flow():\n\n with Flow(name=\"test\") as flow:\n condition = Condition()\n true_branch = [SuccessTask(name=\"true branch {}\".format(i)) for i in range(3)]\n false_branch = [SuccessTask(name=\"false branch {}\".format(i)) for i in range(3)]\n ifelse(condition, true_branch[0], false_branch[0])\n\n flow.chain(*true_branch)\n flow.chain(*false_branch)\n\n merge_task = merge(true_branch[-1], false_branch[-1])\n\n with prefect.context(CONDITION=True):\n state = flow.run()\n\n for t in true_branch:\n assert isinstance(state.result[t], Success)\n for t in false_branch:\n assert isinstance(state.result[t], Skipped)\n assert isinstance(state.result[merge_task], Success)", "def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)", "def test_heads_contains_false(repository: Repository) -> None:\n assert \"branch\" not in repository.heads", "def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))", "def test_invalid_query_result_with_dag_run(self, mock_get_db_hook):\n branch_op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"mysql_default\",\n sql=\"SELECT 1\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n self.branch_1.set_upstream(branch_op)\n self.branch_2.set_upstream(branch_op)\n self.dag.clear()\n\n self.dag.create_dagrun(\n run_id=\"manual__\",\n start_date=timezone.utcnow(),\n execution_date=DEFAULT_DATE,\n state=State.RUNNING,\n )\n\n mock_get_records = mock_get_db_hook.return_value.get_first\n\n mock_get_records.return_value = [\"Invalid Value\"]\n\n with pytest.raises(AirflowException):\n 
branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)", "def test_invalid_conn(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_quick_build1(self):\n pass", "def test_branch_if_not_equal(self, test_cpu, branch_, zero_flag):\n test_cpu.status.zero = zero_flag\n\n branch.branch_if_not_equal(test_cpu, 10)\n\n branch_.assert_called_with(test_cpu, not zero_flag, 10)", "def test_block_missing_batch_dependency(self):\n pass", "def testSiblingDAGConsistency(self):\n options = Job.Runner.getDefaultOptions(self._createTempDir() + '/jobStore')\n options.clean = 'always'\n options.logLevel = 'debug'\n i = Job.wrapJobFn(diamond)\n with Toil(options) as toil:\n try:\n toil.start(i)\n except FailedJobsException:\n # we expect this exception to be raised\n pass\n else:\n self.fail()", "def test_build_comment_database_pipeline():\n build_comment_database_pipeline('politics', 1)", "def test_quick_build(self):\n pass", "def test_no_skipmixin_parent():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_no_skipmixin_parent_dag\", schedule_interval=None, start_date=start_date)\n op1 = DummyOperator(task_id=\"op1\", dag=dag)\n op2 = DummyOperator(task_id=\"op2\", dag=dag)\n op1 >> op2\n\n ti2 = TaskInstance(op2, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0\n assert dep.is_met(ti2, session)\n assert ti2.state != State.SKIPPED", "def test_03(self):\n e = Emulator()\n e.init()\n e.make_transfer_prepare_condition()\n\n Emulator.run_transfer_prepare()\n qs = TransferPrepare.objects.filter(is_processed=False)\n assert qs.count() > 0\n\n Emulator.run_transfer_donkies_prepare()\n\n qs = TransferPrepare.objects.filter(is_processed=False)\n assert qs.count() == 0", "def test_with_links_cases_and_issues():\n pass", "def test_no_parent():\n start_date = pendulum.datetime(2020, 1, 1)\n dag = DAG(\"test_test_no_parent_dag\", schedule_interval=None, start_date=start_date)\n op1 = DummyOperator(task_id=\"op1\", dag=dag)\n\n ti1 = TaskInstance(op1, start_date)\n\n with create_session() as session:\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(ti1, session, DepContext()))) == 0\n assert dep.is_met(ti1, session)\n assert ti1.state != State.SKIPPED" ]
[ "0.77353257", "0.6849428", "0.6762589", "0.6694354", "0.6585679", "0.6435495", "0.6368142", "0.6346273", "0.62660396", "0.6240822", "0.6154489", "0.607924", "0.59821784", "0.58053684", "0.57516813", "0.57352877", "0.57304096", "0.5728055", "0.5725766", "0.57254946", "0.5685254", "0.5668975", "0.56424904", "0.5634285", "0.5596824", "0.55118454", "0.5485988", "0.5485598", "0.5483419", "0.5474269" ]
0.77809846
0
Create a connection with a server. Send the data from the client to the server and check if the connection is accepted before accessing it
def create_connection(self):
        try:
            # Turns the port into an integer.
            self.port = int(self.port)
            # The privileged port are between 1024 and 60000.
            if self.port < 1024 or self.port > 60000:
                raise ValueError("The port is not between 1024 and 60000.")
            # Create the connection with the server
            self.server_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_connection.connect((self.host, self.port))
            # Send data user to the server.
            self.server_connection.send(pickle.dumps(self.data_user))
            # Receive and decrypt the server authorization message.
            msg_connection = self.server_connection.recv(1024)
            msg_connection = pickle.loads(msg_connection)
            # The connection with the server is authorized.
            if msg_connection[0] == "server connection accepted":
                self.msg_report = ["Connection with the server.", "Connection au serveur."]
                self.data_server = msg_connection[1]
                self.is_connected = True
                self.updt_user = True
            # The connection with the server is refused.
            # Name already exists.
            if msg_connection[0] == "server connection refused" and msg_connection[1] == "user name":
                self.msg_report = ["The user name already exists. Please try again.", "Cet identifiant existe déjà. Veuillez réessayer."]
            # Password incorrect.
            elif msg_connection[0] == "server connection refused" and msg_connection[1] == "password":
                self.msg_report = ["The password does not match. Please try again.", "Le mot de passe ne correspond pas. Veuillez réessayer."]
        # The port is not an integer.
        except ValueError as ve:
            self.msg_report = [f"The server could not be launched. Please check the port.\nError : {ve}", f"Le serveur n'a pas pu être lancé. Veuillez vérifier le port.\nErreur : {ve}"]
        # Incorrect address IP or Port.
        except socket.error as e:
            self.msg_report = [f"There is no server which correspond with these informations. Please check the IP address and port.\nError : {e}", f"Aucun serveur ne correspond à ces informations. Veuillez vérifier l'adresse IP et le port.\nErreur : {e}"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connectToServer(self):\n self.client = Client(base_url = self.server)\n self.ping()", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def connect(self) -> None:\n print(\"Trying to connect server...\")\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.connect((self.SERVER_HOST, self.SERVER_PORT))\n print(\"Connected to server.\")\n self.authenticate()\n encrypted = self.validate_encryption()\n if encrypted is False:\n self.sock.close()\n return None\n\n self.register_username()\n # Create thread for connection\n server_connection = ClientConnection(self)\n server_connection.start()\n # Start conversation\n self.converse()", "def _establish_connection(self):\n self.conn = self.listener.accept()", "def connect_to_server(self):\r\n self.client_socket.connect((SERVER_IP, SERVER_PORT))\r\n print('[CLIENT] connected to streamer.')", "def connect(self):\n sock_version = socket.AF_INET if self.ip_version == 4 else socket.AF_INET6\n with socket.socket(sock_version, socket.SOCK_STREAM) as sock:\n sock.connect((self.server_ip, self.port))\n print(\"Client connected\")\n self.__send_request(\"01\", sock)\n\n while True:\n response = self.__receive_response(sock)\n if len(response) >= 2:\n msg_id_code = int(response[:2])\n if msg_id_code == 2:\n udp_port = self.__request_info_file(response, sock)\n if msg_id_code == 4:\n self.__handle_udp_transfer(self.server_ip, udp_port, sock)\n if msg_id_code == 5:\n print(\"Closing connection\")\n sock.close()\n return 0\n if msg_id_code == 8:\n print(\"Invalid file name. 
Max size: 15bytes\")\n sock.close()\n return -1", "def connect_to_server(self, host=HOST, port=PORT):\r\n\t\t# HOST = server.ipAddress\r\n\t\t# PORT = int(server.port)\r\n\t\t# self.tcpSocket.disconnectFromHost()\r\n\t\t# self.tcpSocket.waitForDisconnected ()\r\n\t\t# print(HOST, PORT)\r\n\t\t# self.__tcpSocket.connectToHost(host, port, QIODevice.ReadWrite)\r\n\t\tself.__tcpSocket.connectToHost(host, port, QIODevice.ReadWrite)\r\n\t\tif self.__tcpSocket.waitForConnected(5000):\r\n\t\t\tprint('Client connected to server.')\r\n\t\t\tself.connection_established.emit((host, port))\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Impossible de se connecter au serveur !\",\r\n\t\t\t\t\t\t\t\t\t \"Vérifiez que les paramètres que vous avez entré sont corrects et que le serveur est en fonctionnement.\",\r\n\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\tprint('Unable to connect...')", "def connectToServer(self):\r\n\t\tself.rtspSocket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ttry:\r\n\t\t\tself.rtspSocket_client.connect((self.serverAddr, self.serverPort))\r\n\t\texcept:\r\n\t\t\tprint(\"Fail to connect to server\")", "def init_conn(self):\n \n SERVER_ADDRESS = '192.168.0.21'\n PORT = 8018\n SERVER_PASSWORD = \"biratkingofcomedy\" \n connected = False\n \n # check if test module is being run\n if self.testing == 'n': \n while not connected:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n try:\n self.socket.connect((SERVER_ADDRESS, PORT))\n \n # server verification\n self.socket.sendall(self.make_packet(\"DATA\", SERVER_PASSWORD))\n \n response = self.socket.recv(4096)\n \n if response:\n response_hdr, response_msg, response_sdr = self.parse_packet(response)\n \n if response_hdr == \"ERROR\" and response_msg == \"IDENTIFY FAILED\":\n raise Exception(\"PASSWORD FAIL\")\n \n elif response_hdr == \"DATA\" and response_msg == \"CONNECTED\":\n connected = True\n \n else:\n raise Exception(\"CONNECTION FAIL\") \n \n except Exception as e:\n if e == \"PASSWORD FAIL\":\n print(\"DEBUG: server connection failed (invalid credentials)\")\n print(\"DEBUG: quitting\")\n break\n \n else:\n print(e)\n print(\"DEBUG: server connection failed (could not connect), trying again in 10s\")\n time.sleep(10)\n \n else:\n print(\"DEBUG: socket setup skipped\")", "def conectar(self):\r\n self.socket = socket.create_connection((self.host, self.puerto))", "def __connect():\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect socket to server\n sock.connect((SERVER_IP, SERVER_PORT))\n\n # Return connected socket\n return sock", "def socket_client_send() -> socket.socket:\n try:\n # Create a socket object \n s = socket.socket() \n \n # Define the port on which you want to connect \n port = 55624\n host = '127.0.0.1'\n # connect to the server on local computer \n s.connect((host, port))\n \n except:\n logging.debug(\"Unexpected error\")\n raise\n\n else:\n logging.debug(\"Else clause\")\n return s", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def Connection(self):\n try:\n system(\n f'netsh advfirewall firewall add rule name=\"Open Port {self.PORT}\" dir=in action=allow protocol=TCP localport={self.PORT} remoteip={self.HOST}')\n with socket() as s: # Create a socket object\n print('Server started!')\n print('Waiting for clients...')\n s.bind((self.HOST, self.PORT)) # Bind to the port\n s.listen(5) # Now wait for client connection.\n self.c, addr = s.accept() # Establish connection with client.\n 
# Remote client machine connection\n print('Got connection from', addr)\n except error as strerror:\n print(\"Network problems:\", strerror)\n return 0\n return 1", "def server(conn, address):\n print(\"Client Connection Open\")\n while True:\n request = server_read(conn)\n if request:\n print(request)\n manage_client(request, conn)", "def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)", "def connect(self):\n\n print(\"Connecting to server at {}:{}\".format(self.hostname, self.port))\n\n self._sock = socket.socket()\n self._sock.setblocking(True)\n self._sock.connect((self.hostname, self.port))\n self._sockfile = self._sock.makefile(encoding=\"utf-8\")\n self._connected = True\n\n if self.password:\n self._sendmsg(\"PASS :{}\".format(self.password))\n self._sendmsg(\"NICK {}\".format(self.nickname))\n self._sendmsg(\"USER {} 0 * :ORE Utility Bot\".format(getpass.getuser()))\n if self.ident_password:\n self._sendmsg(\"PRIVMSG NickServ :identify {}\".format(\n self.ident_password))\n self._sendmsg(\"JOIN {}\".format(\",\".join(self.channels)))", "def establish_connection(self):\n print('Listening...')\n self.socket.listen()\n self.conn, addr = self.socket.accept()\n print('Received connection', addr)", "def connect(self):\n try:\n self._send = 0\n self.socket = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.getprotobyname('tcp'))\n self.socket.connect((self.host, self.port))\n self.socket.recv(self.packet_size)\n except socket.error:\n raise ConnectionError(\n 'Cannot connect to server at %s' % self.name)", "def __init__(self, server_addr, server_port, local_port):\n\n if local_port is None:\n self.local_addr = ('localhost', 7700) \n else:\n self.local_addr = ('localhost', local_port)\n self.server_socket = (server_addr, server_port)\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connection.bind(self.local_addr)\n self.message_q = []\n self.failed = False\n \n try:\n self.connection.create_connect(server_port)\n\n except:\n sys.stderr.write('failed to connect to server \\n')\n self.failed = True\n self.connection.close()\n return None", "async def connect(self) -> None:\n buffer = bytes()\n with trio.socket.socket() as client_sock:\n self.socket = client_sock\n self.address = await self.socket.resolve_remote_address((self.host, self.port))\n await client_sock.connect(self.address)\n async with trio.open_nursery() as nursery:\n nursery.spawn(self.connection_made)\n while True:\n if not self.socket._sock._closed:\n data = await client_sock.recv(self.bufsize)\n if not data:\n break\n buffer += data\n pts = buffer.split(b\"\\n\")\n buffer = pts.pop()\n for el in pts:\n nursery.spawn(self.data_received, el)\n else:\n break\n nursery.spawn(self.connection_lost)", "def open_socket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n self.server.bind((self.host,self.port))\n self.server.listen(5)\n self.server.setblocking(0)\n except socket.error, (value,message):\n if self.server:\n self.server.close()\n print \"Could not open socket: \" + message\n sys.exit(1)", "def connect(self):\r\n if self.__socket:\r\n return\r\n try:\r\n # This is the server communicator, try and accept connections.\r\n if self.__server_socket is not None:\r\n self.__socket, _ = self.__server_socket.accept()\r\n self.__socket.setblocking(0)\r\n self.__server_socket.close()\r\n # This is the client communicator, try and connect (quickly).\r\n 
else:\r\n self.__socket = socket.socket()\r\n self.__socket.settimeout(self.CONNECT_TIMEOUT)\r\n self.__socket.connect((self.__ip, self.__port))\r\n self.__socket.setblocking(0)\r\n self.__get_message()\r\n except socket.error:\r\n # Always close the socket if created, then make it none (this\r\n # way it is evident that a connection was not yet established).\r\n if self.__socket:\r\n self.__socket.close()\r\n self.__socket = None\r\n # Try again in a given interval.\r\n self.__root.after(self.WAIT_PERIOD, self.connect)", "def start(self) -> None:\n try:\n self._socket.bind((self.ip, self.port))\n\n except socket.error as e:\n print(e)\n\n else:\n self._socket.listen()\n logger.info('Server is online!')\n\n run = True\n while run:\n conn_data = ConnectionData()\n self._accept_conn(conn_data)\n\n # Makes the server stoppable\n while conn_data.conn is None or conn_data.addr is None:\n try:\n time.sleep(0.1)\n except KeyboardInterrupt:\n run = False\n break\n\n conn, addr = conn_data.conn, conn_data.addr\n logger.info(f'Connection established to {addr}')\n\n if self.func is not None:\n self.func(conn, addr)", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def accept_connection(self):\n # if not settings.USE_SOCKETS:\n # return\n self.dbg(\"sockets_event\",\n \"Blocking on accept_connection for {}\",\n [self.data_name])\n # now keep talking with the client\n self.conn, self.addr = self.s.accept()", "def clientConnect(self,server_ip=\"localhost\"):\n try:\n server_socket = int(12345)\n m = TaskManager(address=(server_ip, server_socket), authkey = b'secret')\n m.connect()\n return m\n except:\n from gui import qt_gui\n qt_gui.set_status_text(\"Connection Failed!\")\n return None", "def run_server(self):\n print('Starting socket server (host {}, port {})'.format(self.host, self.port))\n\n client_sock, client_addr = self.sock.accept()\n\n print('Client {} connected'.format(client_addr))\n\n stop = False\n while not stop:\n if client_sock:\n # Check if the client is still connected and if data is available:\n try:\n rdy_read, rdy_write, sock_err = select.select([client_sock,], [], [])\n except select.error:\n print('Select() failed on socket with {}'.format(client_addr))\n return 1\n\n if len(rdy_read) > 0:\n read_data = client_sock.recv(255)\n # Check if socket has been closed\n if len(read_data) == 0:\n print('{} closed the socket.'.format(client_addr))\n stop = False # True\n client_sock, client_addr = self.sock.accept()\n print(\"New connection opened\")\n else:\n print('>>> Received: {}'.format(read_data.rstrip()))\n if read_data.rstrip() == 'quit':\n stop = False #True\n else:\n if read_data == 'right':\n self.moveRight(0.5)\n elif read_data == 'left':\n self.moveLeft(0.5)\n elif read_data == 'forward':\n self.moveForward(0.5)\n self.setGPIO(0,0,0,0,.01)\n client_sock.send(read_data)\n else:\n print(\"No client is connected, SocketServer can't receive data\")\n #stop = True\n time.delay(1)\n client_sock, client_addr = self.sock.accept()\n print(\"New connection opened\")\n\n # Close socket\n print('Closing connection with {}'.format(client_addr))\n client_sock.close()\n 
return 0", "def handle_connection(conn, addr, create):\r\n print 'connection recieved from:', addr\r\n server = create()\r\n server.startup()\r\n while True:\r\n data = conn.recv(1024) # read data from the connection / raw input\r\n if not data:\r\n break\r\n print 'from client:', data\r\n response = server.process(data)\r\n conn.send(response)\r\n server.finish()\r\n conn.close()" ]
[ "0.73600185", "0.7100143", "0.701498", "0.6990414", "0.6946399", "0.69457126", "0.6896873", "0.68900764", "0.6880835", "0.68647265", "0.6862838", "0.6828347", "0.6762873", "0.6746322", "0.674184", "0.66967577", "0.66883373", "0.66488475", "0.6604633", "0.6588412", "0.65870297", "0.6581088", "0.6570965", "0.6520353", "0.65195096", "0.64899343", "0.6485431", "0.6477623", "0.64752656", "0.6468538" ]
0.7221236
1
Get average load stat from /proc/loadavg.
def load_stat(): loadavg = {} f = open("/proc/loadavg") con = f.read().split() f.close() loadavg['lavg_1'] = con[0] loadavg['lavg_5'] = con[1] loadavg['lavg_15'] = con[2] loadavg['nr'] = con[3] loadavg['last_pid'] = con[4] return loadavg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getloadavg():\n global _loadavg_inititialized\n\n if not _loadavg_inititialized:\n cext.init_loadavg_counter()\n _loadavg_inititialized = True\n\n # Drop to 2 decimal points which is what Linux does\n raw_loads = cext.getloadavg()\n return tuple([round(load, 2) for load in raw_loads])", "def get_loadavg(cls):\n\n with open(\"/proc/loadavg\") as loadavg:\n loadavg = loadavg.read().split()\n kernel_entities = loadavg[3].split(\"/\")\n loadavg_stat = { StatsKeys.LOADAVG :\n {\n StatsKeys.LAST_1_MIN : float(loadavg[0]),\n StatsKeys.LAST_5_MIN : float(loadavg[1]),\n StatsKeys.LAST_15_MIN : float(loadavg[2]),\n StatsKeys.RUNNABLE_ENTITIES : int(kernel_entities[0]),\n StatsKeys.SCHEDULING_ENTITIES : int(kernel_entities[1])\n }\n }\n logger.debug(\"Loadavg stats: {}\".format(' '.join(loadavg)))\n\n return loadavg_stat", "def get_load_avg():\n \n with open('/proc/loadavg') as f:\n line = f.readline()\n \n return [float(x) for x in line.split()[:3]]", "def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs", "def loadavg():\n sin = psutil.getloadavg()\n return [\n round(sin[0], 3),\n round(sin[1], 3),\n round(sin[2], 3)\n ]", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? 
# <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret", "def load_average(self):\n return _favg(self.load_samples)", "def load_list(self):\n import numpy.distutils.proc as numpy_proc\n res = self.apply(numpy_proc.load_avg,())\n return res", "def report_cpuavg_for_system(stat_path):\n if not os.path.exists(stat_path):\n collectd.error('stat path does not exist: %s' % stat_path)\n return\n\n with open(stat_path, 'r') as stat_file:\n lines = [line for line in stat_file if line.startswith('cpu ')]\n if len(lines) == 1: # There can be only one [cpu avg].\n fields = lines[0].strip().split()\n if len(fields) >= 9:\n submit_cputotal('user', int(fields[1]))\n submit_cputotal('nice', int(fields[2]))\n submit_cputotal('system', int(fields[3]))\n submit_cputotal('idle', int(fields[4]))\n submit_cputotal('wait', int(fields[5]))\n submit_cputotal('interrupt', int(fields[6]))\n submit_cputotal('softirq', int(fields[7]))\n submit_cputotal('steal', int(fields[8]))\n else:\n collectd.warning('Found too few fields (%s) in stat file: %s' %\n (len(fields), stat_path))\n\n submit_cpucores()", "def CPUStats(cls):\n\t\t# From <http://ubuntuforums.org/showthread.php?t=148781>\n\t\ttime_list = cat(\"/proc/stat\").split(\"\\n\")[0].split(\" \")[2:6]\n\t\tres = map(int, time_list)\n\t\tcls.LAST_CPU_STAT = res\n\t\treturn res", "def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]", "def average_performance(self):\n\n print(f\"Average performance: {self.performance / 10}\")", "def test_load_avg_1():\n result = _run_metric('load_avg_1')\n assert result.exit_code == 0", "def avg_latency(self):\n return self._avg_latency", "def avg_latency(self):\n return self._avg_latency", "def color_loadavg(value):\n if not __colored__ or not __psutil__:\n return \"{0:d}%\".format(int(math.floor(value * 100)))\n\n value /= psutil.cpu_count()\n\n color = color_level(value, min_value=0.5, max_value=1.05)\n display = \"{0:d}%\".format(int(math.floor(value * 
100)))\n reset = attr(\"reset\")\n\n return (color + display + reset), len(display)", "def test_load_avg_15():\n result = _run_metric('load_avg_15')\n assert result.exit_code == 0", "def get_cpu_load (processor_number=0):\n\ttry:\n\t\tf = open(\"/proc/stat\", \"r\")\n\t\ttmp = f.readlines(2000)\n\t\tf.close()\n\texcept:\n\t\tprint _(\"Failed to open /proc/stat\")\n\t\treturn None\n\tif processor_number == 0 : sufix = ''\n\telse: sufix = str(processor_number -1)\n\tline = tmp[processor_number]\n\n\tif line.startswith(\"cpu%s\"% (sufix)):\n\t\tcuse = float( line.split()[1] )\n\t\tcn = float( line.split()[2] )\n\t\tcsys = float( line.split()[3])\n\t\tif sufix == '':\n\t\t\tload = cuse + cn\n\t\telse:\n\t\t\tload = cuse + csys + cn\n\t\t#load = int(load / .update_interval)\n\t\treturn load\n\treturn None", "def avg_hops(self):\n return self._avg_hops", "def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load", "def test_load_avg_5():\n result = _run_metric('load_avg_5')\n assert result.exit_code == 0", "def get_runs_to_average(self):\n\n if Test.performance_params: return int(Test.performance_params[1])\n elif self._check_performance: return self._runs_to_average\n else: return None", "def get_cpu_usage():\n return psutil.cpu_percent()", "def avg(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Tavg()\n return \"\"", "def _sample_load(proc):\n return 0.01 * _for_process_and_descendants(\n psutil.Process.get_cpu_percent,\n proc,\n )", "def _avg_performance(bd_dims, BD_directory, run,archive_file_path,max_performance,conversion_func=None,from_fitfile=False):\n path=get_archive_filepath(BD_directory,run, archive_file_path)\n all_performances=get_all_performances(bd_dims, path, conversion_func,from_fitfile)\n return np.mean(all_performances)/max_performance", "def get_system_load(self, interval, time_period, resource):\n\n interval = int(interval)\n time_period = int(time_period)\n stats = []\n\n # get running time in minutes, div by interval plus 1 sec for network baseline\n num_of_polls = int((time_period * 60) / (interval + 1))\n i = 0\n # get the average for minimum for time period, before dropping the oldest values\n while i < num_of_polls:\n if resource == 'cpu':\n stats.append(sysmon.get_cpu_utilisation())\n elif resource == 'memory':\n stats.append(sysmon.get_memory_usage())\n elif resource == 'network':\n stats.append(sysmon.get_network_interface_traffic(INTERFACE))\n time.sleep(interval)\n i += 1\n return stats", "def cpu_times():\n \n with open(Path.proc_stat()) as f:\n line = f.readline()\n \n cpu_times = [int(x) for x in line.split()[1:]]\n \n return cpu_times" ]
[ "0.8758361", "0.841387", "0.8378472", "0.8371394", "0.8343031", "0.80248725", "0.7039417", "0.6765554", "0.65243626", "0.6463336", "0.64101344", "0.6311138", "0.62721884", "0.6262719", "0.62597936", "0.6221697", "0.6221697", "0.6221329", "0.6178771", "0.6143471", "0.6116354", "0.611251", "0.60916114", "0.6076159", "0.60215837", "0.6019808", "0.6016113", "0.59810036", "0.5980207", "0.5940714" ]
0.8684609
1
Retry Decorator. Retries the function if it raises any of the ``exceptions``. Performs as many retries as ``maxRetries``.
def retry(maxRetries, *exceptions): def _doDecoration(fn): def _doRetry(*args, **kwargs): retries = 0 while retries <= maxRetries: try: return fn(*args, **kwargs) except tuple(exceptions): retries +=1 if retries > maxRetries: raise return _doRetry return _doDecoration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retry(func):\n # ... retry MAX_RETRIES times\n # ...\n # make sure you include this for testing:\n # except Exception as exc:\n # print(exc)\n # ...\n # and use wraps to preserve docstring\n #\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n tries = MAX_RETRIES\n while tries > 0:\n try:\n return func(*args, **kwargs)\n except Exception as err:\n print(err)\n\n tries -= 1\n\n raise MaxRetriesException\n\n return wrapper", "def retry_multi(max_retries=5):\n\n def retry(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n num_retries = 0\n ret = None\n while num_retries <= max_retries:\n try:\n ret = func(*args, **kwargs)\n break\n except Exception as e:\n logger.exception(e)\n if num_retries == max_retries:\n raise\n num_retries += 1\n time.sleep(5)\n return ret\n\n return wrapper\n\n return retry", "def _retry(func):\n @wraps(func)\n def _retry_wrapper(self, *args, **kwargs):\n error_message = \"\"\n for retry in range(self.retries + 1):\n try:\n return func(self, *args, **kwargs)\n except ValueError as err:\n error_message = str(err)\n raise ValueError(str(error_message))\n return _retry_wrapper", "def _retry_provider_call(self, func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n max_retries = 29\n attempts = 0\n while attempts < max_retries:\n try:\n return func(*args, **kwargs)\n except ClientError as e:\n attempts += 1\n raise RetryLimitExceededError(\n \"Exceeded request limit {} times. Aborting.\".format(max_retries)\n )\n return decorated", "def retry(exception, tries=10, delay=1, backoff=2, max_delay=30):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n m_tries, m_delay = tries, delay\n while m_tries > 1:\n try:\n return f(*args, **kwargs)\n except exception:\n time.sleep(min(m_delay, max_delay))\n m_tries -= 1\n m_delay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def test_retry(self):\n retries = [0]\n max_tries = 5\n\n @retry(Exception, max_retries=5)\n def f():\n retries[0] += 1\n raise Exception(\"Faulty function\")\n\n with self.assertRaises(Exception):\n f()\n\n self.assertEqual(max_tries, retries[0])", "def retry(exceptions, tries=3, delay=2, _logger=logger()):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n msg = '{}, Retrying in {} seconds...'.format(e, mdelay)\n _logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def call_with_retries(function, max_retries=10,\n exception_types=(Exception),\n _args=(), _kwargs={}):\n assert max_retries >= 0\n\n retries = 0\n last_exc = Exception('Unknown exception')\n while retries <= max_retries:\n try:\n return function(*_args, **_kwargs)\n except exception_types as exc:\n retries += 1\n wait = 2.0 ** retries * 0.1 + (random.randint(0, 1000) / 1000)\n time.sleep(wait)\n last_exc = exc\n raise last_exc", "def test_retry_raises_error_on_negative_retries(self):\n\n @retry(Exception, max_retries=-1)\n def f():\n raise Exception(\"Faulty function\")\n\n self.assertRaises(ValueError, f)", "def _retry_on_exception(\n exception: Union[Exception, Tuple[Exception]],\n regex: Optional[str] = None,\n max_retries: int = MAX_POLLS,\n retry_interval_s: int = POLL_INTERVAL,\n):\n\n def dec(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n def try_catch_exc():\n try:\n value = func(*args, **kwargs)\n return value\n except 
Exception as e:\n if not isinstance(e, exception) or (\n regex and not re.search(regex, str(e))\n ):\n raise e\n return e\n\n for _ in range(max_retries):\n ret = try_catch_exc()\n if not isinstance(ret, Exception):\n break\n time.sleep(retry_interval_s)\n if isinstance(ret, Exception):\n raise ret\n return ret\n\n return wrapper\n\n return dec", "def retry(retries=5):\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n count = 0\n while True:\n try:\n return fn(*args, **kwargs)\n except (\n BadRequest,\n BadResponseException,\n ReadTimeout,\n RequestException,\n TraktBadGateway,\n TraktUnavailable,\n TraktInternalException,\n ) as e:\n if count == retries:\n logger.error(f\"Error: {e}\")\n\n if isinstance(e, BadResponseException):\n logger.error(f\"Details: {e.details}\")\n if isinstance(e, TraktInternalException):\n logger.error(f\"Error message: {e.error_message}\")\n\n logger.error(\n \"API didn't respond properly, script will abort now. Please try again later.\"\n )\n logger.error(\n f\"Last call: {fn.__module__}.{fn.__name__}({args[1:]}, {kwargs})\"\n )\n exit(1)\n\n seconds = 1 + count\n count += 1\n logger.warning(\n f\"{e} for {fn.__module__}.{fn.__name__}(), retrying after {seconds} seconds (try: {count}/{retries})\"\n )\n sleep(seconds)\n\n return wrapper\n\n return decorator", "def retry(exceptions=Exception, tries=3, delay=1):\n\n def retry_decorator(func):\n def func_wrapper(*args, **kwargs):\n _tries = tries\n while _tries:\n try:\n return func(*args, **kwargs)\n except exceptions as e:\n _tries -= 1\n if not _tries:\n raise\n\n time.sleep(delay)\n\n return func_wrapper\n\n return retry_decorator", "def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. + random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')", "def retryable(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n retries = 0\n max_retries = kwargs.get(\"max_retries\", DEFAULT_RETRIES)\n backoff = kwargs.get(\"backoff\", default_backoff)\n while retries <= max_retries:\n try:\n return func(*args, **kwargs)\n except IntegrityError:\n logging.debug(\n \"Race-condition caught? ({}/{} retries)\".format(retries, max_retries)\n )\n if retries >= max_retries:\n logging.error(f\"Unable to execute {func}, max retries exceeded\")\n raise\n retries += 1\n backoff(retries, max_retries)\n\n return wrapper", "def retry(tries, delay=3, backoff=2, except_on=(Exception, )):\n\n tries = math.floor(tries)\n\n def decorator(f):\n def f_retry(*args, **kwargs):\n return function_retry(\n tries, delay, backoff, except_on, f, *args, **kwargs)\n return f_retry # true decorator -> decorated function\n return decorator # @retry(arg[, ...]) -> true decorator", "def retry(\n self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs\n ) -> \"fn\":\n\n func = self._mod.retry(n, self, error=error, sleep=sleep)\n return func(*args, **kwargs)", "def _retry(method, max_tries=5, backoff_s=1):\n\n @wraps(method)\n def method_with_retries(self, *args, **kwargs):\n try_count = 0\n while try_count < max_tries:\n try:\n return method(self, *args, **kwargs)\n except BrokenPipeError:\n logger.warning(\"Caught a BrokenPipeError. 
Retrying.\")\n try_count += 1\n if try_count < max_tries:\n self._construct_clients()\n time.sleep(backoff_s)\n else:\n raise\n\n return method_with_retries", "def retry(times: int, on_exceptions: List[Exception]):\n def decorator(function: Callable):\n @wraps(function)\n def wrapper(*args, **kwargs):\n raised = []\n for _ in range(times):\n try:\n return function(*args, **kwargs)\n except Exception as ex:\n raised.append(ex)\n if type(ex) not in on_exceptions:\n raise RetryError(\n 'An unexpected error occurred while calling the function '+\n f'{function.__name__}.'\n ) from ex\n raise raised.pop()\n return wrapper\n return decorator", "def retry(tries, delay=3, backoff=2):\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay # make mutable\n err = None\n while mtries > 0:\n print(\"Trial Number:\" + str(mtries))\n try:\n rv = f(*args, **kwargs)\n except DBException as e:\n print(\"Retry..\")\n mtries -= 1 # consume an attempt\n time.sleep(mdelay) # wait...\n mdelay += backoff # make future wait longer\n err = e\n\n # except Exception as e:\n # print(str(e))\n # mtries -= 1 # consume an attempt\n # time.sleep(mdelay) # wait...\n # mdelay += backoff # make future wait longer\n # err = e\n else:\n return rv\n raise err\n\n return f_retry # true decorator -> decorated function\n\n return deco_retry # @retry(arg[, ...]) -> true decorator", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e", "def retry(retry_times=3, interval=0.5, exceptions=Exception):\n def _decorator(func):\n @wraps(func)\n def _wrapped_func(*args, **kwargs):\n for attempt in range(1, retry_times + 1):\n try:\n return func(*args, **kwargs)\n except exceptions: # pylint: disable=broad-except\n if attempt < retry_times:\n logger.debug(\"%s failed in No. %d attempt\", func, attempt)\n import traceback\n import time\n logger.debug(traceback.format_exc())\n time.sleep(interval)\n else:\n raise # End of retry. 
Re-raise the exception as-is.\n return _wrapped_func\n return _decorator", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def set_max_retries(cls, max_retries):\n LOGGER.debug(\"Updating max retries to {}\".format(max_retries))\n # See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html\n cls.maxRetries = max_retries", "def retry(nattempts, exception=None):\n \n def tryIt(func):\n def wrapper(*args, **kwargs):\n attempts = 0\n while attempts < nattempts - 1:\n try:\n return func(*args, **kwargs)\n except (exception if exception is not None else Exception):\n attempts += 1\n return func(*args, **kwargs)\n return wrapper\n return tryIt", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def max_retries(self, max_retries: ConfigNodePropertyInteger):\n\n self._max_retries = max_retries", "def retry(exception, tries=10, delay=3, backoff=0.1):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exception as ex:\n print \"{0}, Retrying in {1} seconds...\".format(ex, mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n\tdef deco_retry(f):\n\t\t@wraps(f)\n\t\tdef f_retry(*args, **kwargs):\n\t\t\tmtries, mdelay = tries, delay\n\t\t\twhile mtries > 1:\n\t\t\t\ttry:\n\t\t\t\t\treturn f(*args, **kwargs)\n\t\t\t\texcept ExceptionToCheck, e:\n\t\t\t\t\tmsg = \"func: '{}' > exc: {}, Retrying in {} seconds...\".format(str(f.__name__), str(e), mdelay)\n\t\t\t\t\tif logger:\n\t\t\t\t\t\tlogger.warning(msg)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint msg\n\t\t\t\t\ttime.sleep(mdelay)\n\t\t\t\t\tmtries -= 1\n\t\t\t\t\tmdelay *= backoff\n\t\t\treturn f(*args, **kwargs)\n\t\treturn f_retry\t# true decorator\n\treturn deco_retry", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = 
tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n logging.warning('%s, Retrying in %d seconds...', str(e), mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry" ]
[ "0.8104126", "0.7775", "0.7661286", "0.7297725", "0.72590345", "0.72263867", "0.7220899", "0.7175091", "0.7168709", "0.7098994", "0.7014514", "0.7011338", "0.6975068", "0.69649476", "0.6956976", "0.6946955", "0.6942677", "0.68957996", "0.68939567", "0.68889207", "0.6867125", "0.6844439", "0.6836648", "0.6828642", "0.68283135", "0.6827335", "0.6813594", "0.6797231", "0.6754642", "0.67456526" ]
0.83807915
0
Decorator for functions with referential transparency. Implements a cache of ``qty`` calls to avoid repeated computations.
def memo(qty): def decorator(f): decoratee = Memo(qty,f) return functools.wraps(f)(decoratee) return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addqty(b, name, fn, *args, **kwargs):\n if b is None or brevity < b:\n with _timed_block(name, formatStr='{:45}', printer=printer, verbosity=2):\n qtys[name] = fn(*args, **kwargs)", "def addqty(b, name, fn, *args, **kwargs):\n if b is None or brevity < b:\n with _timed_block(name, formatStr='{:45}', printer=printer, verbosity=2):\n qtys[name] = fn(*args, **kwargs)", "def cache(self, period=\"1h\", quantity=None, prefix=\"cache_\"):\n\n def decorator(func):\n func_name = \"{}.{}\".format(func.__module__, func.__name__)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n key_name = self.get_key_name(\n func, func_name, None, None, *args, **kwargs\n )\n\n if prefix:\n key_name = \"{}_{}\".format(prefix, key_name)\n\n # check cache validity\n ttl = self.ttl(key_name)\n if ttl and ttl > 0:\n try:\n result_serialized = self.get(key_name)\n result = pickle.loads(result_serialized)\n logger.debug(\n \"Fetched: {}(*{}, **{}) at {}\".format(\n func_name, args, kwargs, key_name\n )\n )\n\n # if cache is size limited, control the size\n if quantity and quantity > 0:\n self.control_quantity(func_name, quantity)\n\n return result\n except Exception as exc:\n logger.error(\"Fetch serialization failed: {}\".format(exc))\n\n # exec & create cache\n res = func(*args, **kwargs)\n try:\n result_serialized = pickle.dumps(\n res, protocol=pickle.HIGHEST_PROTOCOL\n )\n\n self.set(\n key_name, result_serialized, ex=self.get_period_seconds(period)\n )\n\n # if cache is size limited, store our pointer and control size\n if quantity:\n self.rpush(func_name, key_name)\n self.control_quantity(func_name, quantity)\n\n logger.debug(\n \"Caching: {}(*{}, **{}) at {}\".format(\n func_name, args, kwargs, key_name\n )\n )\n except Exception as exc:\n logger.error(\"Caching serialization failed: {}\".format(exc))\n finally:\n return res\n\n # append methods to wrapper\n def cache_clear_all():\n\n key_name = \"{}*\".format(func_name)\n if prefix:\n key_name = \"{}_{}\".format(prefix, key_name)\n logger.debug(\"Erasing all caches for {}\".format(key_name))\n keys = self.keys(key_name)\n if keys:\n self.delete(*keys)\n\n setattr(wrapper, \"cache_clear_all\", cache_clear_all)\n\n def cache_bypass(*args, **kwargs):\n logger.debug(\"Bypassing cache for {}\".format(func_name))\n return func(*args, **kwargs)\n\n setattr(wrapper, \"cache_bypass\", cache_bypass)\n\n def cache_clear(*args, **kwargs):\n key_name = self.get_key_name(\n func, func_name, None, None, *args, **kwargs\n )\n logger.debug(\"Erasing cache for {}\".format(key_name))\n self.delete(key_name)\n\n setattr(wrapper, \"cache_clear\", cache_clear)\n\n def cache_refresh(*args, **kwargs):\n key_name = self.get_key_name(\n func, func_name, None, None, *args, **kwargs\n )\n logger.debug(\"Refreshing cache for {}\".format(key_name))\n res = func(*args, **kwargs)\n result_serialized = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)\n self.set(\n key_name, result_serialized, ex=self.get_period_seconds(period)\n )\n return res\n\n setattr(wrapper, \"cache_refresh\", cache_refresh)\n\n return wrapper\n\n return decorator", "def cache(self, period=\"1h\", quantity=None, prefix=\"cache_\"):\n\n def decorator(func):\n func_name = \"{}.{}\".format(func.__module__, func.__name__)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n key_name = self.get_key_name(func_name, *args, **kwargs)\n\n if prefix:\n key_name = \"{}_{}\".format(prefix, key_name)\n\n # check cache validity\n ttl = self.ttl(key_name)\n if ttl and ttl > 0:\n try:\n result_serialized = self.get(key_name)\n 
result = pickle.loads(result_serialized)\n logger.debug(\n \"Fetched: {}(*{}, **{}) at {}\".format(\n func_name, args, kwargs, key_name\n )\n )\n\n # if cache is size limited, control the size\n if quantity and quantity > 0:\n self.control_quantity(func_name, quantity)\n\n return result\n except Exception as exc:\n logger.error(\"Fetch serialization failed: {}\".format(exc))\n\n # exec & create cache\n res = func(*args, **kwargs)\n try:\n result_serialized = pickle.dumps(\n res, protocol=pickle.HIGHEST_PROTOCOL\n )\n\n self.set(\n key_name, result_serialized, ex=self.get_period_seconds(period)\n )\n\n # if cache is size limited, store our pointer and control size\n if quantity:\n self.rpush(func_name, key_name)\n self.control_quantity(func_name, quantity)\n\n logger.debug(\n \"Caching: {}(*{}, **{}) at {}\".format(\n func_name, args, kwargs, key_name\n )\n )\n except Exception as exc:\n logger.error(\"Caching serialization failed: {}\".format(exc))\n finally:\n return res\n\n # append methods to wrapper\n def cache_clear_all():\n\n key_name = \"{}*\".format(func_name)\n if prefix:\n key_name = \"{}_{}\".format(prefix, key_name)\n logger.debug(\"Erasing all caches for {}\".format(key_name))\n keys = self.keys(key_name)\n if keys:\n self.delete(*keys)\n\n setattr(wrapper, \"cache_clear_all\", cache_clear_all)\n\n def cache_bypass(*args, **kwargs):\n logger.debug(\"Bypassing cache for {}\".format(func_name))\n return func(*args, **kwargs)\n\n setattr(wrapper, \"cache_bypass\", cache_bypass)\n\n def cache_clear(*args, **kwargs):\n key_name = self.get_key_name(func_name, *args, **kwargs)\n logger.debug(\"Erasing cache for {}\".format(key_name))\n self.delete(key_name)\n\n setattr(wrapper, \"cache_clear\", cache_clear)\n\n def cache_refresh(*args, **kwargs):\n key_name = self.get_key_name(func_name, *args, **kwargs)\n logger.debug(\"Refreshing cache for {}\".format(key_name))\n res = func(*args, **kwargs)\n result_serialized = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)\n self.set(\n key_name, result_serialized, ex=self.get_period_seconds(period)\n )\n return res\n\n setattr(wrapper, \"cache_refresh\", cache_refresh)\n\n return wrapper\n\n return decorator", "def onchange_quantity_sum(self,cr,uid,ids,lines,qty,context=None):\n if context is None:\n context = {}\n total = 0\n res = {'value':{}}\n for line in lines:\n total = total + round(line[2]['quantity'],4)\n diff = round(qty - total,4)\n if diff < 0 :\n diff = 0 \n res = {'value':{'qty_total':total,'qty_res':diff}}\n return res", "def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)", "def test_custom_cache_multiple(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n def cost(a, b, cache):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n res = execute(\n [tape1, tape2],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )\n return res[0]\n\n custom_cache = {}\n jax.grad(cost)(a, b, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def memo(func):\n cache = {}\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = str(args) + str(kwargs)\n try:\n return cache[key]\n 
except KeyError:\n rc = func(*args, **kwargs)\n cache[key] = rc\n return rc\n return wrapper", "def memoize(prefix, time=60):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = memoize_key(prefix, *args, **kwargs)\n data = cache.get(key)\n if data is not None:\n return data\n data = func(*args, **kwargs)\n cache.set(key, data, time)\n return data\n return wrapper\n return decorator", "def memo(func):\n cache = {}\n\n def wrapper(*args, **kwargs):\n update_wrapper(wrapper, func)\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return wrapper", "def lru_cache(maxsize):\n\n def dec(fn, *args):\n cache = {}\n\n @wraps(fn)\n def wrapper(*args):\n key = args\n try:\n ret = cache[key]\n except KeyError:\n ret = cache[key] = fn(*args)\n return ret\n\n return wrapper\n\n return dec", "def memoize(func):\n cache = {}\n @wraps(func)\n def wrap(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return wrap", "def _buy(self, units=1):\n self.quantity -= units", "def __deepcopy__(self, memo):\n return Quantity(copy.deepcopy(self._value, memo), self.unit)", "def memoization(func):\n cache = {}\n\n @wraps(func)\n def _wrap(*args, **kwargs):\n key = (args, tuple(sorted(kwargs.items())))\n result = cache.get(key, None)\n if result:\n print(\"It's cached\")\n return result\n\n result = func(*args, **kwargs)\n cache[key] = result\n return result\n\n return _wrap", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def discount(self, cart):", "def memoize(func):\r\n cache = {}\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n key = (args, frozenset(kwargs.items()))\r\n if key not in cache:\r\n cache[key] = func(*args, **kwargs)\r\n return cache[key]\r\n return wrapper", "def memoize(func):\n memo = None\n\n @wraps(func)\n def wrapper(self):\n if memo is not None:\n return memo\n\n return func(self)\n\n return wrapper", "def _increment_quantity(self, units):\n self.quantity += units", "def compute_total(price):\n\n quantity = 20\n return price * quantity", "def __call__(self, *args):\n if args not in self.memo:\n self.memo[args] = self.f(*args)\n return self.memo[args]", "def memo_rod_cutting(price_table, cache_size=20):\n @functools.lru_cache(maxsize=cache_size)\n # @memo\n def wrapper(n):\n if n == 0:\n return 0\n\n revenue = max(\n [price_table[n - 1]] + [price_table[i - 1] + wrapper(n - i) for i in range(1, n)]\n )\n return revenue\n return wrapper", "def calc_subtotal(price, quantity):\n\n return price * quantity", "def make_quantity(string):\n pass", "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return 
__cache", "def memorized(f):\n cache = {}\n @wraps(f)\n def wrapped(*args):\n try:\n result = cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n return wrapped", "def memoize_by_args(func):\n memory = {}\n\n @functools.wraps(func)\n def memoized(*args):\n if args not in memory.keys():\n value = func(*args)\n memory[args] = value\n\n return memory[args]\n\n return memoized", "def __pos__(self):\n return Quantity(+(self._value), self.unit)", "def memoize(maxsize=None, *args, **kwargs):\n return _coconut.functools.lru_cache(maxsize, *args, **kwargs)" ]
[ "0.559451", "0.559451", "0.5573915", "0.5568411", "0.5535107", "0.55242866", "0.550955", "0.5381615", "0.5359697", "0.5303804", "0.5300502", "0.52436477", "0.5218056", "0.5205465", "0.51786435", "0.5163052", "0.51593244", "0.5126326", "0.5122918", "0.51178956", "0.5104016", "0.50945294", "0.5087414", "0.5069723", "0.50558335", "0.50461006", "0.50304276", "0.5027666", "0.5005489", "0.50010824" ]
0.7298688
0
Read the deCODE file and split it into individual files per chromosome.
def split_decode_file(): # split files by chromosome header = [] current_chrom = 'chr1' # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt' file_template = decode_folder + '/{}.deCODE_2019_hg19.txt' decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed' w = open(file_template.format(current_chrom), 'a') print('NOTE: appending to map files, not overwriting. may cause duplicates') with open(decode_file, 'r') as f: for line in f: # save the header info if line.startswith('#'): header.append(line) # save the column labels elif line.startswith('Chr'): header.append('# ' + line) # write header to first file now w.write(''.join(header)) # the remaining lines are data else: # get the chromosome for the current line ch = line.split()[0] # if the chromosome matches the open file, write to it if ch == current_chrom: w.write(line) # if a new chromosome arises, switch to a new writefile else: w.close() current_chrom = ch w = open(file_template.format(current_chrom), 'a') # write header to file w.write(''.join(header)) w.write(line) # close the last open file w.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n info = line.strip().split()\n chr = int(info[0][-2:])\n chr_list[chr].append(map(int,info[1:3])+[[info[-1]]])\n else:\n pass\n infile.close()\n return chr_list", "def get_chroms(chromfile):\n chroms = {}\n with open(chromfile) as c:\n for line in c:\n try:\n chrom, length = line.strip().split()\n chroms[chrom] = length\n except ValueError:\n chroms[line.strip()] = 1\n return chroms", "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n loci = int(line.strip().split()[1])\n chr_list[chr] += [loci]\n else:\n pass\n infile.close()\n return chr_list", "def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)", "def load_data() -> list:\n # trans_dict is used for changing the given names into standardized names.\n trans_dict = {\"chr1\": \"1\", \"chr2\": \"2\", \"chr3\": \"3\", \"chr4\": \"4\", \"chr5\": \"5\", \"chr6\": \"6\", \"chr7\": \"7\",\n \"chr8\": \"8\", \"chr9\": \"9\", \"chr10\": \"10\", \"chr11\": \"11\", \"chr12\": \"12\", \"chr13\": \"13\", \"chr14\": \"14\",\n \"chr15\": \"15\", \"chr16\": \"16\", \"chr17\": \"17\", \"chr18\": \"18\", \"chr19\": \"19\", \"chrx\": \"x\", \"chry\": \"y\"}\n # This try statement catches user error.\n try:\n with open(sys.argv[1]) as bed_file, open(sys.argv[2]) as fasta_file:\n fasta_records = []\n # Opens the bed file and splits into lists\n bed_file = list(csv.reader(bed_file, delimiter='\\t'))\n # Changes the names of the chromosomes in bed file, does some light rearranging and formatting.\n bed_file = [[trans_dict[record[0].lower()], record[1], record[3][0:record[3].index(\n '\\'')]] for record in bed_file]\n # Sorts the desired indices by chromosome, then by index in the chromosome.\n bed_file = sorted(bed_file, key=itemgetter(1))\n bed_file = sorted(bed_file, key=itemgetter(0))\n # This stores the desired indexes for each chromosome.\n indexable_bed_records = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': [],\n '10': [], '11': [], '12': [], '13': [], '14': [], '15': [], '16': [], '17': [],\n '18': [], '19': [], 'x': [], 'y': []}\n # Put each desired index into it's appropriate chromosome list.\n for record in bed_file:\n indexable_bed_records[record[0]].append([record[2], record[1]])\n # Loops over fasta records in the supplied fasta file\n for fasta_record in fasta_iter(fasta_file):\n # grabs the chromosome id\n chrom_id = 
fasta_record[\"header\"][:fasta_record[\"header\"].index(' ')].lower()\n # Some chromosomes are not desired, skip them.\n if chrom_id not in indexable_bed_records.keys():\n continue\n # Grabs the indexes we want to extract from the chromosome.\n indexes = indexable_bed_records[chrom_id]\n # Grabs each index+/-10 from the sequence\n for index in indexes:\n fasta_records.append([index[0], fasta_record[\"seq\"][int(index[1]) - 10:int(index[1]) + 10]])\n # Returns a list of lists of format [5'/3',splice site sequence]\n return fasta_records\n # Catches user error.\n except (FileNotFoundError, IndexError) as e:\n if type(e) is IndexError:\n sys.stderr.write(\"Usage: {} bed_file fasta_file\\n\\tbed_file: The appropriate bed file. \\n\\t\"\n \"fasta_file: The appropriate fasta file.\\n\".format(os.path.basename(__file__)))\n elif type(e) is FileNotFoundError:\n sys.stderr.write(\"One of the specified files was not found.\\n\")\n sys.exit(1)", "def process_file(input_file = 'NC_012655.ffn',output_file = 'NC_012655.output'):\n #prepare\n f = open(input_file, 'r')\n o = open(output_file,'w')\n seq = ''\n header = f.readline()\n o.write('GeneID Length GC \\n')\n #work\n for line in f:\n if not line.startswith('>'):\n seq += line\n else:\n o.write(process_gene(header = header, gene = seq))\n header = line\n seq = ''\n #finish\n f.close()\n o.close()\n return 0", "def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = 
'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def main():\r\n filename = sys.argv[1]\r\n codes = huffman_letter_codes_from_file_contents(filename)\r\n print(codes)\r\n encode_file_using_codes(filename, codes)\r\n decode_file_using_codes(filename + \"_encoded\", codes)", "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def read_and_Kent_index(filename):\n chr_dict = defaultdict(lambda : defaultdict(list))\n debug = 0\n with open(filename, 'rU') as fh:\n # Skip comment lines\n # :TODO Fix this and make more general\n fh.next()\n fh.next()\n for line in fh:\n p_line = line[:-1].split(\"\\t\")\n try:\n start = int(p_line[1])\n end = int(p_line[2])\n kent_bin = binFromRangeStandard(start, end)\n except ValueError:\n # Case for VCF files\n start = int(p_line[1]) - 1\n end = int(p_line[1])\n kent_bin = 
binFromRangeStandard(start, end)\n chr_dict[p_line[0]][kent_bin].append(GTab(start, end))\n return(chr_dict)", "def preprocessBed(fname):\n res = {}\n iter = parseBed(fname)\n for i in iter:\n res.setdefault(i.chr,[])\n res[i.chr].append(i)\n for k in res.keys():\n res[k].sort()\n return res", "def process_data_to_chromatograms(self,data):\n chromatograms = []\n for block in data:\n chrom = self.extract_data_from_block(block)\n chromatograms.append(chrom)\n return chromatograms", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def _create_chrom_dict(chrom_len_fpath: Path) -> Dict[str, int]:\n\n chrom_dict: Dict[str, int] = {}\n with chrom_len_fpath.open() as c_f:\n next(c_f) # skip header\n line: str\n for line in c_f:\n chrom_dict[line.split(\"\\t\")[0]] = int(line.split(\"\\t\")[1])\n\n return chrom_dict", "def readFastaFile(filename):", "def parse_file(file_name, barcode_map=barcode_map):\n\n with open(file_name) as file_handle:\n results = defaultdict(Counter)\n try:\n while True:\n name = file_handle.next()\n seq = file_handle.next()\n plus = file_handle.next()\n qual = file_handle.next()\n handle_seq(seq, barcode_map, results)\n except StopIteration:\n pass\n return pd.DataFrame(results).T.fillna(0)", "def read_cDNA_file_to_dict(filename):\n \n #initialize dictionary\n cDNA_dictionary = {}\n\n #open file\n with open(cDNA_file) as f:\n \n #loop through file line by line\n for line in f:\n\n #remove newline\n line = line.rstrip()\n \n #get gene name\n if line.startswith(\">\"):#If the line starts with the character \">\" then,\n gene_name = line.split(\"|\")[1]#I separate the line by the character \"|\" and assign index 1 to gene_name\n \n #read in sequence in uppercase\n if not line.startswith(\">\"):#If the line does not start with the character \">\" then,\n line = line.upper()#I make all of the characters within the line uppercase\n\n #put name and sequence 
in dictionary\n cDNA_dictionary[gene_name] = line#I assign the gene_name as the key and the line (sequence) as the value\n\n #return dictionary \n return cDNA_dictionary", "def method4(fname):\n\t#jfrom cStringIO import StringIO\n\t#from tokenize import generate_tokens\n\timport re\n\tprint \"Method 4: read in files by line\"\n\tprint \"and rather than printing out all of it, only print out specific cols \"\n\tf = open(fname,\"r\")\n\tline = f.readline()\n\ti = 0 \n\t\n\twhile line != '':\n\t\ttmp= line.strip()\n\t\tif tmp :\n\t\t\t#print tmp\n\t\t\t#tmp = line.strip()\n\t\t\ttmpp = tmp.split()\n\t\t\t#i +=1\n\t\t\t#print len(tmpp)\n\t\t\tif len(tmpp) >1:\n\t\t\t\tprint tmpp[1]\n\t\t#tmp = line.split(' ')\n\t\t#i += 1\n\t\t#tmp = 'sdklsd sdjlks '\n\t\t#print len(tmp)\n\t\t#if len(tmp) > 1: \n\t\t\t#print tmp[1]\n\t\tline=f.readline()\n\t\n\tf.close()\n\tprint \"Method 4 done\"", "def create_chunks(file_names):\n\n\tnew_chunks = []\n\n\tfor name in file_names:\n\n\t\t# Find the .inf file and read the details stored within\n\t\ttry:\n\t\t\tdetails = open(name + suffix + 'inf', 'r').readline()\n\t\texcept IOError:\n\n\t\t\ttry:\n\t\t\t\tdetails = open(name + suffix + 'INF', 'r').readline()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't open information file, %s\" % name+suffix+'inf')\n\t\t\t\tsys.exit()\n\n\t\t# Parse the details\n\t\tdetails = [string.rstrip(details)]\n\n\t\tsplitters = [' ', '\\011']\n\n\t\t# Split the details up where certain whitespace characters occur\n\t\tfor s in splitters:\n\n\t\t\tnew_details = []\n\n\t\t\t# Split up each substring (list entry)\n\t\t\tfor d in details:\n\n\t\t\t\tnew_details = new_details + string.split(d, s)\n\n\t\t\tdetails = new_details\n\n\t\t# We should have details about the load and execution addresses\n\n\t\t# Open the file\n\t\ttry:\n\t\t\tin_file = open(name, 'rb')\n\t\texcept IOError:\n\t\t\tprint(\"Couldn't open file, %s\" % name)\n\t\t\tsys.exit()\n\n\t\t# Find the length of the file (don't rely on the .inf file)\n\t\tin_file.seek(0, 2)\n\t\tlength = in_file.tell()\n\t\tin_file.seek(0, 0)\n\n\t\t# Examine the name entry and take the load and execution addresses\n\t\tdot_at = string.find(details[0], '.')\n\t\tif dot_at != -1:\n\t\t\treal_name = details[0][dot_at+1:]\n\t\t\tload, exe = details[1], details[2]\n\t\telse:\n\t\t\treal_name = get_leafname(name)\n\t\t\tload, exe = details[0], details[1]\n\n\t\tload = hex2num(load)\n\t\texe = hex2num(exe)\n\n\t\tif load == None or exe == None:\n\t\t\tprint('Problem with %s: information is possibly incorrect.' 
% name+suffix+'inf')\n\t\t\tsys.exit()\n\n\t\t# Reset the block number to zero\n\t\tblock_number = 0\n\n\t\t# Long gap\n\t\tgap = 1\n\t\n\t\t# Write block details\n\t\twhile True:\n\t\t\tblock, last = write_block(in_file, real_name, load, exe, length, block_number)\n\n\t\t\tif gap == 1:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x05dc)))\n\t\t\t\tgap = 0\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x0258)))\n\n\t\t\t# Write the block to the list of new chunks\n\n\t\t\t# For old versions, just write the block\n\t\t\tif UEF_major == 0 and UEF_minor < 9:\n\t\t\t\tnew_chunks.append((0x100, block))\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x100, block))\n\n\t\t\tif last == 1:\n\t\t\t\tbreak\n\n\t\t\t# Increment the block number\n\t\t\tblock_number = block_number + 1\n\n\t\t# Close the input file\n\t\tin_file.close()\n\n\t# Write some finishing bytes to the list of new chunks\n#\tnew_chunks.append((0x110, number(2,0x0258)))\n#\tnew_chunks.append((0x112, number(2,0x0258)))\n\n\t# Return the list of new chunks\n\treturn new_chunks", "def dat_reader(fpath, fname):\n\n header = []\n data = []\n with open(fpath + fname + '.dat', 'rb') as file:\n for row in file:\n string_row = row.decode('iso-8859-1')\n if string_row[0] == 'C':\n header.append(string_row)\n else:\n data.append(string_row)\n\n return [header, data]", "def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()", "def process_file(file_name):\n _,fn = os.path.split(file_name)\n pdb_id = get_pdb_id(fn)\n # Get files in pdb format even when at LBL, to avoid issues with phenix.reduce\n if ('_mirror' in file_name) or (file_name == pdb_id):\n file_name = pdb_id + '.pdb'\n file_to_clean = []\n if not os.path.isfile(file_name):\n # leave file in folder if it was already there\n # cmd = 'phenix.fetch_pdb {} --all'.format(pdb_id)\n cmd = 'phenix.fetch_pdb {}'.format(pdb_id)\n r = easy_run.go(cmd,join_stdout_stderr=False)\n for fn in r.stdout_lines:\n fn = os.path.split(fn)[-1]\n if 
'.pdb' in fn: file_name = fn\n file_to_clean.append(fn)\n fn = fn.replace('.pdd','_with_h.pdb')\n file_to_clean.append(fn)\n return file_name,pdb_id,file_to_clean", "def _chrom_names(fasta_file):\n from pysam import FastaFile\n with FastaFile(fasta_file) as fa:\n chroms = list(fa.references)\n return chroms", "def split_file(in_file, num_splits, split_dir, mut_file):\n\n # create the output directory if it does\n # not exist\n if not os.path.exists(split_dir):\n os.mkdir(split_dir)\n\n # open the info file\n f = open(in_file)\n pdb_header = f.readline()\n\n # open the mutation file\n m = open(mut_file)\n mut_header = m.readline()\n\n # read into a dictionary containing\n # structure ids as keys and lines pertaining\n # to it as values\n pdb_dict = read_file(f)\n mut_dict = read_file(m)\n\n # determine total num of ids in file\n total_ids = len(list(pdb_dict.keys()))\n print(total_ids)\n # determine num of ids to put in each split\n num_ids = int(total_ids/num_splits)\n\n # counters\n count_file = 0\n count_id = num_ids\n\n # randomize order of insertions\n keys = list(pdb_dict.keys())\n random.shuffle(keys)\n\n # iterate through dict and write to files\n #for key in sorted(pdb_dict):\n for key in keys:\n\n # check if we need a new file\n if (count_id == num_ids and count_file < num_splits):\n count_id = 0\n pdb_out = open(split_dir + \"/pdb_info_split_\" + str(count_file) + \".txt\", 'w')\n pdb_out.write(pdb_header)\n mut_out = open(split_dir + \"/mut_info_split_\" + str(count_file) + \".txt\", 'w')\n mut_out.write(mut_header)\n count_file += 1\n\n # write all lines pertaining to the structure id\n for line in pdb_dict[key]:\n pdb_out.write(line)\n if key in mut_dict:\n for line in mut_dict[key]:\n mut_out.write(line)\n\n count_id += 1", "def read_data(filename):\n from intanutil.read_header import read_header\n from intanutil.get_bytes_per_data_block import get_bytes_per_data_block\n from intanutil.read_one_data_block import read_one_data_block\n from intanutil.notch_filter import notch_filter\n from intanutil.data_to_result import data_to_result \n \n\n tic = time.time()\n fid = open(filename, 'rb')\n filesize = os.path.getsize(filename)\n\n header = read_header(fid)\n\n print('Found {} amplifier channel{}.'.format(header['num_amplifier_channels'], plural(header['num_amplifier_channels'])))\n print('Found {} auxiliary input channel{}.'.format(header['num_aux_input_channels'], plural(header['num_aux_input_channels'])))\n print('Found {} supply voltage channel{}.'.format(header['num_supply_voltage_channels'], plural(header['num_supply_voltage_channels'])))\n print('Found {} board ADC channel{}.'.format(header['num_board_adc_channels'], plural(header['num_board_adc_channels'])))\n print('Found {} board digital input channel{}.'.format(header['num_board_dig_in_channels'], plural(header['num_board_dig_in_channels'])))\n print('Found {} board digital output channel{}.'.format(header['num_board_dig_out_channels'], plural(header['num_board_dig_out_channels'])))\n print('Found {} temperature sensors channel{}.'.format(header['num_temp_sensor_channels'], plural(header['num_temp_sensor_channels'])))\n print('')\n\n # Determine how many samples the data file contains.\n bytes_per_block = get_bytes_per_data_block(header)\n\n # How many data blocks remain in this file?\n data_present = False\n bytes_remaining = filesize - fid.tell()\n if bytes_remaining > 0:\n data_present = True\n\n if bytes_remaining % bytes_per_block != 0:\n raise Exception('Something is wrong with file size : should have a whole 
number of data blocks')\n\n num_data_blocks = int(bytes_remaining / bytes_per_block)\n\n num_amplifier_samples = header['num_samples_per_data_block'] * num_data_blocks\n num_aux_input_samples = int((header['num_samples_per_data_block'] / 4) * num_data_blocks)\n num_supply_voltage_samples = 1 * num_data_blocks\n num_board_adc_samples = header['num_samples_per_data_block'] * num_data_blocks\n num_board_dig_in_samples = header['num_samples_per_data_block'] * num_data_blocks\n num_board_dig_out_samples = header['num_samples_per_data_block'] * num_data_blocks\n\n record_time = num_amplifier_samples / header['sample_rate']\n\n if data_present:\n print('File contains {:0.3f} seconds of data. Amplifiers were sampled at {:0.2f} kS/s.'.format(record_time, header['sample_rate'] / 1000))\n else:\n print('Header file contains no data. Amplifiers were sampled at {:0.2f} kS/s.'.format(header['sample_rate'] / 1000))\n\n if data_present:\n # Pre-allocate memory for data.\n print('')\n print('Allocating memory for data...')\n\n data = {}\n if (header['version']['major'] == 1 and header['version']['minor'] >= 2) or (header['version']['major'] > 1):\n data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.int_)\n else:\n data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.uint)\n\n data['amplifier_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.uint)\n data['aux_input_data'] = np.zeros([header['num_aux_input_channels'], num_aux_input_samples], dtype=np.uint)\n data['supply_voltage_data'] = np.zeros([header['num_supply_voltage_channels'], num_supply_voltage_samples], dtype=np.uint)\n data['temp_sensor_data'] = np.zeros([header['num_temp_sensor_channels'], num_supply_voltage_samples], dtype=np.uint)\n data['board_adc_data'] = np.zeros([header['num_board_adc_channels'], num_board_adc_samples], dtype=np.uint)\n \n # by default, this script interprets digital events (digital inputs and outputs) as booleans\n # if unsigned int values are preferred(0 for False, 1 for True), replace the 'dtype=np.bool_' argument with 'dtype=np.uint' as shown\n # the commented line below illustrates this for digital input data; the same can be done for digital out\n \n #data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.uint)\n data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.bool_)\n data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples, dtype=np.uint)\n \n data['board_dig_out_data'] = np.zeros([header['num_board_dig_out_channels'], num_board_dig_out_samples], dtype=np.bool_)\n data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples, dtype=np.uint)\n\n # Read sampled data from file.\n print('Reading data from file...')\n\n # Initialize indices used in looping\n indices = {}\n indices['amplifier'] = 0\n indices['aux_input'] = 0\n indices['supply_voltage'] = 0\n indices['board_adc'] = 0\n indices['board_dig_in'] = 0\n indices['board_dig_out'] = 0\n\n print_increment = 10\n percent_done = print_increment\n for i in range(num_data_blocks):\n read_one_data_block(data, header, indices, fid)\n\n # Increment indices\n indices['amplifier'] += header['num_samples_per_data_block']\n indices['aux_input'] += int(header['num_samples_per_data_block'] / 4)\n indices['supply_voltage'] += 1\n indices['board_adc'] += header['num_samples_per_data_block']\n indices['board_dig_in'] += header['num_samples_per_data_block']\n indices['board_dig_out'] += 
header['num_samples_per_data_block'] \n\n fraction_done = 100 * (1.0 * i / num_data_blocks)\n if fraction_done >= percent_done:\n print('{}% done...'.format(percent_done))\n percent_done = percent_done + print_increment\n\n # Make sure we have read exactly the right amount of data.\n bytes_remaining = filesize - fid.tell()\n if bytes_remaining != 0: raise Exception('Error: End of file not reached.')\n\n\n\n # Close data file.\n fid.close()\n\n if (data_present):\n print('Parsing data...')\n\n # Extract digital input channels to separate variables.\n for i in range(header['num_board_dig_in_channels']):\n data['board_dig_in_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_in_raw'], (1 << header['board_dig_in_channels'][i]['native_order'])), 0)\n\n # Extract digital output channels to separate variables.\n for i in range(header['num_board_dig_out_channels']):\n data['board_dig_out_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_out_raw'], (1 << header['board_dig_out_channels'][i]['native_order'])), 0)\n\n # Scale voltage levels appropriately.\n data['amplifier_data'] = np.multiply(0.195, (data['amplifier_data'].astype(np.int32) - 32768)) # units = microvolts\n data['aux_input_data'] = np.multiply(37.4e-6, data['aux_input_data']) # units = volts\n data['supply_voltage_data'] = np.multiply(74.8e-6, data['supply_voltage_data']) # units = volts\n if header['eval_board_mode'] == 1:\n data['board_adc_data'] = np.multiply(152.59e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts\n elif header['eval_board_mode'] == 13:\n data['board_adc_data'] = np.multiply(312.5e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts\n else:\n data['board_adc_data'] = np.multiply(50.354e-6, data['board_adc_data']) # units = volts\n data['temp_sensor_data'] = np.multiply(0.01, data['temp_sensor_data']) # units = deg C\n\n # Check for gaps in timestamps.\n num_gaps = np.sum(np.not_equal(data['t_amplifier'][1:]-data['t_amplifier'][:-1], 1))\n if num_gaps == 0:\n print('No missing timestamps in data.')\n else:\n print('Warning: {0} gaps in timestamp data found. Time scale will not be uniform!'.format(num_gaps))\n\n # Scale time steps (units = seconds).\n data['t_amplifier'] = data['t_amplifier'] / header['sample_rate']\n data['t_aux_input'] = data['t_amplifier'][range(0, len(data['t_amplifier']), 4)]\n data['t_supply_voltage'] = data['t_amplifier'][range(0, len(data['t_amplifier']), header['num_samples_per_data_block'])]\n data['t_board_adc'] = data['t_amplifier']\n data['t_dig'] = data['t_amplifier']\n data['t_temp_sensor'] = data['t_supply_voltage']\n\n # If the software notch filter was selected during the recording, apply the\n # same notch filter to amplifier data here.\n if header['notch_filter_frequency'] > 0 and header['version']['major'] < 3:\n print('Applying notch filter...')\n\n print_increment = 10\n percent_done = print_increment\n for i in range(header['num_amplifier_channels']):\n data['amplifier_data'][i,:] = notch_filter(data['amplifier_data'][i,:], header['sample_rate'], header['notch_filter_frequency'], 10)\n\n fraction_done = 100 * (i / header['num_amplifier_channels'])\n if fraction_done >= percent_done:\n print('{}% done...'.format(percent_done))\n percent_done += print_increment\n else:\n data = [];\n\n # Move variables to result struct.\n result = data_to_result(header, data, data_present)\n\n print('Done! 
Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))\n return result", "def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i", "def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)", "def file_reader(filename = 'conv_params'):\n\n with open(filename) as f:\n info = f.readlines()\n info = [i.strip() for i in info] # each element in info is a string of a line from the file\n info = [i.split() for i in info] # split each whitespace delimited element into a list of lists\n info = [[i.split('-') for i in j] for j in info] # note info is 3 layers deep\n\n info[2] = info[2][0] # makes default E just a single string of the number\n info[3] = info[3][0]\n\n return info", "def decode_file(self, filename):\n num_bytes = os.stat(filename)[6]\n data = array.array('B')\n\n with open(filename, 'rb') as f:\n data.fromfile(f, num_bytes)\n\n return self.decode_data(data)", "def read_GFF(gff_filename):\n gff_info = {} # loci --> LocusInfo\n tmp = {} # loci PB.X --> list of GFF records for PB.X.Y\n\n for r in collapseGFFReader(gff_filename):\n m = rex_pbid.match(r.seqid)\n if m is None:\n raise Exception(f\"Expected PBID format PB.X.Y but saw {r.seqid}\")\n locus = m.group(1) # ex: PB.1\n if locus not in tmp:\n tmp[locus] = [r]\n gff_info[locus] = LocusInfo(\n chrom=r.chr, strand=r.strand, regions=None, isoforms=None\n )\n else:\n if gff_info[locus].chrom != r.chr:\n logger.warning(\n f\"WARNING: Expected {r.seqid} to be on {gff_info[locus].chrom} but saw {r.chr}. Could be minimap2 multi-mapping inconsistency for repetitive genes. Check later.\\n\"\n )\n tmp[locus].append(r)\n\n # now figure out the exonic regions for each gene PB.X\n for locus, records in tmp.items():\n c = ClusterTree(0, 0)\n for r in records:\n for e in r.ref_exons:\n c.insert(\n max(0, e.start - extra_bp_around_junctions),\n e.end + extra_bp_around_junctions,\n 1,\n )\n\n regions = [(a, b) for (a, b, junk) in c.getregions()]\n regions[0] = (max(0, regions[0][0] - __padding_before_after__), regions[0][1])\n regions[-1] = (\n max(0, regions[-1][0]),\n regions[-1][1] + __padding_before_after__,\n )\n gff_info[locus] = LocusInfo(\n chrom=gff_info[locus].chrom,\n strand=gff_info[locus].strand,\n regions=regions,\n isoforms=[r.seqid for r in records],\n )\n\n return gff_info" ]
[ "0.5818746", "0.56577337", "0.5606158", "0.5569912", "0.543026", "0.542189", "0.5332967", "0.52981675", "0.52800643", "0.5245522", "0.51938415", "0.5192011", "0.51777303", "0.51236606", "0.51045394", "0.5096848", "0.5091337", "0.50838816", "0.50694305", "0.50682366", "0.5067442", "0.5058609", "0.5042363", "0.50045395", "0.49925688", "0.4989402", "0.4981118", "0.49752435", "0.49722943", "0.4970187" ]
0.67987746
0
Tests registered class types (passable by reference and value). Also tests a moveonly class type.
def test_abstract_value_registered_class(self): obj = MoveOnlyType(10) self.assertEqual( str(Value[MoveOnlyType]), "<class 'pydrake.common.value.Value[MoveOnlyType]'>") # This *always* clones `obj`. value = Value[MoveOnlyType](obj) self.assertTrue(value.get_value() is not obj) self.assertEqual(value.get_value().x(), 10) # Set value (via `get_mutable_value`). value.get_mutable_value().set_x(20) self.assertEqual(value.get_value().x(), 20) # Test custom emplace constructor. emplace_value = Value[MoveOnlyType](30) self.assertEqual(emplace_value.get_value().x(), 30) # Test docstring. self.assertTrue("unique_ptr" in value.set_value.__doc__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_isclassinstance():\n class MockClass:\n pass\n\n # Since Python3, everything is a class, so this means nothing (?)\n assert isclassinstance(0)\n assert isclassinstance(1.0)\n assert isclassinstance(complex(2.0))\n assert isclassinstance('foo')\n assert isclassinstance([])\n assert isclassinstance(())\n assert isclassinstance(range(6))\n assert isclassinstance(bytes(7))\n assert isclassinstance(bytearray())\n assert isclassinstance(memoryview(b'nine'))\n assert isclassinstance(set())\n assert isclassinstance(frozenset())\n assert isclassinstance({})\n assert isclassinstance(None)\n assert isclassinstance(MockClass())", "def class_is(cls: Class) -> bool:\n pass", "def test_returns_class(self):\n assert type is simple_class().__class__", "def check_class(instance, type):\n\tif not issubclass(instance, type):\n\t\traise TypeError('Subclass expected type {0}, but got: {1}', type(type), type(instance))", "def _classesToCheck(self, cls):\r\n yield cls\r\n yield from inspect.getmro(cls)", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def issubclass_safe(value, type_):\n try:\n return issubclass(value, type_)\n except (TypeError, AttributeError):\n # Cannot perform issubclass on some types\n return False", "def test_cls(self):\n assert forge.cls == forge.FParameter(\n forge.FParameter.POSITIONAL_OR_KEYWORD,\n name='cls',\n interface_name='cls',\n contextual=True,\n )", "def check_type(self):\n return True", "def test_is_a():\n class MockParentClass:\n pass\n\n # Test primitives\n objects = [\n 0,\n 1.0,\n complex(2.0),\n 'foo',\n [],\n (),\n range(6),\n bytes(7),\n bytearray(),\n memoryview(b'nine'),\n set(),\n frozenset(),\n {},\n MockParentClass(),\n ]\n types = [\n int,\n float,\n complex,\n str,\n list,\n tuple,\n range,\n bytes,\n bytearray,\n memoryview,\n set,\n frozenset,\n dict,\n MockParentClass,\n ]\n\n # Test primitives (+ one class) against each other\n assert len(objects) == len(types)\n for i in range(0, len(objects)):\n assert is_a(objects[i], types[i])\n for j in range(0, len(objects)):\n if j == i:\n continue\n assert not is_a(objects[i], types[j])\n\n class MockChildClass(MockParentClass):\n pass\n\n class MockNonRelativeClass:\n pass\n\n # Test with class inheritance\n child = MockChildClass()\n other = MockNonRelativeClass()\n assert is_a(child, MockChildClass)\n assert is_a(child, MockParentClass)\n assert not is_a(other, MockParentClass)\n assert not is_a(child, MockNonRelativeClass)", "def test_types(self):\n \n self.assertIsInstance(self.detector_type, str)\n self.assertIsInstance(self.psd, dict)\n self.assertIsInstance(self.intensity, dict)\n self.assertIsInstance(self.database, str)\n self.assertIsInstance(self.position, list)\n self.assertIsInstance(self.angle, list)\n self.assertIsInstance(self.linearity_curve, dict)\n self.assertIsInstance(self.FOV, float)\n \n pass", "def test_motorcycle(self):\n try:\n self.test = oop1.Motorcycle()\n self.assertIsInstance(self.test, oop1.Motorcycle)\n print(\"\\nPASS : Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_class():\n class TestClass1(object):\n arg1 = None # type: int\n arg2 = None # type: str\n\n assert get_type_hints(TestClass1) == {\n 'arg1': int,\n 'arg2': str\n }", "def 
_typechecked_class(cls):\n for name, func in cls.__dict__.items():\n if not name.startswith('__'):\n setattr(cls, name, _typechecked_func(func))\n return cls", "def exactly(base_cls):\n\n @meta\n def check(cls):\n return cls is base_cls\n\n return check", "def test_is_a_subclass():\n class MockParentClass:\n pass\n\n class MockChildClass(MockParentClass):\n pass\n\n class MockGrandchildClass(MockChildClass):\n pass\n\n class MockNonRelativeClass:\n pass\n\n # Test with instances\n parent = MockParentClass()\n child = MockChildClass()\n grandchild = MockGrandchildClass()\n other = MockNonRelativeClass()\n assert is_a_subclass(1, int)\n assert is_a_subclass(child, MockParentClass)\n assert is_a_subclass(grandchild, MockParentClass)\n assert is_a_subclass(grandchild, MockChildClass)\n assert not is_a_subclass(1, float)\n assert not is_a_subclass(parent, MockNonRelativeClass)\n assert not is_a_subclass(child, MockNonRelativeClass)\n assert not is_a_subclass(grandchild, MockNonRelativeClass)\n assert not is_a_subclass(other, MockParentClass)\n assert not is_a_subclass(other, MockChildClass)\n assert not is_a_subclass(other, MockGrandchildClass)\n assert not is_a_subclass(parent, MockChildClass)\n assert not is_a_subclass(parent, MockGrandchildClass)\n assert not is_a_subclass(child, MockGrandchildClass)\n\n # Test with types\n assert is_a_subclass(int, int)\n assert is_a_subclass(MockChildClass, MockParentClass)\n assert is_a_subclass(MockGrandchildClass, MockParentClass)\n assert is_a_subclass(MockGrandchildClass, MockChildClass)\n assert not is_a_subclass(int, float)\n assert not is_a_subclass(MockParentClass, MockNonRelativeClass)\n assert not is_a_subclass(MockChildClass, MockNonRelativeClass)\n assert not is_a_subclass(MockGrandchildClass, MockNonRelativeClass)\n assert not is_a_subclass(MockNonRelativeClass, MockParentClass)\n assert not is_a_subclass(MockNonRelativeClass, MockChildClass)\n assert not is_a_subclass(MockNonRelativeClass, MockGrandchildClass)\n assert not is_a_subclass(MockParentClass, MockChildClass)\n assert not is_a_subclass(MockParentClass, MockGrandchildClass)\n assert not is_a_subclass(MockChildClass, MockGrandchildClass)", "def test_is_primitive_returns_false(self):\n class TestClass:\n pass\n\n for primitive_type in (TestClass(), [], {}):\n is_primitive = self.tested_class._is_primitive(primitive_type)\n self.assertFalse(is_primitive)", "def match(self, cls):\n return isinstance(self, cls)", "def test_class_annotations():\n\n for cls in get_module_classes('HABApp.core.events.events', ('ComplexEventValue', 'AllEvents')).values():\n check_class_annotations(cls)", "def test_cls_and_user(self):\n sentinel_1 = object()\n sentinel_2 = object()\n\n @converters.wrap\n def inner_test(cls: typing.Any, user: models.User):\n \"\"\"Make sure the self parameter was not touched.\"\"\"\n self.assertEqual(cls, sentinel_1)\n self.assertEqual(user, sentinel_2)\n inner_test(sentinel_1, user=sentinel_2)", "def CheckType(self, *args, **kwargs):\n pass", "def test_register_manual_keys(self):\n registry = ClassRegistry()\n\n @registry.register('fire')\n class Charizard(Pokemon):\n pass\n\n @registry.register('water')\n class Blastoise(Pokemon):\n pass\n\n # By default, you have to specify a registry key when\n # registering new classes. 
We'll see how to assign\n # registry keys automatically in the next test.\n with self.assertRaises(ValueError):\n # noinspection PyUnusedLocal\n @registry.register\n class Venusaur(Pokemon):\n pass\n\n self.assertIsInstance(registry['fire'], Charizard)\n self.assertIsInstance(registry['water'], Blastoise)", "def safe_isinstance(obj, class_path_str):\n # this function is copy-paste from the code of the SHAP Python library\n # Copyright (c) 2018 Scott Lundberg\n if isinstance(class_path_str, str):\n class_path_strs = [class_path_str]\n elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):\n class_path_strs = class_path_str\n else:\n class_path_strs = ['']\n\n # try each module path in order\n for class_path_str in class_path_strs:\n if \".\" not in class_path_str:\n raise ValueError(\"class_path_str must be a string or list of strings specifying a full \\\n module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'\")\n\n # Splits on last occurence of \".\"\n module_name, class_name = class_path_str.rsplit(\".\", 1)\n\n # here we don't check further if the model is not imported, since we shouldn't have\n # an object of that types passed to us if the model the type is from has never been\n # imported. (and we don't want to import lots of new modules for no reason)\n if module_name not in sys.modules:\n continue\n\n module = sys.modules[module_name]\n\n #Get class\n _class = getattr(module, class_name, None)\n\n if _class is None:\n continue\n\n if isinstance(obj, _class):\n return True\n\n return False", "def test_create_obj_by_type(self):\n test_obj = mock.MagicMock()\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIs(returned_obj, test_obj)", "def match_classvar(typ):\n return abstract_utils.match_type_container(typ, \"typing.ClassVar\")", "def test_init_subclass(self, cosmo_cls):\n class InitSubclassTest(cosmo_cls):\n pass\n\n # test parameters\n assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__\n\n # test and cleanup registry\n registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)\n assert registrant is InitSubclassTest", "def assert_type(instance, classtype):\n assert_cond(isinstance(instance, classtype), TypeCheckError(type(instance), classtype))", "def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)", "def test_types(self):\n assert types.typeClass(\"str\") == str\n\n assert types.isBuiltinType(\"str\")\n\n assert types.isCollectionType(\"map\")\n assert types.isCollectionType(\"seq\")\n assert not types.isCollectionType(\"str\")\n\n assert types.isScalarType(\"str\")\n assert not types.isScalarType(\"seq\")\n assert not types.isScalarType(\"map\")\n\n assert types.isCollection([])\n assert types.isCollection({})\n assert not types.isCollection(\"foo\")\n\n assert types.isScalar(\"\")\n assert types.isScalar(True)\n assert not types.isScalar([])\n\n assert types.isCorrectType(\"\", str)\n assert types.isCorrectType({}, dict)\n\n assert types.isString(\"foo\")\n assert not types.isString([])\n\n assert types.isInt(1)\n assert not types.isInt(\"foo\")\n\n assert types.isBool(True)\n assert not types.isBool(1)\n assert not types.isBool(\"true\")\n\n assert types.isFloat(1.0)\n assert not types.isFloat(\"foo\")\n\n assert types.isNumber(1)\n assert types.isNumber(1.0)\n assert not types.isNumber(\"foo\")\n\n assert types.isText(\"foo\")\n assert types.isText(1)\n assert types.isText(1.0)\n assert not 
types.isText([])\n assert not types.isText(True)\n\n assert types.isAny(\"foo\")\n assert types.isAny(True)\n assert types.isAny(1)\n assert types.isAny(1.0)\n assert types.isAny({})\n assert types.isAny([])\n\n assert types.isEnum(\"foo\")\n assert not types.isEnum(1)\n\n assert types.isNone(None)\n assert not types.isNone(\"foo\")" ]
[ "0.6440121", "0.62261176", "0.6093001", "0.6060876", "0.604561", "0.6029342", "0.595637", "0.58810997", "0.5852991", "0.58228475", "0.57530355", "0.5710559", "0.5691547", "0.5682141", "0.5676183", "0.56754434", "0.56320673", "0.5610011", "0.55910206", "0.5559114", "0.555801", "0.55575645", "0.5540791", "0.5534665", "0.5505643", "0.55003107", "0.5485272", "0.54688185", "0.54636806", "0.54522014" ]
0.63377374
1
Instantiates GameObjectFactory, the factory that will create all the game objects.
def create_factory() -> pygameng.GameObjectFactory: from Assets.inventory import images, sounds, assets, game_types factory = pygameng.GameObjectFactory(pygameng.ClassRegistrar.registry, images, sounds, assets, game_types) factory.set_layer_manager_asset_name("LayerManager") return factory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_game(self):\n game = Game(self.data['gamename'])\n self.game = game\n return game", "def create(game):\r\n ## Create Garbage\r\n game.garbage = deque([])\r\n\r\n ## Create Stars\r\n game.create_stars()\r\n\r\n ## Create Millenium Falcon\r\n game.falcons = MilleniumFalcon.init(game)\r\n game.millenium_falcon = MilleniumFalcon()\r\n \r\n ## Create TIE Fighters\r\n game.fighters = Fighter.init(game)\r\n\r\n ## Create Asteroids\r\n game.rocks = Group.mesh(Rock1.init(game), Rock2.init(game))\r\n\r\n ## Create Lasers\r\n game.pro_lasers = ProLaser.init(game)\r\n game.con_lasers = ConLaser.init(game)\r\n\r\n ## Setup collision groups\r\n Group.bind(game.pro_lasers, game.rocks, game.act_laser_void)", "def test_factory_methods(self):\n\n po = ProjectObject.gen_bounding_box_object(id=\"1\", bounds=self.bounds)\n self.assertEqual(po.project_type, \"bounding_box\")\n self.assertAlmostEqual(po.bounds, self.bounds)\n self.assertEqual(po.id, \"1\")\n\n po = ProjectObject.gen_voxels_object(id=\"2\", voxels=self.voxels)\n self.assertEqual(po.project_type, \"voxels\")\n self.assertAlmostEqual(po.voxels.bounds(), self.voxels.bounds())\n self.assertEqual(po.id, \"2\")\n\n po = ProjectObject.gen_meshes_object(id=\"3\", meshes=self.meshes)\n self.assertEqual(po.project_type, \"meshes\")\n self.assertEqual(\n po.meshes.num_primitive_meshes(), self.meshes.num_primitive_meshes()\n )\n self.assertEqual(po.id, \"3\")", "def make_objects(self):\n pass", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._client_type = \"\"\n self._creatures = []\n self._lost = False\n self._name = \"Anonymous\"\n self._opponent = None\n self._reason_lost = \"\"\n self._reason_won = \"\"\n self._time_remaining = 0\n self._total_health = 0\n self._won = False", "def populate_objects(self):\n if not self._random_object: # only populate the first object\n U.spawn_object(self.object_list[0], self.object_initial_position)\n else:\n rand_x = np.random.uniform(low=-0.35, high=0.35, size=(len(self.object_list),))\n rand_y = np.random.uniform(low=2.2, high=2.45, size=(len(self.object_list),))\n for idx, obj in enumerate(self.object_list):\n box_pos = Pose(position=Point(x=rand_x[idx],\n y=rand_y[idx],\n z=1.05))\n U.spawn_object(obj, box_pos)", "def create(self, pygame):\n\n white = (255,255,255)\n self.obstacle_img = pygame.image.load(\"./Images/Obstacle.png\").convert()\n self.obstacle_img.set_colorkey(white)\n\n for i in range(8):\n self.random_objects.append(pygame.image.load(\"./Images/Object{}.png\".format(i+1)).convert())\n # self.random_objects[i].set_colorkey(white)", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._bottle = None\n self._cowboy = None\n self._furnishing = None\n self._has_hazard = False\n self._is_balcony = False\n self._tile_east = None\n self._tile_north = None\n self._tile_south = None\n self._tile_west = None\n self._x = 0\n self._y = 0\n self._young_gun = None", "def __init__(self):\n game_engine = get_gameengine()\n if game_engine is not None:\n self = game_engine\n else:\n ## The targeted frames per second\n self.target_fps = 200\n\n ## The start time\n self.time = time.time()\n\n ## A list of all registered game objects\n self.game_objects = list()\n\n ## A list of colliders\n self.colliders = list()\n\n ## Manage the user inputs\n self.input_manager = InputManager(self)\n\n ## Determines the state of the Game Engine\n self.running = 
False\n\n ## Variable to pause the Game Engine\n self.paused = False\n\n self.time_node = pm.PyNode('time1')\n # end if", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._acted = False\n self._dash_x = 0\n self._dash_y = 0\n self._energy = 0\n self._genarium = 0\n self._is_busy = False\n self._job = None\n self._legendarium = 0\n self._moves = 0\n self._mythicite = 0\n self._owner = None\n self._protector = None\n self._rarium = 0\n self._shield = 0\n self._x = 0\n self._y = 0", "def __init__(self):\n self.full_map = [] # map with all the items in place.\n self.list_objects = []\n self.objects_numbers = 3\n self.x_axis = None\n self.y_axis = None\n self.user = User(self)\n self.load_data_map()\n self.create_object()\n self.graphics = Graphics(self)", "def __create_scene(self):\n\n print 'creating a scene'\n # create scenegraph by the ifgi scene parser\n _infilepath = '../../sampledata/cornel_box.ifgi'\n # _infilepath = '../../sampledata/one_tri_full.ifgi'\n ifgireader = IfgiSceneReader.IfgiSceneReader()\n if(not ifgireader.read(_infilepath)):\n raise StandardError, ('load file [' + _infilepath + '] failed.')\n\n # add a new scene\n # A ifgi file may have many cameras, but only default camera\n # is handled.\n cam_dict = ifgireader.camera_dict_dict['default']\n\n assert(self.__ifgi_cpp_render_core != None)\n self.__ifgi_cpp_render_core.create_scene(ifgireader.material_dict_list,\\\n ifgireader.geometry_dict_list,\\\n cam_dict)\n # check the camera correctly pushed\n # print cam_dict\n # dir(ifgi_cpp_render_core)\n # ret_cam_dict = ifgi_cpp_render_core.get_camera_pydict()\n # print ret_cam_dict\n\n # self.__scenegraph.update_all_bbox()\n # -- now all primitive (TriMesh) can look up the material\n\n # # added RGBA buffer and Hit buffer to the current camera.\n # imgsz = (self.__image_xsize, self.__image_ysize, 4)\n # cur_cam.set_film('RGBA', Film.ImageFilm(imgsz, 'RGBA'))\n # # cur_cam.print_obj()", "def __init__(self, nickname, position, direction, color, object_hash = None):\n GameObject.__init__(\n self,\n nickname,\n position,\n direction,\n color = color,\n remote_object = True,\n object_hash = object_hash\n )", "def default_factory(*args, **kwargs):\n obj = RandomGameEntity()\n obj.build(*args, **kwargs)\n return obj", "def create_object(self):\n i = 0\n for i in range(0, self.objects_numbers):\n self.list_objects.insert(i, Obj(self, i))", "def __init__(self, from_game_object):\n GameObject.__init__(\n self,\n nickname = 'M', \n position = from_game_object.position, \n direction = from_game_object.direction,\n brain = MissileBrain(self),\n color = from_game_object.color\n )\n # record who created me, so i don't kill them by accident\n self.shot_from = from_game_object\n # state variable to control simple missile explosion animation\n self.die_cycle = 0", "def init_objects():\n gravity = Gravity()\n planet_stencil = PlanetStencil()\n return gravity, planet_stencil", "def __init__(self):\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._damage = 0\n self._gold_cost = 0\n self._health = 0\n self._mana_cost = 0\n self._moves = 0\n self._per_tile = 0\n self._range = 0\n self._title = \"\"", "def run(self):\n\n # Initialise the pygame display.\n pygame.init()\n pygame.mixer.init()\n self.renderer.initialise()\n\n # Create the game systems.\n self.entity_manager.register_component_system(physics.Physics())\n 
self.entity_manager.register_component_system(systems.FollowsTrackedSystem())\n self.entity_manager.register_component_system(systems.TrackingSystem())\n self.entity_manager.register_component_system(systems.LaunchesFightersSystem())\n self.entity_manager.register_component_system(systems.KillOnTimerSystem())\n self.entity_manager.register_component_system(systems.PowerSystem())\n self.entity_manager.register_component_system(systems.ShieldSystem())\n self.entity_manager.register_component_system(systems.TextSystem())\n self.entity_manager.register_component_system(systems.AnimSystem())\n self.entity_manager.register_component_system(systems.ThrusterSystem())\n self.entity_manager.register_component_system(systems.ThrustersSystem())\n self.entity_manager.register_component_system(systems.WaveSpawnerSystem())\n self.entity_manager.register_component_system(systems.CameraSystem())\n self.entity_manager.register_component_system(systems.TurretSystem())\n self.entity_manager.register_component_system(systems.TurretsSystem())\n self.entity_manager.register_component_system(systems.WeaponSystem())\n\n # Preload certain images.\n self.resource_loader.preload()\n\n # Make the camera.\n camera = self.entity_manager.create_entity_with(components.Camera,\n components.Body,\n components.Tracking,\n components.FollowsTracked)\n camera.get_component(components.FollowsTracked).follow_type = \"instant\"\n\n # Draw debug info if requested.\n self.game_services.debug_level = self.config.get_or_default(\"debug\", 0)\n\n # Make the player\n player = self.entity_manager.create_entity(\"player.txt\")\n camera.get_component(components.Tracking).tracked.entity = player\n\n # Create a view to pass to the input handling - this lets it map between\n # world and screen coordinates.\n view = drawing.CameraView(self.renderer, camera)\n\n # Make the input handling system.\n self.input_handling = input_handling.InputHandling(view, self.game_services)\n\n # Create the wave spawner.\n if not self.config.get_or_default(\"peaceful_mode\", False):\n self.entity_manager.register_component_system(systems.WaveSpawnerSystem())\n\n # Make it so that bullets can damage things.\n self.entity_manager.get_system(physics.Physics).add_collision_handler(\n DamageCollisionHandler()\n )\n\n # Set the scrolling background.\n self.drawing.set_background(\"res/images/857-tileable-classic-nebula-space-patterns/6.jpg\")\n\n # Run the game loop.\n self.running = True\n fps = 60\n clock = pygame.time.Clock()\n tick_time = 1.0/fps\n while self.running:\n\n # Has a load been requested?\n if self.want_load:\n self.entity_manager.load(open(\"space_game.save\", \"r\"))\n self.want_load = False\n\n ## Create any queued objects\n self.entity_manager.create_queued_objects()\n\n # If a pause has been scheduled then pause the game.\n if self.want_pause:\n self.want_pause = False\n self.entity_manager.pause()\n\n # If an unpause has been scheduled then unpause the game.\n if self.want_resume:\n self.want_resume = False\n self.entity_manager.unpause()\n\n # If a step has been scheduled then advance a frame and schedule a\n # pause.\n if self.want_step:\n self.entity_manager.unpause()\n self.want_pause = True\n self.want_step = False\n\n # Input\n for e in pygame.event.get():\n response = self.input_handling.handle_input(e)\n if response.quit_requested:\n self.running = False\n\n # Update the systems.\n self.entity_manager.update(tick_time)\n\n # Draw\n self.renderer.pre_render(view)\n self.drawing.draw(view)\n self.renderer.post_render()\n 
self.renderer.flip_buffers()\n\n # Maintain frame rate.\n clock.tick(fps)\n\n # Remember how long the frame took.\n limited_fps = 1.0/(clock.get_time() / 1000.0)\n raw_fps = 1.0/(clock.get_rawtime() / 1000.0)\n time_ratio = (1.0/fps) / (clock.get_time()/1000.0)\n self.game_services.info.update_framerate(limited_fps,\n raw_fps,\n time_ratio)\n\n # Finalise\n pygame.quit()", "def create_game(self):\n\n\t\tself.player_model.grid = []\n\t\tself.player_model.available_cells = []\n\n\t\tfor i in range(9):\n\t\t\tc = Cell(i, None)\n\t\t\tself.player_model.grid.append(c)\n\t\t\tself.player_model.available_cells.append(c)\n\n\t\tself.player_frame.setup_game(self.player_model.current_player.name)", "def _createObj(self) -> None:\n phase_img = skimage.img_as_float(skimage.data.camera())[::-1, ::-1]\n mod_img = skimage.img_as_float(skimage.data.immunohistochemistry()[:, :, 0])[::-1, ::-1]\n mod = skimage.transform.resize(mod_img, self.shape,\n mode='wrap', preserve_range=True)\n phase = skimage.transform.resize(phase_img, self.shape,\n mode='wrap', preserve_range=True)\n\n # Setting the ranges\n phase = (phase - np.min(phase)) / (np.max(phase) - np.min(phase)) * self.phase_range\n mod = (mod - np.min(mod)) / (np.max(mod) - np.min(mod)) * self.mod_range\n\n # Centering the phase at 0.\n phase = np.angle(np.exp(1j * (phase - scipy.stats.circmean(phase))))\n obj = (mod * np.exp(1j * phase)).astype('complex64')\n self._setObjArrayValues(obj)", "def get_factory():", "def createObject(self, *args):\n return _libsbml.FbcSpeciesPlugin_createObject(self, *args)", "def __init__(self):\n self.games = {} # Dict from gameIDs to game objects. Initially empty.\n self.players = {} # Dict from playerID to player name\n self._version = __version__ # Used in version check during un-pickling\n\n # Log initialization\n TournamentSystem._logger.debug(\"Initialized\")", "def _create_games(self):\n\n ''''''", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()", "def reset(self):\n self.objects = []\n hero = GameObject(self.__new_position(), 1, 1, 2, None, \"hero\")\n self.objects.append(hero)\n goal = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal)\n fire = GameObject(self.__new_position(), 1, 1, 0, -1, \"fire\")\n self.objects.append(fire)\n goal2 = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal2)\n fire2 = GameObject(self.__new_position(), 1, 1, 0, -1, \"fire\")\n self.objects.append(fire2)\n goal3 = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal3)\n goal4 = GameObject(self.__new_position(), 1, 1, 1, 1, \"goal\")\n self.objects.append(goal4)\n # print(self.objects)\n self.state = self.render_env()\n return self.state", "def setup(self):\n \n self.explosions_list = arcade.SpriteList()\n\n self.explosion_texture_list = []\n \n # Set up the score\n self.score = 0\n self.countdown = 1000\n\n for i in range(EXPLOSION_TEXTURE_COUNT):\n # Files from http://www.explosiongenerator.com are numbered sequentially.\n # This code loads all of the explosion0000.png to explosion0270.png files\n # that are part of this explosion.\n texture_name = f\"images/explosion/explosion{i:04d}.png\"\n\n 
self.explosion_texture_list.append(arcade.load_texture(texture_name))\n \n # create 10 balls\n for i in range(10):\n myball = make_ball()\n self.ball_list.append(myball)", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def build(self, factory, *factory_args, **factory_kw):\n return self._instantiate(\"\", factory, factory_args, factory_kw)" ]
[ "0.5683385", "0.56730527", "0.56672084", "0.564122", "0.5577839", "0.55421233", "0.55357003", "0.55088884", "0.5471441", "0.54373586", "0.5369217", "0.53011876", "0.5292592", "0.5270018", "0.5170161", "0.5158009", "0.5140437", "0.51247996", "0.5097501", "0.50773364", "0.50667053", "0.5053791", "0.5040511", "0.502451", "0.5013984", "0.5009459", "0.49735838", "0.49567813", "0.49554893", "0.49511033" ]
0.740402
0
Return a vector of size self.n_voxels. See mode options below.
def get_feature_vector(self, mode="binary"): voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z) if mode == "binary": vector = np.zeros(self.n_voxels) vector[np.unique(voxel_n)] = 1 vector = vector.reshape(self.x_y_z) return vector elif mode == "binary_with_nopoints": vector = np.zeros(self.n_voxels) vector[np.unique(voxel_n)] = 1 vector = vector.reshape(self.x_y_z) tot_bounds = abs(self.bounds[0]) + abs(self.bounds[1]) # TODO can be parallelised non_points = [] for point in self.points_inside_bounds: start, end = get_points_from_bounds(self.bounds[0], self.bounds[1], self.origin, point) start_projected_voxelgrid = (start - self.bounds[0]) end_projected_voxelgrid = (end - self.bounds[0]) assert np.all(start_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid + PRECISION, tot_bounds) assert np.all(end_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid + PRECISION, tot_bounds) assert np.all(start_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid, tot_bounds) assert np.all(end_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid, tot_bounds) start_projected_voxelgrid = np.clip(start_projected_voxelgrid, 0, tot_bounds - PRECISION) end_projected_voxelgrid = np.clip(end_projected_voxelgrid, 0, tot_bounds - PRECISION) new_non_points = list(supercover_line(start_projected_voxelgrid, end_projected_voxelgrid, self.sizes)) non_points.extend(new_non_points) # if not np.all(np.array(new_non_points) >= 0) or not np.all(np.array(new_non_points).max(axis=0) < vector.shape): # print('Non-point detected with indices under 0 or over size') # print('start = {}'.format(start_projected_voxelgrid)) # print('end = {}'.format(end_projected_voxelgrid)) # print('Max Size: {}'.format(vector.shape)) # print('Wrong points:') # print(np.array(new_non_points)) # raise Exception() # convert only cells that are 0 to -1, NOT 1 to -1 non_points = np.unique(np.array(non_points), axis=0).astype(int) temp = vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]] temp[temp == 0] = -1 vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]] = temp return vector elif mode == "density": vector = np.zeros(self.n_voxels) count = np.bincount(voxel_n) vector[:len(count)] = count vector /= len(voxel_n) vector = vector.reshape(self.x_y_z) return vector # elif mode == "TDF": # vector = np.zeros(self.n_voxels) # # truncation = np.linalg.norm(self.shape) # kdt = cKDTree(self.points_inside_bounds) # vector, i = kdt.query(self.voxel_centers, n_jobs=-1) # vector = vector.reshape(self.x_y_z) # return vector elif mode.endswith("_max"): vector = np.zeros(self.n_voxels) if not is_numba_avaliable: raise ImportError("numba is required to compute {}".format(mode)) axis = {"x_max": 0, "y_max": 1, "z_max": 2} vector = groupby_max(self.points_inside_bounds, voxel_n, axis[mode], vector) vector = vector.reshape(self.x_y_z) return vector elif mode.endswith("_mean"): vector = np.zeros(self.n_voxels) if not is_numba_avaliable: raise ImportError("numba is required to compute {}".format(mode)) axis = {"x_mean": 0, "y_mean": 1, "z_mean": 2} voxel_sum = groupby_sum(self.points_inside_bounds, voxel_n, axis[mode], np.zeros(self.n_voxels)) voxel_count = 
groupby_count(self.points_inside_bounds, voxel_n, np.zeros(self.n_voxels)) vector = np.nan_to_num(voxel_sum / voxel_count) vector = vector.reshape(self.x_y_z) return vector else: raise NotImplementedError("{} is not a supported feature vector mode".format(mode))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_feature_vector(self, mode=\"binary\"):\n vector = np.zeros(self.n_voxels)\n\n if mode == \"binary\":\n vector[np.unique(self.voxel_n)] = 1\n\n elif mode == \"density\":\n count = np.bincount(self.voxel_n)\n vector[:len(count)] = count\n vector /= len(self.voxel_n)\n\n elif mode == \"TDF\":\n # truncation = np.linalg.norm(self.shape)\n kdt = cKDTree(self._points)\n vector, i = kdt.query(self.voxel_centers, n_jobs=-1)\n\n elif mode.endswith(\"_max\"):\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_max\": 0, \"y_max\": 1, \"z_max\": 2}\n vector = groupby_max(self._points, self.voxel_n, axis[mode], vector)\n\n elif mode.endswith(\"_mean\"):\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_mean\": 0, \"y_mean\": 1, \"z_mean\": 2}\n voxel_sum = groupby_sum(self._points, self.voxel_n, axis[mode], np.zeros(self.n_voxels))\n voxel_count = groupby_count(self._points, self.voxel_n, np.zeros(self.n_voxels))\n vector = np.nan_to_num(voxel_sum / voxel_count)\n\n else:\n raise NotImplementedError(\"{} is not a supported feature vector mode\".format(mode))\n\n return vector.reshape(self.x_y_z)", "def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv", "def xvec(self):\n return self._xvec", "def numpy_vector(self):\n pass", "def vector(self):\n return self.__vector", "def vel_inicial(x): #Velocidad inicial como un vector de ceros\r\n return np.zeros_like(x)", "def f_vector(self):\n try:\n return self._f_vector\n except AttributeError:\n self._f_vector = vector(ZZ,[len(x) for x in self.face_lattice().level_sets()])\n return self._f_vector", "def vec_x(self):\t\r\n if self.ox != 0:\r\n ov = self.ox\r\n lv = self.self.lx + self.ox\r\n else:\r\n ov = self.dx / 2\r\n lv = self.lx\r\n\r\n xv = \"\"\r\n for num in np.arange(ov, lv, self.dx):\r\n xv += str(num) + \" \"\r\n\r\n return xv", "def boxVectors(self):\n return self.box_vectors", "def vec(self):\n return np.matrix(self.val.ravel()).transpose()", "def is_vectorvox(self):\n return bool(self.gxvox.is_vector_voxel())", "def vnEx(self):\n return np.array(\n [x for x in [self.nCx, self.nNy, self.nNz] if x is not None],\n dtype=int\n )", "def vec_node(self):\r\n\r\n xv = np.arange(self.ox, self.lx + self.ox + self.dx, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy + self.dy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz + self.dz, self.dz)\r\n\r\n return xv, yv, zv", "def __call__(self):\n return self._vector", "def xvec(self):\n return np.array([self.x, self.y])", "def getVoxelSize(self):\n\t\treturn self.voxelsize", "def vnE(self):\n return np.array(\n [x for x in [self.nEx, self.nEy, self.nEz] if x is not None],\n dtype=int\n )", "def return_vec(self) :\r\n y_vec = np.concatenate((self.x_vec,self.v_vec))\r\n return y_vec", "def AsVector(self) -> ngsolve.la.BaseVector:", "def as_vector(self):\n return self.pdm.as_vector()", "def V(self):\n return self._V", "def V(self):\n return self._V", "def V(self):\n return self._V", "def vnFx(self):\n return np.array(\n [x for x in [self.nNx, self.nCy, self.nCz] if x is not None],\n dtype=int\n )", "def 
vnEy(self):\n if self.dim < 2:\n return None\n return np.array(\n [x for x in [self.nNx, self.nCy, self.nNz] if x is not None],\n dtype=int\n )", "def rvs(self, size: int) -> np.ndarray:\n raise NotImplementedError", "def v(self) -> np.ndarray:\n return self.array[:, 1:] if self.scalar_vector else self.array[:, :3]", "def velocity_field(self):\n return scipy.dstack((self._u_int, self._v_int))", "def V(self):\n return len(self.voc)", "def V(self):\n return len(self.voc)" ]
[ "0.717673", "0.6309085", "0.628692", "0.62847066", "0.6244491", "0.6162953", "0.6134086", "0.60895526", "0.60499966", "0.60194033", "0.6017152", "0.6012891", "0.5983854", "0.59729743", "0.59357184", "0.59262997", "0.5916878", "0.5905484", "0.5902124", "0.588724", "0.5847988", "0.5847988", "0.5847988", "0.58387434", "0.5828474", "0.582148", "0.57959247", "0.5793348", "0.57641184", "0.57641184" ]
0.65292823
1
Get valid, nonempty 26 neighbors of voxel.
def get_voxel_neighbors(self, voxel): x, y, z = np.unravel_index(voxel, self.x_y_z) valid_x = [] valid_y = [] valid_z = [] if x - 1 >= 0: valid_x.append(x - 1) if y - 1 >= 0: valid_y.append(y - 1) if z - 1 >= 0: valid_z.append(z - 1) valid_x.append(x) valid_y.append(y) valid_z.append(z) if x + 1 < self.x_y_z[0]: valid_x.append(x + 1) if y + 1 < self.x_y_z[1]: valid_y.append(y + 1) if z + 1 < self.x_y_z[2]: valid_z.append(z + 1) valid_neighbor_indices = cartesian((valid_x, valid_y, valid_z)) ravel_indices = np.ravel_multi_index((valid_neighbor_indices[:, 0], valid_neighbor_indices[:, 1], valid_neighbor_indices[:, 2]), self.x_y_z) voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z) return [x for x in ravel_indices if x in np.unique(voxel_n)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_voxel_neighbors(self, voxel):\n\n x, y, z = np.unravel_index(voxel, self.x_y_z)\n\n valid_x = []\n valid_y = []\n valid_z = []\n if x - 1 >= 0:\n valid_x.append(x - 1)\n if y - 1 >= 0:\n valid_y.append(y - 1)\n if z - 1 >= 0:\n valid_z.append(z - 1)\n\n valid_x.append(x)\n valid_y.append(y)\n valid_z.append(z)\n\n if x + 1 < self.x_y_z[0]:\n valid_x.append(x + 1)\n if y + 1 < self.x_y_z[1]:\n valid_y.append(y + 1)\n if z + 1 < self.x_y_z[2]:\n valid_z.append(z + 1)\n\n valid_neighbor_indices = cartesian((valid_x, valid_y, valid_z))\n\n ravel_indices = np.ravel_multi_index((valid_neighbor_indices[:, 0],\n valid_neighbor_indices[:, 1],\n valid_neighbor_indices[:, 2]), self.x_y_z)\n\n return [x for x in ravel_indices if x in np.unique(self.voxel_n)]", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def neighbors(self):\n return self.mesh.neighbors()", "def checkNumNeighbors():", "def get_neighbours(self):\n return self.points_to.keys()", "def neighbors(self, x):\n pass", "def get_all_neighbors(self):\n m, n = self.board.shape\n return as_strided(self.expanded_board,\n shape = (m,n,3,3), \n strides = self.expanded_board.strides + self.expanded_board.strides)", "def get_neighbours(self):\n return []", "def find_valid_neighbours(self, cell):\n\n delta = [('W', (-1, 0)),\n ('E', (1, 0)),\n ('S', (0, 1)),\n ('N', (0, -1))]\n neighbours = []\n for direction, (dx, dy) in delta:\n x2, y2 = cell.x + dx, cell.y + dy\n if (0 <= x2 < self.nx) and (0 <= y2 < self.ny):\n neighbour = self.cell_at(x2, y2)\n if neighbour.has_all_walls():\n neighbours.append((direction, neighbour))\n return neighbours", "def get_valid_neighbors(self, x, y):\n\t\tx_1, x_2 = max(x-1, 0), min(x+1, self.width-1)\n\t\ty_1, y_2 = max(y-1, 0), min(y+1, self.height-1)\n\t\treturn [(n, m) for n in range(x_1, x_2+1) \n\t\t for m in range(y_1, y_2+1) if x != n or y != m]", "def possibleNeighbors(self, col, row):\n neighbors = [(col, row-1), (col-1, row), (col+1, row), (col, row+1)]\n valid_neighbors = []\n for (ncol, nrow) in neighbors:\n if ncol >= 0 and ncol < self.cols and \\\n nrow >= 0 and nrow < self.rows:\n valid_neighbors.append((ncol, nrow))\n return valid_neighbors", "def get_neighbors(self):\n \n return self.adjacent.keys()", "def neighbors(self):\n return self._neighbors", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = 
position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def test_get_neighbours(self):\n self.assertEqual(self.game.get_neighbours(2,2), [[1, 1], [1, 2], [1, 3], \n [2, 1], [2, 3], [3, 1], [3, 2], [3, 3]])\n self.assertEqual(self.game.get_neighbours(0,0), [[0, 1], [1, 0], [1, 1]])\n self.assertEqual(self.game.get_neighbours(44,0), [[43, 0], [43, 1], [44, 1]])\n self.assertEqual(self.game.get_neighbours(45,0), [])\n self.assertEqual(self.game.get_neighbours(44,89), [[43, 88], [43, 89], [44, 88]])", "def _valid_neighbors(location, some_num):\n xloc, yloc = location\n vector = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n ret_v = []\n for vect in vector:\n xpos = xloc + vect[0]\n ypos = yloc + vect[1]\n if xpos <= 0 or ypos <= 0:\n continue\n if xpos > some_num or ypos > some_num:\n continue\n ret_v.append((xpos, ypos))\n return ret_v", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def get_neighbors(self, x, y):\n neighbors = [False]*8\n if x != 0 and y != 0:\n neighbors[0] = self._board[x-1][y-1]\n if y != 0:\n neighbors[1] = self._board[x][y-1]\n if x != (self._dim-1) and y != 0:\n neighbors[2] = self._board[x+1][y-1]\n if x != 0:\n neighbors[3] = self._board[x-1][y]\n if x != (self._dim-1):\n neighbors[4] = self._board[x+1][y]\n if y != (self._dim-1) and x != 0:\n neighbors[5] = self._board[x-1][y+1]\n if y != (self._dim-1):\n neighbors[6] = self._board[x][y+1]\n if x != (self._dim-1) and y != (self._dim-1):\n neighbors[7] = self._board[x+1][y+1]\n return neighbors", "def get_neighbours(self):\n return self.neighbours", "def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours", "def eight_neighbors(self, row, col):\n ans = []\n if row > 0:\n ans.append((row - 1, col))\n if row < self._grid_height - 1:\n ans.append((row + 1, col))\n if col > 0:\n ans.append((row, col - 1))\n if col < self._grid_width - 1:\n ans.append((row, col + 1))\n if (row > 0) and (col > 0):\n ans.append((row - 1, col - 1))\n if (row > 0) and (col < self._grid_width - 1):\n ans.append((row - 1, col + 1))\n if (row < self._grid_height - 1) and (col > 0):\n ans.append((row + 1, col - 1))\n if (row < self._grid_height - 1) and (col < self._grid_width - 1):\n ans.append((row + 1, col + 1))\n return ans", "def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]", "def get_neighbors(self):\n return self.neighbours_names", "def get_neighbours(self):\n return self._neighbours" ]
[ "0.687631", "0.6724855", "0.67223924", "0.6670258", "0.65394425", "0.6517443", "0.6467586", "0.64275324", "0.64233154", "0.6421244", "0.64047986", "0.6391682", "0.6388574", "0.6371847", "0.63681686", "0.6354173", "0.63498", "0.63498", "0.63498", "0.63498", "0.63398826", "0.6336188", "0.6327251", "0.6321364", "0.6314322", "0.630246", "0.6297278", "0.6296888", "0.6294192", "0.6286609" ]
0.6917683
0
it'll call get_all_data from service module and return all students data
def get_all_data(): return jsonify(service.get_all_data())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_data(self):", "def get_all_records(self, data: dict, execution_context: dict):", "def test_get_Student_bulk(self):\n school_ids = self.create_School(2,20)\n url = '/students'\n for i in range(10):\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 10)\n self.assertEqual(response.data['results'][0]['first_name'], 'Poompatai')\n self.assertEqual(response.data['results'][0]['last_name'], 'Puntitpong')\n self.assertEqual(response.data['results'][0]['age'], 20)\n self.assertEqual(response.data['results'][0]['nationality'], 'Thailand')\n self.assertEqual(response.data['results'][0]['school'], school_ids[0])", "def student_view_data(self):\n def get_student_profile_data():\n # pylint: disable=no-member\n \"\"\"\n Returns profile data for all students on the course.\n \"\"\"\n try:\n regexp_string = self.regexp_from_users_included_email(self.users_included_email)\n re.compile(regexp_string)\n users = self.students_for_course(regexp_string)\n except:\n log.info(\"regexp is invalid: '%s'\", regexp_string)\n users = []\n\n for user in users:\n student_id = anonymous_id_for_user(user, self.course_id)\n profile = user.profile\n\n vip = self.get_vip(user)\n image_url = None\n if vip:\n image_url = \"https://my.imd.org/api/profile/{}/profile-picture-header\".format(vip)\n else:\n if self.is_course_staff:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header-no-vip.gif')\n else:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header.gif')\n\n cohort_name = None\n if (self.is_course_cohorted(self.course_id)):\n cohort_name = self.get_cohort(user, self.course_id).name\n\n yield {\n 'student_id': student_id,\n 'username': user.username,\n 'fullname': profile.name,\n 'vip': vip,\n 'image_url': image_url,\n 'email': user.email,\n 'cohort_name': cohort_name,\n }\n\n return {\n 'student_profile_list': list(get_student_profile_data()),\n 'display_name': self.display_name,\n 'username': self.logged_in_username,\n 'course_is_cohorted': self.enable_cohorts and self.is_course_cohorted(self.course_id),\n 'profile_display': {\n 'profile_display_job_title': self.profile_display_job_title,\n 'profile_display_organisation': self.profile_display_organisation,\n 'profile_display_work_country': self.profile_display_work_country,\n 'profile_display_email_button': self.profile_display_email_button,\n 'profile_display_bio': self.profile_display_bio,\n },\n }", "def test_return_all(self):\n self.data.return_all()", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.first_name,\n s.Last_name,\n s.slack_handle,\n s.cohort_id,\n c.name\n from students s\n join cohorts c on s.cohort_id = c.id\n order by s.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Students***')\n\n for student in all_students:\n print(student)", "def test_get_students_for_contact(self):\n pass", "def get_students(self):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\"SELECT * FROM studenten\")\n self.cur.close()\n\n return self.cur.fetchall()", "def 
all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.StudentId,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.CohortId\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n for student in all_students:\n print(student)", "def me():\n try:\n student_data = models.get_student_data(request.student_id)\n except Exception as e:\n print(e)\n return server_error()\n\n return success(student_data)", "def get_data():\n pass", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n for student in all_students:\n print(student)", "def get_all_students(hospital_codes, results_codes):\n data = pd.read_csv(\"res/Internship Lottery_April 8, 2018_11.54_correct encoding.csv\", encoding='iso-8859-8')\n students = []\n for i in range(2, 241):\n student = get_student(i + 2, data.iloc[i], hospital_codes, results_codes)\n if student is not None:\n students.append(student)\n\n return students", "def _getStudentEntries(self, program_entity, student_entity,\n params, id, user, prefix):\n\n items = []\n\n timeline_entity = program_entity.timeline\n\n if timeline_helper.isAfterEvent(timeline_entity,\n 'student_signup_start'):\n # add a link to show all projects\n items += [(ghop_redirects.getListStudentTasksRedirect(\n program_entity, {'url_name':'ghop/student'}),\n \"List my Tasks\", 'any_access')]\n\n items += super(View, self)._getStudentEntries(program_entity,\n student_entity, params, id, user, prefix)\n\n return items", "def get_data(self):", "def get_all(self, name):\n\t\tpass", "def _fetch_data(self):\n pass", "def list(self, request):\n queryset = Students.objects.filter(average_rating=5.0)\n students = normalize_students(queryset)\n return Response(students)", "def getAllData(self):\r\n return self.data", "def get_students(\n self,\n gradebook_id='',\n simple=False,\n section_name='',\n include_photo=False,\n include_grade_info=False,\n include_grade_history=False,\n include_makeup_grades=False\n ):\n # These are parameters required for the remote API call, so\n # there aren't too many arguments, or too many variables\n # pylint: disable=too-many-arguments,too-many-locals\n\n # Set params by arguments\n params = dict(\n includePhoto=json.dumps(include_photo),\n 
includeGradeInfo=json.dumps(include_grade_info),\n includeGradeHistory=json.dumps(include_grade_history),\n includeMakeupGrades=json.dumps(include_makeup_grades),\n )\n\n url = 'students/{gradebookId}'\n if section_name:\n group_id, _ = self.get_section_by_name(section_name)\n if group_id is None:\n failure_message = (\n 'in get_students -- Error: '\n 'No such section %s' % section_name\n )\n log.critical(failure_message)\n raise PyLmodNoSuchSection(failure_message)\n url += '/section/{0}'.format(group_id)\n\n student_data = self.get(\n url.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n params=params,\n )\n\n if simple:\n # just return dict with keys email, name, section\n student_map = dict(\n accountEmail='email',\n displayName='name',\n section='section'\n )\n\n def remap(students):\n \"\"\"Convert mit.edu domain to upper-case for student emails.\n\n The mit.edu domain for user email must be upper-case,\n i.e. MIT.EDU.\n\n Args:\n students (list): list of students\n\n Returns:\n dict: dictionary of updated student email domains\n \"\"\"\n newx = dict((student_map[k], students[k]) for k in student_map)\n # match certs\n newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU')\n return newx\n\n return [remap(x) for x in student_data['data']]\n\n return student_data['data']", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_2 = marks.filter(name='2')\n mark_id_3 = marks.filter(name='3')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n\n counter_2 = 0\n counter_3 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n if str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n\n elif str(mark) == str(mark_id_pass[0].id):\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_2 == 0 and counter_3 == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def get(self):\r\n return get_all()", "def get_students(self):\n dist_on_foot = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_foot')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Run, ActivityType.Walk])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n dist_on_bike = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_bike')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Ride])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n data = db.session.query(User, dist_on_foot.c.on_foot, dist_on_bike.c.on_bike). \\\n select_from(User). \\\n outerjoin(dist_on_foot, User.id == dist_on_foot.c.user_id). \\\n outerjoin(dist_on_bike, User.id == dist_on_bike.c.user_id). \\\n filter(User.type == UserType.Student). 
\\\n order_by(User.last_name.asc(), User.first_name.asc())\n\n result = []\n for row in data:\n on_foot = row.on_foot or 0\n on_bike = row.on_bike or 0\n item = {\n 'name': row.User.first_name + ' ' + row.User.last_name,\n 'uk id': row.User.uk_id,\n 'on foot': round(on_foot, 1),\n 'on bike': round(on_bike, 1),\n 'points': round(on_foot + on_bike / 2, 2)\n }\n result.append(item)\n return result", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_not_appointed = marks.filter(name='Неявка')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n \n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_not_appointed[0].id):\n counter_all += 1\n\n if counter_all == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)", "def get_all(self):\n self.data = list(self.data)\n return self.data", "def list(self, request):\n\n marks = Marks.objects.all()\n\n mark_id_2 = marks.filter(name='2')\n mark_id_3 = marks.filter(name='3')\n mark_id_4 = marks.filter(name='4')\n mark_id_pass = marks.filter(name='Зачтено')\n mark_id_not_pass = marks.filter(name='Незачтено')\n\n records = filter_against_records(request)\n \n students = Students.objects.all()\n students_to_return = []\n \n for student in students:\n student_records = records.filter(student_id=student.id)\n\n counter_2 = 0\n counter_3 = 0\n counter_4 = 0\n counter_all = 0\n\n for record in student_records:\n mark_dict = model_to_dict(record.mark_id) \n mark = mark_dict['id'] \n\n if str(mark) == str(mark_id_2[0].id):\n counter_2 += 1\n counter_all += 1\n\n if str(mark) == str(mark_id_3[0].id):\n counter_3 += 1\n counter_all += 1\n \n if str(mark) == str(mark_id_4[0].id):\n counter_4 += 1\n counter_all += 1\n\n else:\n counter_all += 1\n\n if counter_all == len(student_records) and len(student_records) > 0 and counter_2 == 0 and counter_3 == 0 and counter_4 == 1:\n students_to_return.append(student)\n \n students_to_send = normalize_students(students_to_return)\n return Response(students_to_send)" ]
[ "0.63158745", "0.62640435", "0.62553257", "0.62480736", "0.61972684", "0.61808133", "0.6172427", "0.61494964", "0.6140303", "0.6039809", "0.6035919", "0.60292524", "0.6018947", "0.59919596", "0.59866506", "0.59833336", "0.5977811", "0.59730965", "0.592698", "0.58814895", "0.58799165", "0.5867149", "0.5867149", "0.5867149", "0.58362305", "0.5835724", "0.5829431", "0.5825738", "0.5809982", "0.58043647" ]
0.6492461
0
Run through new_schm and add any fields not in old_schm to old_schm.
def merge_schemas(self, old_schm, new_schm): old_schm_cols = [x['name'] for x in old_schm] for col in new_schm: if type(col) == dict: if col['name'] not in old_schm_cols: old_schm.append(col) for count, old_col in enumerate(old_schm): for meta in old_col: if type(old_col[meta]) == list: if old_col['name'] in [pot_new_col['name'] for pot_new_col in new_schm]: new_col = [pot_new_col for pot_new_col in new_schm if pot_new_col['name'] == old_col['name']][0] if meta in new_col: old_schm[count][meta] = self.merge_schemas(old_col[meta], new_col[meta]) return old_schm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_from(self, src):\n changed = {}\n for name in self.action_fields:\n other_field = getattr(src, name)\n this_field = getattr(self, name)\n if other_field != this_field:\n changed[name] = other_field\n setattr(self, name, other_field)\n return changed", "def _AddNewColsToSchema(new_fields, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for new_field in new_fields:\n if new_field.name in orig_schema_map:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n updated_schema_map[new_field.name] = new_field\n return updated_schema_map", "def update_to(self, new):\r\n if self.idhex != new.idhex:\r\n plog(\"ERROR\", \"Update of router \"+self.nickname+\"changes idhex!\")\r\n for i in new.__dict__.iterkeys():\r\n if i == \"refcount\" or i == \"_generated\": continue\r\n self.__dict__[i] = new.__dict__[i]", "def _metrics_update(orig, new):\n revsd = orig\n for k, v in orig.items():\n if not v:\n revsd[k] = new[k]\n elif new[k]:\n if new[k] != v:\n # LOG ME, do something\n print(orig)\n print(new)\n elif not new[k] or v:\n pass\n else:\n raise Exception(\"_metrics_update error\")\n return revsd", "def copy_menu_from_previous_event(previous_event, new_event):\n for obj in previous_event.menu.all():\n new_obj = obj\n new_obj.pk = None\n new_obj.event = new_event\n new_obj.save()", "def update_schedule(old_holidays: List[str], new_holidays: dict):\n for holiday in new_holidays:\n trash_service.update({'name': holiday, 'routeDelays': new_holidays[holiday]})\n if holiday in old_holidays:\n old_holidays.remove(holiday)\n delete_holidays(old_holidays)", "def copy_content_from_previous_event(previous_event, new_event):\n previous_event.refresh_from_db()\n for obj in previous_event.content.all():\n new_content = obj\n new_content.id = None\n new_content.event = new_event\n new_content.save()", "def lsdb_diff(lsdb_new, lsdb_old) :\n\n nnei = convert_lsdb_to_neighbor_set(lsdb_new)\n onei = convert_lsdb_to_neighbor_set(lsdb_old)\n\n lines = []\n new_adjacent = set()\n rem_adjacent = set()\n\n # find new adjacency\n for router_id, nei_set in nnei.items() :\n if not router_id in onei :\n lines.append(\"New Router %s with Neighbor %s\" %\n (router_id, ' '.join(nei_set)))\n else :\n new_nei = nei_set - onei[router_id]\n \n for nei in new_nei :\n new_adjacent.add(' '.join(sorted([router_id, nei])))\n\n for new_adj in new_adjacent :\n lines.append(\"New Adjacency %s\" % new_adj)\n\n\n # find removed adjacency\n for router_id, nei_set in onei.items() :\n if not router_id in nnei :\n lines.append(\"Removed Router %s with Neighbor %s\" %\n (router_id, ' '.join(nei_set)))\n else :\n rem_nei = nei_set - nnei[router_id]\n for nei in rem_nei :\n rem_adjacent.add(' '.join(sorted([router_id, nei])))\n\n for rem_adj in rem_adjacent :\n lines.append(\"Removed Adjacency %s\" % rem_adj)\n\n return lines", "def trello_updates(new, old):\n try:\n return {k: (v, new[k]) for k, v in old.iteritems()}\n except KeyError:\n return {k: (v, None) for k, v in old.iteritems()}", "def removeOldItems(self):\n pass", "def clone(old):\n new_kwargs = dict([(fld.name, getattr(old, fld.name))\n for fld in old._meta.fields\n if not isinstance(fld, JeevesForeignKey)])\n ans = old.__class__(**new_kwargs)\n for fld in old._meta.fields:\n if isinstance(fld, JeevesForeignKey):\n setattr(ans, fld.attname, getattr(old, fld.attname))\n return ans", "def old(self, old):\n\n self._old = old", "def move(self, old, new):\n if old == new:\n return\n\n rawoldpre = self.keeper.getPre(key=old)\n if rawoldpre is None:\n raise 
ValueError(\"Nonexistent old pre={}, nothing to assign.\".format(old))\n\n rawnewpre = self.keeper.getPre(key=new)\n if rawnewpre is not None:\n raise ValueError(\"Preexistent new pre={} may not clobber.\".format(new))\n\n rawoldprm = self.keeper.getPrm(key=old)\n if rawoldprm is None:\n raise ValueError(\"Nonexistent old prm for pre={}, nothing to move.\".format(old))\n\n rawnewprm = self.keeper.getPrm(key=new)\n if rawnewprm is not None:\n raise ValueError(\"Preexistent new prm for pre={} may not clobber.\".format(new))\n\n rawoldsit = self.keeper.getSit(key=old)\n if rawoldsit is None:\n raise ValueError(\"Nonexistent old sit for pre={}, nothing to move.\".format(old))\n\n rawnewsit = self.keeper.getSit(key=new)\n if rawnewsit is not None:\n raise ValueError(\"Preexistent new sit for pre={} may not clobber.\".format(new))\n\n if not self.keeper.putPrm(key=new, val=bytes(rawoldprm)):\n raise ValueError(\"Failed moving prm from old pre={} to new pre={}.\".format(old, new))\n else:\n self.keeper.delPrm(key=old)\n\n if not self.keeper.putSit(key=new, val=bytes(rawoldsit)):\n raise ValueError(\"Failed moving sit from old pre={} to new pre={}.\".format(old, new))\n else:\n self.keeper.delSit(key=old)\n\n # move .pubs entries if any\n i = 0\n while (pl := self.keeper.getPubs(key=riKey(old, i))):\n if not self.keeper.putPubs(key=riKey(new, i), val=pl):\n raise ValueError(\"Failed moving pubs at pre={} ri={} to new pre={}\".format())\n i += 1\n\n # assign old\n if not self.keeper.setPre(key=old, val=new):\n raise ValueError(\"Failed assiging new pre={} to old pre={}.\".format(new, old))\n\n # make new so that if move again we reserve each one\n if not self.keeper.putPre(key=new, val=new):\n raise ValueError(\"Failed assiging new pre={}.\".format(new))", "def export_changelog(self, c_id, matches_old, matches_new):\n def entry_str(s_idx, p_idx):\n s_idx += 2 # convert from UTC to UTC+1\n d_idx = s_idx//SLOT_NUM\n s_idx = s_idx%SLOT_NUM\n return '{} {}, {}\\n'.format(\n self._get_day_str(d_idx), self._get_slot_str(s_idx),\n self.pod_info['name'][p_idx],\n )\n\n print('\\npreview of old and new matches')\n print('[old]:')\n m_matches_old = self.get_mentor_centered_view(matches_old)\n print('[new]:')\n m_matches_new = self.get_mentor_centered_view(matches_new)\n\n count, out_strs = 0, []\n for mentor_email in sorted(self.mentor_info['email']):\n set_old = set(m_matches_old[mentor_email]) if mentor_email in m_matches_old else set()\n set_new = set(m_matches_new[mentor_email]) if mentor_email in m_matches_new else set()\n\n if set_new!=set_old:\n count += 1\n m_idx = self.mentor_info['email'].index(mentor_email)\n out_strs.append('\\n{} {}, {}, #pod {} --> {}\\n'.format(\n self.mentor_info['first_name'][m_idx],\n self.mentor_info['last_name'][m_idx],\n mentor_email, len(set_old), len(set_new)\n ))\n to_remove = set_old.difference(set_new)\n to_add = set_new.difference(set_old)\n if to_remove:\n out_strs.append('remove\\n')\n for s_idx, p_idx in to_remove:\n out_strs.append(entry_str(s_idx, p_idx))\n if to_add:\n out_strs.append('add\\n')\n for s_idx, p_idx in to_add:\n out_strs.append(entry_str(s_idx, p_idx))\n out_strs = ['Change Log\\n', '{} mentors affected\\n'.format(count)]+out_strs\n with open(f'change.log_{c_id}.txt', 'w') as f:\n for out_str in out_strs:\n f.write(out_str)", "def get_changes(old_obj: Dict, new_obj: Dict):\n from_ = {}\n to_ = {}\n for key, value in new_obj.items():\n if \"_hash\" not in key:\n if key not in old_obj:\n to_[key] = value\n elif old_obj[key] != value:\n from_[key] 
= old_obj[key]\n to_[key] = value\n return {\"from\": from_, \"to\": to_}", "def subjectsChanged(self, other_graph):\n # FIXME dispatch on OntRes ?\n\n # the case where an external process (e.g. editing in protege)\n # has caused a change in the elements used to calculate the id\n # of the class\n\n # FIXME mapped but a change to the structure of the class has\n # cause a change in the identity of the class\n # in which case the old hasTemporaryId should still be attached\n\n #temporary_id_changed = [e for e in self[:ilxtr.hasTemporaryId:] not_mapped ]\n #changed_classes = [(s,\n #ilxtr.hasTemporaryId,\n #o) for s, o in self[:ilxtr.hasTemporaryId:]]\n\n sid = {s:self.subjectIdentity(s) for s in set(self.named_subjects())}\n osid = {s:other_graph.subjectIdentity(s) for s in set(other_graph.named_subjects())}\n ssid = set(sid)\n sosid = set(osid)\n added = not_in_other = ssid - sosid\n removed = not_in_self = sosid - ssid\n changed = [(s, osid[s], i) for s, i in sid.items() if s in osid and i != osid[s]]\n return added, removed, changed", "def update_model_output(self):\n warnings.warn(\"Please ensure that the column names of the new file accurately corresponds to the relevant column names in the exisitng file\")\n column_names_new = self.new_data.head()\n column_names_old = self.existing_data.head()\n for column_name in column_names_new:\n if column_name in column_names_old:\n self.existing_data[column_name] = self.new_data[column_name]\n \n self.existing_data.to_csv(filename_main, index = False)", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def merge(self, mst, new_id, old_edges, cycle):\n succs = dict((n, []) for n in self)\n for source, target in mst.iteredges():\n if source == new_id:\n # this edge points out of the cycle into the mst. there might be\n # more than one of these. use the old_edges to find out which\n # cycle node is responsible for this edge, and add it.\n logging.debug(\"%s -> %s: cycle -> mst\", source, target)\n for s, ts in old_edges.items():\n for t in ts:\n if t == target:\n succs[s].append(t)\n\n elif target == new_id:\n # this edge points at the cycle. 
use the old_edges to find out\n # where in the cycle it points, then add all the edges in the\n # cycle except the one that completes the loop.\n logging.debug(\"%s -> %s: mst -> cycle\", source, target)\n targets = old_edges[source]\n assert len(targets) == 1, targets\n target = targets[0]\n succs[source].append(target)\n cycle_source = target\n cycle_target = cycle.successors[cycle_source][0]\n while cycle_target != target:\n succs[cycle_source].append(cycle_target)\n cycle_source = cycle_target\n cycle_target = cycle.successors[cycle_source][0]\n\n else:\n # this edge is completely in the mst, so add it and move on.\n logging.debug(\"%s -> %s: in mst\", source, target)\n succs[source].append(target)\n\n return Digraph(succs, self.get_score, self.get_label)", "def merge_lines(old_lines, new_dict):\n old_dict = collections.OrderedDict()\n for key, value in old_lines:\n old_dict.setdefault(key, []).append(value)\n\n old_keys = set(old_dict)\n\n del_keys = {k for k, v in new_dict.iteritems() if not v}\n new_keys = ({k for k, v in new_dict.iteritems() if v} | old_keys) - del_keys\n\n # delete keys\n new_lines = [(k, v) for k, v in old_lines if k in new_keys]\n\n for change_key in (new_keys & old_keys):\n insert_idx = None\n to_nuke = set()\n for i, (k, v) in enumerate(new_lines):\n if k == change_key:\n if insert_idx is None:\n insert_idx = i\n to_nuke.add(i)\n assert to_nuke # because it's in old_keys\n new_lines = [(k, v) for i, (k, v) in enumerate(new_lines)\n if i not in to_nuke]\n new_lines[insert_idx:insert_idx] = [\n (change_key, v)\n for v in new_dict.get(change_key, old_dict[change_key])\n ]\n\n for add_key in new_dict: # Preserve sort order of new lines\n if add_key in old_keys or add_key in del_keys:\n continue\n new_lines.extend((add_key, v) for v in new_dict[add_key])\n\n return new_lines", "def reset_modified(self):\n self.modified_fields = set()\n\n # compensate for us not having knowledge of certain fields changing\n for field_name, field in self.schema.normal_fields.items():\n if isinstance(field, ObjectField):\n self.modified_fields.add(field_name)", "def recreate_all_sghops (nffg):\n sg_map = NFFGToolBox.get_all_sghop_info(nffg)\n for sg_hop_id, data in sg_map.iteritems():\n src, dst, flowclass, bandwidth, delay = data\n if not (src and dst):\n continue\n if not nffg.network.has_edge(src.node.id, dst.node.id, key=sg_hop_id):\n nffg.add_sglink(src, dst, id=sg_hop_id, flowclass=flowclass,\n bandwidth=bandwidth, delay=delay)\n # causes unnecesary failures, when bandwidth or delay is missing\n # somewhere\n # else:\n # sg_hop = nffg.network[src.node.id][dst.node.id][sg_hop_id]\n # NFFGToolBox._check_flow_consistencity(sg_map, sg_hop)\n return nffg", "def update_dict(new,old):", "def replace_cfs(old_key, new_key):\n altered_methods = []\n for name in methods:\n changed = False\n data = Method(name).load()\n for line in data:\n if line[0] == old_key:\n line[0], changed = new_key, True\n if changed:\n Method(name).write(data)\n altered_methods.append(name)\n return altered_methods", "def OldItems(self) -> _n_1_t_7:", "def mergeWith(self, newFL):\n srcMods = self.srcMods\n for levls, newLevls in ((self.levcs,newFL.levcs),(self.levis,newFL.levis)):\n for listId, newLevl in newLevls.items():\n if listId not in srcMods: \n srcMods[listId] = [newFL.fileInfo.name]\n levl = levls[listId] = copy.deepcopy(newLevl)\n self.records.append(levl)\n else:\n srcMods[listId].append(newFL.fileInfo.name)\n levls[listId].mergeWith(newLevl)", "def set_old_props(self):\n self.old_props = {k: v for k, 
v in self.node.props.items()}", "def make_rules(self, old_rules):\n rules = defaultdict(set)\n\n def recurse_disc_rule(attr, rule):\n \"\"\"\n Recursively partition multivalued discrete attributes if\n its worth it\n \"\"\"\n\n\n ro = RuleObj(rule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n\n if not self.prune_rule(ro):\n return set([ro])\n \n c = rule.filter.conditions[0]\n var_type = rule.data.domain[c.position].var_type\n\n if (var_type == Orange.feature.Type.Discrete):\n if len(c.values) == 1:\n return [ro]\n \n refiner = BeamRefiner(attrs=[attr], fanout=10)\n ret = set()\n for _, newrule in refiner(rule):\n ret.update(recurse_disc_rule(attr, newrule))\n return ret\n else:\n if len(rule.data) < self.min_pts:\n return [ro]\n return [ro]\n\n # XXX: figure out this logic!\n\n refiner = BeamRefiner(attrs=[attr], fanout=2)\n ret = set()\n for _, newrule in refiner(rule):\n newro = RuleObj(newrule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n ret.update(recurse_disc_rule(attr, newrule))\n\n \n if old_rules is None:\n base_rule = SDRule(self.full_table, None) \n refiner = BeamRefiner(attrs=self.cols, fanout=10)\n #refiner = BeamRefiner(attrs=['recipient_nm'], fanout=30) \n\n \n for attr, rule in refiner(base_rule):\n ros = recurse_disc_rule(attr, rule)\n #self.top_k({None:ros})\n ros = filter(self.prune_rule, ros)\n rules[(attr,)].update(ros)\n\n else:\n attrs = old_rules.keys()\n for a_idx, attr1 in enumerate(attrs):\n for attr2 in attrs[a_idx+1:]:\n merged_attrs = set(attr1).union(attr2)\n max_attrs_len = max(len(attr1), len(attr2))\n if len(merged_attrs) == max_attrs_len:\n continue\n \n \n a1rules, a2rules = old_rules[attr1], old_rules[attr2]\n\n for ro in self.merge_dims(a1rules, a2rules):\n key = ro.rule.attributes\n\n #self.top_k({None:(ro,)})\n if self.prune_rule(ro):\n rules[key].add(ro)\n \n return rules", "def _cap_previous_history_row(clocked, new_clock, cls):\n if sa.inspect(clocked).identity is not None:\n # but only if it already exists!!\n effective_close = sa.func.tstzrange(\n sa.func.lower(cls.effective),\n new_clock.effective.lower,\n '[)')\n vclock_close = sa.func.int4range(\n sa.func.lower(cls.vclock),\n new_clock.vclock.lower,\n '[)')\n\n history_query = getattr(\n clocked, cls.entity.property.backref[0])\n history_query.filter(\n sa.and_(\n sa.func.upper_inf(cls.effective),\n sa.func.upper_inf(cls.vclock),\n ),\n ).update(\n {\n cls.effective: effective_close,\n cls.vclock: vclock_close,\n }, synchronize_session=False,\n )", "def process_object(self, new, old=None):\n new = super().process_object(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.modified_field, self.model.permissions_field)\n validate_from_bucket_schema_or_400(\n new,\n resource_name=\"group\",\n request=self.request,\n ignore_fields=internal_fields,\n id_field=self.model.id_field,\n )\n\n return new" ]
[ "0.5547992", "0.53283143", "0.53074485", "0.5118632", "0.49806392", "0.4959772", "0.4932249", "0.49296173", "0.4907506", "0.4893577", "0.48828602", "0.48637092", "0.48500943", "0.48333395", "0.48257568", "0.4819643", "0.4781666", "0.47804913", "0.47772083", "0.4759098", "0.47442418", "0.47411633", "0.47351632", "0.4730665", "0.47069883", "0.4685874", "0.46856624", "0.46788493", "0.46761847", "0.4670929" ]
0.6495964
0
Write file at file_name to table in BQ.
def write_to_bq(self, table_name, file_name, append=True, ignore_unknown_values=False, bq_schema_autodetect=False): table_name = table_name.lower().replace("-","_") self.log.info(f"Writing {table_name} to BQ from file {file_name}") dataset_ref = self.bq_client.dataset(self.dataset_id) table_ref = dataset_ref.table(table_name) job_config = bigquery.LoadJobConfig() job_config.source_format = 'NEWLINE_DELIMITED_JSON' if bq_schema_autodetect == False: # prepare for schema manipulation current_tables = [x.table_id for x in self.bq_client.list_tables(dataset_ref)] new_schm = self.generate_bq_schema(file_name) # if table exists, edit schema. otherwise, use new_schm if table_name in current_tables: table = self.bq_client.get_table(table_ref) new_schm = self.merge_schemas(table.to_api_repr()['schema']['fields'], new_schm) # move new_schm into job_config through the api_repr options api_repr = job_config.to_api_repr() api_repr['load']['schema'] = {'fields': new_schm} job_config = job_config.from_api_repr(api_repr) else: job_config.autodetect = True # handle write options if append == False: job_config.write_disposition = "WRITE_TRUNCATE" else: job_config.write_disposition = "WRITE_APPEND" job_config.schema_update_options = ['ALLOW_FIELD_ADDITION'] if ignore_unknown_values: job_config.ignore_unknown_values = True # send to BQ with open(file_name, 'rb') as source_file: job = self.bq_client.load_table_from_file( source_file, table_ref, job_config=job_config) # API request try: job.result() # Waits for table load to complete. except: self.log.info(job.errors) job.result()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_table(table, file_path):\n\n\twith open(file_path, 'w') as file:\n\t\tfile.write(table)", "def save(file, table):\n pq.write_table(pa.Table.from_pandas(table), file)", "def write_to_file(self, filename: str) -> None:", "def write_to(self, fname, **kwargs):\n data = self.to_Table()\n data.write(fname, **kwargs)", "def store_hive_table(data, directory, file_name):\n table_name = directory + \".\" + file_name\n data.write.saveAsTable(table_name)", "def writeFile( self, file_handle=None, table_name=None, data=None ):\n\n # Record the next primary key id.\n nextId = self.nextPrimaryKey( table_name )\n\n # Generate the string list of data to be written in the file.\n values = '\\t'.join( data )\n\n # Actual put together the primary key id, the string values and the new line character to be writen in the file.\n insert = str(nextId) + '\\t' + str(values) + \"\\n\"\n\n # Write the stuff in the file.\n file_handle.write( insert )\n\n # DON'T MESS WITH THAT!!!!! YOU'RE WARNED!!!\n # Messing with this cute id will kill your importer because the table relationships files relies on that!!!\n # Take a look on the lines like 'taxonomiesInserted' or 'proteinsInserted'.\n return nextId", "def saveastable(file, warehouse_dir):\n \n file1_path = os.path.join(files_2017_path,file)\n file2_path = os.path.join(files_2018_path,file)\n df1 = spark.read.load(\n file1_path,\n format='csv',\n sep=',',\n inferSchema=True,\n header=True\n )\n\n df2 = spark.read.load(\n file2_path,\n format='csv',\n sep=',',\n inferSchema=True,\n header=True\n )\n\n df = df1.unionAll(df2)\n \n tablename = os.path.splitext(i)[0]\n tblwarehouse_dir = os.path.join(warehouse_dir,tablename)\n df.write.saveAsTable(tablename, mode = 'overwrite', path = tblwarehouse_dir )\n print(\" Table created for - \",tablename)", "def insert_data_from_file(self, filename):\n self.get_cursor()\n if self.check_bulk_insert() and self.table.header_rows < 2 and (\n self.table.delimiter in [\"\\t\", \",\"]):\n print(\"Inserting data from \" + os.path.basename(filename) + \"...\")\n\n if self.table.delimiter == \"\\t\":\n fmt = \"TabDelimited\"\n elif self.table.delimiter == \",\":\n fmt = \"CSVDelimited\"\n\n if self.table.header_rows == 1:\n hdr = \"Yes\"\n else:\n hdr = \"No\"\n\n columns = self.table.get_insert_columns()\n\n need_to_delete = False\n add_to_record_id = 0\n\n if self.table.pk and not self.table.contains_pk:\n if '.' 
in os.path.basename(filename):\n proper_name = filename.split('.')\n len_name = len(proper_name)\n newfilename = '.'.join(\n proper_name[0:-1] if len_name > 0 else proper_name[0]\n ) + \"_new.\" + filename.split(\".\")[-1]\n else:\n newfilename = filename + \"_new\"\n\n if not os.path.isfile(newfilename):\n print(\"Adding index to \" + os.path.abspath(newfilename) + \"...\")\n read = open(filename, \"rb\")\n write = open(newfilename, \"wb\")\n to_write = \"\"\n\n for line in read:\n line = line.strip()\n to_write += str(id) + self.table.delimiter + line\n add_to_record_id += 1\n self.table.record_id += add_to_record_id\n\n write.write(to_write + os.linesep)\n write.close()\n read.close()\n need_to_delete = True\n columns = \"record_id, \" + columns\n else:\n newfilename = filename\n\n newfilename = os.path.abspath(newfilename)\n filename_length = (len(os.path.basename(newfilename)) * -1) - 1\n filepath = newfilename[:filename_length]\n statement = \"\"\"\nINSERT INTO \"\"\" + self.table_name() + \" (\" + columns + \"\"\")\nSELECT * FROM [\"\"\" + os.path.basename(newfilename) + ''']\nIN \"''' + filepath + '''\" \"Text;FMT=''' + fmt + ''';HDR=''' + hdr + ''';\"'''\n try:\n self.execute(statement)\n return True\n except BaseException:\n print(\"Couldn't bulk insert. Trying manual insert.\")\n self.connection.rollback()\n self.table.record_id -= add_to_record_id\n return None\n finally:\n if need_to_delete:\n os.remove(newfilename)\n\n return Engine.insert_data_from_file(self, filename)", "def write_to_databse(fileName):\n f = open(fileName)\n queries = eval(open(fileName).read())\n for q in queries:\n site.write(q)\n print \"Quries are saved:)\"", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, filename, **kwargs):\n self.to_table().write(filename, format='fits', **kwargs)", "def Write(self):\n table_data = self._TABLE.build(self._timestamps)\n self._zip_file.writestr(self._stream_name, table_data)", "def write_q_table_file(q_table, q_file=\"Q_Table.txt\"):\n file = open(q_file, \"w+\")\n rows = len(q_table)\n cols = len(q_table[0])\n file.write(str(rows) + \"x\" + str(cols) + \"\\n\")\n for i in range(len(q_table)):\n file.write(str(i) + \"-\" + \"24\\n\") # TODO: deshardcodear el objetivo del juego\n file.write(\"UP\\n\")\n file.write(\"RIGHT\\n\")\n file.write(\"DOWN\\n\")\n file.write(\"LEFT\\n\")\n for row in q_table:\n for col in row:\n file.write(str(col) + \"\\n\")\n file.close()", "def write(self, filename):\n bvh_string = self.generate_bvh_string()\n if filename[-4:] == '.bvh':\n filename = filename\n else:\n filename = filename + '.bvh'\n with open(filename, 'w') as outfile:\n outfile.write(bvh_string)", "def bulk_copy_to_db(self):\n database = PostgreSQLCommon()\n\n try:\n file = open(self.file_name_hash)\n database.bulk_copy(file, self.storage_table)\n\n m.info('Bulk insert from %s has been successfully completed!'\n % self.file_name_hash)\n except Exception as err:\n m.error('OOps! Bulk insert operation FAILED! 
Reason: %s' % str(err))\n finally:\n database.close()\n\n if os.path.exists(self.file_name_hash):\n os.remove(self.file_name_hash)", "def writeToDB(self, eventDateTime, eventFileName, eventType, eventPath):\n conn = self.createConnection()\n c = conn.cursor()\n\n c.execute(\"INSERT INTO RansomedFiles (TIME, EventFileName, EventType, EventPath) VALUES (?,?,?,?)\", (eventDateTime, eventFileName, eventType, eventPath))\n conn.commit()\n conn.close()\n\n # print(\"[+]Wrote to the database successfully!\")", "def write_index(self, file_name):\n self.df_index.to_csv(file_name, sep='\\t')", "def writeDtbFile(self, filename):\n filename = os.path.realpath(filename)\n try:\n with open(filename, \"wb\") as f:\n f.write(self.to_dtb())\n return filename\n except IOError:\n raise RuntimeError(\"Failed to open DTB output file\")", "def write_file(file_name, table):\r\n \r\n savectr=len(table)\r\n try:\r\n with open (file_name, 'wb') as objFile:\r\n pickle.dump(table,objFile) #pickle my 2D list\r\n print ('{} CD(s) saved into {}.\\n'.format(savectr,file_name))\r\n except PermissionError as e:\r\n print('Not enough rights to create/modify ' + file_name + '.') #if unable pickle data due to permission issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except IOError as e:\r\n print ('I/O error({0}): {1}'.format(e.errno,e.strerror))#if unable to pickle data due to IO errors such as disk space issues\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()\r\n except pickle.PickleError as e:\r\n print ('Unable to write data into ' + file_name + '.') #if unable to pickle 2D list, exception handling for pickling errors\r\n print ()\r\n print (e, e.__doc__, sep='\\n')\r\n print ()", "def write_csv_file(csv_table, file_name, file_delimiter, quoting_value):\n \n with open(file_name, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=file_delimiter, quoting=quoting_value)\n for row in csv_table:\n csv_writer.writerow(row)", "def write_csv_file(csv_table, file_name, file_delimiter, quoting_value):\n \n with open(file_name, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=file_delimiter, quoting=quoting_value)\n for row in csv_table:\n csv_writer.writerow(row)", "def write_frame(self, file, table):\n\n frame = pd.read_csv(file, usecols=self.columns[table])\n frame.fillna('-', inplace=True)\n\n for line in frame.index:\n\n available = []\n accum = 0\n for item in frame.loc[line]:\n if item not in self.placeholder:\n available.append(accum)\n accum = accum + 1\n\n if table == 'premium' and len(available) <= 2:\n # Premium table is full of null\n continue\n\n # Filter the key-value pairs\n key = [frame.columns[column] for column in available]\n keys = ','.join(key)\n value = ['\\'' + str(frame.loc[line][i]) + '\\'' for i in available]\n values = ','.join(value)\n\n insert_query = 'INSERT INTO public.%s ' \\\n '(%s) ' \\\n 'VALUES (%s);' \\\n % (table, keys, values)\n try:\n self.cursor.execute(insert_query)\n self.conn.commit()\n except ps.Error as e:\n # Ignore errors\n self.errors = self.errors + 1\n self.conn.commit()\n continue\n\n self.conn.commit()\n self.cursor.close()", "def save_to_gcs(df, file, bucket=settings.ASSETS.BUCKET):\n output_file = NamedTemporaryFile().name\n df.to_csv(output_file, compression=\"gzip\", index=False)\n upload_blob(bucket, output_file, file)", "async def write_file(self, directory: str, name: str, file: bytes):\n pass", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print 
\"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)", "def putFile(self, filename):\n basename = os.path.basename(filename)\n fp = open(filename, 'rb')\n self.ftp.storbinary('stor ' + basename, fp)\n fp.close();", "def export(self, filename):\n columns = self.cursor.execute(f'''pragma table_info(job)''').fetchall()\n\n columns_to_export = [col[1] for col in columns\n if self._validate_column(col[1])]\n\n self._export_from_db(columns_to_export, filename)\n self.con.close()", "def filewrite(self, filename):\n io.write(self, filename)", "def write(self, fname):\n pass" ]
[ "0.6389289", "0.62776583", "0.6185979", "0.6152416", "0.61514306", "0.60884386", "0.6057259", "0.5896836", "0.5782033", "0.5767647", "0.5767647", "0.5727883", "0.5715699", "0.56989884", "0.5688516", "0.5665562", "0.565316", "0.5612624", "0.5582769", "0.5565674", "0.55629915", "0.55629915", "0.5549166", "0.5533165", "0.55245024", "0.5521133", "0.5511338", "0.5509263", "0.550887", "0.5486559" ]
0.6649394
0
Takes start and end datetimes and chunks the period into ndays size chunks.
def chunk_date_range(self, start_datetime, end_datetime, chunk_size): self.log.info(f'Chunking period {start_datetime} to {end_datetime} into chunks of {chunk_size} days.') for n in range(int ((end_datetime - start_datetime).days) + 1): if n/chunk_size == int(n/chunk_size): start = start_datetime + datetime.timedelta(n) end = start_datetime + datetime.timedelta(n+chunk_size) # if we reach the end_datetime, return that instead of end if end < end_datetime: yield (start, end) else: yield (start, end_datetime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods", "def get_date_range(num_days):\n\n date1 = datetime.datetime.utcnow()\n dateranges = []\n \n if num_days > 90:\n chunks = math.ceil(num_days/90)\n print('Breaking dates into into', chunks,'90 day chunks.')\n\n for chunk in range(1,chunks+1):\n date2 = date1 - datetime.timedelta(days=90)\n\n start = add_milliseconds(date1)\n end = add_milliseconds(date2)\n\n print('Chunk', chunk, ': ', date1, 'to', date2)\n dateranges.append((start,end))\n date1 = date2 - datetime.timedelta(days=1)\n \n else: \n date1 = datetime.datetime.utcnow()\n date2 = date1 - datetime.timedelta(days=num_days)\n \n start = add_milliseconds(date1)\n end = add_milliseconds(date2)\n \n dateranges.append((start,end))\n \n return(dateranges)", "def daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)+1):\n yield start_date + dt.timedelta(n)", "def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + dt.timedelta(n)", "def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)+1):\n yield start_date + timedelta(n)", "def _drange(start: Date, end: Date) -> Iterator[Date]:\n while start <= end:\n yield start\n start = start + TimeDelta(days=1)", "def split_iter(self, delta):\n interval_start = self.start_date\n while interval_start < self.end_date:\n interval_end = interval_start + delta\n if interval_end > self.end_date:\n interval_end = self.end_date\n yield DateRange(interval_start, interval_end)\n interval_start = interval_end + relativedelta(days=1)", "def per_month(start: datetime, end: datetime, n: int = 1):\n curr = start.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n while curr < end:\n curr_end = add_month(curr, n)\n yield curr, curr_end\n curr = curr_end", "def multiple_day_binning(df, n_days=7, **kwargs):\n df[\"time_mid\"] = df.run_start + (df.run_stop - df.run_start) / 2\n frequency = str(n_days*24) + 'h'\n\n print(\"Resampling with:\", frequency)\n\n resampler = df.resample(frequency, on='time_mid', base=-12, **kwargs)\n\n print(\"Number of blocks:\", len(resampler.groups))\n\n df[\"bin\"] = np.zeros(len(df))\n for name, group in resampler:\n df.loc[group.index, 'bin'] = [name] * len(group)\n df['bin'] = (df.bin != df.bin.shift()).cumsum()\n\n print(\"Number of filled blocks:\", df[\"bin\"].iloc[-1])\n print(\"Number of runs per block:\")\n print(resampler['n_on'].count())\n\n return df['bin']", "def datetime_range(start_timestamp, end_timestamp, step_size):\n r = []\n ts = start_timestamp\n while ts <= end_timestamp:\n r.append(ts)\n ts += timedelta(minutes=step_size)\n return r", "def _split_date_range(start, end, intv):\n previous = start\n diff = (end - start) / intv\n for i in range(1, intv):\n current = start + diff * i\n yield (previous, current)\n previous = current\n yield (previous, end)", "def getIntervals(t_start,t_end,interval):\n timestamp_start = pd.Timestamp(t_start)\n timestamp_end = pd.Timestamp(t_end)\n nro_elements=int(int((timestamp_end-timestamp_start).total_seconds())/(60*interval))\n list_dates = []\n 
list_dates.append(t_start)\n for i in range(nro_elements):\n date_time_str = list_dates[i]\n Current_Date = dt.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')\n Next_Date = Current_Date + datetime.timedelta(minutes=interval)\n list_dates.append(str(Next_Date))\n return list_dates", "def date_range(start_date, end_date):\n for n in range((end_date - start_date).days + 1):\n yield start_date + timedelta(n)", "def date_sequence(start, end, stats_duration, step_size):\n step_size, freq = parse_interval(step_size)\n stats_duration = parse_duration(stats_duration)\n for start_date in rrule(freq, interval=step_size, dtstart=start, until=end):\n end_date = start_date + stats_duration\n if end_date <= end:\n yield start_date, start_date + stats_duration", "def iter_dates(start, end):\n one_day = timedelta(days=1)\n date = start\n while date <= end:\n yield date\n date += one_day", "def date_range(start, stop):\n for n in range(int((stop - start).days)):\n yield start + timedelta(n)", "def generate_N_random_date(N, start, end):\n delta = end - start\n int_delta = (delta.days * 24 * 60 * 60) + delta.seconds\n \n result = []\n for i in range(N):\n random_second = random.randrange(int_delta)\n datetime_object = datetime.datetime.date(start + datetime.timedelta(seconds = random_second))\n result.append(datetime_object)\n \n return result", "def n_business_days(self, n=-2):\n\n business_days = 0\n calendar_days = 0 \n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(self.time_stamp + timedelta(calendar_days)):\n business_days = business_days + 1\n return self.time_stamp + timedelta(calendar_days)\n return date", "def subgraphs_of_length(self, days=None, periods=None):\n graphs = []\n if days:\n sg_length = datetime.timedelta(days=days)\n else:\n sg_length = periods\n\n start_date = self.min_date\n end_date = start_date + sg_length\n done = False\n while not done:\n if start_date > self.max_date:\n break\n if end_date > self.max_date:\n # end_date = self.max_date\n done = True\n print(start_date, end_date)\n new = self.subgraph_within_dates(start_date, end_date)\n if new.nx_graph.number_of_edges():\n graphs.append(new)\n start_date += sg_length\n end_date += sg_length\n return graphs", "def getDaysInDateRange(start, end):\n def dateRange(start, end, increment, period):\n # http://stackoverflow.com/a/10688309/2875074\n result = []\n nxt = start\n delta = relativedelta(**{period:increment})\n while nxt <= end:\n result.append(nxt)\n nxt += delta\n return result\n return dateRange(start, end, 1, 'days')", "def daterange(date1, date2):\n for n in range(int ((date2 - date1).days)+1):\n yield date1 + timedelta(n)", "def dayPeriod(lon,lat,n1,n2,day):\n x, y, z = _getXYZ(lon,lat)\n N = range(n1,n2+1)\n D = []\n for n_ in N:\n n = n_ * day\n i = range(0,n)\n j = range(n,n+n)\n d_ = gcDist(x[i],y[i],z[i],\n x[j],y[j],z[j])\n D = D + [d_,]\n print n, d_\n\n return (N,D)", "def generate_dates(start_date, cutoff_date):\n epoch = datetime.strptime(start_date, '%Y-%m-%d')\n cutoff = datetime.strptime(cutoff_date, '%Y-%m-%d')\n\n while epoch <= cutoff:\n yield epoch\n epoch += timedelta(days=1)", "def date_range_daily(start_at, end_at: datetime, cutoff_at: Optional[datetime], every: int):\n cutoff_at = cutoff_at or start_at\n current_result = start_at\n while current_result <= end_at:\n if current_result >= cutoff_at:\n yield current_result\n current_result += timedelta(days=every)", "def days_from_start(self) -> List[int]:\n n_periods = [(x 
- self.date_range.min())/pd.Timedelta('1D')\n for x in self.date_range]\n return n_periods", "def days_from_start(self) -> List[int]:\n n_periods = [(x - self.date_range.min())/pd.Timedelta('1D')\n for x in self.date_range]\n return n_periods", "def t_range_days(t_range, *, step=np.timedelta64(1, \"D\")) -> np.array:\r\n sd = dt.datetime.strptime(t_range[0], \"%Y-%m-%d\")\r\n ed = dt.datetime.strptime(t_range[1], \"%Y-%m-%d\")\r\n return np.arange(sd, ed, step)", "def date_range(start, stop, step=datetime.timedelta(days=1)):\n while start < stop:\n yield start\n start += step", "def daily_date_range(date1, date2):\n num_days = (date2-date1).days\n return np.array([datetime(date1.year, date1.month, date1.day, 0)+timedelta(days=i) for i in range(num_days)])", "def get_all_submissions_in_24_hours(subreddit, start_date, end_date, limit):\n list_of_all_submissions = []\n inc_start_date = start_date\n inc_end_date = end_date + 600\n\n for i in range(0, 86400, 14400):\n inc_end_date += i\n inc_start_date += i\n threads = list(get_submissions(subreddit, inc_start_date, inc_end_date, limit))\n for item in threads:\n if item.d_['num_comments'] > MIN_COMMENTS:\n list_of_all_submissions.append(item.d_)\n print(len(list_of_all_submissions))\n return list_of_all_submissions" ]
[ "0.74561584", "0.70724005", "0.6633228", "0.6588212", "0.6577086", "0.65028185", "0.643334", "0.6382789", "0.6378288", "0.6325229", "0.6312387", "0.62938607", "0.62823457", "0.62358534", "0.616273", "0.6017077", "0.59950334", "0.5977496", "0.5964378", "0.5950997", "0.5935242", "0.5919401", "0.5891619", "0.5890085", "0.58887523", "0.58887523", "0.58783376", "0.58673084", "0.5853745", "0.5835374" ]
0.78345835
0
Returns maximum value from date_column in table_name.
def find_last_entry(self, table_name, date_column): query = f"SELECT MAX({date_column}) FROM `{self.dataset_id}.{table_name}`" query_job = self.bq_client.query(query) # API request rows = query_job.result() latest_time = [x[0] for x in rows][0] return latest_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max(self):\n max_value= self.df[self.col_name].max()\n return max_value", "async def max(self, table_name: str, column: str, conditions_list=None):\n if conditions_list:\n conditions = LemkPgUtils.get_conditions(conditions_list)\n query = f\"\"\"SELECT MAX({column}) FROM {table_name} WHERE {\" \".join(conditions)}\"\"\"\n else:\n query = f\"\"\"SELECT MAX({column}) FROM {table_name}\"\"\"\n result = await LemkPgUtils.get_query_result(self.dsn, query)\n return result", "def max_date(self, rows: List[Row], column: DateColumn) -> Date:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return Date(-1, -1, -1)\n if not all([isinstance(value, Date) for value in cell_values]):\n raise ExecutionError(f\"Invalid values for date selection function: {cell_values}\")\n return max(cell_values) # type: ignore", "def max(self, column):\n if not column:\n columns = ['*']\n\n return self.aggregate('max', *[column])", "def max(self, column):\n self.aggregate(\"MAX\", \"{column}\".format(column=column))\n return self", "def _get_max_day(row: pd.Series, col_name: str):\n if (pd.isnull(row[col_name + \"_year\"])) or (pd.isnull(row[col_name + \"_year\"])):\n return np.nan\n return calendar.monthrange(\n int(row[col_name + \"_year\"]), int(row[col_name + \"_month\"])\n )[1]", "def obtain_daily_maximum(data=pd.DataFrame()):\n return data.resample(\"D\").max()", "def max_entity_date(entity_type):\r\n\r\n # assume empty date\r\n max_date = None\r\n try:\r\n\r\n # get a cursor\r\n conn = ecommerce.db.getConnection()\r\n cursor = conn.cursor()\r\n\r\n # execute the query\r\n cursor.execute(\"\"\"\r\n SELECT TO_CHAR(LastUpdateDate, 'YYYY-MM-DD HH24:MI:SS')\r\n FROM Stage0_DeltaControl\r\n WHERE EntityType = ?\r\n \"\"\", (entity_type, ) )\r\n\r\n # fetch the max date\r\n row = cursor.fetchone()\r\n if row is not None:\r\n max_date = row[0]\r\n cursor.close()\r\n except:\r\n pass\r\n\r\n return max_date", "def get_last(self, table, column):\n\n query = \"SELECT {0} FROM {1} ORDER BY ROWID DESC LIMIT 1;\".format(\n column, table)\n self.cursor.execute(query)\n row = self.cursor.fetchone()\n return row\n # return self.get(table, column, limit=1)[0] # uses fetchall()", "def __get_latest_data(table_name='derivatives_economicindicatorstandard'):\n # create query and get data\n query = 'SELECT * FROM ' + table_name\n df = AccessDB().run_read_query(query)\n\n if table_name == 'derivatives_economicindicatorstandard':\n df = pd.DataFrame(df.groupby(['dbcode', 'indicator', 'country', 'freq', 'flow'])['date'].max())\n else:\n df = pd.DataFrame(df.groupby(['dbcode', 'indicator', 'country', 'freq', 'counter_party'])['date'].max())\n df.reset_index(inplace=True)\n return df", "def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]", "def get_max_update_dt():\r\n from .utils import connect_to_pg\r\n conn = connect_to_pg()\r\n query = \"select max(updated_at) from scf.issues\"\r\n res = conn.execute(query)\r\n max_dt = res.fetchone()[0]\r\n print(max_dt)\r\n return max_dt", "def getDate(delta):\r\n debug.p(\"FUNC:check_db_alarm.getDate\")\r\n db = connectToDB()\r\n debug.p('Get Max Date in DB')\r\n date_entry = db.query(func.max(StatLogSCP1.date))\r\n debug.p(date_entry)\r\n #max_Date = c.fetchone() #This return a tuple, 0 item is a datetime.datetime object\r\n #maxDate = max_Date[0]\r\n deltaDate = date_entry - timedelta(minutes= delta)\r\n debug.p(\"**Leaving FUNC:check_db_alarm.getDate\")\r\n return deltaDate", "def 
get_max_donation_date_list():\n try:\n logger.info('opening get_max_donation_date_list database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n query_max_date = (Donations\n .select(Donations.donated_by_id.alias('fullname'),\n fn.MAX(Donations.donation_date).alias(\n 'last_donation_date'),\n Donations.donation_amount.alias('last_donation'))\n .group_by(Donations.donated_by_id)\n )\n return query_max_date\n\n except Exception as e:\n logger.info(e)\n\n finally:\n database.close()\n logger.info('closing get_max_donation_date_list database call')", "def check_max(df):\n\n df_max = pd.concat(\n [\n df.iloc[-1].rename(\"this_period\"),\n df.max().rename(\"largest_ever\"),\n df.idxmax().rename(\"date_of_max\"),\n df.rank(ascending=False).iloc[-1].astype(int).rename(\"current_rank\"),\n (df.iloc[-1] >= df.max()).rename(\"is_maximum\"),\n ],\n axis=1,\n ).rename_axis(\"components\")\n\n df_max.is_maximum = df_max.is_maximum.astype(str).replace(\"False\", \"\")\n\n # comparison_this_period = df.iloc[-1] >= df_max.maximum\n # .rename('is_maximum')\n # )\n\n # df_max_period = (pd\n # .concat([df_max, comparison_this_period], axis=1)\n # .rename(columns={0: 'is_maximum'})\n # )\n\n # if df_max_period.maximum.any():\n # idx = df_max_period.Maximum\n # df_max_period['this_period'] = np.nan\n # df_max_period.loc[~idx, 'this_period'] = df.iloc[-1].loc[~idx]\n\n # column_order = ['this_period', 'current_rank',\n # 'maximum', 'max date',\t'is_maximum',\n # ]\n\n # df_max_period = df_max_period[column_order]\n\n return df_max", "def maxtime(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT max(time) as max_time FROM event WHERE bin_id not null\").fetchall()\n last_time = r[0]['max_time']\n return last_time", "def max(x: pd.Series, d: int or float) -> pd.Series:\n return ts_max(x, d)", "def maxKey(analyzer):\n return om.maxKey(analyzer['dateIndex'])", "def maxKey(analyzer):\n return om.maxKey(analyzer['dateIndex'])", "def get_max(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n maximum = df.max(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n maximum = round(float(maximum), 4)\r\n return maximum", "def find_last_value(self, value, closest=False):\n value = pd.to_datetime(value)\n value = column.as_column(value).as_numerical[0]\n return self.as_numerical.find_last_value(value, closest=closest)", "def max_temp_aggregate_by_fog(filename):\n weather_data = pandas.read_csv(filename)\n\n q = \"\"\"\n SELECT fog, MAX(cast (maxtempi as integer)) \n FROM weather_data \n GROUP BY fog;\n \"\"\"\n\n # Execute your SQL command against the pandas frame\n foggy_days = pandasql.sqldf(q.lower(), locals())\n return foggy_days", "def fetch_last(self, tablename):\n query = 'select * from ' + tablename\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchall()\n if fetcheddata:\n fetcheddata = fetcheddata[-1]\n fetcheddata = self.__helper._functions.__rowtodict([fetcheddata])\n return fetcheddata[-1]\n return None", "def latest(self, column='created_at'):\n return self.order_by(column, 'desc')", "def rolling_max(series, n=None):\n\n if n:\n result = series.rolling(window=n, min_periods=0).max()\n result.name = 'rolling_max_' + str(n)\n else:\n result = series.rolling(window=len(series), min_periods=0).max()\n result.name = 'max_to_date'\n\n return result", "def cap_max_value(df: pd.DataFrame, column_to_inspect: str, max_value: int):\n df[column_to_inspect] = 
df[column_to_inspect].apply(\n lambda e: max_value if e > max_value else e\n )\n return df", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def get_maximum ( self, object ):\n return self.maximum", "def get_latest(self, key):\n # Check connection\n self._checkInit()\n \n # Construct the query\n query = \"\"\"SELECT \n date_format(max(str_to_date(concat(year,',',month,',',day,',',hour),'%Y,%c,%e,%k')),'%Y-%m-%d-%H')\n FROM {} WHERE `key`='{}'\"\"\".format(\n self.table,\n key)\n\n #logging.debug(\"query: \\\"{}\\\"\".format(query))\n\n # Get Connection\n cnx = self.getConnection()\n cur = cnx.cursor()\n cur.execute(query)\n retval = None\n for fields in cur:\n retval = fields[0]\n break\n cur.close()\n cnx.close()\n return retval", "def get_latest_date(cls):\n\n return cls.query.order_by(desc(cls.date)).first().date" ]
[ "0.69734967", "0.68731785", "0.6581008", "0.6453102", "0.6329918", "0.62211794", "0.6203115", "0.6109451", "0.60852724", "0.6030926", "0.592045", "0.5807638", "0.57572854", "0.57227606", "0.5688055", "0.5640436", "0.55683875", "0.5554176", "0.5554176", "0.5554019", "0.55342084", "0.54993534", "0.54932046", "0.5453971", "0.54348475", "0.543376", "0.54253745", "0.53980964", "0.5386203", "0.53752685" ]
0.7224291
0
Runs a query in BQ and returns the results as a list of rows.
def bq_query(self, query): query_job = self.bq_client.query(query) # API request rows = [x for x in query_job.result()] return rows
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_query(conn, query):\n\tcur = conn.cursor()\n\tcur.execute(query)\n\trows = cur.fetchall()\n\treturn rows", "def run_query(query):\r\n db = psycopg2.connect('dbname=' + database)\r\n connect = db.cursor()\r\n connect.execute(query)\r\n rows = connect.fetchall()\r\n db.close()\r\n return rows", "def execute_and_fetch(db, query):\n rows = []\n\n db.query(query)\n res = db.use_result()\n row = res.fetch_row()\n while row != ():\n rows.append(row[0])\n row = res.fetch_row()\n return rows", "def _run_query(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n return cursor.fetchall()", "def _get_rows_from_query(self, query, data):\n to_return = []\n results = self.db_conn.cursor().execute(query, data)\n for result in results:\n to_return.append(result)\n return to_return", "async def db_query(self, *args, **kwargs):\n rows = []\n async with self.db_pool.acquire() as conn:\n async with conn.cursor(cursor_factory=DictCursor) as cur:\n await cur.execute(*args, **kwargs)\n try:\n async for row in cur:\n rows.append(row)\n except psycopg2.ProgrammingError:\n # No results\n pass\n return rows", "def executeQuery(query):\n c = db.cursor()\n c.execute(query)\n rows = c.fetchall()\n db.close()\n return rows", "def run(self, query, project=\"odyssey-193217\"):\n\t\tfrom google.cloud import bigquery\n\t\tjob_config = bigquery.QueryJobConfig()\n\t\tclient = bigquery.Client(project=project)\n\t\tresult = client.query(query,job_config=job_config)\n\t\tjob_config.allowLargeResults = True\n\t\tresult.__done_timeout = 99999999\n\t\treturn list(result)", "def execute(self, query):\n with self.conn.cursor() as cur:\n # Execute the query\n try:\n cur.execute(query)\n except Exception as exc:\n print(\"Unable to execute query. Error was {0}\".format(str(exc)))\n exit()\n rows = cur.fetchall()\n return rows", "def query(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, args, kwargs)\n column_names = [d[0] for d in cursor.description]\n return [Row(zip(column_names, row)) for row in cursor]\n finally:\n cursor.close()", "def query(self, sql):\n try:\n res_cursor = self.connection.execute(text(sql))\n except Exception as e: \n raise e(\"SQL execution error!\")\n \n rows = (Row(res_cursor.keys(), record) for record in res_cursor)\n results = RowsCollection(rows)\n return results", "def _query_and_fetchall(self, query):\n with self._connect() as conn:\n cur = conn.cursor()\n cur.execute(query)\n results = cur.fetchall()\n\n return results", "def query(statement, project, **kwargs):\n\n with bqapi.connect(project) as conn:\n return conn.execute(statement, **kwargs).fetchall()", "def run_query(db, query):\n log.debug(\"run query on %s: %s\", db, query)\n conn = _connect(show_dbs(db)[db][\"uri\"])\n return conn.cursor().execute(query).fetchall()", "def perform_query(query):\r\n\r\n # setup and perform query\r\n client = boto3.client('athena')\r\n DATABASE = 'default'\r\n output='s3://ruesdefrance-query-results/'\r\n \r\n queryStart = client.start_query_execution(\r\n QueryString=query,\r\n QueryExecutionContext={\r\n 'Database': DATABASE\r\n },\r\n ResultConfiguration={\r\n 'OutputLocation': output,\r\n }\r\n )\r\n\r\n # get query execution ID\r\n queryId = queryStart['QueryExecutionId']\r\n \r\n # prepare results if request succeeds\r\n status = ''\r\n while status not in ['SUCCEEDED','FAILED']:\r\n test_exec = client.get_query_execution(QueryExecutionId = queryId)\r\n status = test_exec['QueryExecution']['Status']['State']\r\n if status == 
'SUCCEEDED':\r\n results = client.get_query_results(QueryExecutionId = queryId)\r\n rows = []\r\n for row in results['ResultSet']['Rows']:\r\n #print(row)\r\n rows.append(row['Data'])\r\n elif status == 'FAILED':\r\n print('FAILED')\r\n time.sleep(1)\r\n\r\n # extract column names and content of rows \r\n columns = rows[0]\r\n rows = rows[1:]\r\n print(\"rows length {}\".format(len(rows)))\r\n print(rows)\r\n \r\n return rows", "def run_query(query):\n db.query(query)\n dbResult = db.store_result()\n dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n df = pd.DataFrame.from_records(dbFetched)\n return df", "def get_query_results(query):\n\n db, c = connect()\n c.execute(query)\n results = c.fetchall()\n db.close()\n return results", "def execute_query(query):\n c.execute(query)\n return c.fetchall()", "def dbselect(cxn, query, payload):\n\tcursor = cxn.cursor()\n\tif not payload:\n\t\trows = cursor.execute(query)\n\telse:\n\t\trows = cursor.execute(query,payload)\n\tresults = []\n\tfor row in rows:\n\t\tresults.append(row)\n\tcursor.close()\n\treturn results", "def run_bq_query(client, query, timeout):\n \n job_id, _results = client.query(query, timeout=timeout)\n complete, row_count = client.check_job(job_id)\n if complete:\n results = client.get_query_rows(job_id)\n print('Got %s records' %row_count)\n else:\n raise RuntimeError('Query not complete')\n return(results)", "async def query(self, stmt, *args):\n\n with (await self.application.db.cursor()) as cur:\n await cur.execute(stmt, args)\n return [self.row_to_obj(row, cur)\n for row in await cur.fetchall()]", "def get_rows(query, params=None):\n cur = get_cursor()\n cur.execute(query, params)\n rows = cur.fetchall()\n return rows", "async def db_execute(self, *args, **kwargs):\n rows = []\n async with self.db_pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(*args, **kwargs)\n try:\n async for row in cur:\n rows.append(row)\n except psycopg2.ProgrammingError:\n # No results\n pass\n return rows", "def exec_results(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n result = c.fetchall()\n db.close()\n return result", "def runningwithqueries(query):\n print(\"\\nRunning Query: \" + str(query) + \"\\nResult :\\n\")\n crsr = cnxn.execute(query)\n columns = [column[0] for column in crsr.description]\n print(columns)\n for row in crsr.fetchall():\n print(row)\n crsr.close()", "def _execute_query(self, query, values):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(query,values)\n if not cursor.description:\n return []\n rs = RecordSet(initialData=cursor.fetchall(), recordType=next(zip(*cursor.description)))\n return rs", "def query(self, qq: str) -> List[sqlite3.Row]:\n\n try:\n self.cursor.execute(qq)\n self.last_result = self.cursor.fetchall()\n self.connection.commit()\n except sqlite3.OperationalError:\n self._createTable()\n return self.query(qq)\n except sqlite3.Error as e:\n print(\"Couldn't execute query %s, exception: %s\" % (qq, e), file=stderr)\n self.last_result = []\n return self.last_result", "def run_query(cur, query, show_results=False):\n num_rows = cur.execute(query)\n print('the query returned {} rows'.format(num_rows))\n if show_results:\n for row in cur.fetchall():\n print(row)", "def get_query_results(self, conn, sql):\n\n cur = conn.cursor()\n cur.execute(sql)\n yield from cur.fetchall()", "def _execute_query(self,\n sql_connection: Connection,\n sql_query: Union[str, Query]) -> List[Dict[str, Any]]:\n 
return_result: List[Dict[str, Any]] = []\n result: ResultProxy = sql_connection.execute(sql_query)\n if result and result.returns_rows:\n return_result: List[Dict[str, Any]] = [dict(row) for row in result]\n return return_result" ]
[ "0.74091065", "0.73635143", "0.7329013", "0.7272752", "0.71248585", "0.7106369", "0.70463365", "0.70437837", "0.7036642", "0.6965512", "0.69410104", "0.693591", "0.68967617", "0.68664765", "0.6862096", "0.68477184", "0.680308", "0.68021363", "0.6753161", "0.6746222", "0.6720808", "0.6704279", "0.6699142", "0.6694089", "0.66730917", "0.66429406", "0.6625846", "0.6611827", "0.6607437", "0.6558351" ]
0.7811319
0
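As an illustrative aside on the record above: the document is a thin wrapper around the BigQuery client. Below is a minimal, self-contained sketch of how such a helper is typically used; the client construction and the query text are assumptions for illustration, not part of the record.

```python
# Minimal sketch, assuming google-cloud-bigquery is installed and default
# credentials are available in the environment.
from google.cloud import bigquery

class BQRunner:
    def __init__(self):
        self.bq_client = bigquery.Client()

    def bq_query(self, query):
        query_job = self.bq_client.query(query)      # starts the job (API request)
        return [row for row in query_job.result()]   # waits for and collects rows

runner = BQRunner()
rows = runner.bq_query("SELECT 1 AS answer")
print(rows[0]["answer"])  # prints 1
```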
Runs all keys in a JSON object (dict or list) through the given callback function.
def fix_json_keys(self, obj, callback): if type(obj) == list: newlist = [] for item in obj: newlist.append(self.fix_json_keys(item, callback)) return newlist elif type(obj) == dict: newdict = {} for item in list(obj): if type(obj[item]) == list or type(obj[item]) == dict: newdict[callback(item)] = self.fix_json_keys(obj[item], callback) else: newdict[callback(item)] = obj[item] return newdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterate(d, fun): # type: (Dict, Callable[[Any, Any], None]) -> None\n for key, value in d.items():\n if isinstance(value, dict):\n DictUtil.iterate(value, fun)\n else:\n fun(key, value)", "def __call__(self, *args, **kwargs):\n for key, obj in self._dict.items():\n key[0](obj, *args, **kwargs)", "def walk(obj,dict_fn):\n if isinstance(obj,dict):\n result = dict()\n for key, value in obj.items():\n result[key] = walk(value, dict_fn)\n return dict_fn(result)\n if isinstance(obj,list):\n return [walk(i,dict_fn) for i in obj]\n return obj", "def apply_to_all(data, func, func_kws={}, verbose=False):\n keys = list(data.keys())\n n_tot = len(keys)\n new_data = {}\n\n if verbose:\n fn_name = func.__name__\n desc_str = \"apply_to_all:{}\".format(fn_name)\n iterator = tqdm(keys, desc=desc_str, total=n_tot)\n else:\n iterator = keys\n\n for key in iterator:\n if isinstance(data[key], dict):\n new_data[key] = apply_to_all(data[key], func, **func_kws)\n else:\n new_data[key] = func(data[key], **func_kws)\n return new_data", "def fix_json_values(self, obj, callback, **kwargs):\n if type(obj) == list:\n newlist = []\n for item in obj:\n newlist.append(self.fix_json_values(item, callback, **kwargs))\n return newlist\n elif type(obj) == dict:\n newdict = {}\n for item in list(obj):\n if type(obj[item]) == list or type(obj[item]) == dict:\n newdict[item] = self.fix_json_values(callback(obj[item], item, **kwargs), callback, **kwargs)\n else:\n newdict[item] = callback(obj[item], item, **kwargs)\n return newdict\n else:\n return obj", "def _run_callback() -> None:\n\n if keys.ANY_KEY in self._bindings:\n method, _ = self._bindings[keys.ANY_KEY]\n method(self, key)", "def json_apply(fragment, check_func, func):\n if check_func(fragment):\n return func(fragment)\n elif isinstance(fragment, list):\n output = []\n for val in fragment:\n output.append(json_apply(val, check_func, func))\n return output\n elif isinstance(fragment, dict):\n output = {}\n for k, val in fragment.items():\n output[k] = json_apply(val, check_func, func)\n return output\n else:\n return fragment", "def step_into(json_dict, key, backtrack):\n if key in json_dict:\n backtrack.append(json_dict)\n if isinstance(json_dict[key], dict): \n show_key_options(json_dict[key], backtrack)\n elif isinstance(json_dict[key], list):\n el_count = len(json_dict[key])\n new_d = {}\n for el_num in range(el_count):\n new_d[str(el_num + 1)] = json_dict[key][el_num]\n show_key_options(new_d, backtrack)\n else:\n print(json_dict[key])\n print(\"You have reached your endpoint ^^\")", "def loads_json(function):\n def f(*args, **kwargs):\n return json.loads(function(*args, **kwargs))\n return f", "def substitute_json_keys(content, new_keys):\n substitute_keys_in_functions(content['functions'], new_keys)\n new_types = substitute_keys_in_types(content['types'], new_keys)\n content['types'] = new_types", "def crawl_json(json):\n for key in json:\n if type(json[key]) is dict:\n for k in crawl_json(json[key]):\n yield k\n yield key", "def inner(fn_inner):\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + 
str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler", "def iterkeys(self, *args, **kwargs):\n self.__iter__(*args, **kwargs)", "def subscribe(callback, keys):\n assert type(keys) in (set, list, tuple)\n _subscribers[callback] = keys", "def find_keys(obj, key):\n arr = []\n\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n results = extract(obj, arr, key)\n return results", "def pull_key(key_fun):\n def pull_key_fun(objs):\n return dict((key_fun(value), value) for value in objs)\n return pull_key_fun", "def iterkeys(d, **kw):\r\n return iter(getattr(d, _iterkeys)(**kw))", "def iterkeys(d, **kw):\r\n return iter(getattr(d, _iterkeys)(**kw))", "def json_imap(mapping, iterable):\n for item in iterable:\n yield scraper.json_map(mapping, item)", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def test_dict(self, dictionary: dict) -> None:\r\n if not isinstance(dictionary, dict):\r\n raise ValueError(f'Expected dictionary, but received {type(dictionary)}')\r\n for key, value in dictionary.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if isinstance(value, dict):\r\n self.test_dict(dictionary=value)\r\n elif isinstance(value, list):\r\n self.test_list(items=value)", "def substitute_keys_in_functions(functions, new_keys):\n for _, func in functions.items():\n func['ret_type'] = new_keys[func['ret_type']]\n substitute_params_keys(func['params'], new_keys)", "def each(self, callback):\n if not callable(callback):\n raise CallbackTypeError()\n\n self._each_got_more(callback, None)", "def json_in(fn):\n @wraps(fn)\n def new(arg):\n # convert the args in JSON to a python object\n arg = json.loads(arg)\n return fn(arg)\n return new", "def test():\n test = [{'key': 'val1'}, ['key']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'", "def call(self) -> List[Dict]:", "def iterkeys(d):\r\n return iter(getattr(d, _iterkeys)())", "def _handle_key_event(self, key, modifiers, mapping):\n if key in mapping:\n for callback in mapping[key]:\n callback()", "def run_callbacks(self, **kwargs):\n for callback in self.CALLBACKS:\n getattr(self, callback)(**kwargs)" ]
[ "0.60165715", "0.57527566", "0.55811495", "0.5573455", "0.5379951", "0.5370649", "0.5307617", "0.52369964", "0.5205658", "0.5186293", "0.51724434", "0.5159436", "0.5126748", "0.5125651", "0.5117088", "0.50605685", "0.5046835", "0.5046835", "0.50238544", "0.50214005", "0.50214005", "0.50213444", "0.50181174", "0.50090694", "0.5004567", "0.4979009", "0.49361348", "0.49033955", "0.48820674", "0.48803094" ]
0.654714
0
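As an aside on the record above, the recursive key-renaming pattern can be exercised standalone; the lower-casing callback and the sample payload below are made up for the example.

```python
# Sketch of the recursive key-renaming pattern shown in the record above.
def fix_json_keys(obj, callback):
    if isinstance(obj, list):
        return [fix_json_keys(item, callback) for item in obj]
    if isinstance(obj, dict):
        return {
            callback(key): fix_json_keys(value, callback)
            if isinstance(value, (list, dict)) else value
            for key, value in obj.items()
        }
    return obj

payload = {"userName": "ada", "Addresses": [{"ZipCode": "10001"}]}
print(fix_json_keys(payload, str.lower))
# {'username': 'ada', 'addresses': [{'zipcode': '10001'}]}
```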
Runs all values in a JSON object (dict or list) through the given callback function. Callback should be passed two arguments: the value and its key.
def fix_json_values(self, obj, callback, **kwargs): if type(obj) == list: newlist = [] for item in obj: newlist.append(self.fix_json_values(item, callback, **kwargs)) return newlist elif type(obj) == dict: newdict = {} for item in list(obj): if type(obj[item]) == list or type(obj[item]) == dict: newdict[item] = self.fix_json_values(callback(obj[item], item, **kwargs), callback, **kwargs) else: newdict[item] = callback(obj[item], item, **kwargs) return newdict else: return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_apply(fragment, check_func, func):\n if check_func(fragment):\n return func(fragment)\n elif isinstance(fragment, list):\n output = []\n for val in fragment:\n output.append(json_apply(val, check_func, func))\n return output\n elif isinstance(fragment, dict):\n output = {}\n for k, val in fragment.items():\n output[k] = json_apply(val, check_func, func)\n return output\n else:\n return fragment", "def send_callback_json_message(value, *args, **kwargs):\r\n\r\n if value:\r\n kwargs['result'] = value\r\n\r\n send_json_message(args[0], args[1], **kwargs)\r\n\r\n return value", "def iterate(d, fun): # type: (Dict, Callable[[Any, Any], None]) -> None\n for key, value in d.items():\n if isinstance(value, dict):\n DictUtil.iterate(value, fun)\n else:\n fun(key, value)", "def loads_json(function):\n def f(*args, **kwargs):\n return json.loads(function(*args, **kwargs))\n return f", "def apply_callback(self, all_values: Namespace) -> Any:\n if not callable(self.callback):\n return self.value\n\n try:\n val = self.callback(self.value, all_values)\n except TypeError as terr: # pragma: no cover\n # len() takes exactly one argument (2 given)\n # <lambda>() takes 1 positional argument but 2 were given\n if not re.search(r\"takes .+ argument .+ given\", str(terr)):\n raise\n val = self.callback(self.value)\n\n if isinstance(val, Exception):\n raise PyParamTypeError(str(val))\n return val", "def callback(self, fun: Callable[[], None] | None) -> None:", "def jsonify(func, *args, **kwargs): \n adict = func(*args, **kwargs)\n if not isinstance(adict, dict):\n return adict\n \n \n #: getting updates from session and database\n \n updates = list(session['callback_updates']) \n updates.extend(models.CallbackUpdate.dump())\n \n if updates:\n if not adict.get('type') == 'composite':\n adict = beans._wrap('composite', [adict]) \n \n adict['result'].extend(updates)\n \n json = simplejson.dumps(adict)\n response = make_response(json) \n response.headers['Content-Type'] = 'application/json'\n session['callback_updates'] = []\n db.session.commit() \n return response", "def fix_json_keys(self, obj, callback):\n if type(obj) == list:\n newlist = []\n for item in obj:\n newlist.append(self.fix_json_keys(item, callback))\n return newlist\n elif type(obj) == dict:\n newdict = {}\n for item in list(obj):\n if type(obj[item]) == list or type(obj[item]) == dict:\n newdict[callback(item)] = self.fix_json_keys(obj[item], callback)\n else:\n newdict[callback(item)] = obj[item]\n return newdict", "def callback(cb): \n def cb_func(*args):\n self = args[0]\n (value, is_last) = cb(*args)\n if (value is not None):\n self._cb_return[cb.__name__] = self._cb_return.get(cb.__name__, []) + \\\n [value]\n if (is_last):\n self._cb_event[cb.__name__] = True\n return cb_func", "def call(self) -> List[Dict]:", "def walk(obj,dict_fn):\n if isinstance(obj,dict):\n result = dict()\n for key, value in obj.items():\n result[key] = walk(value, dict_fn)\n return dict_fn(result)\n if isinstance(obj,list):\n return [walk(i,dict_fn) for i in obj]\n return obj", "def _multiple_callbacks(callbacks, *args, **kwargs):\n if isinstance(callbacks, list):\n for cb in callbacks:\n cb(*args, **kwargs)\n return\n if callbacks:\n callbacks(*args, **kwargs)", "def json_in(fn):\n @wraps(fn)\n def new(arg):\n # convert the args in JSON to a python object\n arg = json.loads(arg)\n return fn(arg)\n return new", "def dumps_json(function):\n def f(*args, **kwargs):\n return json.dumps(function(*args, **kwargs))\n return f", "def callback(ch, method, 
properties, body):\n record = json.loads(body.decode()) # decode binary string to dict\n pprint(record)", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def json_imap(mapping, iterable):\n for item in iterable:\n yield scraper.json_map(mapping, item)", "def apply_func(output, func):\n new_output = []\n for dict in output:\n mnemonic = copy.deepcopy(dict['mnemonic'])\n values = dict['values']\n new_values = func(values)\n new_output.append({'mnemonic': mnemonic, 'values': new_values})\n return new_output", "def _callback(self, data: list):\n self.data = data", "def test_get_value_list_result(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n test_data.append(json.loads('{\"name\": \"Gwen\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(len(result_list) == 2)", "def handle_json_arguments(context: click.Context, param: click.Parameter, items: Sequence[str]) -> Optional[dict]:\n if not items or context.resilient_parsing:\n return\n return parse_dict_items(items)", "def collecting_callback():\n calls = []\n\n def cb(**kwargs):\n calls.append(kwargs)\n\n return cb, calls", "def jsonResult(f):\n def _inner(self, request):\n d = maybeDeferred(f, self, request)\n d.addCallback(_writeJSONResponse, request)\n d.addErrback(_writeJSONErrorResponse, request)\n return NOT_DONE_YET\n return _inner", "def each(self, callback):\n if not callable(callback):\n raise CallbackTypeError()\n\n self._each_got_more(callback, None)", "def execute(self):\n\t\tfor callback in self:\n\t\t\tcallback()", "def per_list(results: dict, key: str, file_vals: Dict[Path, Any],\n action: Callable):\n\n all_vals = []\n for _, val in file_vals.items():\n val = action(val)\n\n if isinstance(val, list):\n all_vals.extend(val)\n else:\n all_vals.append(val)\n\n return store_values(results, key, all_vals)", "def inner(*args, **kwargs):\n return Response(\n dumps(function(*args, **kwargs)),\n mimetype='application/json'\n )", "def visititems(\n self, func: Callable[[str, H5ObjectLike], Optional[Any]]\n ) -> Optional[Any]:\n for key, val in self._recurse():\n result = func(key, val)\n if result is not None:\n return result", "def run_callback(func, plus, result):\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)", "def test_batch(self):\n req = '''[{\"foo\": \"boo\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},\n {\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"}\n ]'''\n\n resp = '''[{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid members in request object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"result\": 19, \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"id\": \"5\", \"error\": {\"message\": \"MethodNotFoundError: Method foo.get not found\", \"code\": -32601}}\n ]'''\n\n status = 200\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))" ]
[ "0.6534004", "0.59166056", "0.58235997", "0.5695522", "0.5678162", "0.5629116", "0.5615373", "0.56030566", "0.54738176", "0.54465544", "0.54227227", "0.53541183", "0.5297419", "0.5266667", "0.5264149", "0.52472883", "0.52044153", "0.5186965", "0.51423436", "0.5129454", "0.51147956", "0.509486", "0.50789136", "0.5022382", "0.49812827", "0.49621138", "0.49595988", "0.49551105", "0.4948105", "0.49435866" ]
0.64443946
1
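As with the previous record, here is a short standalone sketch of the value-callback recursion above; the redaction callback and the sample document are hypothetical, chosen only to show that the callback receives each value together with its key.

```python
# Sketch: apply a callback to every value, receiving (value, key).
def fix_json_values(obj, callback, **kwargs):
    if isinstance(obj, list):
        return [fix_json_values(item, callback, **kwargs) for item in obj]
    if isinstance(obj, dict):
        out = {}
        for key, value in obj.items():
            if isinstance(value, (list, dict)):
                out[key] = fix_json_values(callback(value, key, **kwargs), callback, **kwargs)
            else:
                out[key] = callback(value, key, **kwargs)
        return out
    return obj

def redact(value, key, secret_keys=("password",)):
    return "***" if key in secret_keys else value

doc = {"user": "ada", "password": "hunter2", "meta": {"password": "x"}}
print(fix_json_values(doc, redact))
# {'user': 'ada', 'password': '***', 'meta': {'password': '***'}}
```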
Returns a query for copying and replacing a table, applying the given callback to each column name.
def copy_and_replace_keys(self, table, key_callback): client = self.bq_client t = client.get_table(table) cross_joins = [] # begin query generation process q = f'CREATE OR REPLACE TABLE `{table}` AS (\nSELECT \n' for field in t.schema: q += process_field(field, None, key_callback) cross_joins.extend(process_cross_joins(field, "copy_table")) q = q.strip(",\n") q += f"\nFROM\n `{table}` copy_table" for cross_join in cross_joins: q += cross_join q += ")" return q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_new_table(db_old, db_new, table, col_index, new_col_list, ord_users, ord_subs):\n con = lite.connect(db_old)\n with con:\n cur = con.cursor()\n cur.execute(\"SELECT * FROM \" + table)\n tuple_list = cur.fetchall()\n for i in range(0, len(new_col_list)):\n tuple_list[i] = replace_tuple(tuple_list[i], new_col_list[i], col_index)\n #anonymize username and submission id\n if(table == \"Comments\"):\n anon_users = anonymize(strip_tuple(tuple_list, 1), ord_users)\n anon_subs = anonymize(strip_tuple(tuple_list, 5), ord_subs)\n for i in range(0, len(new_col_list)):\n tuple_list[i] = replace_tuple(tuple_list[i], anon_users[i], 1)\n tuple_list[i] = replace_tuple(tuple_list[i], anon_subs[i], 5)\n elif(table == \"Submissions\"):\n for i in range(0, len(new_col_list)):\n tuple_list[i] = replace_tuple(tuple_list[i], i, 0)\n num_bindings = len(tuple_list[0])\n bindings = ('?,' * num_bindings)[:-1]\n con = lite.connect(db_new)\n with con:\n cur = con.cursor()\n cur.executemany(\"INSERT INTO \" + table + \" VALUES\" + \" (\"+ bindings + \")\", tuple_list)", "def copyData(self, src_schema, src_table, src_columns, dest_schema, dest_table, dest_columns):\r\n sql = 'INSERT INTO {} ( {} ) SELECT {} FROM {}'.format(self.encodeTableName(dest_schema, dest_table), ','.join(dest_columns),\r\n ','.join(src_columns), self.encodeTableName(src_schema, src_table))\r\n return self.runSql(sql)", "def update_table(table_name):\n for filename in table_name_to_funcs[table_name][\"filename\"]:\n choose_file_to_get(table_name_to_funcs[table_name][\"file_type\"], filename)\n\n for process_func in table_name_to_funcs[table_name][\"process\"]:\n process_func()\n for to_sql_func in table_name_to_funcs[table_name][\"to_sql\"]:\n to_sql_func(update=True)", "def deferred_to_columns_cb(self, target, model, fields):\n table = model._meta.db_table\n if table not in target:\n target[table] = set()\n for field in fields:\n if not hasattr(field.column, \"columns\"):\n target[table].add(field.column)\n else:\n target[table].update(field.column.columns)", "def table_callback(table, data, event, column=False):\n if column:\n table.object = data.loc[event.new]\n else:\n table.object = data.loc[event.new:event.new]", "def import_table(ctx: DataFunctionContext, table_name: str, copy: bool = True):\n target_storage = ctx.execution_config.get_target_storage()\n if ensure_bool(copy):\n as_identifier = target_storage.get_api().get_quoted_identifier\n sql = f\"select * from {as_identifier(table_name)}\"\n # TODO: DRY this pattern\n sdf = SqlDataFunctionWrapper(sql)\n\n def get_sql(*args, **kwargs):\n return sql\n\n sdf.get_compiled_sql = get_sql\n return sdf(ctx)\n else:\n ctx.emit(\n name=table_name,\n storage=target_storage,\n data_format=\"table\",\n create_alias_only=True,\n )", "def MakeReplace(\n cls, table_name, cols, new_values, ignore=False):\n assert _IsValidTableName(table_name)\n assert all(_IsValidColumnName(col) for col in cols)\n ignore_word = ' IGNORE' if ignore else ''\n main_clause = 'INSERT%s INTO %s (%s)' % (\n ignore_word, table_name, ', '.join(cols))\n return cls(main_clause, insert_args=new_values, duplicate_update_cols=cols)", "def load_into_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def copy_table(source_table, destination_table, db='default'):\n try:\n with connections[db].cursor() as cursor:\n cursor.execute('CREATE TABLE IF NOT EXISTS %s LIKE %s;' % (destination_table, source_table))\n except:\n pass", "def 
create_staging_table(cursor,table_schema,table_name,column_names):\n if not column_names:\n create_table = \"CREATE TABLE IF NOT EXISTS \" + table_schema + \".\" + table_name + \"();\"\n else:\n create_table = \"CREATE TABLE IF NOT EXISTS \" + table_schema + \".\" + table_name + \" (\" + \" text,\".join(column_names) + \" text);\"\n create_table = create_table.replace(\",TO \",\",TOV \")\n cursor.execute(create_table)", "def delete_columns(self, table, cols):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n #Get info of current table\n table_info = self.table_col_info(table)\n \n #Remove the undesired columns and corresponding data types\n columns = [row[1] for row in table_info if row[1] not in cols]\n types=[row[2] for row in table_info if row[1] in columns]\n \n #Make new table with desired columns\n create_command = \"\"\"CREATE TABLE new_table ({})\"\"\".format(', '.join([''.join(str(i)+\" \"+str(j)) for i,j in zip(columns, types)]))\n c.execute(create_command)\n \n #Insert Data into new table\n c.execute(\"\"\"INSERT INTO new_table({0}) SELECT {0} FROM {1}\"\"\".format(','.join([', '.join(str(i) for i in columns)]),table))\n conn.commit()\n \n #Delete old table\n self.delete_table(table)\n self.display_all()\n \n #Rename new table to match the old one\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n c.execute('ALTER TABLE new_table RENAME TO {}'.format(table))\n \n \n conn.commit()\n conn.close()", "def replace_into(self, table_name, values):\n\n # This is safe: https://stackoverflow.com/questions/835092/\n # python-dictionary-are-keys-and-values-always-the-same-order\n column_names = list(values.keys())\n values = list(values.values())\n\n # Dynamically build the query\n # Be aware that the %s is NOT string formatting but parameter binding\n query = 'REPLACE INTO ' + table_name + ' (' + ', '.join(column_names) + \\\n ') VALUES (' + ', '.join(['%s'] * len(column_names)) + ')'\n\n # Execute the query and commit the results\n self.execute_query(query, tuple(values))\n\n return self.cursor.lastrowid", "def _remake_table(self, table_name, renames={}, deleted=[], altered={}):\r\n # Dry runs get skipped completely\r\n if self.dry_run:\r\n return\r\n # Temporary table's name\r\n temp_name = \"_south_new_\" + table_name\r\n # Work out the (possibly new) definitions of each column\r\n definitions = {}\r\n cursor = self._get_connection().cursor()\r\n for column_info in self._get_connection().introspection.get_table_description(cursor, table_name):\r\n name = column_info[0]\r\n type = column_info[1]\r\n # Deal with an alter (these happen before renames)\r\n if name in altered:\r\n type = altered[name]\r\n # Deal with a rename\r\n if name in renames:\r\n name = renames[name]\r\n # Add to the defs\r\n definitions[name] = type\r\n # Alright, Make the table\r\n self.execute(\"CREATE TABLE %s (%s)\" % (\r\n self.quote_name(temp_name),\r\n \", \".join([\"%s %s\" % (self.quote_name(cname), ctype) for cname, ctype in definitions.items()]),\r\n ))\r\n # Copy over the data\r\n self._copy_data(table_name, temp_name, renames)\r\n # Delete the old table, move our new one over it\r\n self.delete_table(table_name)\r\n self.rename_table(temp_name, table_name)", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n 
cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print(e)", "def get_table( db, table_name, key_name, suffix=None, columns='*'):\n \n if suffix == None:\n query = 'SELECT ' + columns + ' FROM ' + table_name + ';'\n else:\n query = 'SELECT ' + columns + ' FROM ' + table_name + ' ' + suffix + ';'\n \n #print query\n rowList = [ x for x in table_generator(db, query)]\n table = {}\n for r in rowList:\n table[r.__getattribute__(key_name)] = r\n \n return table", "def _copy_expert_csv(\n self, csv_reader: protocols.Reader, columns: Sequence[str], dest_table: str\n ) -> None:\n sql_columns = \",\".join(columns)\n sql_query = f\"\"\"COPY {dest_table} ({sql_columns}) FROM STDIN\n WITH CSV HEADER DELIMITER AS ','\n NULL AS 'NULL';\"\"\"\n raw_conn = self._get_raw_conn()\n try:\n raw_conn.cursor().copy_expert(sql_query, csv_reader)\n except Exception:\n raw_conn.rollback()\n raise\n else:\n raw_conn.commit()", "def make_query(table_name, cols, query):\n str_query = None\n if query == None:\n str_query = \"SELECT {} FROM {};\".format(cols, table_name)\n else:\n str_query = \"SELECT {} FROM {} {};\".format(cols, table_name, query)\n print(\">>>ejecutando: \", str_query)\n sistema.cursor.execute(str_query)\n for row in sistema.cursor.fetchall():\n print(row)", "def build_insert_query(self, query, columns, table_name):\n cols = \"\"\n values = \"\"\n on_dupe_values = \"\"\n\n for column in columns:\n cols += \"`{}`, \".format(column)\n values += \"%({})s, \".format(column)\n on_dupe_values += \"{} = VALUES({}), \".format(column, column)\n\n # Remove trailing whitespace and commas\n cols = cols.rstrip().rstrip(\",\")\n values = values.rstrip().rstrip(\",\")\n on_dupe_values = on_dupe_values.rstrip().rstrip(\",\")\n\n query = query.format(table_name=table_name, cols=cols, values=values, on_dupe_values=on_dupe_values)\n return query", "def copy_df_to_table(df, db_cursor, table_name, columns=None):\n\n # Save dataframe to an IO buffer in memory\n output = io.StringIO()\n df.to_csv(output, sep='\\t', header=False, index=False)\n output.seek(0)\n\n db_cursor.copy_from(output, f'{table_name}', null=\"\", columns=columns)", "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n print('staging', query)\n cur.execute(query)\n conn.commit()", "def _postprocess_name_columns(\n table: pyarrow.Table, has_header: bool, settings: Settings\n) -> Tuple[pyarrow.Table, List[I18nMessage]]:\n if has_header and table.num_rows > 0:\n names, warnings = gen_unique_clean_colnames_and_warn(\n list((c[0].as_py() if c[0].is_valid else \"\") for c in table.columns),\n settings=settings,\n )\n\n # Remove header (zero-copy: builds new pa.Table with same backing data)\n table = table.slice(1)\n else:\n names = [f\"Column {i + 1}\" for i in range(len(table.columns))]\n warnings = []\n\n return (\n pyarrow.table(dict(zip(names, table.columns))),\n warnings,\n )", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def load_parquet_tables(cur, conn):\n\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()\n print('Success, table data loaded!')", "def _refactor_time_columns(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _refactor_time_columns')\n write_cursor.execute('ALTER TABLE 
timed_balances RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE timed_location_data RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE trades RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE asset_movements RENAME COLUMN time TO timestamp')\n log.debug('Exit _refactor_time_columns')", "def dbcolumns(con,table,**kw):\n cur = con.cursor()\n cols = list(cur.execute(\"pragma table_info(\"+table+\")\"))\n colnames = [col[1] for col in cols]\n if colnames==[]:\n cmd = \"create table \"+table+\" (id integer primary key\"\n for k,v in kw.items():\n cmd += \", %s %s\"%(k,v)\n cmd += \")\"\n cur.execute(cmd)\n else:\n # table already exists; add any missing columns\n for k,v in kw.items():\n if not k in colnames:\n cmd = \"alter table \"+table+\" add column \"+k+\" \"+v\n cur.execute(cmd)\n con.commit()\n del cur", "def project(self, name, cols) :\n\n ct = [v \n for v in list(zip(self.get_cols(), self.get_types()))\n if v[0] in cols]\n\n base_row = dict(zip(cols, itertools.repeat(None)))\n \n def make_new_row(r) :\n values = {}\n values.update(base_row)\n values.update(r.as_dict())\n return values\n new_rows = [make_new_row(row) for row in self]\n\n new_table = self.factory.new_table(name, ct)\n new_table.add_rows(new_rows)\n return new_table" ]
[ "0.59699565", "0.5824967", "0.5479155", "0.54488003", "0.54385924", "0.54055893", "0.5390006", "0.53809685", "0.5363095", "0.53458697", "0.5302262", "0.52874994", "0.52289927", "0.5214595", "0.5214595", "0.5214595", "0.5214595", "0.5191025", "0.5183032", "0.5167538", "0.5145682", "0.51369834", "0.5136765", "0.5119159", "0.5107991", "0.50861573", "0.5066078", "0.50449693", "0.5018161", "0.49908426" ]
0.6539468
0
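The document in the record above relies on helpers (process_field, process_cross_joins) that are not shown, so the sketch below only illustrates the flat-schema case of the same idea: read the table schema and emit a CREATE OR REPLACE TABLE statement that renames each column through the callback. Table and column names are placeholders.

```python
# Simplified sketch: flat schemas only. The original also walks nested and
# repeated fields via process_field / process_cross_joins (not shown above).
def copy_and_replace_keys_flat(client, table, key_callback):
    t = client.get_table(table)  # google.cloud.bigquery Table with .schema
    cols = ",\n".join(
        f"  copy_table.`{field.name}` AS `{key_callback(field.name)}`"
        for field in t.schema
    )
    return (
        f"CREATE OR REPLACE TABLE `{table}` AS (\n"
        f"SELECT\n{cols}\n"
        f"FROM\n  `{table}` copy_table)"
    )

# e.g. copy_and_replace_keys_flat(bigquery.Client(), "proj.ds.events", str.lower)
```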
Send a test metric to AWS CloudWatch.
def main(): logging.basicConfig(level=logging.DEBUG) cloud_watch = create_cloud_watch( 'Test Namespace', asynchronous=False, buffered=False, dummy=False, dimensions={'By intent': 'Test'}, ) cloud_watch.log('awsme-test', {'By source': 'awsme'}) print('Successfully sent metric "awsme-test" to "Test Namespace"') return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postToCloudWatch(sServerId, sMetricName, sServerDesc, sStatus):\n import boto3.ec2\n if not hasattr(postToCloudWatch, 'oBoto'):\n postToCloudWatch.oBoto = boto3.client('cloudwatch',\n aws_access_key_id=getConfig('cloudwatch', 'access_id'),\n aws_secret_access_key=getConfig('cloudwatch', 'secret_key'))\n sNamespace = 'EC2: ' + sServerDesc\n print('Posting to EC2 CloudWatch: namespace: %s, metric: %s, instance: %s, value: %s' %\n (sNamespace, sMetricName, sServerId, sStatus))\n postToCloudWatch.oBoto.put_metric_data(\n Namespace=sNamespace,\n MetricData=[\n {'MetricName': sMetricName,\n 'Dimensions': [{'Name': 'InstanceId', 'Value': sServerId}],\n 'Value': round(float(sStatus), 1),\n 'Unit': 'Count'\n }\n ]\n )", "def sendMeasurement(self, metric, value, source, timestamp=None):\n sys.stdout.write('{0} {1} {2} {3}\\n'.format(metric, value, source, timestamp).decode('utf-8'))\n sys.stdout.flush()", "def aws_write(vl, flusher):\n flusher.add_metric(vl)", "def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))", "def send_metric(model_id, metric, value):\n host, port, namespace = get_metric_endpoint()\n\n metric_name = '%s.%s' % (namespace, get_metric_name(metric, model_id))\n message = \"%s %f %d\\n\" % (metric_name, float(value), int(time.time()))\n send_tcp(host, port, message)\n\n build_no = get_build_number()\n metric_name = '%s.%s' % (namespace, get_metric_name('build', model_id))\n message = \"%s %f %d\\n\" % (metric_name, build_no, int(time.time()))\n send_tcp(host, port, message)", "def test_metrics(client):\n response = client.get(\"/metrics\")\n assert response.status_code == 200", "def put_metric(cw_metric_name, statistic_value, config=None,\n cw_dimension_name=None, cw_namespace=None):\n\n try:\n if config:\n session = config.boto3_session()\n cw_dimension_name = config.cw_dimension_name\n cw_namespace = config.cw_namespace\n region = config.region\n else:\n session = Config.boto3_session()\n except:\n logger.exception(\"\")\n sys.exit(127)\n\n if not cw_dimension_name or not cw_metric_name:\n raise ValueError(\"You have to specify at least\\\n cw_dimension_name or config parameter\")\n\n cw = session.resource('cloudwatch', region_name=region)\n try:\n float(statistic_value)\n except ValueError:\n logger.error(\"Statistic value not convertible to float.\")\n return False\n\n try:\n if statistic_value == 0:\n statistic_value = 0.1\n\n cw.Metric(cw_namespace, cw_metric_name).put_data(\n MetricData=[\n {\n 'MetricName': cw_metric_name,\n 'Dimensions': [\n {\n 'Name': cw_dimension_name,\n 'Value': cw_metric_name\n }\n ],\n 'StatisticValues': {\n 'SampleCount': statistic_value,\n 'Sum': statistic_value,\n 'Minimum': statistic_value,\n 'Maximum': statistic_value\n },\n 'Unit': 'Count',\n 'StorageResolution': 1\n }\n ]\n )\n except:\n logger.exception(\"\")", "def lambda_metric(metric_name, value, timestamp=None, tags=None):\n tags = _tag_dd_lambda_layer(tags)\n if os.environ.get(\"DD_FLUSH_TO_LOG\", \"\").lower() == \"true\":\n logger.debug(\"Sending metric %s to Datadog via log forwarder\", metric_name)\n print(\n json.dumps(\n {\n \"m\": metric_name,\n \"v\": value,\n \"e\": timestamp or int(time.time()),\n \"t\": tags,\n }\n )\n )\n else:\n logger.debug(\"Sending metric %s to Datadog via lambda layer\", metric_name)\n lambda_stats.distribution(metric_name, value, timestamp=timestamp, 
tags=tags)", "def test_get_deployment_metric(self):\n pass", "def test_export_function(self):\n\n function_name = \"testcloudwatchlogs\"\n bucket_name = \"my-bucket-name\"\n fnb_name = \"fnb\" + function_name\n role = \"arn:aws:iam::123456789012:role/MyFunction\"\n security_group_ids = [\"sg-ABCDEFGHIJKL\"]\n subnet_ids = [\"subnet-ABCDEFGHIJKL\"]\n log_group = \"/aws/lambda/functionbeat-cloudwatch\"\n\n self._generate_dummy_binary_for_template_checksum()\n\n self.render_config_template(\n path=os.path.abspath(self.working_dir) + \"/log/*\",\n cloudwatch={\n \"name\": function_name,\n \"bucket\": bucket_name,\n \"role\": role,\n \"virtual_private_cloud\": {\n \"security_group_ids\": security_group_ids,\n \"subnet_ids\": subnet_ids,\n },\n \"log_group\": log_group,\n },\n )\n functionbeat_proc = self.start_beat(\n logging_args=[\"-d\", \"*\"],\n extra_args=[\"export\", \"function\", function_name]\n )\n\n self.wait_until(lambda: self.log_contains(\"PASS\"))\n functionbeat_proc.check_wait()\n\n function_template = self._get_generated_function_template()\n function_properties = function_template[\"Resources\"][fnb_name][\"Properties\"]\n\n assert function_properties[\"FunctionName\"] == function_name\n assert function_properties[\"Code\"][\"S3Bucket\"] == bucket_name\n assert function_properties[\"Role\"] == role\n assert function_properties[\"VpcConfig\"][\"SecurityGroupIds\"] == security_group_ids\n assert function_properties[\"VpcConfig\"][\"SubnetIds\"] == subnet_ids", "def handler(event, context):\n try:\n # Retrieve environment variables\n dimension_name = getenv(\"CODEDEPLOY_DIMENSION_NAME\")\n metric_name = getenv(\"CODEDEPLOY_METRIC_NAME\")\n if not dimension_name or not metric_name:\n return \"CODEDEPLOY_DIMENSION_NAME or CODEDEPLOY_METRIC_NAME not set\"\n\n # Get deployment state from CodeDeploy event\n deployment_state = event[\"detail\"][\"state\"]\n print(f\"Deployment state: {deployment_state}\")\n\n # Pushing custom metric to CW\n response = boto3.client(\"cloudwatch\").put_metric_data(\n MetricData=[\n {\n \"MetricName\": metric_name,\n \"Dimensions\": [{\"Name\": dimension_name, \"Value\": deployment_state}],\n \"Unit\": \"None\",\n \"Value\": 1,\n \"Timestamp\": datetime.datetime.now(),\n },\n ],\n Namespace=\"CodeDeployDeploymentStates\",\n )\n print(f\"Response from CW service: {response}\")\n return response\n # pylint: disable=broad-except\n except Exception as excpt:\n print(f\"Execution failed... 
{excpt}\")\n return None", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def send_metrics(timestamp: Optional[float] = None) -> bool:\n\n def new_point(metric_name: str, result: float):\n series = monitoring_v3.types.TimeSeries()\n series.metric.type = f\"custom.googleapis.com/{metric_name}\"\n\n point = series.points.add()\n point.interval.end_time.seconds = now\n\n if isinstance(result, float):\n point.value.double_value = result\n else:\n point.value.int64_value = result\n return series\n\n now = int(time.time())\n prev_minute_tstamp = timestamp or (now - (now % 60) - 60)\n metrics_pattern = f\"{Monitoring.ACC_PREFIX}_{prev_minute_tstamp}_*\"\n monitoring_keys = redis_client.keys(metrics_pattern)\n all_series = []\n for metric_key in monitoring_keys:\n raw_value = redis_client.get(metric_key)\n values: List[str] = raw_value.split(\"|\") # type: ignore\n metric_name = values.pop(0) # metric name\n op = values.pop(0) # operation - SUM or AVG\n typ = values.pop(0) # INT or FLOAT\n if typ == \"INT\":\n result = sum(map(int, values))\n if op == \"AVG\":\n result = result // len(values)\n else:\n result = sum(map(float, values)) # type: ignore\n if op == \"AVG\":\n result = result / len(values) # type: ignore\n\n all_series.append(new_point(metric_name, result))\n if op == \"AVG\": # create count for AVG metric too\n all_series.append(new_point(f\"{metric_name}_COUNT\", len(values)))\n\n try:\n monitor_client.create_time_series(project_path, all_series)\n except InvalidArgument:\n logging.exception(\"mark_point failed\")\n return False\n else:\n return True", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def setUp(self):\n super().setUp()\n self.metric = {\n \"name\": \"Metric\",\n \"type\": \"security_warnings\",\n \"sources\": {\"source_uuid\": {\"type\": \"owasp_zap\", \"name\": \"Source\"}},\n }", "def test_create_goal_metric(self):\n pass", "def test_cloudwatch_subscription_event(self):\n lh = LambdaHandler(\"tests.test_event_script_settings\")\n\n event = {\"awslogs\": {\"data\": \"some-data-not-important-for-test\"}}\n response = lh.handler(event, None)\n\n self.assertEqual(response, True)", "def test_update_goal_metric(self):\n pass", "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags'])\n\n for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):\n _tags.append('{}:{}'.format(label_name, label_value))\n if gauge:\n # submit raw metric\n self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)\n if monotonic_count:\n # submit rate metric\n self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)", "def create_metric(name,\n value,\n environment: str = None,\n zone: str = None, **kwargs):\n message = {\n 'name': name,\n 'value': value,\n 
'environment': environment,\n 'zone': zone,\n 'metric_type': kwargs.get('metric_type', 'ms'),\n 'az': kwargs.get('az', 'default'),\n 'timestamp': kwargs.get('timestamp', datetime.datetime.now().isoformat()),\n '__type': kwargs.get('__type', 'metric')\n }\n\n return message", "def test_metric_namespace(self):\n self.statsd.namespace = \"foo\"\n self.statsd.gauge('gauge', 123.4)\n self.assert_equal_telemetry('foo.gauge:123.4|g\\n', self.recv(2))", "def test_update_derived_metric(self):\n pass", "def test_get_measurement_history(self):\n device = DeviceFactory(node=Node.objects.first(), external_id='123', type__code=SecureDeviceType.SRT321,\n device_param__type__code=SecureDeviceParameterType.MEASURED_TEMPERATURE)\n d_id_1 = device.external_id\n\n now_loc = datetime.datetime.now(bst)\n ts_loc = now_loc - datetime.timedelta(seconds=30)\n ts_str = ts_loc.strftime('%Y-%m-%dT%H:%M:%S')\n\n data = self.create_secure_server_push_data(d_id_1, ts_str)\n\n SecureClient.process_push_data(data)\n time.sleep(.5)\n\n # get newer timestamp\n ts_str = now_loc.strftime('%Y-%m-%dT%H:%M:%S')\n data = self.create_secure_server_push_data(d_id_1, ts_str, value=\"23.5\")\n\n SecureClient.process_push_data(data)\n\n token = Token.objects.get(user__username=email)\n device_param = device.parameters.first()\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n url = reverse('api:device_measurements', kwargs={'device_parameter_id': device_param.id})\n\n time.sleep(.5)\n\n response = client.get(url, format='json')\n\n self.assertTrue(response.status_code == 200)\n self.assertTrue(len(response.data) >= 2)", "def __init__(self):\n super().__init__()\n self.metric = 'SNSVTY'", "def metric(env, metric):\n envs = environments()\n check_env(env, envs)\n\n name = unquote(metric)\n metric = get_or_abort(puppetdb.metric, metric)\n return render_template(\n 'metric.html',\n name=name,\n metric=sorted(metric.items()),\n envs=envs,\n current_env=env)", "def test_create_derived_metric(self):\n pass", "def test_add_tag_to_derived_metric(self):\n pass", "def _cloudwatch_metrics_boto3(self, region_name):\n boto3.setup_default_session(region_name = region_name)\n cloudwatch_resource = boto3.resource('cloudwatch')\n return cloudwatch_resource", "def test_metrics_server(self):\n validate_metrics_server()" ]
[ "0.6268691", "0.62427914", "0.6162454", "0.61112636", "0.60867095", "0.6058298", "0.60395", "0.59895533", "0.59845847", "0.5904876", "0.5773967", "0.57484984", "0.569267", "0.56479716", "0.5644985", "0.56444424", "0.56369525", "0.56129986", "0.5581815", "0.55798525", "0.5546753", "0.55361676", "0.5527928", "0.55251265", "0.5524985", "0.5503279", "0.5474436", "0.54576933", "0.5452654", "0.5438224" ]
0.72424513
0
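The record above publishes through an awsme-style wrapper; for comparison, here is a boto3-only sketch that sends the same kind of custom metric. The namespace, metric name, and dimensions mirror the example, while region and credentials are assumed to come from the environment.

```python
# Sketch: publish the same kind of custom metric with boto3 directly.
import boto3

cloudwatch = boto3.client("cloudwatch")  # region/credentials from environment
cloudwatch.put_metric_data(
    Namespace="Test Namespace",
    MetricData=[{
        "MetricName": "awsme-test",
        "Dimensions": [
            {"Name": "By intent", "Value": "Test"},
            {"Name": "By source", "Value": "awsme"},
        ],
        "Value": 1.0,
        "Unit": "Count",
    }],
)
```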
Parses log files to extract marginal likelihood estimates produced by running the variational inference algorithm on a dataset.
def parse_logs(files): marginal_likelihood = [] for file in files: handle = open(file,'r') for line in handle: if 'Marginal Likelihood' in line: m = float(line.strip().split('=')[1]) marginal_likelihood.append(m) break handle.close() return marginal_likelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_extract(log_info):\n \n #Handle file names, strings and open file-like objects equivalently\n with uber_open_rmode(log_info) as log_info:\n \n headers = []\n footers = []\n i = 0\n \n #for all lines in file/output\n for line in log_info:\n \n #skip blank lines\n if len(line.split()) == 0:\n continue\n \n #This is listed before both run and minimize simulations \n if 'Memory usage per processor =' in line:\n headers.append(i+1)\n \n #This follows both run and minimize simulations\n elif 'Loop time of' in line:\n footers.append(i-1)\n \n i += 1\n \n #Add last line to footers for incomplete logs\n footers.append(i)\n \n log_info.seek(0)\n \n #Create DataModelDict root\n log_dict = DM()\n log_dict['LAMMPS-log-thermo-data'] = DM()\n \n #for all lines in file/output\n for header, footer in zip(headers, footers):\n\n #Read thermo data\n df = pd.read_csv(log_info, header=header, nrows=footer-header, sep='\\s+', engine='python', skip_blank_lines=True)\n log_info.seek(0) \n\n #Convert to DataModelDict\n thermo = DM()\n for j in df:\n thermo[str(j)] = df[j].values.tolist()\n \n #Append simulation results to DataModelDict root\n simulation = DM([('thermo', thermo)])\n log_dict['LAMMPS-log-thermo-data'].append('simulation', simulation)\n \n return log_dict", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def parse_log(path_to_log):\n regex_iteration = re.compile('Iteration (\\d+), loss = ([\\.\\deE+-]+)')\n regex_train_output = re.compile('Train net output #(\\d+): (\\S+) = ([\\.\\deE+-]+)')\n regex_learning_rate = re.compile('lr = ([\\.\\deE+-]+)')\n regex_test_output = re.compile('Test net output #(\\d+): detection_eval = ([\\.\\deE+-]+)')\n\n\n # Pick out lines of interest\n iteration = 0\n loss = -1\n learning_rate = 0.001\n train_dict_list = []\n train_row = None\n test_score=0.0\n\n logfile_year = extract_seconds.get_log_created_year(path_to_log)\n with open(path_to_log) as f:\n start_time = extract_seconds.get_start_time(f, logfile_year)\n last_time = start_time\n\n for line in f:\n iteration_match = regex_iteration.search(line)\n if iteration_match:\n iteration = float(iteration_match.group(1))\n loss = float(iteration_match.group(2))\n try:\n time = extract_seconds.extract_datetime_from_line(line,\n logfile_year)\n except:\n # Skip lines with bad formatting, for example when resuming solver\n continue\n\n # if it's another year\n if time.month < last_time.month:\n logfile_year += 1\n time = extract_seconds.extract_datetime_from_line(line, logfile_year)\n last_time = time\n\n seconds = (time - start_time).total_seconds()\n\n learning_rate_match = regex_learning_rate.search(line)\n\n if learning_rate_match:\n learning_rate = float(learning_rate_match.group(1))\n\n test_score_match = regex_test_output.search(line)\n if test_score_match:\n test_score = float(test_score_match.group(2))\n\n train_dict_list, train_row = parse_line_for_net_output(\n regex_train_output, train_row, train_dict_list,\n line, iteration, seconds, learning_rate,loss,test_score\n )\n\n\n return train_dict_list", "def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = 
int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def log_likelihood(self, data, reward_model, bias_params):", "def analyse_loglike(test_data, mods):\r\n l1 = list(map(lambda x: x + ' NB', mods.names))\r\n l1.extend(list(map(lambda x: x + ' ZI', mods.names)))\r\n l1.extend(list(map(lambda x: x + ' P', mods.names)))\r\n loglikeNB = np.array(mods.compute_log_likelihood(test_data, 'NB'))\r\n loglikeZI = np.array(mods.compute_log_likelihood(test_data, 'ZI'))\r\n loglikeP = np.array(mods.compute_log_likelihood(test_data, 'P'))\r\n # loglikeG = np.array(mods.compute_log_likelihood_gaussian(test_data))\r\n # loglikegeo = np.array(mods.compute_log_likelihood_geom(test_data))\r\n LL = np.zeros((loglikeNB.shape[0] * 3, loglikeNB.shape[1]))\r\n LL[:loglikeNB.shape[0], :] = loglikeNB\r\n LL[loglikeNB.shape[0]:2 * loglikeNB.shape[0], :] = loglikeZI\r\n LL[2 * loglikeNB.shape[0]:3 * loglikeNB.shape[0], :] = loglikeP\r\n # LL[3 * loglikeNB.shape[0]:4 * loglikeNB.shape[0], :] = loglikeG\r\n # LL[4 * llzi.shape[0]:, :] = np.array(mods.loglikegeo)\r\n print('mean per model', list(zip(np.ma.masked_invalid(LL).sum(axis=1), map(lambda x: x.mod.name, mods.models))))\r\n print('mean per distrib')\r\n print(np.ma.masked_invalid(LL[:loglikeNB.shape[0], :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0]:loglikeNB.shape[0] * 2, :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0] * 2:loglikeNB.shape[0] * 3, :]).mean())\r\n # print(np.nanmean(LL[1-np.isinf(LL)], axis=1))\r\n # print(np.nanmean(LL[LL != np.inf],axis=1))\r\n LL[np.isnan(LL)] = 0\r\n LL[np.isinf(LL)] = 0\r\n LL[LL == 0] = -np.inf\r\n r = np.argmax(LL, axis=0)\r\n # LL /= mx\r\n print('mean_best', np.mean(np.ma.masked_invalid(LL[r, range(LL.shape[1])])))\r\n mx = np.max(LL, axis=0)\r\n LL = LL / mx\r\n means = test_data.get_miniOD(None)[test_data.get_stations_col(None)].mean(axis=0).to_numpy()\r\n # for i in np.unique(r):\r\n # print(means[r == i].max())\r\n print('mean NB', means[r < loglikeNB.shape[0]].mean())\r\n print('mean ZI', means[(r < 2 * loglikeNB.shape[0]) * (r > loglikeNB.shape[0])].mean())\r\n print('mean poisson', means[(r < 3 * loglikeNB.shape[0]) * (r > 2 * loglikeNB.shape[0])].mean())\r\n # print('mean ga', means[(r < 4 * llzi.shape[0]) * (r > 3 * llzi.shape[0])].mean())\r\n # print('mean Gaussian', means[r > 3 * loglikeNB.shape[0]].mean())\r\n print('model name, mean trips per model, LL/maxLL, N inf')\r\n for i in range(LL.shape[0]):\r\n print(l1[i], means[r == i].mean(), np.mean(np.ma.masked_invalid(LL[i, :])), np.sum(np.isinf(LL[i, :])))\r\n print(np.ma.corrcoef(np.ma.masked_invalid(LL[i, :]), means[:LL.shape[1]])[1, 0])\r\n plt.hist(r, bins=np.arange(-0.5, 3 * len(mods.names) + 1, 1))\r\n\r\n # l1.extend(list(map(lambda x: x + ' geo', mods.names)))\r\n # l1.extend(list(map(lambda x: x + ' G', mods.names)))\r\n plt.xticks(range(len(l1)), l1, rotation='vertical')\r\n plt.show()\r\n\r\n for m in mods.loglike:\r\n print(m)\r\n print(m[np.logical_not(np.isinf(m))].mean())", "def process_extracted_logits(args, concat_logits, sentence_token_ids):\n # (batch_size, max_len, vocab_size)\n\n # concatenate all 
batches\n prediction_scores = torch.cat(concat_logits, axis=0)\n\n if prediction_scores.shape[0] == 0:\n return [None], [None], [None]\n elif prediction_scores.shape[0] == 1:\n true_y = torch.tensor(sentence_token_ids[0][1:]).unsqueeze(-1)\n else:\n sti = torch.tensor(sentence_token_ids)\n true_y = torch.cat([sti[0, 1:], sti[1:, -1]]).unsqueeze(-1)\n\n prediction_probabilities = F.softmax(prediction_scores, dim=1)\n\n logp = np.log2(prediction_probabilities)\n entropy = [None] + torch.sum(-prediction_probabilities * logp,\n dim=1).tolist()\n\n top1_probabilities, top1_probabilities_idx = prediction_probabilities.max(\n dim=1)\n predicted_tokens = args.tokenizer.convert_ids_to_tokens(\n top1_probabilities_idx)\n predicted_words = [\n args.tokenizer.convert_tokens_to_string(token)\n for token in predicted_tokens\n ]\n\n # top-1 probabilities\n top1_probabilities = [None] + top1_probabilities.tolist()\n # top-1 word\n top1_words = [None] + predicted_words\n # probability of correct word\n true_y_probability = [None] + prediction_probabilities.gather(\n 1, true_y).squeeze(-1).tolist()\n #TODO: probabilities of all words\n\n return top1_words, top1_probabilities, true_y_probability, entropy", "def analyze(self, names=None, anadir=None):\n if anadir is None:\n anadir = self.expdir\n\n # Check number of valid runs\n print(\"Experiment directory: \"+self.expdir)\n print(\"Total number of runs: {}\".format(len(self)))\n print(\"Number of successful runs: {}\".format(self.get_valid().sum()))\n\n\n # Check outputs\n # =============\n names = names or []\n names = names + [x.name for x in self.model.likelihood \n if x.name not in names]\n if not names:\n names = self.get_output_names()\n logging.info(\"Detected output variables: \"+\", \".join(names))\n\n\n # Write output variables\n # ======================\n if names:\n xoutput = self.get_output(names)\n else:\n xoutput = None\n\n if xoutput is not None:\n outputfile = os.path.join(anadir, \"output.txt\")\n logging.info(\"Write output variables to \"+outputfile)\n xoutput.write(outputfile)\n\n # Derive likelihoods\n # ==================\n xlogliks = self.get_logliks()\n file = os.path.join(anadir, 'logliks.txt')\n logging.info('write logliks to '+ file)\n xlogliks.write(file)\n\n # Sum-up and apply custom distribution\n # ====================================\n logliksum = xlogliks.values.sum(axis=1)\n file = os.path.join(anadir, \"loglik.txt\")\n logging.info('write loglik (total) to '+ file)\n np.savetxt(file, logliksum)\n\n # Add statistics\n # ==============\n valid = np.isfinite(logliksum)\n ii = [xoutput.names.index(c.name) for c in self.model.likelihood]\n output = xoutput.values[:, ii] # sort !\n pct = lambda p: np.percentile(output[valid], p, axis=0)\n\n names = [c.name for c in self.model.likelihood]\n\n #TODO: include parameters in the stats\n #for c in self.model.prior:\n # if c.name not in self.params.names:\n # raise ValueError('prior name not in params: '+c.name)\n\n res = [\n (\"obs\", [c.dist.mean() for c in self.model.likelihood]),\n (\"best\", output[np.argmax(logliksum)]),\n (\"mean\", output[valid].mean(axis=0)),\n (\"std\", output[valid].std(axis=0)),\n (\"min\", output[valid].min(axis=0)),\n (\"p05\", pct(5)),\n (\"med\", pct(50)),\n (\"p95\", pct(95)),\n (\"max\", output[valid].max(axis=0)),\n (\"valid_99%\", self.get_valids(0.99).values.sum(axis=0)),\n (\"valid_67%\", self.get_valids(0.67).values.sum(axis=0)),\n ]\n\n index = [nm for nm,arr in res if arr is not None]\n values = [arr for nm,arr in res if arr is not None]\n\n 
stats = str_dataframe(names, values, include_index=True, index=index)\n\n with open(os.path.join(anadir, 'stats.txt'), 'w') as f:\n f.write(stats)\n\n #import pandas as pd\n #df = pd.DataFrame(np.array(values), columns=names, index=index)\n\n #f.write(str(df))", "def analysis(N):\n\n http_log_paths = get_http_logs()\n\n httplogs = []\n\n for path in http_log_paths:\n file = path+'/http.log'\n if os.path.isfile(file):\n httplogs.append(file)\n else:\n pass #print(path)\n\n fields = []\n\n for log in httplogs:\n with open(log) as f:\n lines = f.readlines()\n rows = len(lines)\n filesize = sum([len(line) for line in lines])\n\n tss = [] # time series\n methods = []\n uris = []\n uas = []\n request_body_lens = []\n response_body_lens = []\n status_codes = []\n filenames = []\n\n tmp = []\n\n for line in lines[8:len(lines)-1]:\n fs = line.strip().split('\\t')\n\n \"\"\"\n ts = fileds[0]\n uid = fileds[1]\n orig_h = fileds[2]\n orig_p = fileds[3]\n resp_h = fileds[4]\n resp_p = fileds[5]\n trans_depth = fileds[6]\n method = fileds[7]\n host = fileds[8]\n uri = fileds[9]\n referrer = fileds[10]\n user_agent = fileds[11]\n request_body_len = fileds[12]\n response_body_len = fileds[13]\n status_code = fileds[14]\n status_msg = fileds[15]\n info_code = fileds[16]\n info_msg = fileds[17]\n filename = fileds[18]\n tags = fileds[19]\n username = fileds[20]\n password = fileds[21]\n proxied = fileds[22]\n orig_fuids = fileds[23]\n orig_mime_types = fileds[24]\n resp_fuids = fileds[25]\n resp_mime_types = fileds[26]\n\n tss.append(ts)\n methods.append(method)\n uris.append(uri)\n uas.append(user_agent)\n request_body_lens.append(request_body_len)\n response_body_lens.append(response_body_len)\n status_codes.append(status_code)\n filenames.append(filename)\n \"\"\"\n\n tmp.append(fs[N])\n\n #print(log, rows, ','.join(methods))\n\n # time intervals\n #tss_sorted = sorted(map(float,tmp))\n #tss_sorted = map(float, tmp)\n #intervals = map(int,[tss_sorted[i+1]-tss_sorted[i] for i in range(len(tss_sorted)-1)])\n #print('%s %s' % (log, ' '.join(map(str,intervals))))\n #file = urlparse(fs[N]).path.split('/')[-1].split('.')\n #if len(file)>1:\n # tmp.append(file[-1])\n #tmp.append(urlparse(fs[N]).path.split('/')[-1])\n #tmp.append(urlparse(fs[N]).path)\n\n #fields.append(set(tmp))\n #fields.append(intervals)\n fields.append(tmp)\n\n\n dic = {}\n for i in fields:\n for j in i:\n if j in dic:\n dic[j] += 1\n else:\n dic[j] = 1\n ls = sorted(dic.items(), lambda x,y: cmp(x[1], y[1]), reverse = True)\n for i in range(len(ls)):\n print('%s\\t%s' %(ls[i][0], ls[i][1]))\n #print('%s' % join(ls[i][1]))\n\n\n \"\"\"\n col = []\n for i in fields:\n for j in i:\n col.append(j)\n print('%s' % ' '.join(map(str,col)))\n \"\"\"\n\n\n \"\"\"\n dic = {}\n for i in fields:\n for j in i:\n sub = j.split('.')\n if sub[0] in dic:\n dic[sub[0]] += 1\n else:\n dic[sub[0]] = 1\n\n\n if len(sub) > 1:\n if sub[-2]+'.'+sub[-1] in dic:\n dic[sub[-2]+'.'+sub[-1]] += 1\n else:\n dic[sub[-2]+'.'+sub[-1]] = 1\n\n\n ls = sorted(dic.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)\n for i in range(len(ls)):\n print('%s\\t%s' % (ls[i][0], ls[i][1]))\n # print('%s' % join(ls[i][1]))\n\n \"\"\"", "def get_all_annotations(filename,log):\n results = {}\n log.info(\"Creating POS-tagged files and raw text files for '{}'\".format(filename))\n # call function to creat a POS file (into directory ./output/POS by default)\n convert(filename)\n\n # Do annotations in POS tagged files\n for filename in os.listdir(\"temp/\"):\n fileresults = {}\n\n # Do 
annotations in POS tagged files\n if filename.endswith(\"_pos.txt\"):\n posfile = \"temp/\" + filename\n\n log.info(\"Extracting features from file '{}'\".format(posfile))\n\n # Do extraction and annotation step for each feature\n # Results are dictionaries of lists that are merged with a helper function\n fileresults = merge_results(fileresults, get_annotation(posfile, evaluative_dict, [], \"evaluative\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, reinforcement_dict, reinforcement_exclude_dict, \"reinforcement\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, hedging_dict, hedging_exclude_dict, \"hedging\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, quant_dict, quant_exclude_dict, \"quantifier\"))\n\n # Add connectors as provided by Iverina and Janina's lists\n fileresults = merge_results(fileresults, get_annotation(posfile, conn_argumentation_dict, [], \"conn.argumentation\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, conn_concluding_dict, [], \"conn.concluding\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, conn_exemplification_dict, [], \"conn.exemplification\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, conn_listing_dict, [], \"conn.listing\"))\n fileresults = merge_results(fileresults, get_annotation(posfile, conn_paired_dict, [], \"conn.paired\"))\n\n os.remove(posfile)\n\n\n # Do annotations in raw text files without POS tags\n elif filename.endswith(\"_raw.txt\"):\n fulltxtfile = \"temp/\" + filename\n\n log.info(\"Extracting features from file '{}'\".format(fulltxtfile))\n\n # Add connectors based on PDTB-parser\n connectors = extract_connectors(fulltxtfile)\n fileresults['connectors'] = connectors\n\n os.remove(fulltxtfile)\n\n if fileresults:\n results[filename.rstrip(\".txt\")] = fileresults\n\n log.info(\"Finished annotations for file '{}'\".format(filename))\n #print(json.dumps(results, indent=True))\n return results", "def parse_log(path_to_log):\n\n re_correct_line = re.compile('^\\w+\\d+')\n re_iteration = re.compile('Iteration (\\d+)')\n # alexnet\n #re_output_loss = re.compile('output #\\d+: loss = ([\\.\\d]+)')\n #re_output_acc = re.compile('output #\\d+: accuracy = ([\\.\\d]+)')\n\n #googlenet\n re_output_loss = re.compile('output #\\d+: loss3\\/loss3 = ([\\.\\d]+)')\n re_output_acc = re.compile('output #\\d+: loss3\\/top-1 = ([\\.\\d]+)')\n\n re_lr = re.compile('lr = ([\\.\\d]+)')\n\n # Pick out lines of interest\n iteration = -1\n test_accuracy = -1\n learning_rate = float('NaN')\n acc = float('NaN')\n train_dict_list = []\n test_dict_list = []\n train_dict_names = ('NumIters', 'Loss', 'Accuracy', 'LearningRate', 'Seconds')\n test_dict_names = ('NumIters', 'Loss', 'Accuracy')\n\n is_test_group = False\n\n logfile_year = extract_seconds.get_log_created_year(path_to_log)\n with open(path_to_log) as f:\n start_time = extract_seconds.get_start_time(f, logfile_year)\n for line in f:\n if not re_correct_line.match(line):\n continue\n iteration_match = re_iteration.search(line)\n if iteration_match:\n iteration = int(iteration_match.group(1))\n if iteration == -1:\n # Only look for other stuff if we've found the first iteration\n continue\n\n time = extract_seconds.extract_datetime_from_line(line, logfile_year)\n seconds = (time - start_time).total_seconds()\n\n lr_match = re_lr.search(line)\n if lr_match:\n learning_rate = float(lr_match.group(1))\n output_acc_match = re_output_acc.search(line)\n if output_acc_match:\n acc = 
float(output_acc_match.group(1))\n\n output_loss_match = re_output_loss.search(line)\n if output_loss_match:\n if get_line_type(line) == 'test':\n test_loss = float(output_loss_match.group(1))\n test_dict_list.append({'NumIters': iteration,\n 'Loss': test_loss,\n 'Accuracy': acc})\n else:\n train_loss = float(output_loss_match.group(1))\n train_dict_list.append({'NumIters': iteration,\n 'Loss': train_loss,\n 'Accuracy': acc,\n 'LearningRate': learning_rate,\n 'Seconds': seconds})\n\n df_train = pd.DataFrame(columns=train_dict_names)\n df_test = pd.DataFrame(columns=test_dict_names)\n\n for col in train_dict_names:\n df_train[col] = [d[col] for d in train_dict_list]\n for col in test_dict_names:\n df_test[col] = [d[col] for d in test_dict_list]\n\n return df_train, df_test", "def check_up(year, batch_number):\n\tdm_name = \"Data_Matrices/\"+str(year) + \"_\" + str(batch_number) + \"_data_matrix.csv\"\n\tlf_name = \"Log_Files/\"+str(year) + \"_\" + str(batch_number) + \"_log_file.txt\"\n\n\tdm_lines = sum(1 for line in open(dm_name,'r'))\n\tlf_lines = sum(1 for line in open(lf_name,'r'))\n\tif dm_lines != lf_lines:\n\t\traise ValueError(\"DATA MATRIX {}_{} HAS DIFFERENT NUMBER OF LINES THAN LOG FILE\".format(year, batch_number))\n\n\twith open(dm_name,'r') as dm:\n\t\twith open(lf_name, 'r') as lf:\n\t\t\tdm_reader = csv.reader(dm)\n\t\t\tdm_numbers = [line[0] for line in dm_reader]\n\t\t\tlf_numbers = [line.split()[0] for line in lf]\n\tif dm_numbers != lf_numbers:\n\t\tprint(dm_numbers)\n\t\tprint(lf_numbers)\n\t\traise ValueError(\"DATA MATRIX {}_{} CONTAINS DIFFERENT DOCUMENTS THAN LOG FILE\".format(year, batch_number))\n\n\twith open(dm_name,'r') as dm:\n\t\tdm_reader = csv.reader(dm)\n\t\tfor line in dm_reader:\n\t\t\twords = line[1:]\n\t\t\tbreak\n\tfor word in words:\n\t\tif not word.isalpha():\n\t\t\traise ValueError(\"DATA MATRIX {}_{} CONTAINS NON-ALPHABETIC WORD\".format(year, batch_number))\n\n\twith open(lf_name,'r') as lf:\n\t\tline = next(lf)\n\t\tlf_words = sum([float(line.split(\" --- \")[2].split()[0]) for line in lf])\n\tif len(words) != lf_words:\n\t\traise ValueError(\"DATA MATRIX {}_{} AND LOG FILE COUNT DIFFERENT NUMBER OF WORDS\".format(year, batch_number))", "def train(self, documents):\n prior_log_prob, label_to_col = self.get_prior_log_probabilities(documents)\n self.my_model[\"vocabulary\"] = make_vocabulary(documents)\n\n # find frequencies of features\n num_classes = len(label_to_col)\n num_features = len(self.extract_f_vector(documents[0]))\n features_freq = np.zeros((num_features, num_classes))\n for doc in documents:\n f_vector = self.extract_f_vector(doc)\n col_for_f_vector = label_to_col[doc.label]\n features_freq[:, col_for_f_vector] += f_vector\n\n # laplace smoothing\n total_per_label = np.sum(features_freq, axis=0)\n features_freq += np.ones(total_per_label.shape, int)\n normalizer = total_per_label + np.full(total_per_label.shape, num_features, int)\n features_freq /= normalizer\n\n # stack all probabilities to one matrix and take log\n # result: self.all_log_prob\n # |-----------------------------------|\n # | log P(f1|C1) | ... | log P(f1|Cn) |\n # | log P(f2|C1) | ... | log P(f2|Cn) |\n # | . | . | . |\n # | . | . | . |\n # | . | . | . |\n # | log P(fm|C1) | ... | log P(fm|Cn) |\n # | log P(C1) | ... 
| log P(Cn) |\n # |-----------------------------------|\n likelihood_log_prob = np.log(features_freq)\n all_log_prob = np.vstack((likelihood_log_prob, prior_log_prob))\n self.my_model[\"all_log_prob\"] = all_log_prob", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def log_inference(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8', \r\n\t\t\t\tfloat_format='%.3f', index=False)\r\n\r\n\t\twith open(path + \"-predictions.csv\", \"w\") as f:\r\n\t\t\tresults[[\"tag\", \"y_hat\"]].to_csv(\r\n\t\t\t\tf, index=False, float_format='%.3f', header=False)", "def read_data_test_MLOG(self):\n self.na.set_query_timeout(10e3)\n self.na.set_format('mlog')\n fpts, mags = self.na.read_data()\n\n plt.figure()\n plt.plot(fpts, mags)\n plt.show()", "def test_marginal_likelihood(self):\n data = np.repeat([1, 0], [50, 50])\n marginals = []\n a_prior_0, b_prior_0 = 1.0, 1.0\n a_prior_1, b_prior_1 = 20.0, 20.0\n\n for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):\n with pm.Model() as model:\n a = pm.Beta(\"a\", alpha, beta)\n y = pm.Bernoulli(\"y\", a, observed=data)\n trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)\n # log_marginal_likelihood is found in the last value of each chain\n lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])\n marginals.append(lml)\n\n # compare to the analytical result\n assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact", "def _ConstructParseAndCheckLogfiles(self, inputfiles, graphs):\n parser = self._ConstructDefaultProcessor()\n for inputfile in inputfiles:\n self._ProcessLog(parser, inputfile)\n\n logs = json.loads(parser.GenerateGraphJson())\n for graph in graphs:\n self._CheckFileExistsWithData(logs, graph)\n\n return logs", "def extract_dl(driving_log_path):\r\n entries = []\r\n with open(driving_log_path) as csv_file:\r\n reader = csv.reader(csv_file)\r\n for entry in reader:\r\n entries.append(entry)\r\n empty_lists = [[] for i in range(7)]\r\n center_images, left_images, right_images, steerings, throttles, brakes, speeds = empty_lists\r\n for entry in entries:\r\n center_image_path, left_image_path, right_image_path = (entry[0], entry[1], entry[2])\r\n steering = float(entry[3])\r\n throttle = float(entry[4])\r\n brake = float(entry[5])\r\n speed = 
float(entry[6])\r\n center_image = cv2.imread(center_image_path)\r\n left_image = cv2.imread(left_image_path)\r\n right_image = cv2.imread(right_image_path)\r\n center_images.append(center_image)\r\n left_images.append(left_image)\r\n right_images.append(right_image)\r\n steerings.append(steering)\r\n throttles.append(throttle)\r\n brakes.append(brake)\r\n speeds.append(speed)\r\n return center_images, left_images, right_images, steerings, throttles, brakes, speeds", "def forward(self, motif_size, motif_log_embeddings):\n\n # D = 1 - exp(-sum_j(prod_i(d_ij)))\n # log(1-D) = -sum_j(exp(sum_i(log(d_ij))))\n x = lbann.MatMul(\n lbann.Constant(value=1, num_neurons=str_list([1, motif_size])),\n motif_log_embeddings,\n )\n x = lbann.Exp(x)\n x = lbann.Reduction(x, mode='sum')\n x = lbann.Negative(x)\n log_not_prob = x\n\n # Convert log-probability to linear space\n # Note: D=-expm1(x) is accurate when D~0. When D~1, prefer\n # 1-D=exp(x).\n prob = lbann.Negative(lbann.Expm1(log_not_prob))\n\n return prob, log_not_prob", "def main(filepath):\n path = Path(filepath)\n\n if not path.is_file():\n click.echo('reading_detector filepath_csv')\n return sys.exit(1)\n try:\n fil = open(path)\n results = app.run_ml_detection(fil)\n headers = ['Client', 'Month', 'Suspicious', 'Median']\n output_result = [[result.client_id, result.month.strftime('%Y-%m'),\n result.suspicious_reading, result.median]\n for result in results]\n click.echo('{0}'.format(tabulate(output_result, headers=headers, tablefmt=\"github\")))\n except IOError as err:\n logging.error(err, exc_info=True)\n click.echo(\"Input file is incorrect\")\n return sys.exit(1)", "def parse_log(self,filename,log_year):\n\n \n download_filename=os.sep.join([self.source_dir,filename])\n my_logger.debug(\"parsing log file: %s\" % download_filename)\n try:\n f = open(download_filename,mode='rt')\n except IOError:\n my_logger.debug( \"can't open file %s\" % download_filename)\n return\n\n #\n # return list of report objects\n L=[]\n\n #\n # parse & extract fields into new report object\n # parse to determine exact category\n # parse to determine geoscope\n state = STATE_INIT\n new_state = STATE_INIT\n current_crime_category=None\n line_index = 0\n previous_report_index=0\n for line in f:\n line_index=line_index+1\n #\n # state machine:\n # transition from init -> find_category \n # transition from find_category to find_report after finding first category\n\n if state==STATE_INIT:\n new_state = STATE_FIND_CATEGORY\n\n elif state==STATE_FIND_CATEGORY:\n #\n # find first instance of crime category heading\n match_crime_header = CATEGORY_REGEXP.search(line)\n match_report=REPORT_DATE_REGEXP.search(line)\n \n if match_crime_header and (match_report==None):\n #\n # found crime header\n my_logger.debug(\"========== TRANSITION TO FIND_REPORT\\n\")\n my_logger.debug('%d %s' % (line_index,line))\n new_state = STATE_FIND_REPORT\n\n #\n # remember where this category occurred\n category_line_index=line_index\n\n current_crime_category = self.extract_crime_category(match_crime_header)\n \n elif match_crime_header and match_report:\n #\n # error: both detectors triggered by this line\n my_logger.debug('match_crime_header and match_report triggered by (%s)' % line)\n raise ValueError\n elif (match_crime_header==None) and (match_report):\n #\n # error: found report line before first category\n my_logger.debug(\"found report prematurely in (%s)\\n\" % line)\n raise ValueError\n else:\n #\n # neither crime header nor crime report, so ignore it\n pass\n\n elif 
state==STATE_FIND_REPORT:\n my_logger.debug('%d %s' % (line_index,line[0:-1])) # -1 to avoid extra LF\n \n #\n # sanity check:\n # \"run\" of valid reports is too long\n if (category_line_index-line_index) > 20:\n my_logger.debug(\"run of reports too long: skipped category?\")\n raise ValueError\n\n match_crime_header = CATEGORY_REGEXP.search(line)\n match_report=REPORT_DATE_REGEXP.search(line)\n\n if match_crime_header and (match_report==None):\n #\n # came across new crime category\n current_crime_category = self.extract_crime_category(match_crime_header)\n new_state = STATE_FIND_REPORT\n\n category_line_index=line_index\n\n elif (match_crime_header==None) and match_report:\n #\n # found report\n new_state = STATE_FIND_REPORT\n\n report=self.parse_report_line(line)\n report['category']=current_crime_category\n report['line_num']=line_index\n report['date_year']=log_year\n L.append(report)\n\n #\n # sanity check\n # reports should be <= 2 lines apart\n if (line_index - max([category_line_index,previous_report_index])) > 2:\n my_logger.debug('WARNING: possible skipped report')\n my_logger.debug('current line: %d' % line_index)\n my_logger.debug('last report or category: %d' %\n max([category_line_index,previous_report_index]))\n\n # remember this line index\n previous_report_index=line_index\n\n else:\n #\n # neither regexp matched, so ignore it\n pass\n\n state=new_state\n\n f.close()\n return L", "def get_lda_data(self, genre):\n\n # Getting movie_genre_data\n movie_genre_data_frame = self.data_extractor.get_mlmovies_data()\n movie_genre_data_frame = self.split_genres(movie_genre_data_frame)\n\n # Getting actor_movie_data\n movie_actor_data_frame = self.data_extractor.get_movie_actor_data()\n\n genre_actor_frame = movie_genre_data_frame.merge(movie_actor_data_frame, how=\"left\", left_on=\"movieid\",\n right_on=\"movieid\")\n # genre_actor_frame = genre_actor_frame[genre_actor_frame['year'].notnull()].reset_index()\n genre_actor_frame = genre_actor_frame[[\"movieid\", \"year\", \"genre\", \"actorid\", \"actor_movie_rank\"]]\n\n genre_actor_frame[\"actorid_string\"] = pd.Series(\n [str(id) for id in genre_actor_frame.actorid],\n index=genre_actor_frame.index)\n\n genre_data_frame = genre_actor_frame[genre_actor_frame[\"genre\"]==genre]\n actor_df = genre_data_frame.groupby(['movieid'])['actorid_string'].apply(list).reset_index()\n actor_df = actor_df.sort_values('movieid')\n actor_df.to_csv('movie_actor_lda.csv', index=True, encoding='utf-8')\n\n actor_df = list(actor_df.iloc[:,1])\n\n (U, Vh) = util.LDA(actor_df, num_topics=4, num_features=1000)\n\n for latent in Vh:\n print (\"\\n\")\n print(latent)", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def readMcmcLog(fname, par):\n\t# find params that varied\n\tfor line in open(fname):\n\t\tif line.startswith('##'):\n\t\t\tlabels = line.split()[1:]\n\t\t\tlabels = np.array(labels, dtype=str)\n\t\t\tbreak\n\t#read data and select the desired parameter\n\tdata = np.loadtxt(fname)\n\toi = data[:, (labels==par)]\n\tif len(oi) == 0:\n\t\tprint \"!!ERROR finding that parameter in log file!!\"\n\tprint \"Read %d points from %s\"%(len(oi),fname)\n\treturn oi", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def 
process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()" ]
[ "0.6709856", "0.6107463", "0.58700055", "0.58251566", "0.55823094", "0.5506087", "0.5464955", "0.54559237", "0.5388074", "0.537436", "0.5313697", "0.52944803", "0.52933943", "0.52919465", "0.5281008", "0.5272261", "0.52709246", "0.52665913", "0.52410084", "0.5238071", "0.5233608", "0.52313673", "0.52277553", "0.52239305", "0.52228427", "0.52124673", "0.521152", "0.51963985", "0.5186671", "0.5184965" ]
0.7288729
0
Ensure n_fft, n_per_seg and n_overlap make sense.
def _check_nfft(n, n_fft, n_per_seg, n_overlap): if n_per_seg is None and n_fft > n: raise ValueError( ( "If n_per_seg is None n_fft is not allowed to be > " "n_times. If you want zero-padding, you have to set " "n_per_seg to relevant length. Got n_fft of %d while" " signal length is %d." ) % (n_fft, n) ) n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg n_per_seg = n if n_per_seg > n else n_per_seg if n_overlap >= n_per_seg: raise ValueError( ( "n_overlap cannot be greater than n_per_seg (or " "n_fft). Got n_overlap of %d while n_per_seg is " "%d." ) % (n_overlap, n_per_seg) ) return n_fft, n_per_seg, n_overlap
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def _check_input_size(n_components, n_features):\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def _check_validity(self):\n cnt = np.array([len(v) for v in self.t_signatures.values()])\n cnt_n = len(cnt) - self.min_bins\n idx = None\n if cnt_n < 0:\n self.valid = False\n else:\n y = [np.all(cnt[i:(i + self.min_bins)] >= self.min_neigh) for i in range(cnt_n)]\n if sum(y) <= 0:\n self.valid = False\n elif sum(y) == 1:\n self.valid = True\n idx = np.where(y)[0][0]\n else:\n # If many sequences are valid, select the one with the most letters\n self.valid = True\n w_list = [self.w_signatures[i] for i in range(len(self.w_signatures))]\n w = [sum(w_list[i:(i + self.min_bins)]) if y[i] else 0 for i in range(cnt_n)]\n idx = np.argmax(w)\n self._valid_idx = idx", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... 
no data?\")\n pass", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def _validate_no_overlap(params, error_callback):\n dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],\n params['dhcp_end']))\n inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],\n params['inspection_end']))\n # If there is any intersection of the two sets then we have a problem\n if dhcp_set & inspection_set:\n message = ('Inspection DHCP range \"%s-%s\" overlaps provisioning '\n 'DHCP range \"%s-%s\".' %\n (params['inspection_start'], params['inspection_end'],\n params['dhcp_start'], params['dhcp_end']))\n error_callback(message)", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def can_overlap(self):\n return False", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def sanity_check_section(self):\n # Note: all addresses here are RVAs\n image_size = self.obj_parent.OptionalHeader.SizeOfImage\n if self.VirtualAddress > image_size:\n raise exceptions.SanityCheckException('VirtualAddress {0:08x} is past the end of image.'.format(self.VirtualAddress))\n if self.Misc.VirtualSize > image_size:\n raise exceptions.SanityCheckException('VirtualSize {0:08x} is larger than image size.'.format(self.Misc.VirtualSize))\n if self.SizeOfRawData > image_size:\n raise exceptions.SanityCheckException('SizeOfRawData {0:08x} is larger than image size.'.format(self.SizeOfRawData))", "def _verify_space(self) -> None:\n\n for dimension in self.space.values():\n\n if dimension.type != \"fidelity\" and dimension.prior_name not in [\n \"uniform\",\n \"reciprocal\",\n \"int_uniform\",\n \"int_reciprocal\",\n \"choices\",\n ]:\n raise ValueError(\n \"TPE now only supports uniform, loguniform, uniform discrete \"\n f\"and choices as prior: {dimension.prior_name}\"\n )\n\n shape = dimension.shape\n if shape and len(shape) != 1:\n raise ValueError(\"TPE now only supports 1D shape.\")", "def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def check_nfaces(sections):\n return _check_nentries(sections, \"NFACES\", \"FACES\")", "def check_consistency(self):\n assert len(self.shape) == len(self.qhape) == len(self.dirs)\n # Qnums must be unique within a qim and correspond one-to-one with\n # dimensions in dim.\n assert all(\n (\n len(dim) == len(qim) == len(set(qim))\n for dim, qim in zip(self.shape, self.qhape)\n )\n )\n assert all(d == 1 or d == -1 for d in self.dirs)\n assert all(q == self._qod_func(q) for q in sum(self.qhape, []))\n # Check that every sect has a valid key and the correct shape and\n # dtype.\n for k, v in self.sects.items():\n assert v.dtype == self.dtype\n assert self.is_valid_key(k)\n block_shp_real = v.shape\n qnum_inds = tuple(\n self.qhape[i].index(qnum) for i, qnum in enumerate(k)\n )\n block_shp_claimed = tuple(\n [self.shape[i][j] for i, j in enumerate(qnum_inds)]\n )\n assert block_shp_claimed == block_shp_real\n if self.invar and (self.charge != 0 or not self.isscalar()):\n assert self.defval == 0\n return True", "def verifyLengths( options, data ):\n types = [ 'maf', 'maf1e2', 'maf1e3', 'maf1e4',\n 'maf1e5', 'maf1e6', 'maf1e7', 'mafCpl1e2', \n 'mafCpl1e3', 'mafCpl1e4', 'mafCpl1e5', \n 'mafCpl1e6', 'mafCpl1e7', 'mafCtg1e2', \n 'mafCtg1e3', 'mafCtg1e4', 'mafCtg1e5', \n 'mafCtg1e6', 'mafCtg1e7', 'mafSpl1e2', \n 'mafSpl1e3', 'mafSpl1e4', 'mafSpl1e5', \n 'mafSpl1e6', 'mafSpl1e7', 'xAxis',\n 'mafCpEdgeCount', 'mafCpErrorCount', \n 'mafCpScafGapCount', 'blockEdgeCount' ]\n if len( data.chrNames ) != len( data.mafWigDict ): \n sys.stderr.write('the expected length of the data wig '\n 'dictionary is %d (i.e. number of chromosomes), but actual is %d\\n' \n % ( len( data.chrNames ), len( data.mafWigDict )))\n sys.exit( 1 )\n for c in data.chrNames:\n if len( types ) + 5 != len( data.mafWigDict[c] ): # extra 5 are from the *Max records\n sys.stderr.write('the expected length of the data wig '\n 'dictionary for %s is %d, but actual is %d\\n' \n % ( c, len( types ) + 5, len( data.mafWigDict[c] )))\n sys.stderr.write( '%s\\n' % str( data.mafWigDict[ c ].keys() ))\n sys.exit( 1 )\n sys.stderr.write('Verify number of records in data structure = %d, OK.\\n' % (len(types) + 4))\n for c in data.chrNames:\n for i in xrange(0, len( types ) - 1):\n if len( data.mafWigDict[c][ types[i] ] ) != len( data.mafWigDict[c][ types[i+1] ]):\n sys.stderr.write('the lengths of all vectors must the '\n 'same for a given chromosome. 
%s, %s (%d) != %s (%d)\\n' \n % ( c, types[i], len(data.mafWigDict[c][types[i]]), \n types[i+1], len(data.mafWigDict[c][types[i+1]]) ))\n sys.exit( 1 )\n sys.stderr.write('Verify length of records in data structure for chr %s are all %d, OK.\\n' \n % ( c, len(data.mafWigDict[c][ types[0] ])))\n sys.stderr.write('Verify lengths of arrays inside data structure, OK.\\n')", "def _check_inference(self, inference):\n if inference == 'GP2KronSum':\n assert self.n_randEffs == 2, 'VarianceDecomposition: for fast inference number of random effect terms must be == 2'\n assert not sp.isnan(self.Y).any(\n ), 'VarianceDecomposition: fast inference available only for complete phenotype designs'\n # TODO: add GP3KronSumLR, GP2KronSumLR", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def detect_overlap_1d(first, first_length, second, second_length):\n first_end = first + first_length - 1\n second_end = second + second_length - 1\n return second_end >= first and first_end >= second", "def validate_output(self):\n if self.dimension == 2:\n required = SEGMENT_GEO_SIG | self.output_signature\n for rays in [\n self.active_rays,\n self.finished_rays,\n self.stopped_rays,\n self.dead_rays\n ]:\n if bool(rays):\n sig = set(rays.keys())\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed output signature check. System \" \n f\"signature is {sig}, but needed {required}.\"\n )", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def __check(self):\n if len(self._data)!=len(self._ptbins)+1: \n raise IndexError('Pt bins mismatch')\n for ptbin in self._data:\n if len(ptbin)!=len(self._etabins)+1:\n raise IndexError('Eta bins mismatch')", "def feat_overlap(f1, f2):\n f1start = int(f1[3])\n f1end = int(f1[4])\n f2start = int(f2[3])\n f2end = int(f2[4])\n\n if f1start <= f2end and f1end >= f2start:\n return True\n return False", "def validate_collision(self):\n pass", "def check_collisions(self):", "def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")", "def _check_flow_consistencity (sg_map, fr_sg):\n if isinstance(fr_sg, Flowrule):\n flowclass = NFFGToolBox._extract_flowclass(fr_sg.match.split(\";\"))\n else:\n flowclass = fr_sg.flowclass\n consistent = True\n if sg_map[fr_sg.id][2] != flowclass:\n consistent = False\n if (sg_map[fr_sg.id][3] is None or sg_map[fr_sg.id][3] == float(\"inf\")) != \\\n (fr_sg.bandwidth is None or fr_sg.bandwidth == float(\"inf\")):\n # If not both of them are None\n consistent = False\n elif (sg_map[fr_sg.id][3] is not None) and (fr_sg.bandwidth is not None):\n if consistent and math.fabs(sg_map[fr_sg.id][3] - fr_sg.bandwidth) > 1e-8:\n consistent = 
False\n if (sg_map[fr_sg.id][4] is None or sg_map[fr_sg.id][4] == 0.000000000) != \\\n (fr_sg.delay is None or fr_sg.delay == 0.0000000000):\n # If not both of them are None\n consistent = False\n elif (sg_map[fr_sg.id][4] is not None) and (fr_sg.delay is not None):\n if math.fabs(sg_map[fr_sg.id][4] - fr_sg.delay) > 1e-8:\n consistent = False\n if not consistent:\n raise RuntimeError(\"Not all data of a Flowrule equal to the other \"\n \"Flowrules of the sequence for the SGHop %s! Or the\"\n \" SGHop to be added differs in data from the existing\"\n \" SGHop!\" % fr_sg.id)", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def _validate_input_specification(\n self, holdem_ranges, board_cards, dead_cards):\n explicit_card_count = collections.defaultdict(int)\n for c in (board_cards + dead_cards) or ():\n explicit_card_count[c] += 1\n\n for her in holdem_ranges:\n if len(her.possible_hands) == 1:\n for c in her.possible_hands[0].cards:\n explicit_card_count[c] += 1\n\n multiple_specified_cards = []\n for card, count in explicit_card_count.iteritems():\n if count > 1:\n multiple_specified_cards.append(card)\n if multiple_specified_cards:\n raise Error('Cards specified multiple times: %s' % (\n ','.join('%s' % c for c in multiple_specified_cards)))" ]
[ "0.5919392", "0.5857666", "0.5699962", "0.5582914", "0.551394", "0.54537946", "0.5436112", "0.54184246", "0.5386672", "0.53664434", "0.53493524", "0.5337134", "0.5311439", "0.52793556", "0.5267394", "0.5254082", "0.5248522", "0.52285975", "0.5204103", "0.5197914", "0.51861525", "0.5184916", "0.5184527", "0.51769024", "0.5173406", "0.5171358", "0.5166511", "0.5137988", "0.5137512", "0.5116778" ]
0.79668355
0
compute dev or test accuracy on a certain task
def get_accuracy(model, task, batchmanager, test_set=False): model.eval() count, num = 0., 0 batchmanager = batchmanager if isinstance(batchmanager, BatchManager) else batchmanager.batchmanagers[task] iter = batchmanager.test_iter if test_set else batchmanager.dev_iter with torch.no_grad(): for batch in iter: data, targets = batch out = model(data, task) predicted = out.argmax(dim=1) count += (predicted == targets).sum().item() num += len(targets) model.train() return count / num
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy", "def accuracy(self):", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + 
self.recall)", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def test(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. 
compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)", "def accuracy(output, target): # , topk=(1,)):\n correct = 0\n batch_size = target.size(0)\n for i in range(batch_size):\n tar = target[i].data.cpu().numpy()\n pred = output[i].data.cpu().numpy()\n if (tar) == np.argmax(pred):\n correct += 1\n return float(correct/batch_size)", "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def accuracy(gt, pred):\n \n return np.mean(gt == pred)", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)", "def test(args, model, device, data, target):\n model.eval()\n test_loss = 0\n correct = 0\n data, target = data.to(device), target.to(device)\n output = model(data)\n # Final result will be average of averages of the same size\n test_loss += F.nll_loss(output, target, reduction='mean').item()\n ppe.reporting.report({'val/loss': test_loss})\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n ppe.reporting.report({'val/acc': correct / len(data)})", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'", "def accuracy_compute(predictions, labels):\n with tf.name_scope('test_accuracy'):\n accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]\n tf.summary.scalar('test_accuracy', accu)\n return accu", "def test(args, model, lossfn, device, data, target):\n model.eval()\n test_loss = 0\n correct = 0\n data, target = data.to(device), target.to(device)\n output = model(data)\n # Final result will be average of averages of the same size\n test_loss += lossfn(output, target).item()\n ppe.reporting.report({\"val/loss\": test_loss})\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n ppe.reporting.report({\"val/acc\": correct / len(data)})", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = 
model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def run_offenseval_task_a(training_data, test_data):\n #grid_search_svm(training_data, test_data)\n compare_classifiers(classifiers(), training_data, test_data, dev_stage=True)\n #compare_classifiers(classifiers(), training_data, test_data, dev_stage=False)", "def test_compute_metrics(self):\n with self.test_session() as sess:\n tf.set_random_seed(1234)\n dut = _setup_trainer(self.tmpdir)\n\n sess.run(tf.global_variables_initializer())\n sess.run((dut.train_iterator.initializer,\n dut.train_metric_reset_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # Without update, it should be zero.\n self.assertEqual(train_mloss, 0.)\n\n sess.run((dut.train_op, dut.train_mean_loss_update_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # After update.\n self.assertAlmostEqual(train_mloss, 5.2298584)", "def test_best_val(self, te_acc):\n self.test_val = te_acc", "def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n correct = 0\n for i in range(len(targets)):\n if(predictions[i] == targets[i]):\n correct += 1\n accuracy = correct/len(targets)\n #raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def compute_metrics(self, train_data, test_data, criterion):\n m = self.metrics\n warnings.filterwarnings('ignore','Mean of empty slice')\n\n ## load data\n trn, trn_labs = train_data\n tst, tst_labs = test_data\n\n # trn = trn.transpose(1,0)\n tst = tst.transpose(1,0)\n\n t_final = -(np.flipud(trn!=self.padding).argmax(0)+1)\n test_tfinal = -(np.flipud(tst!=self.padding).argmax(0)+1)\n\n ntest = tst.size(1)\n P = self.decoder.out_features\n\n ## training data ###########################################################\n # hidden = 
self.init_hidden(trn.size(1))\n # out, hidden = self.transparent_forward(trn, hidden)\n # # output = out[t_final, np.arange(trn.size(1)), :]\n # output = out.squeeze()\n # # compute orthogonality\n # mem_act = np.array([np.cumsum(trn==p,axis=0).int().detach().numpy() % 2 \\\n # for p in range(self.q_)]).transpose((1,2,0))\n\n # ps_clf = LinearDecoder(self, 2**(self.q_-1), MeanClassifier)\n # ps = []\n # for d in Dichotomies(mem_act, 'simple'):\n # np.warnings.filterwarnings('ignore',message='invalid value encountered in')\n # ps_clf.fit(hidden.detach().numpy(), d)\n # new_ps = ps_clf.orthogonality()\n # ps.append(new_ps)\n # # if new_ps > ps:\n # # ps = new_ps\n # m['train_parallelism'] = np.append(m['train_parallelism'], np.array(ps).T, axis=0)\n\n # # print(mem_act.shape)\n # # print(hidden.shape)\n # # self.orth_clf.fit(hidden.detach().numpy(), mem_act)\n # # orth_score = self.orth_clf.orthogonality()\n # # m['train_orthogonality'] = np.append(m['train_orthogonality'], orth_score)\n\n ## test data ##############################################################\n hidden = self.init_hidden(tst.size(1))\n out, hidden = self.transparent_forward(tst, hidden)\n # output = out.squeeze()\n # print(hidden.shape)\n # print(out.shape)\n # print(test_tfinal)\n output = out[test_tfinal, np.arange(tst.size(1)), :]\n # raise Exception\n\n # compute loss\n test_loss = criterion(output.squeeze(0),tst_labs.squeeze())\n\n m['test_loss'] = np.append(m['test_loss'], test_loss.item())\n\n # compute orthogonality\n # mem_act = np.array([np.cumsum(tst==p,axis=0).int().detach().numpy() % 2 \\\n # for p in range(self.q_)]).transpose((1,2,0))\n\n # # self.orth_clf.fit(hidden.detach().numpy(), mem_act)\n # # orth_score = self.orth_clf.orthogonality()\n # # m['test_orthogonality'] = np.append(m['test_orthogonality'], orth_score)\n\n # # compute parallelism\n # ps_clf = LinearDecoder(self, 2**(self.q_-1), MeanClassifier)\n # ps = []\n # for d in Dichotomies(mem_act, 'simple'):\n # np.warnings.filterwarnings('ignore',message='invalid value encountered in')\n # ps_clf.fit(hidden.detach().numpy(), d)\n # new_ps = ps_clf.orthogonality()\n # ps.append(new_ps)\n # # if new_ps > ps:\n # # ps = new_ps\n # m['test_parallelism'] = np.append(m['test_parallelism'], np.array(ps).T, axis=0)\n\n ## package #################################################################\n self.metrics = m\n warnings.filterwarnings('default')", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n 
images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def Experiment1(train_x,train_y,test_x,test_y,task):\r\n lambda_r = np.array(np.arange(0,151,1))\r\n if(task=='1'):\r\n #Task1: Effects of regularization parameters\r\n Exp1_task1(lambda_r,train_x,train_y,test_x,test_y)\r\n if(task=='2'):\r\n #Task2: Effects of No.of examples\r\n Exp1_task2(lambda_r,train_x,train_y,test_x,test_y)" ]
[ "0.69914055", "0.6827569", "0.6770814", "0.6744679", "0.658327", "0.6576183", "0.6530896", "0.6528073", "0.64344484", "0.6419894", "0.6414318", "0.641199", "0.63928133", "0.63615847", "0.63476807", "0.634563", "0.6324", "0.63181156", "0.62964886", "0.6295964", "0.62817776", "0.6255736", "0.625323", "0.62432706", "0.62347543", "0.6232479", "0.61802685", "0.6176688", "0.6171418", "0.61695504" ]
0.7175868
0
Given Card or View layers, convert the grid layers to a string
def convert_layers_to_string(layers: list) -> str:
    string_conversion = ""
    for layer in layers:
        string_conversion += "\n" + "".join(layer)
    return string_conversion
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cat_layer_dict_to_str(layer: dict, rows_per_column: int) -> str:\n\n rpc_str = \"Infinity\" if np.isinf(rows_per_column) else str(rows_per_column)\n layer_str = [\n \"const \" + layer[\"name\"],\n \" = L.gridLayer.tiledMarkers(\",\n \"{ \",\n 'tileURL:\"' + layer[\"directory\"] + '\", ',\n \"radius: 10, \",\n 'color: \"' + layer[\"color\"] + '\", ',\n \"fillOpacity: 0.2, \",\n \"strokeOpacity: 1.0, \",\n f\"rowsPerColumn: {rpc_str}, \",\n f'catalogColumns: [{\",\".join(layer[\"columns\"])}], ',\n \"minZoom: \" + str(layer[\"min_zoom\"]) + \", \",\n \"maxZoom: \" + str(layer[\"max_zoom\"]) + \", \",\n \"maxNativeZoom: \" + str(layer[\"max_native_zoom\"]) + \" \",\n \"});\",\n ]\n\n return \"\".join(layer_str)", "def img_layer_dict_to_str(layer: dict) -> str:\n\n layer_str = [\n \"const \" + layer[\"name\"],\n ' = L.tileLayer(\"' + layer[\"directory\"] + '\"',\n \", { \",\n 'attribution:\"' + LAYER_ATTRIBUTION + '\", ',\n \"minZoom: \" + str(layer[\"min_zoom\"]) + \", \",\n \"maxZoom: \" + str(layer[\"max_zoom\"]) + \", \",\n \"maxNativeZoom: \" + str(layer[\"max_native_zoom\"]) + \" \",\n \"});\",\n ]\n\n return \"\".join(layer_str)", "def _get_grid_representations(self):\n\n representation = '-----------Loading Sequence----------------------------------------------------------------\\n'\n for row in self.grid:\n for col in row:\n if col == -1:\n representation += 'X\\t'\n elif col == 0:\n representation += '-\\t'\n else:\n representation += str(int(col)) + '\\t'\n representation += '\\n\\n'\n\n representation += '-----------VehicleType--------------------------------------------------------------------\\n'\n for row in self.grid_vehicle_type:\n for col in row:\n if col == -2:\n representation += 'X\\t'\n elif col == -1:\n representation += '-\\t'\n else:\n representation += str(int(col)) + '\\t'\n representation += '\\n\\n'\n\n representation += '-----------Destination--------------------------------------------------------------------\\n'\n for row in self.grid_destination:\n for col in row:\n if col == -1:\n representation += 'X\\t'\n elif col == 0:\n representation += '-\\t'\n else:\n representation += str(int(col)) + '\\t'\n representation += '\\n\\n'\n\n return representation", "def __str__(self):\n # replace with your code\n grid = '['\n for row in range(0,self._grid_height):\n grid += '['\n for col in range(0,self._grid_width):\n if col == self._grid_width - 1:\n grid += str(self.get_tile(row, col))\n else:\n grid += str(self.get_tile(row, col)) + ', '\n if row == self._grid_height - 1:\n grid += ']'\n else:\n grid += '], '\n \n grid += ']'\n return grid", "def __str__(self):\r\n grid_text = \"\\n-------------------\\n|\"\r\n for i in range(len(self.grid)):\r\n grid_text = grid_text + ' %s '%(self.grid[i][-1])\r\n\r\n if i%3 == 2:\r\n grid_text = grid_text + '|\\n-------------------\\n|'\r\n else:\r\n grid_text = grid_text + '|'\r\n return grid_text[0:len(grid_text)-1]", "def _to_string(board: Tuple[Tuple[Optional[int]]], width: int) -> str:\n display = \"\\n\"\n for i in range(width):\n for j in range(width):\n line = board[j][i * width:i * width + width]\n start = j * width ** 2 + i * width\n for k, space in enumerate(line):\n if space == 0:\n space = start + k\n else:\n space = (\"X\" if space == 1\n else \"O\" if space == -1\n else \"-\")\n display += \"{0:>4}\".format(space)\n display += \" \" * width\n display += \"\\n\"\n return display", "def __str__(self):\n return str(self._grid)", "def display_layers(layers, wide, tall):\n\n colours = {\n \"0\": \" \",\n \"1\": \" 
# \",\n }\n\n for row in range(tall):\n for col in range(wide):\n pixels = [layer[row][col] for layer in layers]\n line = next(colours[p] for p in pixels if p in colours)\n print(line, end=\"\")\n print()", "def grid_to_string(grid, player):\n single_string = \"\"\n row = 0\n column = 0\n # Loop through the grid and use the cells' display attributes\n # If the current position in the grid is the player, replace it with the\n # player's display ('A')\n while row < len(grid):\n while column < len(grid[0]):\n if player.row == row and player.col == column:\n single_string += player.display\n else:\n single_string += grid[row][column].display\n column += 1\n row += 1\n column = 0\n single_string += \"\\n\"\n\n # Add on the water bucket message with the proper plural phrasing\n buckets = player.num_water_buckets\n s = 's'\n if buckets == 1:\n s = ''\n single_string += f\"\\nYou have {buckets} water bucket{s}.\"\n return single_string", "def __str__(self):\n rep = \"\"\n for row in range(self._dim):\n for col in range(self._dim):\n rep += STRMAP[self._board[row][col]]\n if col == self._dim - 1:\n rep += \"\\n\"\n else:\n rep += \" | \"\n if row != self._dim - 1:\n rep += \"-\" * (4 * self._dim - 3)\n rep += \"\\n\"\n return rep", "def __str__(self):\r\n string_rep_of_grid=\"\"\r\n row=\"\"\r\n for dummy_j in range(self._height):\r\n for dummy_i in range(self._width):\r\n row=row+str(self._grid[dummy_j][dummy_i])+\" \" \r\n string_rep_of_grid=string_rep_of_grid+\"row number \"+str(dummy_j)+\": \"+row\r\n row=\"\"\r\n return string_rep_of_grid", "def __str__(self):\n string = \"\"\n for row in self.layout:\n for tile in row:\n string+= str(tile) + \" \"\n string+= \"\\n\"\n return string", "def surface_to_string( surface ):\n return pygame.image.tostring( surface, 'RGB' )", "def grid_to_str(grid):\n result = \"\"\n\n if len(grid) > 0 and len(grid[0]) > 0:\n for row in grid:\n row = [str(c) for c in row]\n result += \" \".join(row) + \"\\n\"\n\n result = result[:-1]\n return result", "def __str__(self):\r\n # replace with your code\r\n result = ''\r\n for row in range(0, self._grid_height):\r\n result += str(self._grid_tile[row]) + '\\n'\r\n return result", "def convertBoard(self):\n \n board = \"\"\n \n for m in self.squares:\n board += str(convertMarker(m)) + \" \"\n \n return board", "def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str", "def __str__(self):\n grid_str = \"\"\n for i in range(len(self.grid)):\n for j in range(len(self.grid[i])):\n grid_str = grid_str + self.grid[i][j] + '\\t'\n grid_str = grid_str.strip('\\t')\n grid_str = grid_str + '\\n'\n return grid_str", "def __str__(self):\n grid_str = \"\"\n for row in range(self.grid_height):\n grid_str += str(self.grid[row])+'\\n'\n return grid_str", "def __str__(self):\n board_lists = [['_']*self.__width for rows in range(self.__height)]\n for car in self.__cars:\n car_coords = car.car_coordinates()\n for item in car_coords:\n if item == (3,7):\n pass\n else:\n board_lists[item[0]][item[1]] = car.get_name()\n board_str = '\\n'.join(' '.join(sub) for sub in board_lists)\n return board_str", "def __repr__(self):\n fmt_str = 'Cityscapes Split: %s\\n' % self.cs_split\n fmt_str += '----Number of images: %d\\n' % len(self.cs)\n fmt_str += 'COCO Split: %s\\n' % self.coco_split\n 
fmt_str += '----Number of images: %d\\n' % len(self.coco)\n return fmt_str.strip()", "def __str__(self):\n grid_str = \"[\"\n for row in range(self._height):\n grid_str += \" \" + str(self._grid[row]) + \"\\n\"\n grid_str = grid_str[0:1] + grid_str[2:]\n grid_str = grid_str[:-1]\n grid_str += \"]\"\n return grid_str", "def __str__(self):\r\n \r\n #return \"The 2048 board is \" + str(self._cells)\r\n string = \"\"\r\n for row in range(self._grid_height):\r\n for column in range(self._grid_width):\r\n if column == self._grid_width -1:\r\n string += str(self._cells[row][column]) + \"\\n\"\r\n else:\r\n string += str(self._cells[row][column]) +\", \"\r\n return \"The 2048 board is \"+ str(self._grid_height) + \"x\" + str(self._grid_width) + \" and contains: \" + \"\\n\" + string", "def __str__(self):\n grid_string = ''\n for row in range(self.grid_height):\n grid_string += str(self.grid[row]) + '\\n'\n return grid_string", "def __str__(self):\n # replace with your code\n board = \"\"\n for index in range(self.grid_height):\n board += \"[\"\n for inner_index in range(self.grid_width):\n board += str(self.board[index][inner_index]) + \" \"\n else:\n board += \"]\\n\"\n return board", "def __repr__(self):\r\n # Initialize the return string\r\n s = ''\r\n\r\n for row in range(self.height):\r\n # Print the index of the row\r\n s = s + str(row % 10) + ' |'\r\n\r\n for col in range(self.width):\r\n s += self.grid[row][col]\r\n s += '|'\r\n\r\n s += '\\n'\r\n s += '--' * (self.width + 1)\r\n s += '-'\r\n s += '\\n'\r\n \r\n s += ' '\r\n for i in range(self.width):\r\n s += ' ' + str(i % 10) \r\n \r\n return s", "def __repr__(self):\r\n out = \"\"\r\n for x in range(0, self.width):\r\n for y in range(0, self.height):\r\n out += str(\"{:.1f}\".format(self.tiles[x][y])).rjust(6,' ')\r\n out += \"\\n\"\r\n return out", "def boardToString(self, margins={}):\n b = self.board\n rg = range(b.size())\n left = ' '*margins.get('left', 0)\n s = '\\n'.join(\n [left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])\n return s", "def __str__(self):\n def align_column(grid):\n board = \"\"\n for i in range(self.n):\n board += str(grid[i]) + \"\\n\"\n return board.strip()\n return (\"===Current Stage===\\n\"\n \"{}\\n\"\n \"====Goal Board=====\\n\"\n \"{}\".format(align_column(self.from_grid),\n align_column(self.to_grid)))", "def __repr__(self):\r\n rep_tpl = (self.__board_size, self.__bombs_dict, self.__ships)\r\n return str(rep_tpl)" ]
[ "0.68149734", "0.6398493", "0.6249319", "0.60937405", "0.60690725", "0.58461916", "0.5807707", "0.5799735", "0.5788799", "0.574172", "0.5738138", "0.57306874", "0.5712747", "0.5700559", "0.56804615", "0.567307", "0.5638548", "0.5636529", "0.56099236", "0.55986625", "0.55969965", "0.5594274", "0.55900353", "0.5571504", "0.55603194", "0.55529416", "0.55364305", "0.5509117", "0.54780346", "0.5433358" ]
0.71344054
0
Parse a paper, save it to a local file and return the dblp key of the paper. Parses all properties as specified in `parse_properties.py`.
def get_paper(paper_container):
    paper_type = list(paper_container.keys())[0]
    dblp_paper = paper_container[paper_type]
    dblp_key = dblp_paper["@key"].replace("/", "_")

    paper = {"type": paper_type}
    for prop in props:
        # see if property exists and get the key
        key = prop.key
        if key not in dblp_paper:
            continue
        key_name = prop.key_name if prop.key_name else key

        # get the property value and save if non-empty
        prop_value = dblp_paper[key]
        if prop.parse:
            prop_value = prop.parse(prop_value)
        if prop_value and prop_value != "":
            paper[key_name] = prop_value

    # output paper
    with open(local.file_name("papers", dblp_key), "w") as out:
        json.dump(paper, out, ensure_ascii=False, indent=4)

    return dblp_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_paper(self, dblpkey, db):\n NS = {'tei': 'http://www.tei-c.org/ns/1.0'}\n try:\n xml=self.get_grobid_xml(dblpkey)\n result= grobid_mapping.tei_to_dict(xml)\n #\n #try:\n mongo_set_dict=dict()\n #print(\"results: {}\".format(result))\n if 'abstract' in result:\n mongo_set_dict[\"content.abstract\"]=result[\"abstract\"]\n if 'notes' in result:\n mongo_set_dict[\"content.notes\"] = result[\"notes\"]\n if 'fulltext' in result:\n mongo_set_dict[\"content.fulltext\"] = result[\"fulltext\"]\n with open(cfg.folder_content_xml + dblpkey + \".txt\", 'w') as f:\n # f.write(result[\"fulltext\"])\n print(result[\"fulltext\"])\n if 'chapters' in result:\n mongo_set_dict[\"content.chapters\"] = result[\"chapters\"]\n\n mongoResult= db.publications.update_one(\n {'_id': dblpkey},\n {'$set': result}\n )\n # print(mongoResult)\n\n logging.info(\"Processed \"+dblpkey)\n except:\n logging.exception('Cannot process paper ' +dblpkey, exc_info=True)\n\n # pprint.pprint(result)\n # for ref in result['references']:\n # print(ref)\n # print(etree.tostring(result['fulltext'], pretty_print=True))", "def get_paper_by_id(paper_id):\n dblp_key = paper_id.replace(\"/\", \"_\")\n if local.paper_exists(dblp_key):\n return dblp_key\n\n print(\"getting information from dblp about paper {}\".format(paper_id))\n data = get(\"https://dblp.org/rec/\" + paper_id + \".xml\")[\"dblp\"]\n return get_paper(data)", "def parse(filepath):\n wos_list = []\n\n paper_start_key = 'PT'\n paper_end_key = 'ER'\n\n\n #\n line_list = []\n try:\n with open(filepath, 'r') as f:\n line_list = f.read().splitlines()\n except IOError: # File does not exist, or couldn't be read.\n raise IOError(\"File does not exist, or cannot be read.\")\n\n if len(line_list) is 0:\n raise IOError(\"Unable to read filepath or filepath is empty.\")\n # Convert the data in the file to a usable list of dictionaries.\n # Note: first two lines of file are not related to any paper therein.\n last_field_tag = paper_start_key # initialize to something.\n for line in line_list[2:]:\n\n field_tag = line[:2]\n\n if field_tag == ' ':\n pass\n\n if field_tag == paper_start_key:\n # Then prepare for next paper.\n wos_dict = _new_wos_dict()\n\n if field_tag == paper_end_key:\n # Then add paper to our list.\n wos_list.append(wos_dict)\n\n # Handle keys like AU,AF,CR that continue over many lines.\n if field_tag == ' ':\n field_tag = last_field_tag\n\n # Add value for the key to the wos_dict: only for the five tags.\n try:\n if field_tag in ['DE', 'DI', 'TI', 'SO', 'UT','PY']:\n wos_dict[field_tag] += ' ' + str(line[3:])\n # Rest all will just get passed\n else:\n pass\n\n except (KeyError, TypeError, UnboundLocalError):\n wos_dict[field_tag] = str(line[3:])\n\n last_field_tag = field_tag\n # End line loop.\n\n # Define keys that should be lists instead of default string.\n list_keys = ['DE']\n delims = {'DE': ';'}\n\n # And convert the data at those keys into lists.\n for wos_dict in wos_list:\n for key in list_keys:\n delim = delims[key]\n try:\n key_contents = wos_dict[key]\n if delim != '\\n':\n wos_dict[key] = key_contents.split(delim)\n else:\n wos_dict[key] = key_contents.splitlines()\n except KeyError:\n # One of the keys to be converted to a list didn't exist.\n pass\n except AttributeError:\n # Again a key didn't exist but it belonged to the wos\n # data_struct set of keys; can't split a None.\n pass\n\n return wos_list", "def process_paper(filename):\n\n # Start time\n start_time = time.time()\n\n # Read in the paper\n paper = 
useful_functions.read_in_paper(filename, sentences_as_lists=True)\n\n # Extract the gold summary\n gold = paper[\"HIGHLIGHTS\"]\n gold_string_list = [\" \".join(x) for x in gold]\n\n # Extract the title\n title = paper[\"MAIN-TITLE\"][0]\n title_string = \" \".join(title)\n\n # Extract the abstract\n abstract = paper[\"ABSTRACT\"]\n abstract_string_list = [\" \".join(x) for x in abstract]\n\n # Extract the keyphrases\n try:\n keyphrases = paper[\"KEYPHRASES\"][0]\n except IndexError:\n keyphrases = []\n\n # Turn the paper into a single string and calculate the bag of words score\n paper_string = \" \".join([\" \".join(x) for key, val in paper.iteritems() for x in val])\n bag_of_words = useful_functions.calculate_bag_of_words(paper_string)\n\n # Get the paper as a list of sentences, associating each sentence with its section name - will be used by oracle\n # to find best summary sentences.\n paper_sentences = [(\" \".join(x), key) for key, val in paper.iteritems() for x in val\n if key != \"ABSTRACT\"]\n\n # Create a list of sentences, their ROUGE-L scores with the Highlights and the section they occur in\n # (as a string)\n sents_scores_secs = []\n\n for sentence, section in paper_sentences:\n # For some reason the candidate sentence needs to be the only item in a list\n r_score = rouge.calc_score([sentence], gold_string_list)\n\n sents_scores_secs.append((sentence.split(\" \"), r_score, section))\n\n # Sort the sentences, scores and sections into descending order\n sents_scores_secs = sorted(sents_scores_secs, key=itemgetter(1), reverse=True)\n\n pos_sents_scores_secs = sents_scores_secs[:num_summary]\n neg_sents_scores_secs = sents_scores_secs[num_summary:]\n\n if len(neg_sents_scores_secs) < len(pos_sents_scores_secs):\n print(\"{}**** NOT A SUFFICIENT AMOUNT OF DATA IN PAPER {}, IGNORING PAPER ****{}\".format(\n Color.RED, filename, Color.END))\n return\n\n # Positive sentences\n positive_sents_secs_class = [(sent, sec, 1) for sent, _, sec in pos_sents_scores_secs]\n\n # Negative sentences\n\n # Take the sentences not used as positive and reverse it to have worst scores first then take an equal number\n neg_sents_scores_secs = [x for x in reversed(neg_sents_scores_secs)][:len(positive_sents_secs_class)]\n negative_sents_secs_class = [(sent, sec, 0) for sent, _, sec in neg_sents_scores_secs]\n\n # Don't create data from this paper if it's less than 40 sentences - i.e. there would be more positive than\n # negative data. The data needs to be balanced.\n #if len(positive_sents_secs_class) != len(negative_sents_secs_class):\n # print(\"{}**** NOT A SUFFICIENT AMOUNT OF DATA IN PAPER {}, IGNORING PAPER ****{}\".format(\n # Color.RED, filename, Color.END))\n # return\n\n # Concatenate the positive and negative sentences into a single data item and shuffle it\n data = positive_sents_secs_class + negative_sents_secs_class\n random.shuffle(data)\n\n # Average word vectors of each sentence and convert to list for JSON serialisation\n sentvecs_secs_class = [(useful_functions.sentence2vec(sent).tolist(), sec, y) for sent, sec, y in data]\n\n # Calculate features for each sentence\n features = [useful_functions.calculate_features(sent, bag_of_words, document_wordcount, keyphrases,\n abstract_string_list, title_string, sec)\n for sent, sec, y in data]\n\n # Calculate abstract vector\n abs_vector = useful_functions.abstract2vector(abstract_string_list).tolist()\n\n # Description of the data\n description_text = \"All text is of the form of a list of lists, where each sentence is a list of words. 
The\" \\\n \" sentences are of the form [(sentence (as a list of words), section in paper,\" \\\n \" classification)]. The sentence vectors are of a similar form, except the sentence text is\" \\\n \" replaced with the vector representation of the sentence. The features are of the form \" \\\n \"[(AbstractROUGE, TF-IDF, Document_TF-IDF, keyphrase_score, title_score, numeric_score,\" \\\n \" sentence_length, section)]. The dimensions of each sentence vector are [1x100]. The \" \\\n \"abstract vector is a single [1x100] vector also.\"\n\n # The data item that will be written for this paper\n data_item = {\n \"filename\": filename,\n \"gold\": gold,\n \"title\": paper[\"MAIN-TITLE\"],\n \"abstract\": abstract,\n \"abstract_vec\": abs_vector,\n \"sentences\": data,\n \"sentence_vecs\": sentvecs_secs_class,\n \"sentence_features\": features,\n \"description\": description_text\n }\n\n # Write the data out\n with open(TRAINING_DATA_WRITE_LOC + filename.strip(\".txt\") + \".json\", \"wb\") as f:\n json.dump(data_item, f)\n\n print(\"--> Finished processing {}, took {} seconds, data length: {}.\".format(\n filename, (time.time() - start_time), len(data)))", "def _paper_id(hit: DD) -> str:\n return hit[\"_source\"][\"paper_id\"]", "def dump_pa(self,paname):\n pafilemap = {'Photometry': 'lightcurve'}\n if paname in pafilemap:\n filetype = pafilemap[paname]\n else:\n raise IOError(\"PA name does not match any file type. Check PA name in config file.\") \n\n pafile = findfile(filetype, self.outdir)\n\n return pafile", "def read_pk(filename):\n with open(filename, 'rb') as fd:\n ret = pickle.load(fd)\n return ret", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def from_paper(self) -> str:\n raise NotImplementedError", "def getFilePath( source, paper, month, day, year ):\r\n attributes = { SOURCE : source,\r\n PAPER : paper,\r\n MONTH : month,\r\n DAY : day,\r\n YEAR : year \r\n }\r\n return \"\\\\\".join( [ settings.CLEAN_STORE ] + [ attributes[ key ] for key in STORE_ORDER ] )", "def _get_pe_key(self, pathstr):\n path = _path.Path.from_str(pathstr)\n return path.elems()[-1].key", "def rrpn_loader(rpn_pkl_file):\n\n pkl = load_object(rpn_pkl_file)\n proposals = {}\n for boxes, scores, id in zip(pkl['boxes'], pkl['scores'], pkl['ids']):\n proposals[id] = {'boxes':boxes, 'scores':scores}\n\n return proposals", "def get_pointmention_pk_for_point_text(fxfn, ptk2ptmk):\n print \"Get point pk for point texts\"\n ptext2pk = {}\n fd = gzip.open(fxfn)\n for idx, ll in enumerate(fd):\n if '\\\"ui.point\\\"' in ll:\n jso = js.loads(ll.strip().strip(\",\"))\n assert jso[\"fields\"][\"name\"].strip() not in ptext2pk\n ptext2pk[jso[\"fields\"][\"name\"].strip()] = \\\n {jso[\"pk\"]: ptk2ptmk[jso[\"pk\"]]}\n if idx and not idx % 10000:\n print \"- Done {} lines, {}\".format(idx, time.strftime(\"%H:%M:%S\",\n time.localtime()))\n return ptext2pk", "def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key", "def _get_pubkey_from_der_x509_certificate(filedata: bytes, backend: Any) -> Tuple[Any, Optional[int]]:\n try:\n cert = x509.load_der_x509_certificate(filedata, backend=backend)\n return cert.public_key(), _get_keyidv2_from_cert(cert)\n except Exception:\n 
return None, None", "def _build_paper(self,save_data=True,store_latex=False):\n try:\n # $ Set the Arxiv Object to ensure Proper extraction\n identity,paper = self.extract_meta_from_remote(self.paper_id)\n self.identity = identity\n\n if not dir_exists(self.paper_root_path):\n os.makedirs(self.paper_root_path)\n # $ Download the paper. \n downloaded_data = arxiv.download(paper,dirpath=self.paper_root_path,slugify=lambda paper: paper.get('id').split('/')[-1],prefer_source_tarfile=True)\n except Exception as e:\n raise ArxivAPIException(self.paper_id,str(e))\n # $ Extract Files in Folder.\n with tarfile.open(downloaded_data) as tar:\n tar.extractall(path=self.latex_root_path)\n\n # $ Remove the Tar File.\n if not store_latex:\n os.remove(downloaded_data)\n # $ Save the Metadata\n self._extract_info_from_latex()\n\n shutil.rmtree(self.latex_root_path) # remove Latex source data.\n # print(\"Extracted Latex Data\")\n if save_data:\n self.to_fs()", "def read_pf_to_dict(\n pfname: str, load_extra: bool = True, extra_pf_keyname: str = \"extra_pf_names\"\n) -> dict:\n result = stock.pfread(pfname).pf2dict()\n\n if load_extra:\n if extra_pf_keyname in result:\n for extra_pfname in result[extra_pf_keyname]:\n LOGGER.debug(\"Attempting to load extra pf %s\", extra_pfname)\n result[extra_pfname] = stock.pfread(extra_pfname).pf2dict()\n else:\n LOGGER.debug(\n \"extra_pf_keyname %s does not exist in pf %s, not loading extra pf files.\",\n extra_pf_keyname,\n pfname,\n )\n\n return result", "def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def parse_pdf_to_dict(\n pdf_path: str,\n fulltext: bool = True,\n soup: bool = True,\n as_list: bool = False,\n return_coordinates: bool = True,\n grobid_url: str = GROBID_URL,\n):\n parsed_article = parse_pdf(\n pdf_path,\n fulltext=fulltext,\n soup=soup,\n return_coordinates=return_coordinates,\n grobid_url=grobid_url,\n )\n article_dict = convert_article_soup_to_dict(parsed_article, as_list=as_list)\n return article_dict", "def get_autor_with_papers(author_id):\n print(\"getting information from dblp about author {}\".format(author_id))\n data = get(\"https://dblp.org/pid/\" + author_id + \".xml\")[\"dblpperson\"]\n author_id = author_id.replace(\"/\", \"_\")\n author = {\n \"name\": data[\"@name\"],\n \"papers\": [get_paper(paper_container) for paper_container in data[\"r\"]],\n }\n\n with open(local.file_name(\"authors\", author_id), \"w\") as out:\n json.dump(author, out, ensure_ascii=False, indent=4)", "def get_ploidy(ploidyfile) -> dict:\n print(ColorText('\\nLoading ploidy information ...').bold())\n # have user choose key to dict\n return choose_pool(pklload(ploidyfile))", "def parse_book_record(root) -> dict:\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//BookTitle/text()\")))\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n 
doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Book/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Book/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Book/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n return doc", "def uploaded_paper_name(paper, filename):\n initial = 'uploads/'\n name = paper.name + '-paperbank' + '.pdf'\n\n return initial + name", "def reprkey(path):\n return ReprKey(\n path=storepath(path),\n hash=get_hash(path),\n tag=config_tag)", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def fetch_document(pid, filename):\n path = os.path.join(BASE_DOCUMENTS_PATH, pid, filename)\n f = open(path, 'rb')\n encoded_file_content = base64.b64encode(f.read())\n f.close()\n return {\n 'path': path,\n 'hash': compute_hash(path),\n 'size': os.path.getsize(path),\n 'base64_content': encoded_file_content\n }", "def view_paper():\n return dict(paper_id=request.args(0),\n topic_id=request.args(1))", "def read_from_pln(self, path):\n\n # Read the .pln file contents to a dictionary.\n pln_dict = read_pln_file(path)\n\n # Look for each attribute listed in self.attributes in the results\n # dictionary.\n for attr in self.attributes:\n\n # Get the corresponding ExoParameter object.\n current = getattr(self, attr)\n\n # Look for this attribute in the results dictionary and set\n # ExoParameter.value.\n key_str = attr\n try:\n current.value = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.value = current.default\n\n # Look for reference and URL information in the results dictionary,\n # and use this to set ExoParameter.reference and ExoParameter.url.\n # Skip 'transit' since 'transitref' and 'transiturl', are separate\n # fields in the references section.\n if not attr == \"transit\":\n\n key_str = \"\".join([attr, \"ref\"])\n try:\n current.reference = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.reference = None\n\n key_str = \"\".join([attr, \"url\"])\n try:\n current.url = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.url = None\n\n # If this attribute can take uncertainty values, look for these in\n # the results dictionary, then set ExoParameter.uncertainty and\n # ExoParameter.uncertainty_upper.\n if current.uncertain_flag:\n\n key_str = \"\".join([\"u\", attr])\n try:\n current.uncertainty = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.uncertainty = None\n\n key_str = \"\".join([\"u\", attr, \"d\"])\n try:\n current.uncertainty_upper = pln_dict[key_str]\n del pln_dict[key_str]\n except KeyError:\n current.uncertainty_upper = None\n\n # If there are still keyword / value pairs in pln_dict, these fields\n # are not in the self.attributes list, which is built from\n # 
self.template_file.\n \"\"\"\n if len(pln_dict.keys()) > 0:\n print(\"{0} contains unknown .pln fields: {1}\".format(\n path, pln_dict.keys()))\n print(\"Add fields to {0} to include.\".format(self.template_file))\n \"\"\"\n\n # Trigger uncertainty calculations.\n self._populate_uncertainties()", "def _load_psk(self, psk_file):\n with open(psk_file, 'rb') as f:\n return f.read().rstrip()", "def extract(key, path_pdf):\n\n path_tmp_pdf = extract_first_page(path_pdf)\n\n # extract all text from first page\n raw_text = extract_text(path_tmp_pdf)\n\n # extract abstract from whole page and replace hyphens etc.\n abstract = extract_abstract(raw_text)\n\n # something went wrong when abstract is longer than 1500 chars\n if len(abstract) > MAX_LEN:\n print('{}: Abstract is too long.'.format(path_pdf))\n\n if not abstract:\n print('{}: Could not extract abstract.'.format(path_pdf))\n\n # clean up temp file\n os.unlink(path_tmp_pdf)\n\n # TODO: Fix this return object\n out = {'@key': key, 'abstract': abstract}\n\n return out" ]
[ "0.5995921", "0.58947307", "0.5745195", "0.5690165", "0.5009332", "0.49415568", "0.4937688", "0.49098766", "0.48259768", "0.48054695", "0.48006597", "0.47413546", "0.47379598", "0.47188556", "0.47013918", "0.46845803", "0.46781558", "0.46623516", "0.46493664", "0.46405235", "0.4626739", "0.46055946", "0.4598053", "0.4568594", "0.45393723", "0.45378134", "0.451342", "0.44956672", "0.4462712", "0.44619405" ]
0.69308305
0
Parse the author with the given id and all their papers, and store them in the local database.
def get_autor_with_papers(author_id):
    print("getting information from dblp about author {}".format(author_id))
    data = get("https://dblp.org/pid/" + author_id + ".xml")["dblpperson"]
    author_id = author_id.replace("/", "_")
    author = {
        "name": data["@name"],
        "papers": [get_paper(paper_container) for paper_container in data["r"]],
    }

    with open(local.file_name("authors", author_id), "w") as out:
        json.dump(author, out, ensure_ascii=False, indent=4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def get_papers_info(author_url, existing_papers):\n\n\tauthor_dict = {}\n\n\tauthor_page_tree = get_tree(author_url)\n\t# Get the <a> elements for the papers on the author's page\n\ta_elems = get_a_elems_for_papers(author_page_tree)\n\t# get the dates corresponding to each paper\n\tpaper_dates = get_dates_for_papers(author_page_tree)\n\t# zip into a list of (a_elem, date) pairs\n\ta_elem_dates = zip(a_elems, paper_dates)\n\t# Each a is a paper\n\tfor a, date in a_elem_dates:\n\t\t# Title of paper is the text content of the a element\n\t\tpaper_title = a.text_content()\n\t\t# Check if paper has already been checked, if so, move on to next paper\n\t\tif paper_title in existing_papers:\n\t\t\tcontinue\n\n\t\tpaper_url = a.get(\"href\")\n\n\t\tpaper_tree = get_tree(paper_url)\n\t\t# Get list of the paper's authors\n\t\tauthors = get_paper_authors(paper_tree)\n\t\t# Get paper abstract\n\t\tabstract = get_paper_abstract(paper_tree)\n\t\t# Get paper keywords\n\t\tkeywords = get_paper_keywords(paper_tree)\n\t\t# Get paper id number from its url\n\t\tpaper_id = re.search(\"[0-9]+\", paper_url).group()\n\t\t# Add paper to dictionary with id as key and metadata as values\n\t\tauthor_dict[paper_id] = {\n\t\t\t\t\t\t\"title\": paper_title,\n\t\t\t\t\t\t\"authors\": authors,\n\t\t\t\t\t\t\"abstract\": abstract,\n\t\t\t\t\t\t\"url\": paper_url,\n\t\t\t\t\t\t\"keywords\": keywords,\n\t\t\t\t\t\t'year': date\n\t\t} \n\n\treturn author_dict", "def prepare_pubs(path_name):\n add_pubs = read_csv(path_name)\n for key, row in add_pubs.items():\n row['author_uris'] = set([])\n ids = row['ufid'].split(';')\n print \"ids=\", ids\n for id in ids:\n print \"Processing id=\", id\n if id[0] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:\n author_uri = find_vivo_uri('ufVivo:ufid', id)\n if author_uri is None:\n print >>exc_file, id, \"UFID not found in VIVO\"\n continue\n else:\n row['author_uris'].add(author_uri)\n elif id[0] == 'h':\n row['author_uris'].add(id)\n else:\n print >>exc_file, row['ufid'], \"Unknown identifier in UFID\"\n print id, row\n add_pubs[key] = row\n return add_pubs", "def get_pubmed(pmid, author_uris = None):\n ardf = \"\"\n record = get_entrez_record(pmid)\n if record is None:\n return [\"\", None]\n pub = document_from_pubmed(record)\n if pub['page_end'] == '':\n pub['page_end'] = pub['page_start']\n if pub['date']['month'] == '':\n pub['date']['month'] = '1'\n if pub['date']['day'] == '':\n pub['date']['day'] = '1'\n pub['pub_uri'] = get_vivo_uri()\n pub['date_harvested'] = str(datetime.now())\n pub['harvested_by'] = \"Python PubMed Add \"+__version__\n journal_uri = find_vivo_uri(\"bibo:issn\", pub['issn'])\n if journal_uri is None:\n [add, journal_uri] = make_journal_rdf(pub['journal'], pub['issn'])\n ardf = ardf + add\n pub['journal_uri'] = journal_uri\n\n pub_date = datetime.strptime(pub['date']['month']+'/'+pub['date']['day']+\\\n '/'+pub['date']['year'], \"%m/%d/%Y\")\n if 
pub_date in date_dictionary:\n pub['date_uri'] = date_dictionary[pub_date]\n else:\n [add, pub_date_uri] = make_datetime_rdf(pub_date.isoformat())\n date_dictionary[pub_date] = pub_date_uri\n pub['date_uri'] = pub_date_uri\n ardf = ardf + add\n \n# Turn each author into a URI reference to an authorship\n\n pub['authorship_uris'] = []\n for key, author in sorted(pub['authors'].items(),key=lambda x:x[0]):\n try:\n author_uri_set = find_author(author)\n except:\n print \"No last name for author\", author\n print \"Pub\\n\", pub\n print \"Record\\n\", record\n continue\n if len(author_uri_set) == 0:\n [add, author_uri] = make_author_rdf(author)\n ardf = ardf + add\n print pmid, \"Add\", author, \"at\", author_uri\n elif len(author_uri_set) == 1:\n author_uri = list(author_uri_set)[0]\n print pmid, \"Found\", author, author_uri\n else:\n if author_uris is None:\n author_uri = list(author_uri_set)[0]\n print pmid, \"Disambiguate\", author, \"from\", author_uri_set\n else:\n possible_uri_set = author_uri_set.intersection(author_uris)\n if len(possible_uri_set) == 1:\n author_uri = list(possible_uri_set)[0]\n else:\n author_uri = list(possible_uri_set)[0]\n print pmid, \"Disambiguate\", author, \"from\", possible_uri_set\n print \"Disambiguate:\"\n print \" Possible authors in VIVO\", author_uri_set\n print \" Possible authors in Source\", author_uris\n print \" Selected author\", author_uri\n \n [add, authorship_uri] = make_authorship_rdf(pub['pub_uri'], author_uri,\n key, corresponding=False)\n pub['authorship_uris'].append(authorship_uri)\n ardf = ardf + add\n \n return [ardf, pub]", "def getAuthorByID(id: int) -> Author:\n if not id:\n abort(400)\n author = Author.query.get(id)\n if not author:\n abort(404, \"Author is not found\")\n return author.serialize()", "def saveData(self):\r\n newPaper = self.getData()\r\n\r\n cur = self.dbConn.execute(\"SELECT PaperID FROM Papers WHERE Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if not res==None:\r\n self.dbConn.execute(\"DELETE FROM CoAuthors WHERE Paper = ?\",[res[\"PaperID\"]])\r\n self.dbConn.execute(\"DELETE FROM Papers WHERE Title = ?\",[self.title.getVal()])\r\n\r\n sqlStr = str.format(\"INSERT into {} ({}) VALUES ({})\",self.dataTable,\",\".join(self.dataCols),\"?\"+\",?\"*(len(self.dataCols)-1))\r\n newCur = self.dbConn.execute(sqlStr,[newPaper.symposium, newPaper.title,newPaper.primeAuthor,newPaper.correspond])\r\n\r\n newRow = newCur.lastrowid\r\n if len(newPaper.coauthors)>0:\r\n for coauth in newPaper.coauthors:\r\n sqlStr = str.format(\"INSERT into {} ({}) VALUES ({})\",self.coauthorTable,\",\".join(self.coauthorCols),\"?\"+\",?\"*(len(self.coauthorCols)-1))\r\n newCur = self.dbConn.execute(sqlStr,[newRow,coauth])\r\n\r\n self.dbConn.commit()\r\n self.clearData()", "def _parse_mercurial_author(data, id_gen):\n angled = ur'(?P<author>.+?) <(?P<email>.+?)>'\n paren = ur'(?P<email>.+?) \\((?P<author>.+?)\\)'\n simple = ur'(?P<author>[^,]+)'\n author_list = []\n for regex in (angled, paren, simple):\n # Watch out for commas separating multiple names.\n regex += u'(,\\s*)?'\n for match in re.finditer(regex, data):\n # Watch out for suffixes like 'Jr.' 
when they are comma-separated\n # from the name and thus cause issues when *all* names are only\n # separated by commas.\n match_dict = match.groupdict()\n author = match_dict['author']\n if not author.partition(' ')[1] and author.endswith('.'):\n prev_author = author_list.pop()\n author = ', '.join([prev_author, author])\n if u'email' not in match_dict:\n email = ''\n else:\n email = match_dict['email']\n author_list.append((author, email))\n else:\n # If authors were found then stop searching as only expect one\n # style of author citation.\n if author_list:\n break\n author = Author(author_list[0])\n user = author.first_last\n email = author.email\n uid = id_gen[user]\n return (uid, user, email)", "def __init__(self, author_id, refresh=False):\n # Load json\n self._id = str(int(str(author_id).split('-')[-1]))\n Retrieval.__init__(self, self._id, 'AuthorRetrieval', refresh)\n self._json = self._json['author-retrieval-response']\n # Checks\n try:\n self._json = self._json[0]\n except KeyError: # Incomplete forward\n alias_json = self._json['alias']['prism:url']\n if not isinstance(alias_json, list):\n alias_json = [alias_json]\n alias = ', '.join([d['$'].split(':')[-1] for d in alias_json])\n text = 'Author profile with ID {} has been merged and the main '\\\n 'profile is now one of {}. Please update your records '\\\n 'manually. Functionality of this object is '\\\n 'reduced.'.format(author_id, alias)\n warn(text, UserWarning)", "def process_paper(self, dblpkey, db):\n NS = {'tei': 'http://www.tei-c.org/ns/1.0'}\n try:\n xml=self.get_grobid_xml(dblpkey)\n result= grobid_mapping.tei_to_dict(xml)\n #\n #try:\n mongo_set_dict=dict()\n #print(\"results: {}\".format(result))\n if 'abstract' in result:\n mongo_set_dict[\"content.abstract\"]=result[\"abstract\"]\n if 'notes' in result:\n mongo_set_dict[\"content.notes\"] = result[\"notes\"]\n if 'fulltext' in result:\n mongo_set_dict[\"content.fulltext\"] = result[\"fulltext\"]\n with open(cfg.folder_content_xml + dblpkey + \".txt\", 'w') as f:\n # f.write(result[\"fulltext\"])\n print(result[\"fulltext\"])\n if 'chapters' in result:\n mongo_set_dict[\"content.chapters\"] = result[\"chapters\"]\n\n mongoResult= db.publications.update_one(\n {'_id': dblpkey},\n {'$set': result}\n )\n # print(mongoResult)\n\n logging.info(\"Processed \"+dblpkey)\n except:\n logging.exception('Cannot process paper ' +dblpkey, exc_info=True)\n\n # pprint.pprint(result)\n # for ref in result['references']:\n # print(ref)\n # print(etree.tostring(result['fulltext'], pretty_print=True))", "def get_author_data(authors):\n\n try:\n author = authors.author.name.cdata.encode(\"utf8\")\n author_id = int(authors.author.id.cdata.encode(\"utf8\"))\n except: # FIXME: running into errors when book has multiple authors\n author = authors.author[0].cdata.encode(\"utf8\")\n author_id = authors.author[0].cdata.encode(\"utf8\")\n\n return (author, author_id)", "def author_id(self, author_id):\n\n self._author_id = author_id", "def to_internal_value(self, data):\n authors = []\n for author in data:\n path = urlparse(author).path\n resolved_func, __, resolved_kwargs = resolve(path)\n person = resolved_func.cls.queryset.get(pk=resolved_kwargs['pk'])\n authors.append(person)\n\n return {'authors': authors}", "def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")", "def find_by_id(cls, author_id):\n try:\n author 
= cls.db.newsdb.find_one({'_id': ObjectId(str(author_id))})\n if author is not None:\n author = AuthorModel(author['name'], str(author['_id']))\n except InvalidId:\n author = None\n\n return author", "def get_author_by_id(self, id_num):\n\n cur = self.conn.cursor()\n query = 'SELECT author_id , name FROM author WHERE author_id = ?'\n cur.execute(query, (id_num,))\n return row_to_dict_or_false(cur)", "def get_author_data():\n entry = mongo.db.Authors\n output = list()\n look_up_type = None\n if 'name' in request.args:\n look_up_type = 'name'\n print(request.args)\n if len(request.args['name']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['name'].strip('\"')\n name = entry.find({'name': {'$regex': value}})\n if name:\n for author in name:\n output.append({'name': author['name']})\n elif 'booktitle' in request.args:\n look_up_type = 'related_books'\n if len(request.args['booktitle']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['booktitle'].strip('\"')\n related_books = entry.find(\n {'author_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for title in related['author_books']:\n if value in title:\n output.append(({'related_books': title}))\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenAuthors.html', output=output, look_up_type=look_up_type), 200", "def update_by_id(cls, id, name, author_id):\n\t\tbook = Book.query.get(id)\n\t\tbook.name = name\n\t\tbook.authors_id = author_id\n\t\tdb.session.commit()", "def init_books(author_file, json_file):\n with open(author_file) as f:\n authors = list(f)\n\n authors = [i.strip() for i in authors]\n\n books = []\n for author in authors:\n s = get_etexts('author', author)\n for i in s:\n try:\n if list(get_metadata('language', i))[0] == 'en':\n title, etext = list(get_metadata('title', i))[0], strip_headers(load_etext(i)).strip()\n b = Book(i, title, etext)\n books.append(b)\n except UnknownDownloadUriException:\n # this book does not have a load_etext corresponding to it.\n pass\n\n with open(json_file, 'wb') as f:\n pickle.dump(books, f)\n\n print (len(books))", "def getGRAuthorByID(id, book_callee=None, series_callee=None, printout=True): \n author_entry = session.query(author).get(id)\n if author_entry is None:\n request = requests.get('https://www.goodreads.com/author/show/'+str(id)+'.xml?key='+API_KEY['GOODREADS'])\n if request.status_code == 200:\n data = xmltodict.parse(request.text)['GoodreadsResponse']['author']\n \n auth = {}\n auth['id'] = int(data['id'])\n auth['author'] = data['name']\n auth['description'] = data['about']\n auth['hometown'] = data['hometown']\n auth['small_img'] = data['small_image_url']\n auth['large_img'] = data['image_url']\n \n author_entry = author(**auth) \n session.add(author_entry)\n session.commit() \n for key, book in data['books'].items():\n while type(book) is list:\n book = book[0]\n if type(book) is OrderedDict and (book_callee is None or book_callee != int(book['id']['#text'])):\n a_book = getGRBookByID(int(book['id']['#text']), id)\n if a_book is not None:\n session.query(author).get(id).books.append(a_book)\n session.commit()\n if(printout):\n print(author_entry)\n \n return author_entry", "def checkPaper(self,event=None):\r\n if self.title.getVal() not in self.paperList:\r\n self.paperList.append(self.title.getVal())\r\n self.paperList.sort()\r\n 
self.title.updateVals(self.paperList)\r\n return\r\n\r\n ## This section of code should probably go into setData. . .\r\n self.authorBox.clearData()\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People on Papers.PrimaryAuthor = People.PersonID WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if res ==None:\r\n self.primeAuthor.setVal(\"No Author Found; Check database\")\r\n return\r\n self.primeAuthor.setVal(formatNameSQL(res))\r\n self.addPrimeAuthorFn()\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People on Papers.CorrespondingAuthor = People.PersonID WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if res == None:\r\n self.correspond.setVal(self.primeAuthor.getVal())\r\n else:\r\n self.correspond.setVal(formatNameSQL(res))\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People JOIN CoAuthors ON Papers.paperID = CoAuthors.PaperID AND People.PersonID = CoAuthors.Author WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchall()\r\n if res == None:\r\n return\r\n for ln in res:\r\n curAuthor = str.format(formatNameSQL(ln))\r\n self.authorBox.addLine(curAuthor)\r\n self.coAuthor.setVal(curAuthor)", "def LoadArtIntoDB(store,art):\n if 'srcorgname' in art and art['srcorgname'] is not None:\n srcorg = Misc.GetOrgID( art[ 'srcorgname' ] )\n else:\n # no publication specified - look up using domain name\n o = urlparse.urlparse(art['permalink'])\n domain = o[1].lower()\n srcorg = Publication.find_or_create(domain)\n art['srcorg'] = srcorg\n\n\n # resolve bylined authors to journo ids\n expected_journo = None\n authors = Byline.CrackByline(art['byline'])\n attributed = []\n for author in authors:\n attributed.append(Journo.find_or_create(author, art, expected_journo))\n art['journos'] = attributed\n\n# if opts.test:\n# ukmedia.PrettyDump( art )\n\n article_id = store.upsert( art )\n\n return article_id", "def main():\n papers_with_references = add_references_to_papers(PAPERS_FILE, TEXT_DIR)\n citations = match_citations_with_papers(papers_with_references)\n insert_into_citation_table(citations)", "def post_author_data():\n data = None\n if request.get_json() is None:\n data = request.form.to_dict()\n print(data)\n else:\n data = request.get_json()\n\n if data is None or data == {} or all(value == '' for value in data.values()):\n return render_template('error.html', message='Input format is not correct'), 400\n\n data.get('name', None)\n data.get('author_url', None)\n data.get('author_id', None)\n data.get('rating', None)\n data.get('rating_count', None)\n data.get('review_count', None)\n data.get('image_url', None)\n data.get('related_authors', None)\n data.get('author_books', None)\n\n if isinstance(data, list):\n mongo.db.Authors.insert_many(data)\n else:\n mongo.db.Authors.insert_one(data)\n return render_template(\"post_author.html\", output=data), 200", "def get_author_detail(body):\n user_id = get_user_id(body) # user id\n author_details = body.find('div', {'id': 'gsc_prf_i'})\n\n name = author_details.find('div', attrs={'id': 'gsc_prf_in'}).text\n name_attributes = author_details.find_all('div', attrs={'class': 'gsc_prf_il'})\n\n affiliation = name_attributes[0].text or ''\n interests = name_attributes[1].text or ''\n email = name_attributes[2].text or ''\n\n citation_indices = body.find('table', attrs={'id': 'gsc_rsb_st'})\n\n year_since_text = citation_indices.find_all('tr')[0]\n year_since = re.sub('Since ', '', 
year_since_text.find_all('th')[-1].text)\n\n # citations\n citations = citation_indices.find_all('tr')[1]\n h_index = citation_indices.find_all('tr')[2]\n h_index_all = h_index.find_all('td')[1].text\n h_index_since = h_index.find_all('td')[2].text\n citation_all = citations.find_all('td')[1].text\n citation_since = citations.find_all('td')[2].text\n\n author_dict = {'user_id': user_id,\n 'name': name,\n 'affiliation': affiliation,\n 'interests': interests,\n 'email': email,\n 'h_index_all': h_index_all,\n 'h_index_since': h_index_since,\n 'citation_all': citation_all,\n 'citation_since': citation_since,\n 'year_since': year_since}\n return author_dict", "def get_author_info(self, author: str):\n for writer_word in self._writer_words:\n data = json.loads(requests.get(WIKIDATA_SEARCH + \"&srsearch=\" + author + \" \" + writer_word).text)\n pages = data.get(\"query\").get(\"search\")\n if len(pages) >= 1:\n pageid = pages[0].get(\"title\")\n author_details = self._reference.author_map.get(author)\n if author_details:\n return author_details\n if pageid == -1:\n continue\n\n else:\n response = requests.get(WIKIDATA_PARSE + pageid + \".json\")\n data = json.loads(response.text)\n if author.lower() not in data.get(\"entities\").get(pageid).get(\"labels\").get(\"en\").get(\"value\").lower():\n continue\n else:\n try:\n id = data.get(\"entities\").get(pageid).get(\"claims\").get(\"P31\")[0].get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n if str(id) != \"Q5\": # the id for human\n continue\n except IndexError:\n continue\n properties = data.get(\"entities\").get(pageid).get(\"claims\")\n author_details = {\"id\": pageid, \"gender\": self.get_gender(properties)}\n country_details = self.get_country(properties)\n author_details[\"country\"] = country_details\n self._reference.author_map[author] = author_details\n return author_details\n return {\"id\": \"Unknown\", \"gender\": \"Unknown\", \"country\": [{\"name\": \"Unknown\", \"region\": \"Unknown\"}]}", "def merge_to_elastic(paper_authors, papers, authors, index_name):\n columns = list(papers.columns) + ['authors']\n for index, paper in papers.iterrows():\n merger = paper_authors.loc[paper_authors['paper_id'] == index]\n author_ids = merger['author_id'].values\n author_names = [authors.loc[authors['id'] == x, 'name'].values[0] for x in author_ids]\n paper['authors'] = author_names\n yield {\n \"_index\": index_name,\n \"_type\": \"_doc\",\n \"_id\" : f\"{index}\",\n \"_source\": filterKeys(paper, columns),\n }", "def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)", "def load_federalist_corpus(filename):\n with open(filename, \"rt\") as f:\n data = f.read()\n papers = data.split(\"FEDERALIST\")\n\n # all start with \"To the people of the State of New York:\" (sometimes . 
instead of :)\n # all end with PUBLIUS (or no end at all)\n locations = [(i, [-1] + [m.end() + 1 for m in re.finditer(r\"of the State of New York\", p)],\n [-1] + [m.start() for m in re.finditer(r\"PUBLIUS\", p)]) for i, p in enumerate(papers)]\n papers_content = [papers[i][max(loc[1]):max(loc[2])] for i, loc in enumerate(locations)]\n\n # discard entries that are not actually a paper\n papers_content = [p for p in papers_content if len(p) > 0]\n\n # replace all whitespace with a single space\n papers_content = [re.sub(r\"\\s+\", \" \", p).lower() for p in papers_content]\n\n # add spaces before all punctuation, so they are separate tokens\n punctuation = set(re.findall(r\"[^\\w\\s]+\", \" \".join(papers_content))) - {\"-\", \"'\"}\n for c in punctuation:\n papers_content = [p.replace(c, \" \" + c + \" \") for p in papers_content]\n papers_content = [re.sub(r\"\\s+\", \" \", p).lower().strip() for p in papers_content]\n\n authors = [tuple(re.findall(\"MADISON|JAY|HAMILTON\", a)) for a in papers]\n authors = [a for a in authors if len(a) > 0]\n\n numbers = [re.search(r\"No\\. \\d+\", p).group(0) for p in papers if re.search(r\"No\\. \\d+\", p)]\n\n return papers_content, authors, numbers", "def parse(filename):\n\n tree = etree.parse(filename)\n root = tree.getroot()\n # according to the structure of the xml article meta nested under \n # front then article-meta\n articleMeta = root[0][1]\n # pubmed central article id\n pmcId = ''\n # the author list, the list of names excluding corresponding\n # athor\n otherAuthors = []\n # the name and email of the corresponding authors\n cAuthors = []\n # container for all the author groups\n authorGroups = []\n \n for child in articleMeta:\n # find the pmc id\n if ((child.tag == 'article-id') and not(isEmpty(child.attrib))):\n if (child.attrib['pub-id-type'] == 'pmc'):\n pmcId = child.text\n # find the author group\n elif (child.tag == 'contrib-group'):\n authorGroups.append(child)\n # this child may contain important corresponding information\n elif (child.tag == 'author-notes'):\n authorNotes = child\n # find the publication date\n elif (child.tag == 'history'):\n for theDate in child:\n if ('date-type' in theDate.attrib and theDate.attrib['date-type'] == 'accepted'):\n #publiction date YEAR MONTH DAY\n if (theDate.find('year') != None):\n theYear = theDate.find('year').text\n else:\n theYear = 0\t\n if (theDate.find('month') != None):\n theMonth = theDate.find('month').text\n else:\n theMonth = 6\n if (theDate.find('day') != None):\n theDay = theDate.find('day').text\n else:\n theDay = 1\n\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n elif (child.tag == 'pub-date'): \n if ('pub-type' in child.attrib and (child.attrib['pub-type'] == 'ppub' or child.attrib['pub-type'] == 'epub')):\n #for grandchild in child: print(grandchild.tag)\n \n if (child.find('year') != None):\n theYear = child.find('year').text\n else:\n theYear = 0\n \n if (child.find('month') != None):\n theMonth = child.find('month').text\n else:\n theMonth = 6\n \n if (child.find('day') != None):\n theDay = child.find('day').text\n else:\n theDay = 1\t\t\t\t\t\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n case1 = False # will be used for post-processing, corr author identified but no email\n for authorGroup in authorGroups:\n # parse author group information\n for child in authorGroup:\n if 
(child.tag == 'contrib' and child.attrib['contrib-type'] == 'author'):\n # the first child is the name tag\n try:\n name = child[0].find('given-names').text + ' ' + child[0].find('surname').text\n except:\n return((-1,))\n if ('corresp' in child.attrib): # and child.attrib['corresp'] == 'yes'):\n # if it a corresponding author\n # check to see if there is email field\n if (len(child) > 2 and child[1].find('email') != None):\n data = (name, child[1].find('email').text)\n cAuthors.append(data)\n #else post-process this case: case(1)\n else:\n data = (name, 'null')\n cAuthors.append(data)\n case1 = True\n else: \n # handle EMBO style xml \n xrefList = findInSubtree(child, 'xref')\n if (len(xrefList) > 0):\n for xref in xrefList:\n if ('ref-type' in xref.attrib and xref.attrib['ref-type'] == 'corresp'):\n # this is an corresponding author\n data = (name, '')\n cAuthors.append(data)\n case1 = True\n if (case1 == False):\n otherAuthors.append(name) \n else:\n # if not a corresponding author\n otherAuthors.append(name)\n\n # not done yet, some corresponding author information are embedded in author-notes\n if (case1 and 'authorNotes' in locals()):\n i = 0\n # corresponding author identified but no email found\n for child in authorNotes:\n if (child.tag == 'corresp'):\n for grandchild in child:\n if (grandchild.tag == 'email'):\n if (i == len(cAuthors)): break\t\n cAuthors[i] = (cAuthors[i][0], grandchild.text)\n i = i + 1\n elif ('authorNotes' in locals()):\n # the linking information is embedded entirely in the text\n text = etree.tostring(authorNotes).strip().decode('utf-8')\n emailElements = findInSubtree(authorNotes, 'email')\n for name in otherAuthors:\n j = 0\n if (text.find(name) != -1 and j < len(emailElements)):\n data = (name, emailElements[j].text)\n cAuthors.append(data)\n otherAuthors.remove(name)\n j = j + 1\n\n # sanity check here, reject anything that may corrupt the database\n if ('pmcId' in locals() and 'publicationDate' in locals()):\n try:\n print(pmcId, otherAuthors, cAuthors, publicationDate)\n except:\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n else:\n return((-1,))", "def parse_author(self, response):\n i = AuthorItem()\n i['name'] = response.xpath('//h3[@class=\"author-title\"]/text()').extract_first().strip()\n i['birth_date'] = response.xpath('//span[@class=\"author-born-date\"]/text()').extract_first()\n birth_location = response.xpath('//span[@class=\"author-born-location\"]/text()').extract_first()\n if birth_location:\n i['birth_location'] = birth_location.replace('in ', '')\n i['description'] = response.xpath('//div[@class=\"author-description\"]/text()').extract_first().strip()\n i['url'] = response.url\n return i" ]
[ "0.61678886", "0.57925886", "0.5683686", "0.5675467", "0.5641725", "0.5558396", "0.5553384", "0.5525774", "0.5525136", "0.54713714", "0.5426789", "0.53912705", "0.5386846", "0.53686625", "0.5347512", "0.53283745", "0.5324974", "0.5319414", "0.52988154", "0.5288916", "0.52756506", "0.5271806", "0.5265879", "0.52612233", "0.5256936", "0.52553666", "0.52271295", "0.521356", "0.520259", "0.5198428" ]
0.7115444
0
Get the paper with the given id from dblp.
def get_paper_by_id(paper_id):
    dblp_key = paper_id.replace("/", "_")
    if local.paper_exists(dblp_key):
        return dblp_key
    print("getting information from dblp about paper {}".format(paper_id))
    data = get("https://dblp.org/rec/" + paper_id + ".xml")["dblp"]
    return get_paper(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_publication_from_id_in_db(new_id: str) -> Union[None, Dict]:\n\n # db_publications = app.data.driver.db[\"publications\"]\n # try:\n # res = db_publications.find({\"id\": new_id}).limit(1).next()\n # except:\n # res = None\n # return res\n\n url = BaseConfig.DATAESR_PUBLICATIONS_URL\n url += '?where={{\"id\":\"{}\"}}'.format(new_id)\n r = requests.get(url)\n if r.status_code == 200:\n res = r.json()['data']\n else:\n res = []\n if len(res) > 1:\n print(\"ERROR more than one id - SHOULD NOT HAPPEN !!\")\n return res[0]\n elif len(res) == 1:\n return res[0]\n else:\n return None", "def getByID(self, pid):\r\n i = self.pids.index(pid)\r\n return self.getByInd(i)", "def get_book_by_id(id):\n return Book.query.filter_by(id=id).first()", "def find_by_id(id):\n query = \"SELECT * FROM parcels WHERE id=%s\"\n return db.find_one(query, (id,))", "def _paper_id(hit: DD) -> str:\n return hit[\"_source\"][\"paper_id\"]", "def get_proof_item(self, id):\n return self.prf.find_item(id)", "def get_appt_by_dog_id(id):\n appt = session.query(Dog_Appointment).get(id)", "def get_publication_html_from_id_in_db(new_id: str) -> Union[None, Dict]:\n\n # db = app.data.driver.db[\"notices_publications\"]\n # try:\n # res = db.find({\"id\": new_id}).limit(1).next()\n # except:\n # res = None\n # return res\n\n url = BaseConfig.DATAESR_NOTICES_PUBLICATIONS_URL\n url += '?where={{\"id\":\"{}\"}}'.format(new_id)\n r = requests.get(url)\n if r.status_code == 200:\n res = r.json()['data']\n else:\n res = []\n if len(res) > 1:\n print(\"ERROR more than one id - SHOULD NOT HAPPEN !!\")\n return res[0]\n elif len(res) == 1:\n return res[0]\n else:\n return None", "def get_study_by_pmid(cls, pmid):\n\n print \"Found study \", pmid\n study_obj = cls.query.filter(cls.pmid == pmid).first()\n return study_obj", "def get_doc_from_MP(mp_id):\n apr=mpr.get_doc(mp_id)\n return apr", "def get_book_by_id(self, id):\n\n try:\n cur = self._db.cursor()\n results = cur.execute('SELECT rowid, * FROM books WHERE rowid = ?', (id, ))\n book_row = results.fetchone()\n return self._row_to_book(book_row)\n except sqlite3.Error as e:\n raise BookError(f'Error getting book ID {id}') from e", "def by_id(cls, id):\n\t\treturn DBSession.query(Power).filter(Power.power_id == id).first()", "def process_paper(self, dblpkey, db):\n NS = {'tei': 'http://www.tei-c.org/ns/1.0'}\n try:\n xml=self.get_grobid_xml(dblpkey)\n result= grobid_mapping.tei_to_dict(xml)\n #\n #try:\n mongo_set_dict=dict()\n #print(\"results: {}\".format(result))\n if 'abstract' in result:\n mongo_set_dict[\"content.abstract\"]=result[\"abstract\"]\n if 'notes' in result:\n mongo_set_dict[\"content.notes\"] = result[\"notes\"]\n if 'fulltext' in result:\n mongo_set_dict[\"content.fulltext\"] = result[\"fulltext\"]\n with open(cfg.folder_content_xml + dblpkey + \".txt\", 'w') as f:\n # f.write(result[\"fulltext\"])\n print(result[\"fulltext\"])\n if 'chapters' in result:\n mongo_set_dict[\"content.chapters\"] = result[\"chapters\"]\n\n mongoResult= db.publications.update_one(\n {'_id': dblpkey},\n {'$set': result}\n )\n # print(mongoResult)\n\n logging.info(\"Processed \"+dblpkey)\n except:\n logging.exception('Cannot process paper ' +dblpkey, exc_info=True)\n\n # pprint.pprint(result)\n # for ref in result['references']:\n # print(ref)\n # print(etree.tostring(result['fulltext'], pretty_print=True))", "def get_autor_with_papers(author_id):\n print(\"getting information from dblp about author {}\".format(author_id))\n data = get(\"https://dblp.org/pid/\" + author_id + 
\".xml\")[\"dblpperson\"]\n author_id = author_id.replace(\"/\", \"_\")\n author = {\n \"name\": data[\"@name\"],\n \"papers\": [get_paper(paper_container) for paper_container in data[\"r\"]],\n }\n\n with open(local.file_name(\"authors\", author_id), \"w\") as out:\n json.dump(author, out, ensure_ascii=False, indent=4)", "def view_paper():\n return dict(paper_id=request.args(0),\n topic_id=request.args(1))", "def find_rent(self, id):\n allR=self.__loadFromFile()\n for bk in allR:\n if bk.getId()==id:\n return bk", "def get(self, _id):\n try:\n doc = self._db[_id]\n # For speed testing\n del self._db[_id]\n except KeyError:\n return None\n else:\n return self._parse_doc(doc)", "def get_product_by_id(pid: int) -> Optional[Product]:\n return get_market().get_product(pid)", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_policy_by_id(self, id):\n for service, policy_list in self.remote_store.get_policy_list().items():\n for policy in policy_list:\n if policy.id == id:\n return policy", "def get_record(self, id: uplink.Path):\n pass", "def hold_record_by_id(session, hold_id):\n d = session.get(api_url_base + '/patrons/holds/{}'.format(str(hold_id)))\n r = HoldRecord(api_data=json.loads(d.text))\n return r", "def get_book_by_id(self, book_id):\n query = \"SELECT * FROM library WHERE book_id=%s\"\n response = Database().fetch_one(query, book_id)\n return response", "def getGRReviewByID(id, printout=True): \n review_entry = session.query(reviews).get(id)\n if review_entry is None:\n request = requests.get('https://www.goodreads.com/review/show.xml?id='+ str(id) +'&key='+API_KEY['GOODREADS'])\n if request.status_code == 200:\n data = xmltodict.parse(request.text)['GoodreadsResponse']['review']\n \n review = {}\n review['id'] = int(data['id'])\n review['user'] = data['user']['display_name']\n review['rating'] = int(data['rating'])\n review['book'] = getGRBookByID(int(data['book']['id']['#text']))\n review['review'] = data['body']\n review['spoiler_flag'] = data['spoiler_flag']\n review['date_added'] = data['date_added']\n \n review_entry = reviews(**review)\n session.add(review_entry)\n session.commit()\n \n if(printout):\n print(review_entry)\n \n return review_entry", "def bib_record_by_id(session, record_id):\n d = session.get(api_url_base + '/bibs/{}'.format((record_id)),\n params=bib_record_fields)\n r = BibRecord(api_data=json.loads(d.text))\n return r", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_by_id(self, book_id):\n if not isinstance(book_id, str):\n book_id = str(book_id)\n\n cursor = self._dbcon.cursor()\n sql = u\"select rowid,* from books where rowid = %s\" % book_id\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return self._book_from_query_result(result[0])", "def get_paper(paper_container):\n paper_type = list(paper_container.keys())[0]\n dblp_paper = paper_container[paper_type]\n dblp_key = dblp_paper[\"@key\"].replace(\"/\", \"_\")\n paper = {\"type\": paper_type}\n for prop in props:\n # see if property exists and get the key\n key = prop.key\n if key not in dblp_paper:\n continue\n key_name = prop.key_name if prop.key_name else key\n\n # get the property value and save if non-empty\n prop_value = dblp_paper[key]\n if prop.parse:\n prop_value = prop.parse(prop_value)\n if prop_value and prop_value != \"\":\n paper[key_name] = prop_value\n\n # output paper\n with 
open(local.file_name(\"papers\", dblp_key), \"w\") as out:\n json.dump(paper, out, ensure_ascii=False, indent=4)\n\n return dblp_key", "def getBookById(self, id):\n cur = self.__execute__(\n '''select ID, NAME, AUTHOR from books where ID = %d;''' % id)\n return BookIter(cur)", "def get_data_by_id(data_id):\n return Data.get_by_id(data_id)" ]
[ "0.62456495", "0.60768956", "0.6007585", "0.5988748", "0.59750795", "0.5948963", "0.59419364", "0.5877572", "0.58755744", "0.5851668", "0.5780145", "0.57121706", "0.57081234", "0.56810683", "0.56097275", "0.5588593", "0.5541607", "0.5515542", "0.5512216", "0.5502963", "0.5487334", "0.5443299", "0.5425304", "0.5415228", "0.53959775", "0.53929365", "0.5371723", "0.5353683", "0.5328486", "0.5320919" ]
0.8748886
0
Get venue information based on the venue id.
def get_venue(venue_id):
    venue = venue_id.split("_")[1]
    # search for exact venue id
    print("getting information from dblp about venue {}".format(venue))
    data = get("https://dblp.org/search/venue/api?h=1000&q=" + venue + "$")
    data = data["result"]["hits"]
    if int(data["@total"]) == 0:
        data = []
    elif int(data["@total"]) == 1:
        data = [data["hit"]]
    elif int(data["@total"]) > 1:
        data = data["hit"]
    # verify venue
    matches = [
        x for x in data
        if x["info"]["url"] == "https://dblp.org/db/" + venue_id.replace("_", "/") + "/"
    ]
    if len(matches) > 0:
        match = matches[0]["info"]
        return {
            "name": match["venue"],
            "acronym": match["acronym"] if "acronym" in match else venue.upper(),
        }
    print("WARNING: venue not found (" + venue_id + ")")
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_venue(venue_id):\n data = get_venue_by_id(venue_id).venue_details\n return render_template('pages/show_venue.html', venue=data)", "async def lookup_ven(ven_name=None, ven_id=None):\n return {'ven_id': 'ven1234'}", "def get_venue_and_events(cls, venue_id, db_session):\n logger.debug(\"%s: venue_name = %s\", func_name(cls), venue_id)\n venue = db_session.query(Venue)\\\n .options(joinedload(Venue.events))\\\n .filter_by(id=venue_id).one()\n return venue", "def show_venue(venue_id):\n # shows the venue page with the given venue_id\n result = db.session.query(Venue).filter(Venue.id == venue_id)\n result = result[0]\n\n past_shows_count = 0\n upcoming_shows_count = 0\n\n past_shows = []\n upcoming_shows = []\n\n all_shows = Shows.query.all()\n\n print(all_shows)\n\n for show in all_shows:\n if show.venue_id == result.id:\n show_time = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n if show_time > datetime.now() :\n upcoming_shows.append(show)\n else: \n past_shows.append(show)\n \n past_shows_count = len(past_shows)\n upcoming_shows_count = len(upcoming_shows)\n \n\n # TODO: replace with real venue data from the venues table, using venue_id (DONE)\n resdata = {\n \"id\": result.id,\n \"name\": result.name,\n \"genres\": json.loads(result.genres),\n \"address\": result.address,\n \"city\": result.city,\n \"state\": result.state,\n \"phone\": result.phone,\n \"website\": result.website,\n \"facebook_link\": result.facebook_link,\n \"seeking_talent\": result.seeking_talent,\n \"seeking_description\": result.seeking_description,\n \"image_link\": result.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": past_shows_count,\n \"upcoming_shows_count\": upcoming_shows_count,\n }\n \n data = list(filter(lambda d: d[\"id\"] == venue_id, [resdata]))[0]\n return render_template(\"pages/show_venue.html\", venue=data)", "def venues(self):\n response = self._request(V2_ENDPOINTS['VENUES'])\n return response", "def getVenue(lat, lon, name, radius=300, addr=''):\n # Construct the client object\n client = foursquare.Foursquare(CLIENT_ID, CLIENT_SECRET, redirect_uri='http://fondu.com/oauth/authorize')\n\n # Return all venues within radius of lat,lon\n ll = str(lat) + \",\" + str(lon)\n radius = str(radius)\n venues = client.venues.search(params={'v': VERSION, 'll': ll, 'intent': 'browse', \n 'radius': radius, 'limit': 100 })[\"venues\"]\n # Returns a list of dictionaries, each is a \"compact venue\"\n print \"Returned\", len(venues) , \"venues within\", radius ,\"meters\"\n print venues[0]\n \n # pull out just venue name and its distance from lat, lon\n venue_deets = [(ven[\"name\"], ven[\"location\"][\"distance\"], ven[\"location\"][\"address\"]) for ven in venues]\n \n # sort by distance away\n venue_deets = sorted(venue_deets, key=lambda x: x[1])\n venue_names = [x[0] for x in venue_deets]\n venue_addr = [x[2] for x in venue_deets]\n print venue_names\n \n # grab the \"foursquare\" version of the name\n if name in venue_names:\n # name supplied exactly matches foursquare name\n fs_name = name\n else:\n # look for close matches to supplied name\n \n # defaults set: returns a max of 3 matches with minimum score of 0.6 in similarity\n fs_name = difflib.get_close_matches(name, venue_names, n=3, cutoff=0.5)\n print fs_name\n \n if len(fs_name)<1:\n # hopefully this doesn't happen!\n #raise ValueError(\"ERROR: venue not found\")\n # match on address instead\n add_name = difflib.get_close_matches(addr, venue_addr, n=3, cutoff=0.5)\n print 
add_name\n return -1\n elif len(fs_name)>1:\n # if more than one match returned take closest venue\n dists = [venue_deets[venue_names.index(n)][1] for n in fs_name]\n fs_name = fs_name[dists.index(min(dists))] # return closest\n else:\n fs_name = fs_name[0]\n \n \n # details of desired venue\n print \"Name given =\", name\n print \"Name in foursquare =\", fs_name\n print \"Distance from original lat, long =\", venue_deets[venue_names.index(fs_name)][1],\"meters\"\n desired_venue_id = [ven for ven in venues if ven[\"name\"]==fs_name][0][\"id\"]\n\n \n # Now get \"complete venue\" information, that has more details on venue properties\n venue_url = \"https://api.foursquare.com/v2/venues/\" + desired_venue_id\n venue_url += \"?client_id=\" + CLIENT_ID\n venue_url += \"&client_secret=\" + CLIENT_SECRET\n venue_url += \"&v=\" + VERSION\n venue_url += \"&m=foursquare\"\n\n complete_venue = json.load(urllib2.urlopen(venue_url))[\"response\"][\"venue\"]\n \n \n # fields that help grab pertinent information\n descriptors = ['phrases', 'categories', 'attributes', 'tags', 'tips']\n\n words = ''\n venue_type = []\n for desc in descriptors:\n if desc in complete_venue:\n field = complete_venue[desc] \n \n # scan over phrases field\n if desc=='phrases':\n for f in field:\n print \"printing from 'sample'\"\n if 'sample' in f:\n if 'text' in f['sample']:\n print f['sample']['text'], type(f['sample']['text'])\n words += f['sample']['text'] + ' '\n print \"printing from 'phrase'\"\n if 'phrase' in f:\n print f['phrase'], type(f['phrase'])\n words += f['phrase'] + ' '\n \n # scan over categories field\n if desc=='categories':\n for f in field:\n if 'name' in f:\n print f['name'], type(f['name'])\n words += f['name'] + ' '\n venue_type.append(f['name'])\n \n # scan over attributes field\n if desc=='attributes':\n if 'groups' in field:\n gr = field['groups']\n for f in gr:\n if 'name' in f:\n print f['name'], type(f['name'])\n words += f['name'] + ' '\n \n # scan over tags field\n if desc=='tags':\n for f in field:\n print f, type(f),\n words += f + ' '\n print ''\n \n \n # scan over tips field\n if desc=='tips':\n if 'groups' in field:\n gr = field['groups']\n for group in gr:\n if 'items' in group:\n for item in group['items']:\n if 'text' in item:\n print item['text'], type(item['text'])\n words += item['text'] + ' '\n print ''\n \n # scrape all words for things indicating beer, coffee, food, liquor, wine\n words = word_tokenize(words)\n words = [x.lower() for x in words]\n \n service_flag = [0,0,0,0,0]\n print sorted(SERVICES)\n for i, (service, rel_words) in enumerate(sorted(SERVICES.items())):\n print service\n cnt = 0\n for word in rel_words:\n print difflib.get_close_matches(word.lower(), words, n=5, cutoff=0.99)\n cnt += len(difflib.get_close_matches(word.lower(), words, n=5, cutoff=0.99))\n print cnt, \"\"\n if cnt>=1:\n service_flag[i] = 1\n print service_flag\n print \"\"\n \n print words\n hours_id = None\n if 'hours' in complete_venue:\n print complete_venue['hours'], '\\n'\n else:\n print \"No hours in venue information\\n\"\n print \"\"\n\n \n rating = None\n if 'rating' in complete_venue:\n print 'rating =', complete_venue['rating'], '\\n'\n rating = complete_venue['rating']\n print type(rating)\n else:\n print \"No rating in venue information\\n\"\n print \"\"\n \n nLikes = None\n if 'likes' in complete_venue:\n print 'likes =', complete_venue['likes']['count'], '\\n'\n nLikes = complete_venue['likes']['count']\n print type(nLikes)\n else:\n print \"No likes in venue information\\n\"\n \n 
print \"\"\n \n if (len(venue_type)<0):\n venue_type = None\n # phrases \n # List of phrases commonly seen in this venue's tips, as well as a sample tip snippet and the number of \n # tips this phrase appears in.\n \n # categories\n # An array, possibly empty, of categories that have been applied to this venue. One of the categories \n # will have a field primary indicating that it is the primary category for the venue. For the complete \n # set of categories, see venues/categories. \n \n # attributes\n # Attributes associated with the venue, such as price tier, whether the venue takes reservations, and \n # parking availability. \n \n # tags\n # An array of string tags applied to this venue.\n \n # rating\n # Numerical rating of the venue (0 through 10). Returned as part of an explore result, excluded in \n # search results. Not all venues will have a rating.\n \n # tips\n # Contains the total count of tips and groups with friends and others as groupTypes. Groups may change \n # over time. \n \n # reasons?\n \n # likes \n # The count of users who have liked this venue, and groups containing any friends and others \n # who have liked it. The groups included are subject to change. \n \n # hours\n # Contains the hours during the week that the venue is open along with any named hours segments in a \n # human-readable format. For machine readable hours see venues/hours", "def edit_venue(venue_id):\n\n result = db.session.query(Venue).filter(Venue.id == venue_id)\n result = result[0]\n venue = result\n form = VenueForm(obj=venue)\n \n # TODO: populate form with values from venue with ID <venue_id>\n return render_template(\"forms/edit_venue.html\", form=form, venue=venue)", "def edit_venue(venue_id):\n form = VenueForm()\n venue_to_be_edited = get_venue_by_id(venue_id)\n form.state.process_data(venue_to_be_edited.state)\n form.genres.process_data(venue_to_be_edited.genres)\n return render_template('forms/edit_venue.html', form=form, venue=venue_to_be_edited)", "def get_venues():\n papers = local.papers().values()\n venues = {paper[\"venue\"] for paper in papers if \"venue\" in paper}\n\n for venue_id in venues:\n file_name = local.file_name(\"venues\", venue_id)\n if os.path.isfile(file_name):\n continue\n venue = get_venue(venue_id)\n if not venue:\n continue\n with open(file_name, \"w\") as out:\n json.dump(venue, out, ensure_ascii=False, indent=4)", "def venues(self):\n response = self._request(V2_ENDPOINTS['VENUES'])\n # Normalize `dateHours` to array\n for venue in response[\"result_data\"][\"document\"][\"venue\"]:\n if venue.get(\"id\") in VENUE_NAMES:\n venue[\"name\"] = VENUE_NAMES[venue.get(\"id\")]\n if isinstance(venue.get(\"dateHours\"), dict):\n venue[\"dateHours\"] = [venue[\"dateHours\"]]\n if \"dateHours\" in venue:\n for dh in venue[\"dateHours\"]:\n if isinstance(dh.get(\"meal\"), dict):\n dh[\"meal\"] = [dh[\"meal\"]]\n return response", "def getVenueName(self, v_name):\n if v_name not in self.associations:\n #print \"Venue %s does not exist on the associations\" % v_name\n return None\n return self.associations[v_name]", "def venues():\n # find all venues on the basis of distinct city and states\n venues_by_locations = get_venues_by_distinct_locations()\n data = []\n if venues_by_locations:\n # prepare data to be displayed in the template\n data = [v.venue_location_serializer for v in venues_by_locations]\n for venue_data in data:\n venue_data['venues'] = get_venues_by_location(venue_data['city'], venue_data['state'])\n venue_data['venue_count'] = 
len(venue_data['venues'])\n return render_template('pages/venues.html', areas=data)", "def get_venue_by_name(cls, venue_name, db_session):\n logger.debug(\"%s: venue_name = %s\", func_name(cls), venue_name)\n venue = db_session.query(Venue)\\\n .filter_by(name=venue_name)\\\n .first()\n return venue", "def edit_venue(venue_id):\n venue = Venue.query.get_or_404(venue_id, description='There is no venue with id={}'.format(venue_id))\n form = VenueForm(obj=venue)\n\n return render_template('forms/edit_venue.html', form=form, venue=venue)", "def get_venue_of_current_lot() -> Venue:\n return CommonResourceUtils.load_instance(Types.VENUE, CommonLocationUtils.get_current_venue_type())", "def set_main_venue(full_volume_id, venue):\n if is_newstyle_id(full_volume_id):\n return\n\n collection_id, volume_id = full_volume_id.split(\"-\")\n root_node = get_xml(collection_id)\n\n volume_id = str(int(volume_id)) # get rid of leading 0s\n volume_xml = root_node.find(f\"./volume[@id='{volume_id}']\")\n if volume_xml is None:\n print(\"* Fatal: no\", volume_id, \"in\", volume_collection_id)\n sys.exit(1)\n meta_xml = volume_xml.find(\"./meta\")\n main_venue = meta_xml.find(\"./venue\")\n if main_venue is not None:\n main_venue.text = venue\n else:\n make_simple_element(\"venue\", venue, parent=meta_xml)", "def venue(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'venue.html',\n context_instance=RequestContext(request, {})\n )", "def get_venues():\n venues = queries.random_venues(10)\n venues = [venue_schema.dump(v).data for v in venues]\n result = {\n 'success': True,\n 'data': {\n 'venues': venues\n }\n }\n return jsonify(result)", "def search_venues():\n search_term = request.form.get('search_term', '')\n # search venue by venue name partial match\n venues_by_text = search_venue(search_term)\n # prepare data to shown in the template\n response = {\n 'count': len(venues_by_text),\n 'data': [v.short_serializer for v in venues_by_text]\n }\n return render_template('pages/search_venues.html', results=response,\n search_term=request.form.get('search_term', ''))", "def venues():\n\n # TODO: replace with real venues data. (DONE)\n # num_shows should be aggregated based on number of upcoming shows per venue.\n \n venues = Venue.query.group_by(Venue.id, Venue.city, Venue.state).all()\n data = []\n\n for venue in venues :\n data.append({\n \"city\": venue.city,\n \"state\":venue.state,\n \"venues\":[{\n \"id\": venue.id,\n \"name\": venue.name\n }]\n })\n\n return render_template(\"pages/venues.html\", areas=data)", "def infer_main_venue(volume):\n if is_newstyle_id(volume):\n return volume.split(\".\")[1]\n elif len(volume_to_venues_map[volume]):\n # if there are associations, find the \"lowest ranking\" one\n return sorted(volume_to_venues_map[volume], key=venue_size)[0]\n else:\n return venue_index.get_slug_by_letter(volume[0])", "def edit_venue_submission(venue_id):\n # parse POSTed form:\n venue_updated = convert_form_dict_to_dict(request.form)\n # parse venue name:\n venue_name = venue_updated[\"name\"]\n\n try:\n # read:\n venue = Venue.query.get_or_404(venue_id, description='There is no venue with id={}'.format(venue_id))\n # update:\n venue.from_json(venue_updated)\n db.session.add(venue)\n # write\n db.session.commit()\n # on successful db insert, flash success\n flash('Venue ' + venue_name + ' was successfully updated!')\n except:\n db.session.rollback()\n # on unsuccessful db insert, flash an error instead.\n flash('An error occurred. 
Venue ' + venue_name + ' could not be updated.')\n finally:\n db.session.close()\n\n return redirect(url_for('venue.show_venue', venue_id=venue_id))", "def get_event_by_id(event_id):\n db = get_db()\n return db.execute((\n 'SELECT id, name, start_time, end_time, location '\n 'FROM event WHERE id=?'),\n (event_id,)).fetchone()", "def search_venues():\n\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\n # seach for Hop should return \"The Musical Hop\". (DONE)\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\" (DONE)\n response = request.form.get('search_term', '')\n response = response.lower()\n\n venues = db.session.query(Venue).filter(Venue.name.ilike('%' + response + '%')).all()\n results = []\n \n for v in venues:\n print(v.name)\n results.append({\n 'id': v.id,\n 'name' : v.name\n })\n\n response={\n \"count\": len(results),\n \"data\": results\n }\n\n return render_template(\n \"pages/search_venues.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )", "def hours(self, venue_id):\n response = self._request(V2_ENDPOINTS['HOURS'] + venue_id)\n return response", "def get(self, eventId, uid):\n raise NotImplementedError", "def query_event_by_id():\n try:\n event_id = request.args['event_id']\n response = requests.put(app.config['EVENTS_ENDPOINT'] + event_id)\n if response.status_code == 200:\n return render_template(\n 'search_results.html',\n auth=is_organizer(get_user()),\n events=parse_events(response.json()),\n app_config=app.config\n )\n else:\n return 'Unable to retrieve events', 500\n except BadRequestKeyError as error:\n return f'Error: {error}.', 400", "def get_video_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/video.json', **kwargs)", "def get(self, invite_id):\n url = self._url + \"/{id}\".format(id=invite_id)\n params = {\n 'f' : 'json'\n }\n return self._gis._con.get(url, params)", "def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event = self.session.query(Event).filter_by(id=id).scalar()\n if not event:\n raise exc.NotFound(\"No such Event {} found\".format(id))\n\n json = event.to_dict(base_uri=self.href_prefix, expand=expand)\n\n self.success(json)" ]
[ "0.77026415", "0.7629306", "0.7103243", "0.65625393", "0.652582", "0.6364141", "0.63093", "0.6302329", "0.629436", "0.6290347", "0.62509865", "0.6245466", "0.6217084", "0.6169167", "0.6144405", "0.6015178", "0.5862008", "0.5855715", "0.5842037", "0.57189023", "0.56969595", "0.5663879", "0.5614901", "0.5591313", "0.5563309", "0.5521034", "0.5469075", "0.5448657", "0.5440041", "0.5438294" ]
0.7865581
0
Parse all venues that appear in papers in the local database.
def get_venues():
    papers = local.papers().values()
    venues = {paper["venue"] for paper in papers if "venue" in paper}

    for venue_id in venues:
        file_name = local.file_name("venues", venue_id)
        if os.path.isfile(file_name):
            continue
        venue = get_venue(venue_id)
        if not venue:
            continue
        with open(file_name, "w") as out:
            json.dump(venue, out, ensure_ascii=False, indent=4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_venues():\n print(\"Fixing publication field\")\n\n # Load our database of venue names and keywords\n with open(resource_filename('fixmendeley', 'venues.json'), 'r') as f:\n venues = json.load(f)\n\n for venue in venues:\n # Build filter pattern for keywords\n ordered_keywords = venue['keywords'][0] + venue['keywords'][1]\n reverse_keywords = venue['keywords'][1] + venue['keywords'][0]\n pattern_ordered_keywords = '%' + '%'.join(ordered_keywords) + '%'\n pattern_reverse_keywords = '%' + '%'.join(ordered_keywords) + '%'\n pattern_acronym = '*' + venue['acronym'] + '*'\n\n query = (\n Document\n .select(Document.publication)\n .where((Document.publication ** pattern_ordered_keywords) |\n (Document.publication ** pattern_reverse_keywords) |\n (Document.publication % ('*' + venue['acronym'] + '*')) |\n (Document.doi ** ('%' + venue['acronym'] + '%'))\n )\n )\n for entry in query:\n print(entry.publication)\n\n # For each conference/journal registered in our dictionary\n\n # Query for rows using ordered keywords\n\n # Set field with normalized value for all fields\n pass", "def search_venues():\n search_term = request.form.get('search_term', '')\n # search venue by venue name partial match\n venues_by_text = search_venue(search_term)\n # prepare data to shown in the template\n response = {\n 'count': len(venues_by_text),\n 'data': [v.short_serializer for v in venues_by_text]\n }\n return render_template('pages/search_venues.html', results=response,\n search_term=request.form.get('search_term', ''))", "def load_venues():\n\n print('load_venues')\n\n Venue.query.delete()\n\n for row in open(\"seed_data/venues.csv\"):\n row = row.rstrip()\n subcategory, \\\n created_by, \\\n title, \\\n addr_1, \\\n addr_2, \\\n city, \\\n postal_code, \\\n state = row.split(',')\n\n cat_sub = Category_Subcategory.query.filter_by(name=subcategory).first()\n\n vnu = Venue(subcategory_id=cat_sub.id,\n created_by=created_by,\n name=title,\n addr_1=addr_1,\n addr_2=addr_2,\n city=city,\n postal_code=postal_code,\n state=state)\n\n db.session.add(vnu)\n\n db.session.commit()", "def venues():\n # find all venues on the basis of distinct city and states\n venues_by_locations = get_venues_by_distinct_locations()\n data = []\n if venues_by_locations:\n # prepare data to be displayed in the template\n data = [v.venue_location_serializer for v in venues_by_locations]\n for venue_data in data:\n venue_data['venues'] = get_venues_by_location(venue_data['city'], venue_data['state'])\n venue_data['venue_count'] = len(venue_data['venues'])\n return render_template('pages/venues.html', areas=data)", "def search_venues():\n\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\n # seach for Hop should return \"The Musical Hop\". 
(DONE)\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\" (DONE)\n response = request.form.get('search_term', '')\n response = response.lower()\n\n venues = db.session.query(Venue).filter(Venue.name.ilike('%' + response + '%')).all()\n results = []\n \n for v in venues:\n print(v.name)\n results.append({\n 'id': v.id,\n 'name' : v.name\n })\n\n response={\n \"count\": len(results),\n \"data\": results\n }\n\n return render_template(\n \"pages/search_venues.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )", "def venues(self):\n response = self._request(V2_ENDPOINTS['VENUES'])\n # Normalize `dateHours` to array\n for venue in response[\"result_data\"][\"document\"][\"venue\"]:\n if venue.get(\"id\") in VENUE_NAMES:\n venue[\"name\"] = VENUE_NAMES[venue.get(\"id\")]\n if isinstance(venue.get(\"dateHours\"), dict):\n venue[\"dateHours\"] = [venue[\"dateHours\"]]\n if \"dateHours\" in venue:\n for dh in venue[\"dateHours\"]:\n if isinstance(dh.get(\"meal\"), dict):\n dh[\"meal\"] = [dh[\"meal\"]]\n return response", "def available_venues(filename,start,end,day):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #lists all venues\n venuelist = []\n for k in range(len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #finding all available venues\n #for all lines in the timetable\n for m in range(1,len(incsv)):\n #if the start time of the venue is in between the desired start time or the end time of the venue is in between the desired end time\n if ((int(incsv[m][0][5]) >= start and int(incsv[m][0][5]) < end) or (int(incsv[m][0][6]) > start and int(incsv[m][0][6]) <= end)) and int(incsv[m][0][3]) == 4:\n #if the venue is still in list of venues (filterlist)\n if incsv[m][0][7] in filterlist:\n #remove venue from filterlist\n filterlist.remove(incsv[m][0][7])\n #remove header \"venue\" from filterlist\n filterlist.remove(\"Venue\")\n return filterlist", "def venues(self):\n response = self._request(V2_ENDPOINTS['VENUES'])\n return response", "def happy():\n # Query all venues\n results = session.query(VP.name, VP.latitude, VP.longitude).all()\n \n # Create a dictionary from the row data and append to a list of all_venue\n all_venues = []\n for name, lat, lon in results:\n venue_dict = {}\n venue_dict[\"name\"] = name\n venue_dict[\"latitude\"] = lat\n venue_dict[\"longitude\"] = lon\n all_venues.append(venue_dict)\n \n return jsonify(all_venues)", "def parse_all(self):\n\n # Generates a list of apartment urls\n 
self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def venues():\n\n # TODO: replace with real venues data. (DONE)\n # num_shows should be aggregated based on number of upcoming shows per venue.\n \n venues = Venue.query.group_by(Venue.id, Venue.city, Venue.state).all()\n data = []\n\n for venue in venues :\n data.append({\n \"city\": venue.city,\n \"state\":venue.state,\n \"venues\":[{\n \"id\": venue.id,\n \"name\": venue.name\n }]\n })\n\n return render_template(\"pages/venues.html\", areas=data)", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def get_venues_by_author(cached_list, cached_set, author_name):\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n venues = {}\n\n if author['dblp'].__contains__('publications'):\n for pub in author['dblp']['publications']:\n if venues.__contains__(pub['venue']):\n venues[pub['venue']]['count'] += 1\n else:\n venues[pub['venue']] = { 'type': pub['venue-type'], 'count': 1 }\n\n if author['cdblp'].__contains__('publications'):\n for pub in author['cdblp']['publications']:\n if venues.__contains__(pub['venue']):\n venues[pub['venue']]['count'] += 1\n else:\n venues[pub['venue']] = { 'type': pub['venue-type'], 'count': 1 }\n\n return venues", "def get_venues():\n venues = queries.random_venues(10)\n venues = [venue_schema.dump(v).data for v in venues]\n result = {\n 'success': True,\n 'data': {\n 'venues': venues\n }\n }\n return jsonify(result)", "def total_venues(filename):\n #reading the file\n f = open(filename,\"r\")\n #incsv is a short form of 'input csv file'\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n #tempstr and templist are temporary variables to split the strings in incsv\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #final format of incsv: [[[moduleCode,ClassNo,LessonType,DayCode,DayText,StartTime,EndTime,Venue,AcadYear,Semester]],...]\n #yes each line is nested in two lists for some reason\n #lists all venues\n #venuelist stores all occurrences of the venues. 
venues can be repeated\n venuelist = []\n for k in range(len(incsv)):\n #append venue to venuelist\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n return (len(filterlist)-1)", "def search_venues_submission():\n # seach for Hop should return \"The Musical Hop\".\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n keyword = request.form.get('keyword', '')\n \n # data:\n shows_subq = Show.query.with_entities(\n Show.venue_id,\n func.count(Show.venue_id).label('num_upcoming_shows')\n ).filter(\n Show.start_time > datetime.utcnow()\n ).group_by(\n Show.venue_id\n ).subquery()\n\n venues_subq = Venue.query.with_entities(\n Venue.id,\n Venue.name\n ).filter(\n Venue.name.contains(keyword)\n ).subquery()\n\n data = db.session.query(\n venues_subq.c.id,\n venues_subq.c.name,\n shows_subq.c.num_upcoming_shows\n ).join(\n shows_subq, venues_subq.c.id == shows_subq.c.venue_id\n ).all()\n\n results={\n \"count\": len(data),\n \"data\": [\n {\n \"id\": id,\n \"name\": name,\n \"num_upcoming_shows\": num_upcoming_shows,\n } for (id, name, num_upcoming_shows) in data\n ]\n }\n\n return render_template(\n 'pages/search_venues.html', \n results=results, keyword=keyword\n )", "def venue_occupancy(filename):\n #open file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #all venues\n venuelist = []\n for k in range(1,len(incsv)):\n venuelist.append(incsv[k][0][7])\n #filter all unique venues by checking whether venue is already in filterlist\n filterlist = []\n #check is temporary variable to decide whether to add venue\n check = True\n #for all venues in venuelist\n for l in range(len(venuelist)):\n #if venue in venuelist already in filterlist\n if venuelist[l] in filterlist:\n #decision to add venue is false\n check = False\n #if decision is to add the venue in venuelist\n if check == True:\n #append new venue to filterlist\n filterlist.append(venuelist[l])\n #reset decision to true\n check = True\n #add hours to total count (time)\n time = 0\n #for all lines in file\n for m in range(1,len(incsv)):\n #if time of venue falls within office hours for weekdays\n if int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) <= 1700 and int(incsv[m][0][6]) <= 1700 and int(incsv[m][0][6]) >= 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add hour to total count\n time += (int(incsv[m][0][6]) - int(incsv[m][0][5]))\n #if start time falls before office hours but end time is within office hours\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 1700 and 
int(incsv[m][0][5]) > 800 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours before 800 and add remaining hours\n time += (int(incsv[m][0][6]) - 800)\n #if end time falls after office hours but start time is within office housr\n elif int(incsv[m][0][5]) >= 800 and int(incsv[m][0][5]) < 1700 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #ignore hours after 1700 and add remaining hours\n time += (1700 - int(incsv[m][0][5]))\n #if start time falls before 800 and end time falls after 1700\n elif int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) > 1700 and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #add the maximum of 9 hours\n time += 900\n #if time range falls outside of office hours\n elif ((int(incsv[m][0][5]) < 800 and int(incsv[m][0][6]) <= 800) or (int(incsv[m][0][5]) >= 1700 and int(incsv[m][0][6]) > 1700)) and int(incsv[m][0][3]) >= 1 and int(incsv[m][0][3]) <= 5:\n #total hours remain\n time = time\n #average (avr)\n avr = 0\n #average = total hours / (number of unique venues) * 45 hours\n avr = (time/(len(filterlist)*4500))\n return avr", "def _collect(self):\n while True:\n article_data = (yield)\n article_title_words = self._normalize_title(article_data.title)\n article_week_range = self._normalize_publication_datetime(\n article_data.publication_datetime\n )\n\n self._parser_result[article_week_range].update(article_title_words)", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = intersect_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def extract_events(url, slipID=None):\n html = req.get(url)\n assert html.status_code == 200\n soup = BeautifulSoup(html.content, 'lxml')\n #Gets all the race tables from the html\n tables = soup.find_all('table', {\"class\":\"wikitable plainrowheaders\"})\n\n #Stores the venue outside the loop, as not all lines have a venue.\n #If a line dont have a venue it means it uses the previous (saved) venue\n venue = \"N/A\"\n resultMen = []\n resultWomen = []\n doneMen = False\n\n #Extracts from the first two tables (mens and ladies races)\n for table in tables[:-1]:\n rows = table.find_all(\"tr\")\n for row in rows[1:]:\n cells = row.find_all([\"td\", \"th\"])\n text = [cell.get_text(strip=True) for cell in cells]\n \n \"\"\"Regex: to be matched in an array\n Date: in the format DMY\n Venue: Starts with a capital letter, continues with all other than capital letters until end of line\n Dicipline: Starts with two capital letters, then continues with lowercase letters or numbers until end of line\n \"\"\"\n regexDate = r\"(?:.*?)(\\d*\\d \\S* \\d*)\"\n regexVenue = r\"^[A-Z][^A-Z]+?.*$\"\n regexDicipline = r\"^[A-Z][A-Z][^A-Z]*$\"\n\n\n date = None\n dicipline = None\n \n #Matches the date, venue and dicipline from the extracted text\n for e in text:\n if re.match(regexDate, e):\n date = re.match(regexDate, e).group(1)\n if re.match(regexVenue, e):\n venue = e\n if re.match(regexDicipline, e):\n dicipline = e\n break\n\n #Adds men an women in seperate 
arrays to be displayes later\n if not doneMen and date:\n resultMen.append(date)\n resultMen.append(venue)\n resultMen.append(dicipline)\n resultMen.append(\"\")\n elif doneMen and date:\n resultWomen.append(date)\n resultWomen.append(venue)\n resultWomen.append(dicipline)\n resultWomen.append(\"\")\n doneMen = True\n\n #Puts all the data nicely formated into a markdown file\n if slipID:\n md = MdUtils(file_name=f'datetime_filter/betting_slip_empty_{slipID}', title='BETTING SLIP')\n else: md = MdUtils(file_name='datetime_filter/betting_slip_empty', title='BETTING SLIP')\n md.new_header(level=1, title='Name:')\n headings = [\"Date\", \"Venue\", \"Dicipline\", \"Who Wins\"]\n headings.reverse()\n for e in headings:\n resultMen.insert(0, e)\n resultWomen.insert(0, e)\n md.new_paragraph(\"MEN:\\n\")\n table = md.new_table(columns=4, rows=int(len(resultMen)/4), text=resultMen, text_align='center')\n md.new_paragraph(\"WOMEN:\\n\")\n table = md.new_table(columns=4, rows=int(len(resultMen)/4), text=resultMen, text_align='center')\n md.create_md_file()", "def extract_airports(filename, store):\n print filename\n f = open(filename, 'r')\n text = f.read()\n f.close()\n \n if store:\n ## Database connection, db, collection\n conn = pymongo.Connection()\n db=conn.flight_db\n ap = db.airports\n\n airport_list = []\n \n ## extract city,country,airport code\n #match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a></td>\\s+', text)\n match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a><span\\s*style=.*', text)\n if not match:\n print 'airport:rank not found...'\n exit(1)\n for tuples in match:\n if store:\n ap.insert({\n 'city':tuples[0],\n 'country':tuples[1],\n 'code':tuples[2]\n })\n airport_list.append(tuples[0] + ', ' + tuples[1] + ' - ' + tuples[2])\n if store:\n conn.disconnect()\n return airport_list", "def parse_posts(self):\n logger.info(\"Parsing posts\")\n\n self.df.title = self.df.title.str.strip()\n\n spam_companies = [\"Indeed Prime\"]\n self.df = self.df[~self.df[\"company\"].isin(spam_companies)]\n self.df = self.df.dropna(subset=[\"company\"])\n self.df = self.df.drop_duplicates(subset=[\"company\", \"date_posted\", \"title\"])", "def get_ranked_venues(X_name_and_features):\n # Pre-process query feature vectors\n X_names = []\n X = []\n for sample in X_name_and_features:\n X_names.append(sample[0])\n # X.append(sample[1]) # Actual one when list of surprise queries comes in\n X.append(sample[2]) # For my own testing purposes first.\n\n # Load the model and predict the log probabilities of every video sample in X\n model = joblib.load('model.pkl')\n predicted_probs = model.predict_proba(X)\n\n venue_classification = get_venue_classification(\"./venue-name.txt\")\n ranked_results = {}\n\n # Get the ranked list of venues\n ranked_venues = []\n for i, lst_of_probs in enumerate(list(predicted_probs)):\n ranked_indices = np.argsort(lst_of_probs)[::-1]\n ranked_venues.append([])\n for j in ranked_indices:\n ranked_venues[i].append(venue_classification[j])\n ranked_results[X_names[i]] = ranked_venues[i]\n\n # pickle dump results\n with open('audio_results.pickle', 'wb') as to_file:\n pickle.dump(ranked_results, to_file)", "def import_votes():\n c.execute(\"\"\"CREATE TABLE IF NOT EXISTS v\n (County TEXT, Election_Date TEXT, Precinct TEXT,\n Contest_Group_ID INTEGER, 
Contest_Type TEXT,\n Contest_Name TEXT, Choice TEXT, Choice_Party TEXT,\n Vote_For INTEGER,\tElection_Day INTEGER, One_Stop INTEGER,\n Absentee_by_Mail INTEGER, Provisional INTEGER,\n Total_Votes INTEGER);\n \"\"\")\n c.execute(\"CREATE INDEX cocop ON v(Contest_Name, County, Precinct)\")\n\n filename = 'resultsPCT20160315.txt'\n # filename = 'rwakepres.txt'\n with open(filename) as f:\n votes = f.readlines()\n\n for v in votes[1:]:\n c.execute('INSERT INTO v values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', v.rstrip().split('\\t', 13))", "def prepare_pubs(path_name):\n add_pubs = read_csv(path_name)\n for key, row in add_pubs.items():\n row['author_uris'] = set([])\n ids = row['ufid'].split(';')\n print \"ids=\", ids\n for id in ids:\n print \"Processing id=\", id\n if id[0] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:\n author_uri = find_vivo_uri('ufVivo:ufid', id)\n if author_uri is None:\n print >>exc_file, id, \"UFID not found in VIVO\"\n continue\n else:\n row['author_uris'].add(author_uri)\n elif id[0] == 'h':\n row['author_uris'].add(id)\n else:\n print >>exc_file, row['ufid'], \"Unknown identifier in UFID\"\n print id, row\n add_pubs[key] = row\n return add_pubs", "def __results(self, plist, vlist):\n\n plist = sorted(plist, key=lambda s: s['score'], reverse=True)\n vlist = sorted(vlist, key=lambda s: s['score'], reverse=True)\n\n if len(plist) == 0:\n for el in vlist:\n el['key'] = el['ven']['key']\n results = vlist\n elif len(vlist) == 0:\n for el in plist:\n el['key'] = el['pub']['key']\n results = plist\n else:\n results = tr(plist, vlist)\n\n # merge publications that have the same crossref\n same_venue = list()\n end_cycle = len(results)\n end_tot = 0\n for r in results:\n if end_tot >= end_cycle:\n break\n if len(r['pub']) and len(r['ven']):\n if len(same_venue):\n id = None\n f = False\n for i in range(len(same_venue)):\n if same_venue[i]['key'] == r['ven']['key']:\n f = True # found\n id = i # position\n break\n if not f:\n same_venue.append({'key': r['ven']['key'], 'index': results.index(r)})\n elif isinstance(results[id]['pub'], dict): # create a new element\n tmp = {'key': r['ven']['key'],\n 'score': r['pub']['o_score'] + results[same_venue[id]['index']]['score'],\n 'pub': [r['pub'],\n results[same_venue[id]['index']]['pub'], ], 'ven': r['ven'],\n 'alternative': [], }\n del results[id] # remove the id element and the actual element\n results.remove(r)\n results.append(tmp) # add the element created\n same_venue[id]['index'] = results.index(tmp) # update the index\n end_cycle -= 2 # due to the remotion of the 2 elements\n else:\n results[id]['pub'].append(r['pub'])\n results[id]['score'] += r['pub']['o_score']\n results.remove(r)\n end_cycle -= 1 # due to the remotion of the element\n else:\n same_venue.append({'key': r['ven']['key'], 'index': results.index(r)})\n\n end_tot += 1\n results = sorted(results, key=lambda s: s['score'], reverse=True)\n\n # find correlations\n if self.__output_level == 3:\n self.__find_correlations(results)\n else:\n self.__output = results\n\n cprint('RESULTS:', 'yellow', 'bold', 'url', start='\\n\\t', end='\\n\\n')\n count = 0\n for element in self.__output:\n if count == self.__result_limit:\n break\n q_print(element, count + 1, self.__output_level)\n count += 1\n\n self.__output = list()", "def _getall(cliargs=CliArg(), heap=HeapGate()):\n site = 'http://'+cliargs._site \n recomp = re.compile(r'<td class=\\'vg_table_row_[0-1].*?\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}.*?Total.*?Ping:.*?Logging\\spolicy.*?\\n')\n count = 0\n 
try:\n response = urllib2.urlopen(site)\n data = response.read()\n except:\n print(\" unable to establish network connection.\\\n \\n .....\\\n \\n check network connectivity and site status.\")\n sys.exit(1)\n \"check verbose -v requirement\"\n if cliargs._verbose:\n print(\" . . .\\n parsing...\\n . . .\") \n cliargs.__str__()\n print(' . . .')\n for x in re.findall(recomp, data):\n \"parse each vpn on site and add to heap\"\n count += _parse(x, cliargs, heap)\n \"check if further parsing needed\"\n if heap._countries:\n for x in heap._countries:\n print(x)\n sys.exit(0)\n \"check verbose -v requirement\"\n if cliargs._verbose:\n print(\" found {0} matching VPNs\\n . . .\".format(count))\n heap.__str__()\n _getbest(cliargs, heap)", "def search(self):\n\n if (self.latitude is None or self.longitude is None):\n raise Exception('Please specify both a latitude and longitude')\n\n if (self.access_token == '' or self.access_token is None):\n raise Exception('Please specify a valid access token')\n\n # Book-keeping\n id_limit = 50 # Only 50 per /?ids= call allowed by FB\n curr_time = int(round(time.time()))\n venues_count = 0\n events_count = 0\n\n # Initial places request info\n place_params = {\n 'type': 'place',\n 'q': self.query,\n 'center': str(self.latitude) + ',' + str(self.longitude),\n 'distance': self.distance,\n 'limit': 1000,\n 'fields': 'id',\n 'access_token': self.access_token\n }\n place_url = ('https://graph.facebook.com/' + self.version + '/search?' +\n urllib.urlencode(place_params))\n\n # Grab places and prepare to get events\n\n places_data = r.get(place_url).json()['data']\n venues_count = len(places_data)\n\n # Batch places based on FB id_limit\n ids = []\n temp_lst = []\n for place in places_data:\n temp_lst.append(place['id'])\n if len(temp_lst) >= id_limit:\n ids.append(temp_lst)\n temp_lst = []\n if len(ids) == 0:\n ids.append(temp_lst)\n\n # Inner function to convert a list of\n # ids to a request url for events\n def ids_to_url(id_lst):\n events_fields = [\n 'id',\n 'type',\n 'name',\n 'cover.fields(id,source)',\n 'picture.type(large)',\n 'description',\n 'start_time',\n 'end_time',\n 'category',\n 'attending_count',\n 'declined_count',\n 'maybe_count',\n 'noreply_count'\n ]\n\n fields = [\n 'id',\n 'name',\n 'about',\n 'emails',\n 'cover.fields(id,source)',\n 'picture.type(large)',\n 'location',\n 'events.fields(' + ','.join(events_fields) + ')'\n ]\n\n timing = ('.since(' + str(self.since) + ')' +\n ('' if self.until is None else '.until(' + str(self.until) + ')'))\n\n events_params = {\n 'ids': ','.join(id_lst),\n 'access_token': self.access_token,\n 'fields': ','.join(fields) + timing\n }\n\n events_url = ('https://graph.facebook.com/' + self.version + '/?' 
+\n urllib.urlencode(events_params))\n\n return r.get(events_url).json()\n\n # Event results\n results = [ids_to_url(id_lst) for id_lst in ids]\n\n # Inner function to convert a list of\n # of venue result events to a list of\n # well-formatted events\n def venue_to_events(venue):\n venue_events = []\n if 'events' in venue and len(venue['events']['data']) > 0:\n for event in venue['events']['data']:\n event_r = dict()\n event_r['id'] = event['id']\n event_r['name'] = event['name']\n event_r['type'] = event['type']\n event_r['cover_picture'] = event['cover']['source'] if 'cover' in event else None\n event_r['profile_picture'] = event['picture']['data']['url'] if 'picture' in event else None\n event_r['description'] = event['description'] if 'description' in event else None\n event_r['start_time'] = event['start_time'] if 'start_time' in event else None\n event_r['end_time'] = event['end_time'] if 'end_time' in event else None\n event_r['time_from_now'] = self.calculate_start_time_diff(curr_time, event['start_time'])\n event_r['category'] = event['category'] if 'category' in event else None\n event_r['distance'] = (self.haversine_distance([venue['location']['latitude'],\n venue['location']['longitude']],\n [self.latitude, self.longitude]) * 1000\n if 'location' in venue else None)\n\n event_r['stats'] = {\n 'attending': event['attending_count'],\n 'declined': event['declined_count'],\n 'maybe': event['maybe_count'],\n 'noreply': event['noreply_count']\n }\n\n event_r['venue'] = {\n 'id': venue['id'],\n 'name': venue['name'],\n 'about': venue['about'] if 'about' in venue else None,\n 'emails': venue['emails'] if 'emails' in venue else None,\n 'cover_picture': venue['cover']['source'] if 'cover' in venue else None,\n 'profile_picture': venue['picture']['data']['url'] if 'picture' in venue else None,\n 'location': venue['location'] if 'location' in venue else None\n }\n\n venue_events.append(event_r)\n return venue_events\n\n # Grab the events\n events = []\n for result in results:\n for venue_id in result.keys():\n events.extend(venue_to_events(result[venue_id]))\n events_count = len(events)\n\n # Sort if specified\n if self.sort is not None:\n events.sort(self.allowed_sorts[self.sort])\n\n # Return events w/metadata\n return {\n 'events': events,\n 'metadata': { 'venues': venues_count, 'events': events_count }\n }", "def parse_data(self, page: str, **kwargs) -> dict:\n postcode = kwargs.get(\"postcode\")\n paon = kwargs.get(\"paon\")\n\n if not postcode:\n raise ValueError(\"Must provide a postcode\")\n\n if not paon:\n raise ValueError(\"Must provide a house number\")\n\n search_url = f\"{self.base_url}/address/{postcode}\"\n\n requests.packages.urllib3.disable_warnings()\n s = requests.Session()\n response = s.get(search_url)\n response.raise_for_status()\n\n address_data = response.json()\n\n address_list = address_data[\"html\"]\n\n soup = BeautifulSoup(address_list, features=\"html.parser\")\n\n address_by_id = {}\n\n for li in soup.find_all(\"li\"):\n link = li.find_all(\"a\")[0]\n address_id = link.attrs[\"href\"]\n address = link.text\n\n address_by_id[address_id] = address\n\n addresses = list(address_by_id.values())\n\n common = difflib.SequenceMatcher(\n a=addresses[0], b=addresses[1]\n ).find_longest_match()\n extra_bit = addresses[0][common.a: common.a + common.size]\n\n ids_by_paon = {\n a.replace(extra_bit, \"\"): a_id.replace(\"/view/\", \"\").replace(\"/\", \"\")\n for a_id, a in address_by_id.items()\n }\n\n property_id = ids_by_paon.get(paon)\n if not property_id:\n raise 
ValueError(\n f\"Invalid house number, valid values are {', '.join(ids_by_paon.keys())}\"\n )\n\n today = date.today()\n calendar_url = (\n f\"{self.base_url}/calendar/{property_id}/{today.strftime('%Y-%m-%d')}\"\n )\n response = s.get(calendar_url)\n response.raise_for_status()\n calendar_data = response.json()\n next_collections = calendar_data[\"nextCollections\"]\n\n collections = list(next_collections[\"collections\"].values())\n\n data = {\"bins\": []}\n\n for collection in collections:\n collection_date = datetime.strptime(collection[\"date\"], \"%Y-%m-%d\")\n bins = [c[\"name\"] for c in collection[\"collections\"].values()]\n\n for bin in bins:\n data[\"bins\"].append(\n {\n \"type\": bin,\n \"collectionDate\": collection_date.strftime(date_format),\n }\n )\n return data" ]
[ "0.6436121", "0.54411566", "0.5419386", "0.5414439", "0.5358596", "0.53317887", "0.52881116", "0.52856827", "0.51955736", "0.5189618", "0.51495665", "0.5109944", "0.51090825", "0.5100515", "0.5097937", "0.5094935", "0.49625772", "0.48519638", "0.4790151", "0.4780734", "0.47637388", "0.4757075", "0.47152728", "0.4715163", "0.46771288", "0.46438572", "0.46409416", "0.46392724", "0.46388128", "0.46094102" ]
0.6916864
0
Writes in os.getenv('HOME') + os.sep + '/powersdata/simplots.xml' These are parameters that are specific to the type of lines, etc.
def xml_saveLineParameters(dparms,basename=None): fname = '' if os.name =='nt': try: os.mkdir('D:/powersdata') except: pass if basename == None: fname = 'd:/powersdata/simplots.parms' else: fname = basename else: if basename == None: fname = os.getenv('HOME') + '/powersdata/simplots.parms' else: fname = basename # Now try to write to it. try: nfd = open(fname, 'w') except: print " I cannot save %s the defaults file" % fname return nfd.write('<?xml version="1.0" encoding="iso-8859-1" standalone="yes" ?>\n') nfd.write('<SIMPLOTS>\n') nfd.write('<PLOTPARMS>\n') skeys = dparms.keys() for k in skeys: if k == 'LINEPARAMETERS': continue nfd.write('<%s value="%s" />\n' % (k,dparms[k])) nfd.write('</PLOTPARMS>\n') lineParameters = dparms['LINEPARAMETERS'] nfd.write('<LINEPARMS count="%s">\n' % len(lineParameters)) for xln in lineParameters: x0 = tuple(xln) xstr ='<LINEPARM index="%s" linestyle="%s" linecolor="%s" linewidth="%s" marker="%s" markercolor="%s" markersize="%d"/>\n' % x0 nfd.write(xstr) nfd.write('</LINEPARMS>\n') nfd.write('</SIMPLOTS>\n') nfd.close() ############################ Handlers for buttons ###################################
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_datafiles(shell,params_info):\n\n parameters_text_items = []\n for key,value in params_info.items():\n shell.write_file(value['path'], value['text'])\n parameters_text_items.append(\"%s:%s\" % (value['type'],value['path']))\n\n # generate the parameters file to feed into the url\n parameters_text = '\\n'.join(parameters_text_items)\n\n return parameters_text", "def writeSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.settings.setValue(vol,self.param.vol[i-1])\n info = f\"info{i}\"\n self.settings.setValue(info,self.param.info[i-1])\n ip = f\"ip{i}\"\n self.settings.setValue(ip,self.param.ip[i-1])\n muted = f\"muted{i}\"\n self.settings.setValue(muted,self.param.muted[i-1])", "def xml_readLineParameters(basename=None):\n\tfname = ''\n\tif os.name =='nt': \n\t\ttry:\n\t\t\tos.mkdir('D:/powersdata')\n\t\texcept:\n\t\t\tpass\n\t\tif basename == None: \n\t\t\tfname = 'd:/powersdata/simplots.xml'\n\t\telse:\n\t\t\tfname = basename \n\telse: \n\t\tif basename == None: \n\t\t\tfname = os.getenv('HOME') + '/powersdata/simplots.xml'\n\t\telse:\n\t\t\tfname = basename \n\t\t\n\ttry:\n\t\tnfd = open(fname,'r')\n\texcept:\n\t\treturn None\n\ttry:\n\t\tdom = minidom.parse(fname)\n\texcept:\n\t\treturn None\n\n\tlineTypes = LINETYPES \n\tcolors = COLORTYPES\n\tmarkerTypes = MARKERTYPES\n\t\t\n\tdparms = {} \n\tlineParameters = []\n\tself.maxLines = 12\n\tfor i in range(self.maxLines):\n\t\tj = i % len(self.markerTypes) \n\t\tk = i % len(self.colors)\n\t\t# index, width, colors,lnw, marker, markerColor, markerSize \n\t\tlineParameters.append([i+1,'-', colors[k],1, 'o', colors[k],1])\n\t\n\tdparms['LINEPARAMETERS'] = lineParameters \n\tnodes = dom.getElementsByTagName('PLOTPARMS') # All the notes are read here.\n\tfor nd in nodes: \n\t\tfor chld in nd.childNodes:\n\t\t\tdparms[chld.nodeName] = str(nd.getAttribute('value'))\n\tnodes = dom.getElementsByTagName('LINEPARMS') # All the notes are read here.\n\tfor nd in nodes: \n\t\tfor chld in nd.childNodes:\n\t\t\tk = 0\n\t\t\tif chld.nodeName == 'LINEPARM': \n\t\t\t\tls = chld.getAttribute('linestyle')\n\t\t\t\tid = int(chld.getAttribute('index'))\n\t\t\t\tlc = chld.getAttribute('linecolor')\n\t\t\t\tlw = int(chld.getAttribute('linewidth'))\n\t\t\t\tmk = chld.getAttribute('marker')\n\t\t\t\tmc = chld.getAttribute('markercolor')\n\t\t\t\tms = int(chld.getAttribute('markersize'))\n\t\t\t\tlineParameters[k] = [id,ls,lc,lw,mk,mc,ms]\n\t\t\t\tk = k + 1\n\treturn dparms", "def Write_XML(gui): \n # lock buttons\n gui.action_lock('Lock', gui.save_button)\n \n # clear output\n gui.output_clear() \n \n # get the desired delay from the gui.\n delay_time = gui.get_delay()\n \n \n # get the desired ascii delay from the gui.\n ascii_time = gui.get_ascii_delay()\n \n \n # get the desired I2C address from the gui.\n addr = \"0x%X\" % gui.get_i2c_address()\n \n \n # get the list of commands from the gui\n command_list = gui.get_command_list()\n \n # wrap up the writing directives\n directives = pySCPI_config.write_directives(command_list, addr,\n delay_time, ascii_time)\n \n # create the xml file\n filename = create_XML(directives, gui)\n \n # update the filename display window to show the filename saved\n gui.update_filename(filename = filename) \n \n # unlock the buttons\n gui.action_lock('Unlock')", "def pwrite(self):\n shell = os.getenv('SHELL')\n if shell == None: # assume bash or ksh\n shell = 'bash'\n else:\n shell = os.path.basename(shell)\n\n fname = '/tmp/source_' + os.environ['USER'] # get login id of current user\n try:\n 
fid = open(fname, 'w')\n except:\n print(\"ERROR. Could not open \", fname, \" for writing! Exiting...\")\n exit(1)\n\n if self.val == None:\n self.val = \"\"\n\n if 'csh' in shell:\n wstr = \"setenv \" + self.name + \" \" + self.val\n else:\n wstr = \"export \" + self.name + \"=\" + self.val\n\n fid.write(wstr)\n fid.close()\n print(\"Source \", fname, \" for new path to take effect\")", "def recordStatGraph(g, path):\n g.write(path, xml_declaration=True, encoding='utf-8', method='xml')", "def write_data_line(self, pm_25, pm_10, gps_data):\n self.file.write(str(int(time.time()))) # Unix Time)\n self.file.write(';' + datetime.now().strftime(\"%d.%m.%y %H:%M:%S\")) # Human Readable Time\n self.file.write(';' + str(pm_25)) # pm 2.5 \n self.file.write(';' + str(pm_10)) # pm 10 \n self.file.write(';' + str(gps_data['fix'])) # has fix \n self.file.write(';' + str(gps_data['lon'])) # longitude \n self.file.write(';' + str(gps_data['lat'])) # latitude \n self.file.write(';' + str(gps_data['alt'])) # altitude \n self.file.write(';' + str(gps_data['time'])) # gps unix time \n self.file.write('\\n')\n self.file.flush()", "def add_output_metadata(root):\n jss_connection = JSSConnection.get()\n report_date = ET.SubElement(root, \"ReportDate\")\n report_date.text = datetime.datetime.strftime(datetime.datetime.now(),\n \"%Y%m%d-%H%M%S\")\n report_server = ET.SubElement(root, \"Server\")\n report_server.text = jss_connection.base_url\n api_user = ET.SubElement(root, \"APIUser\")\n api_user.text = jss_connection.user\n report_user = ET.SubElement(root, \"LocalUser\")\n report_user.text = os.getenv(\"USER\")\n spruce_version = ET.SubElement(root, \"SpruceVersion\")\n spruce_version.text = __version__\n python_jss_version = ET.SubElement(root, \"python-jssVersion\")\n python_jss_version.text = jss.__version__\n ET.SubElement(root, \"Removals\")", "def sitesXML(beachdata, outdir='.'):\n\n with open(outdir+'/surf_sites.xml','w') as outp:\n outp.write('<markers>\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('<marker lat=\"%6.3f' %beachdata['lat'][isite] + \\\n '\" lng=\"%6.3f' %beachdata['lon'][isite] + \\\n '\" name=\"' + beachdata['name'][isite].replace(' ','_').replace('/','-') + '\"/>\\r\\n')\n outp.write('</markers>\\r\\n')\n outp.close()", "def write_plot(self):\n with open(self._graph_data_path, \"w+\") as f:\n run_time = self.start_time\n f.write(\"Time, Temperature\\n\")\n temperature = 0\n for step in self.profile[\"steps\"]:\n keys = list(step)\n if len(keys) > 0:\n if keys[0] == \"start\":\n temperature = step[\"start\"]\n if keys[0] == \"rest\":\n run_time += timedelta(minutes = step[\"rest\"])\n if keys[0] == \"ramp\":\n run_time += timedelta(minutes = step[\"ramp\"])\n temperature = step[\"to\"]\n if keys[0] == \"mashout\":\n temperature = step[\"mashout\"]\n time = run_time.strftime(\"%H:%M:%S, \")\n f.write(time + str(temperature) + \"\\n\")\n run_time += timedelta(minutes = 10)\n if keys[0] == \"jump\":\n temperature = step[\"jump\"]\n\n time = run_time.strftime(\"%H:%M:%S, \")\n f.write(time + str(temperature) + \"\\n\")\n else:\n logger.error(\"Can't make sense of \" + str(step))", "def create_settings_file():\n with open('./cfg/settings.cfg'.replace(\"/\", os.path.sep), 'w') as cfg:\n cfg.write('[report]\\nlogo = ./cfg/logo.png\\ncompany =\\nrecord =\\nunit =\\nexaminer =\\nnotes =\\n\\n[auth]\\ngmail = [email protected]\\npassw = yourpassword\\ndevid = 1234567887654321\\ncelnumbr = BackupPhoneNunmber\\n\\n[app]\\npkg = com.whatsapp\\nsig = 
38a0f7d505fe18fec64fbf343ecaaaf310dbd799\\n\\n[client]\\npkg = com.google.android.gms\\nsig = 38918a453d07199354f8b19af05ec6562ced5788\\nver = 9877000'.replace(\"/\", os.path.sep))", "def generate_host_system_data(host):\n data = render_to_string(TEMPLATE_WDF, {\"host\": host})\n open(os.path.join(PLUGINSPACE_WDF_DIR, FILENAME_WDF), \"w+\").write(data)\n return", "def _create_pysam_wfile(self, resource, meta):\n # pylint: disable=attribute-defined-outside-init, consider-using-with\n self._temp_dir = TemporaryDirectory()\n fname = os.path.join(self._temp_dir.name, 'weather.csv')\n logger.debug('Creating PySAM weather data file: {}'.format(fname))\n\n # ------- Process metadata\n m = pd.DataFrame(meta).T\n m = m.rename({\"latitude\": \"Latitude\", \"longitude\": \"Longitude\",\n \"timezone\": \"Time Zone\"}, axis=1)\n\n m[[\"Latitude\", \"Longitude\", \"Time Zone\"]].to_csv(fname, index=False,\n mode='w')\n\n # --------- Process data, blank for geothermal\n time_index = resource.index\n mask = (time_index.month == 2) & (time_index.day == 29)\n time_index = time_index[~mask]\n\n df = pd.DataFrame(index=time_index)\n df['Year'] = time_index.year\n df['Month'] = time_index.month\n df['Day'] = time_index.day\n df['Hour'] = time_index.hour\n df['Minute'] = time_index.minute\n df.to_csv(fname, index=False, mode='a')\n\n return fname", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def mk_vars_file(work_dir, server_cfg, provider_name):\n f = open(work_dir + '/vars', 'w')\n f.write('# generated by pentaho_cloud')\n if (server_cfg.ssl):\n f.write('\\nssl=1')\n else:\n f.write('\\nssl=0')\n if server_cfg.passwords:\n for i, p in enumerate(server_cfg.passwords):\n f.write(\"\\npasswords[%d]='%s'\" % (i, p))\n packages = vers[server_cfg.version]\n for k in packages.keys():\n f.write(\"\\n%s='%s'\" % (k, packages[k]))\n f.write(\"\\nprovider='%s'\" % provider_name)\n f.close()\n return f.name", "def save(sans, describer, minParams, minPars, stats, location, fitInfo, description):\n\n while path.exists(location) == False:\n print('error: file path does not exist. 
Please input a valid file path')\n location = input('file path: ')\n\n # for idx, char in enumerate(sans.expData.shear[0]):\n # if char != ' ':\n # continue\n # else:\n # shearIdx = idx\n # break\n\n # Build name for modelled scattering data\n # shear = sans.expData.shear[0][0:shearIdx]\n shear = sans.expData.shear[0]\n\n name = sans.expData.sample[0] + '_' + shear + 'ps'\n post1 = '_sim'\n type1 = '.dat'\n\n saveName1 = name + post1 + describer + '_'\n # versionNum1 = input(\"Input a version number: \" )\n versionNum1 = description\n\n # Write modelled scattering data to 3 column dat file\n write_3_column(location + saveName1 + versionNum1 + type1, sans)\n\n # Build name for modelled scattering data statistics\n post2 = '_simInfo'\n type2 = '.txt'\n\n saveName2 = name + post2 + describer + '_'\n\n output = []\n\n # Build output file\n output.append('qmin = ' + str(sans.qmin))\n output.append('ftol = ' + str(fitInfo[0]))\n output.append('method = ' + str(fitInfo[1]))\n output.append(' ')\n\n for key, val in minParams.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n output.append(' ')\n\n output.append(' static parameters ')\n for key, val in sans.staticPars.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n\n output.append(' ')\n\n output.append('Fitting_performed_over_the_following_parameters:')\n for key in minPars.keys():\n output.append(str(key))\n\n output.append('Returned_the_following_goodness_of_fit_measures:')\n output = output + stats\n output.append(str(datetime.datetime.now()))\n\n # Write output to txt file\n with open(location + saveName2 + versionNum1 + type2, 'w') as file:\n for lines in output:\n file.write(lines)\n file.write(\"\\n\")\n\n print('file was saved with filename: ' + saveName1 + versionNum1 + type1)\n return", "def write_to_file_x(path):\n path1 = path + \"/x_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x%sy0z0ke%s.mac\" %(dx*x + x_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0\\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x%sy0z0ke%s.root\"\\n' %(dx*x + x_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set %s 0 0\\n\" % (dx*x + x_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n 
f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def write_data():", "def writeSTAR(filename, starDict, **kwargs):\n prog=kwargs.get('prog', 'XMIPP')\n\n star = open(filename, 'w')\n\n for dataBlockKey in starDict:\n star.write('\\ndata_' + dataBlockKey + '\\n')\n for loopNumber in starDict[dataBlockKey]:\n star.write('\\nloop_\\n')\n for fieldNumber in starDict[dataBlockKey][loopNumber]['fields']:\n if prog == 'XMIPP':\n star.write(' ')\n star.write(starDict[dataBlockKey][loopNumber]['fields'][fieldNumber] + '\\n')\n for dataItemNumber in starDict[dataBlockKey][loopNumber]['data']:\n if prog == 'XMIPP':\n star.write('\\t')\n for fieldNumber in starDict[dataBlockKey][loopNumber]['fields']:\n currentField = starDict[dataBlockKey][loopNumber]['fields'][fieldNumber]\n star.write(starDict[dataBlockKey][loopNumber]['data'][dataItemNumber][currentField] + '\\t')\n star.write('\\n')\n\n star.close()\n return", "def publish_data(username):\n x1 = []\n x2 = []\n y1 = []\n y2 = []\n\n for point_set in __data:\n x1.append(point_set[0][0])\n y1.append(point_set[0][1])\n\n x2.append(point_set[1][0])\n y2.append(point_set[1][1])\n\n figure = plt.figure()\n plt.plot(x1, y1, label='Atrium')\n plt.plot(x2, y2, label='Ventrical')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (V)')\n plt.title(\"'{0}' Live Egram Data\".format(username))\n plt.legend()\n\n timestamp = datetime.datetime.now().strftime(Config.getInstance().get('Database', 'db.timestamp')).replace(' ', '_').replace('/', '-').replace(':', '-')\n graph_doc_name = \"{0}_Live_Egram_Data_From_{1}.pdf\".format(username, timestamp)\n pp = PdfPages(os.path.join(parentfolder, 'downloads', graph_doc_name))\n pp.savefig(figure)\n pp.close()\n\n csv_output = list(zip(x1, y1, x2, y2))\n\n csv_doc_name = \"{0}_Live_Egram_Data_From_{1}.csv\".format(username, timestamp)\n with open(os.path.join(parentfolder, 'downloads', csv_doc_name), 'w') as file:\n writer = csv.writer(file)\n writer.writerow(['Atrium Timestamp', 'Atrium Value', 'Ventrical Timestamp', 'Ventrical Value'])\n for line in csv_output:\n writer.writerow(line)", "def create_conf_xml(self):\n path = os.path.join(\n self.buildout['buildout']['parts-directory'],\n self.name)\n if not os.path.isdir(path):\n os.makedirs(path)\n\n xml_path = os.path.join(path, 'uwsgi.xml')\n\n conf = \"\"\n for key, value in self.conf.items():\n if value.lower() in ('true', 'on', 'yes'):\n conf += \"<%s/>\\n\" % key\n elif value and value.lower() not in ('false', 'off', 'yes'):\n conf += \"<%s>%s</%s>\\n\" % (key, value, key)\n\n\n requirements, ws = self.egg.working_set()\n eggs_paths = [dist.location for dist in ws]\n eggs_paths.extend(self.get_extra_paths())\n # order preserving unique\n unique_egg_paths = []\n for p in eggs_paths:\n if p not in unique_egg_paths:\n unique_egg_paths.append(p)\n\n for path in map(realpath, unique_egg_paths):\n conf += \"<pythonpath>%s</pythonpath>\\n\" % path\n\n f = open(xml_path, 'w')\n f.write(\"<uwsgi>\\n%s</uwsgi>\" % conf)\n f.close()\n return xml_path", "def setup_save_point(self):\n\n # figure out the rel path we should save down\n n = datetime.datetime.now()\n r_path = os.sep.join([n.year,n.month,n.day, self.stream_id,\n n.hour,n.minute])\n\n # get our full path\n save_root = self.server.config.get('stream_save_root')\n out_path = os.path.join(save_root,r_path)\n\n\n # keep it around\n self.save_path = out_path", "def main():\n year = time.strftime(\"%Y\")\n month = time.strftime(\"%m\")\n today = time.strftime(\"%Y%m%d\")\n homedir = \"/home/\" 
+ user + \"/raspi-sump/\"\n webchart.create_folders(year, month, homedir)\n webchart.create_chart(homedir)\n webchart.copy_chart(year, month, today, homedir)", "def write_manifest(self):\n import time\n import sys\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n hout.write(' '.join(sys.argv) + '\\n')\n for k, v in self.table.items():\n hout.write(';'.join([k] + v) + '\\n')", "def setup(self, force=False):\n if not os.path.exists(self.expdir):\n logging.info(\"create directory: \"+self.expdir)\n os.makedirs(self.expdir)\n\n pfile = join(self.expdir, XPARAM)\n if os.path.exists(pfile) and not force:\n raise RuntimeError(repr(pfile)+\" param file already exists\")\n self.params.write(join(self.expdir, XPARAM))", "def XMLWrite(one, two, three, four, five, six, seven, eight):\n filePath = \"/mnt/RAM/kanban.xml\"\n xmlFile = open(filePath, 'w')\n\n xmlFile.write('<kanbanShelf>\\n')\n xmlFile.write(' <one>%s</one>\\n' % one)\n xmlFile.write(' <two>%s</two>\\n' % two)\n xmlFile.write(' <three>%s</three>\\n' % three)\n xmlFile.write(' <four>%s</four>\\n' % four)\n xmlFile.write(' <five>%s</five>\\n' % five)\n xmlFile.write(' <six>%s</six>\\n' % six)\n xmlFile.write(' <seven>%s</seven>\\n' % seven)\n xmlFile.write(' <eight>%s</eight>\\n' % eight)\n xmlFile.write('</kanbanShelf>')", "def output(self,file):\n peep=len(self.findProID())\n f=open(file,'w')\n f.writelines(\" Apache Point Observatory\\n\"\\\n \" 3.5m Telescope Night Log\\n\")\n f.writelines(\" \"+self.link.GetLabel()+'\\n')\n #f.writelines('\\n'+self.userHeader.GetLabel()+'\\n')\n f.writelines(\"\\n ACTUAL\\n\"\\\n \" ASTRONOMER OBSERVER(S) INSTRUMENT START FINISH\\n\"\\\n \"--------------------------------------------------------------------\\n\")\n f.writelines('%s%s%s%s%s\\n' % (self.usastr0.GetValue().ljust(18),self.usobs0.GetValue().ljust(22),self.usinst0.GetValue().ljust(15),self.usstart0.GetValue().ljust(8), self.usend0.GetValue().ljust(8)))\n if oneVar==1:\n f.writelines('%s%s%s%s%s\\n' % (self.usastr0b.GetValue().ljust(18),self.usobs0b.GetValue().ljust(22),self.usinst0b.GetValue().ljust(15),self.usstart0b.GetValue().ljust(8), self.usend0b.GetValue()))\n f.writelines('%s%s%s%s%s\\n' % (self.usastr1.GetValue().ljust(18), self.usobs1.GetValue().ljust(22),self.usinst1.GetValue().ljust(15),self.usstart1.GetValue().ljust(8), self.usend1.GetValue()))\n if twoVar==1:\n f.writelines('%s%s%s%s%s\\n' % (self.usastr1b.GetValue().ljust(18),self.usobs1b.GetValue().ljust(22),self.usinst1b.GetValue().ljust(15),self.usstart1b.GetValue().ljust(8), self.usend1b.GetValue()))\n if peep > 2:\n f.writelines('%s%s%s%s%s\\n' % (self.usastr2.GetValue().ljust(18), self.usobs2.GetValue().ljust(22),self.usinst2.GetValue().ljust(15),self.usstart2.GetValue().ljust(8), self.usend2.GetValue()))\n if threeVar==1:\n f.writelines('%s%s%s%s%s\\n' % (self.usastr2b.GetValue().ljust(18),self.usobs2b.GetValue().ljust(22),self.usinst2b.GetValue().ljust(15),self.usstart2b.GetValue().ljust(8), self.usend2b.GetValue()))\n if peep > 3:\n f.writelines('%s%s%s%s%s\\n' % (self.usastr3.GetValue().ljust(18), self.usobs3.GetValue().ljust(22), self.usinst3.GetValue().ljust(15),self.usstart3.GetValue().ljust(8), self.usend3.GetValue()))\n if fourVar==1:\n f.writelines('%s%s%s%s%s\\n' % (self.usastr3b.GetValue().ljust(18),self.usobs3b.GetValue().ljust(22),self.usinst3b.GetValue().ljust(15),self.usstart3b.GetValue().ljust(8), self.usend3b.GetValue()))\n\n f.writelines('\\n' + self.schedHalf.GetLabel())\n f.writelines(\" 
----------------------------------------------------------------\\n\")\n f.writelines('%s\\n' % self.sc1.GetValue())\n f.writelines('%s\\n' % self.sc2.GetValue())\n if peep > 2:\n f.writelines('%s\\n' %self.sc3.GetValue())\n if peep > 3:\n f.writelines('%s\\n' % self.sc4.GetValue())\n f.writelines(\"\\nnote: scheduled times listed include instrument change time\\n\\n\"\\\n \" ------------- ACTIVITY LOG --------------\\n\")\n f.writelines(self.obsspec.GetLabel()+'\\n\\n')\n f.writelines(self.actText.GetValue()+'\\n')\n f.writelines(\"\\n ------- FAILURE LOG -------\\n\"\\\n \"\\n\"\\\n \"PROG INST FAILURE MODE TIME\\n\"\\\n \" (SEDFNVOG) TI/SHU START FINISH DESCRIPTION\\n\"\\\n \"----------------------------------------------------------------------\\n\")\n f.writelines(self.failLog.GetValue()+'\\n')\n f.writelines('\\n'+self.focus.GetLabel()+'\\n')\n f.writelines(self.focusLog.GetValue()+'\\n')\n f.writelines(self.weathText.GetValue()+'\\n')\n f.writelines(' Note: the wind was coming from the azimuth listed.\\n'\\\n ' The convention used is north=0 degrees, east=90 degrees.\\n'\\\n ' The dust count is particles > 1u per 0.1 cubic feet.\\n\\n')\n f.writelines(self.stat.GetLabel()+'\\n')\n f.writelines(\" Telescope drives operational. Current TCC version: \" + self.statTCCText.GetValue() + '\\n')\n f.writelines(\" Current TUI version: \" + self.statTUIText.GetValue() + '\\n') \n f.close()\n\n \"\"\"In safari save as page source with filename weather.html\n In firefox save as web page, html only with filename weather.html\n \"\"\"", "def write_to_file_y(path):\n path1 = path + \"/y_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y%sz0ke%s.mac\" %(dy*y + y_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y%sz0ke%s.root\"\\n' %(dy*y + y_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 %s 0\\n\" % (dy*y + y_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n 
if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))", "def write_inno_script (self, fd):\n print(\"; WARNING: This script has been created by py2exe. Changes to this script\", file=fd)\n print(\"; will be overwritten the next time py2exe is run!\", file=fd)\n print(\"[Setup]\", file=fd)\n print(\"AppName=%s\" % self.name, file=fd)\n print(\"AppVerName=%s %s\" % (self.name, self.version), file=fd)\n print(\"ChangesEnvironment=true\", file=fd)\n print(r\"DefaultDirName={pf}\\%s\" % self.name, file=fd)\n print(\"DefaultGroupName=%s\" % self.name, file=fd)\n print(\"OutputBaseFilename=%s\" % self.distfilebase, file=fd)\n print(\"OutputDir=..\", file=fd)\n print(\"SetupIconFile=%s\" % self.icon, file=fd)\n print(file=fd)\n print(\"[Tasks]\", file=fd)\n print(\"Name: modifypath; Description: Add application directory to %PATH%\", file=fd)\n print(file=fd)\n # List of source files\n files = self.windows_exe_files + \\\n self.console_exe_files + \\\n self.service_exe_files + \\\n self.comserver_files + \\\n self.lib_files\n print('[Files]', file=fd)\n for path in files:\n print(r'Source: \"%s\"; DestDir: \"{app}\\%s\"; Flags: ignoreversion' % (path, os.path.dirname(path)), file=fd)\n # Set icon filename\n print('[Icons]', file=fd)\n for path in self.windows_exe_files:\n print(r'Name: \"{group}\\%s\"; Filename: \"{app}\\%s\"' %\n (self.name, path), file=fd)\n for path in self.console_exe_files:\n name = os.path.basename(path).capitalize()\n print(r'Name: \"{group}\\%s help\"; Filename: \"cmd.exe\"; Parameters: \"/K %s --help\"' % (name, path), file=fd)\n print(r'Name: \"{group}\\Uninstall %s\"; Filename: \"{uninstallexe}\"' % self.name, file=fd)\n print(file=fd)\n # Uninstall optional log files\n print('[UninstallDelete]', file=fd)\n for path in (self.console_exe_files + self.windows_exe_files):\n exename = os.path.basename(path)\n print(r'Type: files; Name: \"{pf}\\%s\\%s.log\"' % (self.lname, exename), file=fd)\n print(file=fd)\n # Add app dir to PATH\n print(\"[Code]\", file=fd)\n print(\"\"\"\\\nconst\n ModPathName = 'modifypath';\n ModPathType = 'user';\n\nfunction ModPathDir(): TArrayOfString;\nbegin\n setArrayLength(Result, 1)\n Result[0] := ExpandConstant('{app}');\nend;\n#include \"modpath.iss\"\n\"\"\", file=fd)\n shutil.copy(r\"scripts\\modpath.iss\", \"dist\")" ]
[ "0.57784325", "0.5466428", "0.54199535", "0.5338996", "0.5319302", "0.52897364", "0.52734584", "0.520129", "0.5163601", "0.5156643", "0.5152206", "0.5109015", "0.5105658", "0.50975186", "0.5085345", "0.50773543", "0.50253874", "0.4992709", "0.49893114", "0.49869478", "0.49840865", "0.49781895", "0.49615148", "0.49159694", "0.49073583", "0.48827597", "0.4878233", "0.48774183", "0.48662376", "0.48586702" ]
0.6829257
0
Collect parameters from self.myLineParmBtns and put them in self.lineParameters self.lineParmBtns.append([lbl,lnStyleOpts,lnClrBtn,markerOpts,markerColorBtn,lnWdOpts]) self.lineParameters.append([i+1, '', self.colors[k],lw,self.markerTypes[j], self.colors[k],mw])
def handleApplyButton(self,save=1): #self.faceColor = self.faceColorBtn['bg'] #self.borderColor = self.borderColorBtn['bg'] self.obj.showGrid = self.showGrid self.obj.gridLineStyle = self.gridLineStyle #self.obj.borderColor = self.borderColor #self.obj.faceColor = self.obj.faceColor self.obj.legendLocation = self.legendLocation self.lineParameters = [] for i in range(self.maxLines): ilabel, lnOpts,lnClrBtn,lnWdOpts,mkOpts,mkClrBtn, mkSzOpts = self.lineParmBtns[i] ls = lnOpts.getvalue() lc = lnClrBtn['bg'] lw = int(lnWdOpts.getvalue()) mt = mkOpts.getvalue() mc = mkClrBtn['bg'] ms = int(mkSzOpts.getvalue()) #print lineStyle, lineColor, markerStyle, markerColor, lineWidth self.lineParameters.append([i+1,ls,lc,lw,mt,mc,ms]) self.obj.lineParameters = copy(self.lineParameters) # Reflect to master object. self.obj.applyLineParameters(); # Save to master object only. if save == 1: dparms = self.mapObjToLineParms() xml_saveLineParameters(dparms) self.obj.m_canvas.draw(); self.obj.m_canvas.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(lw=1.5, universal_color=\"#262626\", fontsize=16):\n rc(\"font\", size=fontsize)\n rc(\"lines\", linewidth=lw, markeredgewidth=lw * 0.5)\n rc(\"patch\", linewidth=lw, edgecolor=\"#FAFAFA\")\n rc(\n \"axes\",\n linewidth=lw,\n edgecolor=universal_color,\n labelcolor=universal_color,\n axisbelow=True,\n )\n rc(\"image\", origin=\"lower\") # fits images\n rc(\"xtick.major\", width=lw * 0.75)\n rc(\"xtick.minor\", width=lw * 0.5)\n rc(\"xtick\", color=universal_color)\n rc(\"ytick.major\", width=lw * 0.75)\n rc(\"ytick.minor\", width=lw * 0.5)\n rc(\"ytick\", color=universal_color)\n rc(\"grid\", linewidth=lw)\n rc(\n \"legend\",\n loc=\"best\",\n numpoints=1,\n scatterpoints=1,\n handlelength=1.5,\n fontsize=fontsize,\n columnspacing=1,\n handletextpad=0.75,\n )", "def set_params(self):\n max_margin = int(self.alpha) + 1\n self.sample_params['add'] = [0, max_margin, max_margin]", "def _init_trace(self,n,label,color,style,\n linewidth,marker,markersize):\n while n >= len(self.traces): self.traces.append(LineProps())\n line = self.traces[n]\n\n if label == None:label = \"trace %i\" % (n+1)\n line.label = label\n if color != None: line.color = color\n if style != None: line.style = style\n if linewidth != None: line.linewidth = linewidth\n if marker != None: line.marker = marker\n if markersize!= None: line.markersize = markersize\n self.traces[n] = line", "def add_line(self, buttoninstance):\r\n #del and create again to respect the order\r\n self.ids.inlayout.remove_widget(self.add_button)\r\n self.ids.inlayout.remove_widget(self.del_button)\r\n #create the new line\r\n store = get_store()\r\n lastval = store.get('Nbtimecompound')[\"value\"]\r\n store.put('Nbtimecompound', value=1+lastval)\r\n self.ids.inlayout.rows = 5 + store.get('Nbtimecompound')[\"value\"]\r\n #add the widget\r\n newval = str(store.get('Nbtimecompound')[\"value\"])\r\n timecompount = CEToolBoxLabel(text=\"Time compound \"+newval)\r\n timecompountvalue = CEToolBoxTextInput(text=str(1.0),\r\n id='Timecompound'+newval)\r\n timecompountunit = CEToolBoxSpinner(text=u\"min\",\r\n id='Timecompound'+newval+'Unit', \r\n values=[\"s\", \"min\"])\r\n store.put('Timecompound'+newval, value=1.0, unit=\"min\")\r\n self.ids.inlayout.add_widget(timecompount)\r\n self.ids.inlayout.add_widget(timecompountvalue)\r\n self.ids.inlayout.add_widget(timecompountunit)\r\n tosave = [timecompount, timecompountvalue, timecompountunit]\r\n self.timecompoundlist.append(tosave)\r\n #recreate the button\r\n self.add_button = CEToolBoxButton(text=\"Add\", id=\"addbutton\", on_release=self.add_line)\r\n self.ids.inlayout.add_widget(self.add_button)\r\n self.del_button = CEToolBoxButton(text=\"Del\", id=\"delbutton\", on_release=self.del_line)\r\n self.ids.inlayout.add_widget(self.del_button)\r\n self.ids.inlayout.rows = 5 + store.get('Nbtimecompound')[\"value\"]\r\n #force the good size\r\n self.ids.tscrollview.change_child_height(self.ids.tscrollview.height)", "def add_parameters(params):\n\n items = [\n ('x', 'X manual', ''),\n ('z', 'Z manual', ''),\n ('automatic', 'Automatic', '')\n ]\n\n params.rotation_axis = bpy.props.EnumProperty(\n items = items,\n name = \"Rotation Axis\",\n default = 'automatic'\n )\n\n params.auto_align_extremity = bpy.props.BoolProperty(\n name='auto_align_extremity',\n default=False,\n description=\"Auto Align Extremity Bone\"\n )\n\n params.segments = bpy.props.IntProperty(\n name = 'limb segments',\n default = 2,\n min = 1,\n description = 'Number of segments'\n )\n\n params.bbones = 
bpy.props.IntProperty(\n name = 'bbone segments',\n default = 10,\n min = 1,\n description = 'Number of segments'\n )\n\n # Setting up extra layers for the FK and tweak\n params.tweak_extra_layers = bpy.props.BoolProperty(\n name = \"tweak_extra_layers\",\n default = True,\n description = \"\"\n )\n\n params.tweak_layers = bpy.props.BoolVectorProperty(\n size = 32,\n description = \"Layers for the tweak controls to be on\",\n default = tuple( [ i == 1 for i in range(0, 32) ] )\n )\n\n # Setting up extra layers for the FK and tweak\n params.fk_extra_layers = bpy.props.BoolProperty(\n name = \"fk_extra_layers\",\n default = True,\n description = \"\"\n )\n\n params.fk_layers = bpy.props.BoolVectorProperty(\n size = 32,\n description = \"Layers for the FK controls to be on\",\n default = tuple( [ i == 1 for i in range(0, 32) ] )\n )", "def newParameter(self):\n numParams = self.ui.parameterList.rowCount()\n self.ui.parameterList.insertRow(numParams)", "def enterParameters(self,**kwargs):\n\n members = self.bl.getAllParameters().keys() \n entries={}\n\n for param in members:\n entries[param] = getattr(self.bl, 'paramSelection') # save param names in entries\n entries['view selection'] = [getattr(self.bl, 'displayText'), str(self.bl.getAllParameters())]\n entries['reset selection'] = getattr(self.bl, 'paramReset')\n self.mm.addGenericMenu(\"param\",self.mm.cur_page,\"Select your desired params for this operation\", entries)\n self.mm.loadMenu(\"param\")", "def addBL(self):\n self.parent.copyCurrentWinState(self.pltw)\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def line(self, points, ls=\"--\", draw=\"black\", lw=None, options=None, kwoptions=None):\n\n draw = norm_colour(draw)\n self.use_colour(draw)\n\n if kwoptions is None:\n kwoptions = {}\n kwopts = {'draw': draw, **kwoptions}\n if lw:\n kwopts['line width'] = lw\n\n self._commands.append(rf\"\\draw{wrap(fmt_options(options,kwopts))} \" +\n f\" {ls} \".join(map(fmt_point, points))+\";\")", "def set_parameters(self, L, r):\n self.L = L\n self.r = r", "def fill_active(self, layout):\n self.new_func_triggers = QLineEdit()\n layout.addRow(\"new func triggers\", self.new_func_triggers)\n self.new_param = QLineEdit()\n layout.addRow(\"new parameter\", self.new_param)", "def initParms(self):\n self.parmVal = []\n self.parmName = []\n (nvect, npt) = self.data.shape\n if self.model == 0:\n self.parmVal.append(2.0)\n self.parmName.append('Order')\n if self.model == 1:\n self.parmVal.append(1.0)\n self.parmName.append('A')\n self.parmVal.append(1.0)\n self.parmName.append('B')\n if self.model == 2:\n self.parmVal.append(self.data[1][0])\n self.parmName.append('A')\n self.parmVal.append(self.data[1][npt-1])\n self.parmName.append('B')\n if self.model == 3:\n self.parmVal.append(self.data[1][0])\n self.parmName.append('Ao')\n self.parmVal.append(100.0)\n self.parmName.append('Ea')\n if self.model == 4:\n self.parmVal.append(0.001)\n self.parmName.append('A')\n self.parmVal.append(1.0)\n self.parmName.append('B')\n if self.model == 5:\n self.parmVal.append(0.001)\n self.parmName.append('A')\n 
self.parmVal.append(0.0)\n self.parmName.append('B')\n self.parmVal.append(1.0)\n self.parmName.append('C')\n if self.model == 6:\n self.parmVal.append(self.data[0][0])\n self.parmName.append('xo')\n self.parmVal.append(self.data[1][0])\n self.parmName.append('yo')\n yspan = getSpan(self.data[1])\n if self.data[1][0] > 0.0:\n v = self.data[1][0] + yspan/2.0\n else:\n v = self.data[1][npt-1] + yspan/2.0\n self.parmVal.append(v)\n self.parmName.append('H')\n if self.data[1][0] > self.data[1][npt-1]:\n self.parmVal.append(-1.0)\n else:\n self.parmVal.append(1.0)\n self.parmName.append('S')", "def addLineStyle(dist, focus, axis, pupil):\n r = 0 #focus / 2\n g = 0 #np.log10(dist) / (25 / 3)\n b = 0 #axis / 20\n a = 0.4\n rgb = [r, g, b, a]\n line = {'style': '-', 'color': rgb}\n return line", "def init_save_curve_params_button(self):\n def save_params():\n \"\"\"\n function to invoke different save routines\n \"\"\"\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"JSON\", \"*.json\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n self.parent_class.classes[\"fractal\"].curve.store_curve_tofile(\n file_name)\n\n self.buttons[\"btn_save_params\"] = Button(\n self.frame, text=\"Save Parameters\", command=save_params)\n self.buttons[\"btn_save_params\"].grid(row=4, column=1)", "def linelist(self):\n line_list = Marker()\n line_list.header = self._header\n line_list.type = Marker.LINE_LIST\n line_list.action = Marker.ADD\n line_list.scale.x = 0.005\n line_list.color = self.YELLOW\n line_list.pose = deepcopy(self.POSE)\n\n line_list.points.extend((self._p1, self._p2))\n line_list.points.extend((self._p2, self._p3))\n line_list.points.extend((self._p3, self._p4))\n line_list.points.extend((self._p4, self._p1))\n line_list.points.extend((self._p5, self._p6))\n line_list.points.extend((self._p6, self._p7))\n line_list.points.extend((self._p7, self._p8))\n line_list.points.extend((self._p8, self._p5))\n line_list.points.extend((self._p1, self._p5))\n line_list.points.extend((self._p2, self._p6))\n line_list.points.extend((self._p3, self._p7))\n line_list.points.extend((self._p4, self._p8))\n\n return line_list", "def define_parameters(self):", "def loadParameters (self, filePath):\r\n # productive #onButton\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n config = ConfigParser.RawConfigParser()\r\n config.read(filePath)\r\n\r\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\r\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\r\n gradient = config.getboolean('BooleanSection', 'gradient')\r\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\r\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\r\n autoStopTip = config.getboolean('BooleanSection', 'autoStopTip')\r\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\r\n maxLength = config.getboolean('BooleanSection', 'maxLength')\r\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\r\n\r\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\r\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\r\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\r\n exponent = config.getint('IntegerSection', 'exponent')\r\n try:\r\n radiusMax = config.getint('IntegerSection', 'distanceMax') # try deprecated parameter name (old parameter files)\r\n except:\r\n radiusMax = 
config.getint('IntegerSection', 'radiusMax')\r\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\r\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\r\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\r\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\r\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\r\n\r\n widget.autoCorrectTip.checked = autoCorrectTip\r\n widget.invertedContrast.checked = invertedContrast\r\n widget.gradient.checked = gradient\r\n widget.filterControlPoints.checked = filterControlPoints\r\n widget.drawFiducialPoints.checked = drawFiducialPoints\r\n widget.autoStopTip.checked = autoStopTip\r\n widget.extendNeedle.checked = extendNeedle\r\n widget.maxLength.checked = maxLength\r\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\r\n\r\n widget.realNeedleLength.value = realNeedleLength\r\n widget.sigmaValue.value = sigmaValue\r\n widget.gradientPonderation.value = gradientPonderation\r\n widget.exponent.value = exponent\r\n widget.radiusMax.value = radiusMax\r\n widget.nbRotatingIterations.value = nbRotatingIterations\r\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\r\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\r\n widget.radiusNeedleParameter.value = radiusNeedleParameter\r\n widget.algoVersParameter.value = algoVersParameter\r\n print \"#############\"\r\n print \"algoVers: \", algoVersParameter\r\n print \"Parameters successfully loaded!\"", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def loadParameters (self, filePath):\n #productive #onButton\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n config = ConfigParser.RawConfigParser()\n config.read(filePath)\n\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\n gradient = config.getboolean('BooleanSection', 'gradient')\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\n autoStopTip = config.getboolean('BooleanSection', 'autoStopTip')\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\n maxLength = config.getboolean('BooleanSection', 'maxLength')\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\n\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\n exponent = config.getint('IntegerSection', 'exponent')\n distanceMax = config.getint('IntegerSection', 'distanceMax')\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\n \n widget.autoCorrectTip.checked = autoCorrectTip\n 
widget.invertedContrast.checked = invertedContrast\n widget.gradient.checked = gradient \n widget.filterControlPoints.checked = filterControlPoints\n widget.drawFiducialPoints.checked = drawFiducialPoints\n widget.autoStopTip.checked = autoStopTip\n widget.extendNeedle.checked = extendNeedle\n widget.maxLength.checked = maxLength\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\n\n widget.realNeedleLength.value = realNeedleLength\n widget.sigmaValue.value = sigmaValue\n widget.gradientPonderation.value = gradientPonderation\n widget.exponent.value = exponent\n widget.distanceMax.value = distanceMax\n widget.nbRotatingIterations.value = nbRotatingIterations\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\n widget.radiusNeedleParameter.value = radiusNeedleParameter\n widget.algoVersParameter.value = algoVersParameter\n print \"algoVers: \",algoVersParameter\n print \"Parameters successfully loaded!\"", "def parameters_ui(layout, params):\n\n r = layout.row()\n r.prop(params, \"rotation_axis\")\n\n if 'auto' not in params.rotation_axis.lower():\n r = layout.row()\n text = \"Auto align Foot\"\n r.prop(params, \"auto_align_extremity\", text=text)\n\n r = layout.row()\n r.prop(params, \"segments\")\n\n r = layout.row()\n r.prop(params, \"bbones\")\n\n bone_layers = bpy.context.active_pose_bone.bone.layers[:]\n\n for layer in ['fk', 'tweak']:\n r = layout.row()\n r.prop(params, layer + \"_extra_layers\")\n r.active = params.tweak_extra_layers\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(16, 24):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n col = r.column(align=True)\n row = col.row(align=True)\n\n for i in range(8, 16):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)\n\n row = col.row(align=True)\n\n for i in range(24, 32):\n icon = \"NONE\"\n if bone_layers[i]:\n icon = \"LAYER_ACTIVE\"\n row.prop(params, layer + \"_layers\", index=i, toggle=True, text=\"\", icon=icon)", "def my_line(self, master, name, prefilled_entry, r, c, rsp, csp, px, py) -> None:\n line = tk.Label(master=master, text=name, anchor='w')\n line.grid(row=r, column=c, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n text = tk.StringVar()\n text.set(prefilled_entry)\n l2 = tk.Entry(master=master, textvariable=text)\n l2.grid(row=r, column=c + 1, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n self.data.append({'name': name, 'tk_object': l2})", "def update_lines(self):\n self._checkfigure()\n for ld in self.lines:\n line = ld['line']\n\n color = ld['color']\n line.set_color(color)\n\n lw = ld['linewidth']\n hlf = ld['highlight factor']\n highlight = hlf if ld['highlighted'] else 1.0\n lw = lw*highlight\n line.set_linewidth(lw)\n\n for vline in ld['vlines']:\n vline.set_color(color)\n vline.set_linestyle('--')\n vline.set_linewidth(lw)\n\n for hline in ld['vlines']:\n hline.set_color(color)\n hline.set_linestyle('--')\n hline.set_linewidth(lw)", "def set_line_markers(self, line_markers):\n self._line_markers = line_markers", "def setupButtons(self):\n 
self.addLayerButton.setAccessibleName('editLayer')\n self.addLayerButton.setText('+')\n self.deleteLayerButton.setAccessibleName('editLayer')\n self.deleteLayerButton.setText('-')\n self.downButton.setArrowType(QtCore.Qt.DownArrow)\n self.upButton.setArrowType(QtCore.Qt.UpArrow)\n self.addLayerButton.setToolTip('Add a new Layer to the Job.')\n self.deleteLayerButton.setToolTip('Delete the selected Layer from the Job.')\n self.downButton.setToolTip('Move the selected Layer down in the Job.')\n self.upButton.setToolTip('Move the selected Layer up in the Job.')", "def addToolBarButtons(self):", "def getParameters(self): #$NON-NLS-1$\r", "def map_line(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()", "def addLegendLine(line,n):\n dislin.leglin(' ',line,n)", "def edit_kpts(param_label, i, dir, line_key = 'Gamma', file = 'KPOINTS'):\n\n replacement_line = \" \" + i[0] + \" \" + i[1] + \" \" + i[2]\n gen_file_editor(param_label, dir, file, replacement_line, line_key)\n\n return False", "def init_all_params(self):\n self.annotations_timestamp = 0\n # self.annotations_offset = 0\n # self.annotation_offset_text.configure(text='Current: %d' % self.annotations_offset)\n self.annotations_timestamp_text.configure(text='Annotation timestamp:\\n %d' % self.annotations_timestamp)\n self.annotations_timestamp_text.grid(sticky=\"W\", row=9, column=0, columnspan=10)\n # set text frames\n # self.annotations_offset_entry.delete(0, 'end')\n # self.annotations_offset_entry.insert(0, str(self.annotations_offset))\n self.current_frame_entry.delete(0, 'end')\n self.current_frame_entry.insert(0, str(self.vid.frame_number))" ]
[ "0.57280254", "0.5546413", "0.5510684", "0.5442827", "0.52763814", "0.5265816", "0.52513844", "0.5206591", "0.51973724", "0.5193094", "0.5166233", "0.51613885", "0.5127534", "0.51055425", "0.51003975", "0.5092666", "0.5092629", "0.50905925", "0.50885", "0.50683045", "0.5055776", "0.5044302", "0.5026073", "0.5006838", "0.5005025", "0.49906936", "0.49819058", "0.49790332", "0.497561", "0.49748906" ]
0.5729337
0
Create a package with the current pack_operation_ids of the picking that aren't yet in a pack. Used in the barcode scanner UI and the normal interface as well. operation_filter_ids is used by the barcode scanner interface to specify a subset of operations to pack
def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):
    if operation_filter_ids is None:
        operation_filter_ids = []
    stock_operation_obj = self.pool.get('stock.pack.operation')
    package_obj = self.pool.get('stock.quant.package')
    stock_move_obj = self.pool.get('stock.move')
    package_id = False
    for picking_id in picking_ids:
        operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]
        if operation_filter_ids != []:
            operation_search_domain.append(('id', 'in', operation_filter_ids))
        operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)
        pack_operation_ids = []
        if operation_ids:
            for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):
                # If we haven't done all qty in operation, we have to split into 2 operation
                op = operation
                if (operation.qty_done < operation.product_qty):
                    new_operation = stock_operation_obj.copy(
                        cr, uid, operation.id,
                        {'product_qty': operation.qty_done, 'qty_done': operation.qty_done},
                        context=context
                    )
                    stock_operation_obj.write(
                        cr, uid, operation.id,
                        {'product_qty': operation.product_qty - operation.qty_done, 'qty_done': 0},
                        context=context
                    )
                    op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
                pack_operation_ids.append(op.id)
                if op.product_id and op.location_id and op.location_dest_id:
                    stock_move_obj.check_tracking_product(
                        cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id,
                        context=context
                    )
            package_id = package_obj.create(cr, uid, {}, context=context)
            stock_operation_obj.write(
                cr, uid, pack_operation_ids,
                {'result_package_id': package_id},
                context=context
            )
    return package_id
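Illustrative note: the snippet below is a minimal, framework-free sketch of the split-and-pack flow shown above, not the Odoo ORM API; the PackOp dataclass, the pack_done_operations name and the in-memory package counter are assumptions made only for this example.

# Framework-free sketch: put the "done" part of each open operation into one new package,
# splitting any partially-done line so the remainder stays open (illustrative only).
from dataclasses import dataclass, replace
from itertools import count
from typing import List, Optional

_package_ids = count(1)  # stand-in for package creation in a database

@dataclass
class PackOp:
    product: str
    product_qty: float
    qty_done: float
    result_package_id: Optional[int] = None

def pack_done_operations(ops: List[PackOp]) -> Optional[int]:
    """Assign the done quantity of every unpacked operation to one new package."""
    new_ops, to_pack = [], []
    for op in ops:
        if op.result_package_id is not None or op.qty_done <= 0:
            continue
        if op.qty_done < op.product_qty:
            # split: the done part joins the package, the remainder stays open
            done_part = replace(op, product_qty=op.qty_done)
            op.product_qty -= op.qty_done
            op.qty_done = 0.0
            new_ops.append(done_part)
            to_pack.append(done_part)
        else:
            to_pack.append(op)
    ops.extend(new_ops)
    if not to_pack:
        return None
    package_id = next(_package_ids)
    for op in to_pack:
        op.result_package_id = package_id
    return package_id

ops = [PackOp("apple", 5, 5), PackOp("pear", 10, 4)]
print(pack_done_operations(ops))  # -> 1; the pear line is split into a packed 4 and an open 6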
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_pack_ops(self, quants, forced_qties):\n valid_quants = quants.filtered(lambda quant: quant.qty > 0)\n _Mapping = namedtuple('Mapping', ('product', 'package', 'owner', 'location', 'location_dst_id','move_id'))\n all_products = valid_quants.mapped('product_id') | self.env['product.product'].browse(set(m.product_id.id for m,q in forced_qties)) | self.move_lines.mapped('product_id')\n computed_putaway_locations = dict(\n (product, self.location_dest_id.get_putaway_strategy(product) or self.location_dest_id.id) for product in all_products)\n product_to_uom = dict((product.id, product.uom_id) for product in all_products)\n picking_moves = self.move_lines.filtered(lambda move: move.state not in ('done', 'cancel'))\n for move in picking_moves:\n # If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead.\n if move.product_uom != product_to_uom[move.product_id.id] and move.product_uom.factor > product_to_uom[move.product_id.id].factor:\n product_to_uom[move.product_id.id] = move.product_uom\n if len(picking_moves.mapped('location_id')) > 1:\n raise UserError(_('The source location must be the same for all the moves of the picking.'))\n if len(picking_moves.mapped('location_dest_id')) > 1:\n raise UserError(_('The destination location must be the same for all the moves of the picking.'))\n pack_operation_values = []\n # find the packages we can move as a whole, create pack operations and mark related quants as done\n top_lvl_packages = valid_quants._get_top_level_packages(computed_putaway_locations)\n for pack in top_lvl_packages:\n pack_quants = pack.get_content()\n pack_operation_values.append({\n 'picking_id': self.id,\n 'package_id': pack.id,\n 'product_qty': 1.0,\n 'location_id': pack.location_id.id,\n 'location_dest_id': computed_putaway_locations[pack_quants[0].product_id],\n 'owner_id': pack.owner_id.id,\n })\n valid_quants -= pack_quants\n # Go through all remaining reserved quants and group by product, package, owner, source location and dest location\n # Lots will go into pack operation lot object\n qtys_grouped = {}\n lots_grouped = {}\n for quant in valid_quants:\n key = _Mapping(quant.product_id, quant.package_id, quant.owner_id, quant.location_id, computed_putaway_locations[quant.product_id], quant.reservation_id)\n qtys_grouped.setdefault(key, 0.0)\n qtys_grouped[key] += quant.qty\n if quant.product_id.tracking != 'none' and quant.lot_id:\n lots_grouped.setdefault(key, dict()).setdefault(quant.lot_id.id, 0.0)\n lots_grouped[key][quant.lot_id.id] += quant.qty\n # Do the same for the forced quantities (in cases of force_assign or incomming shipment for example)\n for move_f, qty in forced_qties:\n if qty <= 0.0:\n continue\n key = _Mapping(move_f.product_id, self.env['stock.quant.package'], self.owner_id, self.location_id, computed_putaway_locations[move_f.product_id], move_f)\n qtys_grouped.setdefault(key, 0.0)\n qtys_grouped[key] += qty\n # Create the necessary operations for the grouped quants and remaining qtys\n Uom = self.env['product.uom']\n move_id_to_vals = {} # use it to create operations using the same order as the picking stock moves\n for mapping, qty in qtys_grouped.items():\n uom = product_to_uom[mapping.product.id]\n val_dict = {\n 'picking_id': self.id,\n 'product_qty': mapping.product.uom_id._compute_quantity(qty, uom),\n 'product_id': mapping.product.id,\n 'package_id': mapping.package.id,\n 'owner_id': mapping.owner.id,\n 'location_id': mapping.location.id,\n 'location_dest_id': 
mapping.location_dst_id,\n 'product_uom_id': uom.id,\n 'pack_lot_ids': [\n (0, 0, {'lot_id': lot, 'qty': 0.0, 'qty_todo': lots_grouped[mapping][lot]})\n for lot in lots_grouped.get(mapping, {}).keys()],\n }\n move_id_to_vals.setdefault(mapping.move_id.id, list()).append(val_dict)\n for move in self.move_lines.filtered(lambda move: move.state not in ('done', 'cancel')):\n values = move_id_to_vals.pop(move.id, [])\n pack_operation_values += values\n return pack_operation_values", "def pack():\n PackCommandExecutor().pack()", "def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None):\n lot_obj = self.pool.get('stock.production.lot')\n package_obj = self.pool.get('stock.quant.package')\n product_obj = self.pool.get('product.product')\n stock_operation_obj = self.pool.get('stock.pack.operation')\n stock_location_obj = self.pool.get('stock.location')\n answer = {'filter_loc': False, 'operation_id': False}\n # check if the barcode correspond to a location\n matching_location_ids = stock_location_obj.search(cr, uid, [('barcode', '=', barcode_str)], context=context)\n if matching_location_ids:\n # if we have a location, return immediatly with the location name\n location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None)\n answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None)\n answer['filter_loc_id'] = matching_location_ids[0]\n return answer\n # check if the barcode correspond to a product\n matching_product_ids = product_obj.search(cr, uid, ['|', ('barcode', '=', barcode_str),\n ('default_code', '=', barcode_str)], context=context)\n if matching_product_ids:\n op_id = stock_operation_obj._search_and_increment(\n cr,\n uid,\n picking_id,\n [('product_id', '=', matching_product_ids[0])],\n filter_visible=True,\n visible_op_ids=visible_op_ids,\n increment=True,\n context=context\n )\n answer['operation_id'] = op_id\n return answer\n # check if the barcode correspond to a lot\n matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)\n if matching_lot_ids:\n lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context)\n op_id = stock_operation_obj._search_and_increment(\n cr,\n uid,\n picking_id,\n [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)],\n filter_visible=True,\n visible_op_ids=visible_op_ids,\n increment=True,\n context=context\n )\n answer['operation_id'] = op_id\n return answer\n # check if the barcode correspond to a package\n matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)\n if matching_package_ids:\n op_id = stock_operation_obj._search_and_increment(\n cr,\n uid,\n picking_id,\n [('package_id', '=', matching_package_ids[0])],\n filter_visible=True,\n visible_op_ids=visible_op_ids,\n increment=True,\n context=context\n )\n answer['operation_id'] = op_id\n return answer\n return answer", "def genereate_echo_picklist(self):\n sample_names = []\n sample_wells = []\n indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {},\n 'i5 well': {}, 'i7 name': {}, 'i7 plate': {},\n 'i7 sequence': {}, 'i7 well': {}, 'index combo': {},\n 'index combo seq': {}}\n\n for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)):\n # Add the sample well\n sample_wells.append(well.well_id)\n # Get the sample name - we need to go back to the SampleComposition\n lib_comp = well.composition\n sample_comp = lib_comp.normalized_gdna_composition\\\n .gdna_composition.sample_composition\n 
sample_names.append(sample_comp.content)\n # Retrieve all the information about the indices\n i5_comp = lib_comp.i5_composition.primer_set_composition\n i5_well = i5_comp.container\n indices['i5 name'][idx] = i5_comp.external_id\n indices['i5 plate'][idx] = i5_well.plate.external_id\n indices['i5 sequence'][idx] = i5_comp.barcode\n indices['i5 well'][idx] = i5_well.well_id\n\n i7_comp = lib_comp.i7_composition.primer_set_composition\n i7_well = i7_comp.container\n indices['i7 name'][idx] = i7_comp.external_id\n indices['i7 plate'][idx] = i7_well.plate.external_id\n indices['i7 sequence'][idx] = i7_comp.barcode\n indices['i7 well'][idx] = i7_well.well_id\n\n indices['index combo seq'][idx] = '%s%s' % (\n indices['i5 sequence'][idx], indices['i7 sequence'][idx])\n\n sample_names = np.asarray(sample_names)\n sample_wells = np.asarray(sample_wells)\n indices = pd.DataFrame(indices)\n\n return LibraryPrepShotgunProcess._format_picklist(\n sample_names, sample_wells, indices)", "def create_package(self, **kwargs):\n results = self.api.action.package_create(**kwargs)\n self.get_ckan_metadata(True)\n return results", "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def test_specific_pack_creation(repo):\n pack_1 = repo.setup_one_pack('Pack1')\n pack_1.pack_metadata.write_json(\n {\n 'name': 'Pack Number 1',\n }\n )\n\n pack_2 = repo.setup_one_pack('Pack2')\n pack_2.pack_metadata.write_json(\n {\n 'name': 'Pack Number 2',\n }\n )\n\n with ChangeCWD(repo.path):\n with temp_dir() as temp:\n runner = CliRunner(mix_stderr=False)\n 
result = runner.invoke(main, [ARTIFACTS_CMD, '-a', temp, '-p', 'Pack1'])\n\n assert result.exit_code == 0\n assert os.path.exists(os.path.join(str(temp), 'uploadable_packs', 'Pack1.zip'))\n assert not os.path.exists(os.path.join(str(temp), 'uploadable_packs', 'Pack2.zip'))", "def package_select(self, master):\r\n\r\n #print self.newProj.workSpace\r\n\r\n if self.update_proj():\r\n self.isValidConfig.set(1)\r\n else:\r\n self.isValidConfig.set(0)\r\n return\r\n\r\n #print self.newProj.workSpace\r\n\r\n if len(self.newProj.toolChain) < 1:\r\n tkMessageBox.showinfo(\"No Toolchain Selected\",\\\r\n \"Select a toolchain to generate a project.\")\r\n return\r\n\r\n # Disable generate button\r\n self.widgetList[31].state([\"disabled\"])\r\n\r\n #Create package list form selected device\r\n packageList = []\r\n\r\n tree = ET.parse(self.newProj.sdkPath + '/ksdk_manifest.xml')\r\n for elem in tree.iter(tag='device'):\r\n if elem.attrib['full_name'] == self.newProj.device[0]:\r\n for pack in elem.findall('package'):\r\n packageList.append(pack.attrib['name'])\r\n\r\n labelFont = 'Arial 9 bold'\r\n\r\n #Create window to show USER that project has been generated and where it is.\r\n popPackage = Toplevel()\r\n winH = 0\r\n winW = 0\r\n if self.newProj.osType == 'Windows':\r\n winH = 75 * WIN_SCALE\r\n winW = 250 * WIN_SCALE\r\n elif self.newProj.osType == 'Darwin':\r\n if platform.mac_ver()[0][:5] == '10.10':\r\n winH = 75\r\n winW = 300\r\n elif platform.mac_ver()[0][:5] == '10.11':\r\n winH = 75\r\n winW = 330\r\n else:\r\n winH = 75\r\n winW = 300\r\n popPackage.config(height=winH, width=winW)\r\n popPackage.protocol('WM_DELETE_WINDOW', lambda: self.safe_return(popPackage))\r\n popPackage.grid()\r\n if self.newProj.osType == 'Linux':\r\n img = Image(\"photo\", data=kImg.boardImages['kds_icon.gif']) # Use the .gif in Linux\r\n popPackage.tk.call('wm', 'iconphoto', popPackage._w, img)\r\n popPackage.title(\"Select Device Package.\")\r\n popPackage.geometry('%dx%d+%d+%d' % (winW, winH, master.winfo_x() + 20, master.winfo_y() + 20))\r\n popPackage.resizable(width=FALSE, height=FALSE)\r\n popPackage.configure(background='#E7E7E7')\r\n\r\n #Text for window\r\n genString = 'Package:'\r\n genTxt = Label(popPackage, text=genString, justify=LEFT, font=labelFont)\r\n genTxt.grid(row=0, column=0, columnspan=1, sticky=W+E, padx=5, pady=5)\r\n\r\n genBox = Combobox(popPackage, state='readonly')\r\n genBox.config(textvariable=self.devPackage)\r\n genBox['values'] = packageList\r\n genBox.grid(row=1, column=0, sticky=W+E, padx=5)\r\n genBox.current(0)\r\n\r\n genButton = Button(popPackage, text='Apply', command=lambda: self.begin_advanced_gen(master, popPackage))\r\n genButton.grid(row=1, column=1, sticky=W+E, padx=5)\r\n\r\n # support automation test\r\n self.pop_package = popPackage", "def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)", "def abc_load_picking(self):\n self.ensure_one()\n picking = self.abc_make_records(self)[0]\n if self.state == 'assigned':\n action = self.do_enter_transfer_details()\n wizard = self.env['stock.transfer_details'].browse(action['res_id'])\n operations = self.abc_make_records(wizard.item_ids)\n products = self.abc_make_records(wizard.item_ids.mapped('product_id'))\n else:\n operations = []\n products = []\n # TODO: Find packages\n packages = []\n res = {'picking': picking, 'operations': operations, 'products': products, 'packages': 
packages}\n return res", "def pick(self, pack, cards_owned, draft_info):\n pass", "def test_all_packs_creation(repo):\n pack_1 = repo.setup_one_pack('Pack1')\n pack_1.pack_metadata.write_json(\n {\n 'name': 'Pack Number 1',\n }\n )\n\n pack_2 = repo.setup_one_pack('Pack2')\n pack_2.pack_metadata.write_json(\n {\n 'name': 'Pack Number 2',\n }\n )\n\n with ChangeCWD(repo.path):\n with temp_dir() as temp:\n runner = CliRunner(mix_stderr=False)\n result = runner.invoke(main, [ARTIFACTS_CMD, '-a', temp, '-p', 'all'])\n\n assert result.exit_code == 0\n assert os.path.exists(os.path.join(str(temp), 'uploadable_packs', 'Pack1.zip'))\n assert os.path.exists(os.path.join(str(temp), 'uploadable_packs', 'Pack2.zip'))", "def SetToolPacking(self, packing):\r\n\r\n self._tool_packing = packing", "def __init__(__self__, *,\n packaging_group_id: pulumi.Input[str],\n cmaf_package: Optional[pulumi.Input['PackagingConfigurationCmafPackageArgs']] = None,\n dash_package: Optional[pulumi.Input['PackagingConfigurationDashPackageArgs']] = None,\n hls_package: Optional[pulumi.Input['PackagingConfigurationHlsPackageArgs']] = None,\n mss_package: Optional[pulumi.Input['PackagingConfigurationMssPackageArgs']] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['PackagingConfigurationTagArgs']]]] = None):\n pulumi.set(__self__, \"packaging_group_id\", packaging_group_id)\n if cmaf_package is not None:\n pulumi.set(__self__, \"cmaf_package\", cmaf_package)\n if dash_package is not None:\n pulumi.set(__self__, \"dash_package\", dash_package)\n if hls_package is not None:\n pulumi.set(__self__, \"hls_package\", hls_package)\n if mss_package is not None:\n pulumi.set(__self__, \"mss_package\", mss_package)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def _create_customer_package(order):\n \n next_date = datetime.datetime.now()\n \n # RUN THE ITEMS THROUGH THE OFFERS FILTER, JUST TO CHECK\n items = _apply_deals(order.items.filter(monthly_order=False))\n print \"woohoo\"\n\n \n # GO THROUGH EACH SINGLE ITEM, GET/CREATE A WAREHOUSE ITEM, AND ADD IT TO A PACKAGE\n for x in items[0]:\n loop = x.quantity\n while loop >= 1:\n \n try:\n # GET A WAREHOUSE ITEM THAT MATCHES THE CURRENCY PARENT_PRODUCT, WEIGHT AND CURRENCY\n wh_item = WarehouseItem.objects.filter(\n unique_product__parent_product=x.item.parent_product, \n unique_product__weight=x.item.weight,\n unique_product__currency__code='GBP',\n sold__isnull=True,\n )[0]\n \n wh_item.sold = datetime.datetime.now()\n wh_item.reason = WarehouseItem.SOLD\n preorder = False \n \n except: \n # IF THERE'S NONE IN STOCK, CREATE A NEW ITEM AND MARK THE PACKAGE AS A PREORDER \n up = UniqueProduct.objects.filter( \n currency__code='GBP', \n parent_product=x.item.parent_product,\n weight=x.item.weight,\n is_active=True, \n )[0]\n \n wh_item = WarehouseItem.objects.create(\n unique_product=up,\n hashkey=uuid.uuid1().hex,\n created=datetime.datetime.now(),\n batch='TEMP',\n )\n preorder = True\n \n try:\n package = CustomerPackage.objects.get(\n order=order, \n is_preorder=preorder,\n )\n except:\n package = CustomerPackage.objects.create(\n order=order,\n is_preorder=preorder\n )\n\n wh_item.package = package \n \n # UPDATE THE FINAL FIGURES FOR POSTERITY\n wh_item.sale_currency = x.item.currency\n wh_item.list_price = x.item.price\n wh_item.sale_price = x.item.get_price()\n wh_item.save()\n \n loop -= 1\n\n\n # APPLY THE DISCOUNT/POSTAGE COSTS TO ONLY 1 PACKAGE\n try:\n package = CustomerPackage.objects.filter(order=order)[0]\n package.discount_amount = 
order.get_discount()\n \n amount = 0\n for x in order.items.filter(monthly_order=False):\n amount += x.item.price\n \n if amount > order.get_currency().postage_discount_threshold:\n postage_amount = 0\n else:\n postage_amount = order.get_currency().postage_cost\n \n package.postage_paid = postage_amount\n package.save()\n except:\n pass\n\n\n # NOTE: WE AREN'T SELLING MONTHLY ITEMS NOW!\n # NOW DEAL WITH THE MONTHLY ITEMS\n for x in order.items.filter(monthly_order=True):\n \n months = x.months \n while months >= 1:\n \n # THIS WILL CREATE THE FIRST PACKAGE TO BE SENT (ie. THE FIRST MONTH)\n if months == x.months:\n \n \n # CREATE A MONTHLY PACKAGE IF ONE DOESN\"T ALREADY EXIST\n if not monthly_package:\n monthly_package = CustomerPackage.objects.create(\n order=order,\n created=datetime.datetime.now(),\n )\n \n \n # TAKE THESE ITEMS OUT OF CURRENT AVAILABLE STOCK\n quantity = x.quantity\n while quantity >= 1:\n try:\n wh_item = WarehouseItem.objects.filter(\n unique_product__parent_product=x.item.parent_product, \n unique_product__weight=x.item.weight,\n unique_product__currency__code='GBP',\n sold__isnull=True,\n )[0]\n except:\n wh_item = WarehouseItem.objects.create(\n unique_product=x.item,\n hashkey=uuid.uuid1().hex,\n created=datetime.datetime.now(),\n batch='TEMP',\n )\n \n wh_item.sold = datetime.datetime.now()\n wh_item.reason = WarehouseItem.SOLD\n wh_item.package = package\n wh_item.save() \n \n # FOR ALL OTHER MONTHS\n else:\n monthly_package = CustomerPackage.objects.create(\n order=order,\n created=datetime.datetime.now(),\n shipping_due_date=next_date\n )\n \n # ADD ITEMS AS PREORDER ITEMS, NOT FROM CURRENT STOCK\n quantity = x.quantity\n while quantity >= 1:\n wh_item = WarehouseItem.objects.create(\n unique_product=x.item,\n hashkey=uuid.uuid1().hex,\n created=datetime.datetime.now(),\n batch='TEMP',\n ) \n \n wh_item.sold = datetime.datetime.now()\n wh_item.reason = WarehouseItem.SOLD\n wh_item.package = monthly_package\n wh_item.save()\n \n quantity -= 1\n \n next_date = add_months(next_date, 1)\n months -= 1\n\n return", "def get_filtered_pack(self):\n return self.list_pack", "def pack(backend_name, patterns, size, minimum, yes):\n # Load the backend\n backend = get_backend(backend_name)\n # Find the paths\n click.echo(\"Scanning files... 
\", nl=False)\n paths, size_used = Scanner(config.root_path, patterns).unstored_paths(\n config.index, size * (1024 ** 3)\n )\n click.secho(\"Done\", fg=\"green\")\n if not paths:\n click.secho(\"No files found to add.\", fg=\"yellow\")\n return\n # Print what we found\n for path in paths:\n click.echo(\"> \" + click.style(path, fg=\"blue\"))\n click.echo(\"%s files, %s\" % (len(paths), human_size(size_used)))\n # Prompt to continue\n if not yes:\n if not click.confirm(\"Proceed with build?\"):\n return\n click.echo()\n # Select an unused archive ID\n archive_id = config.index.new_archive_id()\n # Pack the volume\n archive = Archive.from_files(archive_id, paths, config.root_path)\n click.echo(f\"Archive is {archive.id}, size {human_size(archive.size)}\")\n if archive.size < minimum * (1024 ** 3):\n click.echo(\"Archive too small, quitting\")\n sys.exit(1)\n backend.archive_store(config.root_path, archive)\n click.echo(\"Archive stored\")\n config.index.add_archive(archive, backend_name)\n click.echo(\"Archive indexed\")", "def __create_vnf_package(cls, context, vnf_package_info):\n vnf_package = objects.VnfPackage(\n context=context,\n id=vnf_package_info.get('id'),\n onboarding_state=fields.PackageOnboardingStateType.CREATED,\n operational_state=fields.PackageOperationalStateType.DISABLED,\n usage_state=fields.PackageUsageStateType.NOT_IN_USE,\n tenant_id=context.project_id\n )\n vnf_package.create()\n return vnf_package", "def pack(self, replicable):\r\n return self.pack_id(replicable.instance_id)", "def create_package(self, package_id):\n self.client._perform_empty(\n \"POST\", \"/projects/%s/apiservices/%s/packages/%s\" % (self.project_key, self.service_id, package_id))", "def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()", "def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):\n if context is None:\n context = {}\n\n # if current_package_id is given in the context, we increase the number of items in this package\n package_clause = [('result_package_id', '=', context.get('current_package_id', False))]\n existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause,\n context=context)\n todo_operation_ids = []\n if existing_operation_ids:\n if filter_visible:\n todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]\n else:\n todo_operation_ids = existing_operation_ids\n if todo_operation_ids:\n # existing operation found for the given domain and picking => increment its quantity\n operation_id = todo_operation_ids[0]\n op_obj = self.browse(cr, uid, operation_id, context=context)\n qty = op_obj.qty_done\n if increment:\n qty += 1\n else:\n qty -= 1 if qty >= 1 else 0\n if qty == 0 and op_obj.product_qty == 0:\n # we have a line with 0 qty set, so delete it\n self.unlink(cr, uid, [operation_id], context=context)\n return False\n self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)\n else:\n # no existing operation found for the given domain and picking => create a new one\n picking_obj = self.pool.get(\"stock.picking\")\n picking = picking_obj.browse(cr, uid, picking_id, context=context)\n values = {\n 'picking_id': picking_id,\n 'product_qty': 0,\n 'location_id': picking.location_id.id,\n 'location_dest_id': picking.location_dest_id.id,\n 'qty_done': 1,\n }\n for key in domain:\n var_name, dummy, value = key\n uom_id = False\n if var_name == 'product_id':\n uom_id = 
self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id\n update_dict = {var_name: value}\n if uom_id:\n update_dict['product_uom_id'] = uom_id\n values.update(update_dict)\n operation_id = self.create(cr, uid, values, context=context)\n return operation_id", "def __init__(self, flag_band: FlagBand,\n layer: \"datacube_ows.ows_configuration.OWSNamedLayer\") -> None:\n super().__init__({})\n self.layer = layer\n self.bands: Set[str] = set()\n self.bands.add(flag_band.pq_band)\n self.flag_bands = {flag_band.pq_band: flag_band}\n self.product_names = tuple(flag_band.pq_names)\n self.ignore_time = flag_band.pq_ignore_time\n self.declare_unready(\"products\")\n self.declare_unready(\"low_res_products\")\n self.manual_merge = flag_band.pq_manual_merge\n self.fuse_func = flag_band.pq_fuse_func\n # pyre-ignore[16]\n self.main_product = self.products_match(layer.product_names)", "def create_raster_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def pick(self, pack):\n pack_snapshot = pack.copy()\n self.pack_history.append(pack_snapshot)\n pick = self.picker.pick(pack=pack.copy(), cards_owned=self.cards_owned.copy(),\n draft_info=self.draft_info)\n if pick not in pack:\n raise ValueError('Drafter made invalid pick {} from pack {}'.format(pick, pack))\n\n self.cards_owned.append(pick)\n return pick", "def create_package(self, name, command, display_name='', file_urls=[],\n command_timeout_seconds=600, expire_seconds=600, parameters_json_file='',\n verify_filters=[], verify_filter_options=[], verify_expire_seconds=600,\n **kwargs):\n pytan.utils.check_for_help(kwargs=kwargs)\n\n clean_keys = ['obj', 'pytan_help', 'defs']\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)\n\n metadata = kwargs.get('metadata', [])\n metadatalist_obj = pytan.utils.build_metadatalist_obj(properties=metadata)\n\n # bare minimum arguments for new package: name, command\n add_package_obj = taniumpy.PackageSpec()\n add_package_obj.name = name\n if display_name:\n add_package_obj.display_name = display_name\n add_package_obj.command = command\n add_package_obj.command_timeout = command_timeout_seconds\n add_package_obj.expire_seconds = expire_seconds\n add_package_obj.metadata = metadatalist_obj\n\n # VERIFY FILTERS\n if verify_filters:\n verify_filter_defs = pytan.utils.dehumanize_question_filters(\n question_filters=verify_filters\n )\n verify_option_defs = pytan.utils.dehumanize_question_options(\n question_options=verify_filter_options\n )\n verify_filter_defs = self._get_sensor_defs(defs=verify_filter_defs, **clean_kwargs)\n add_verify_group = pytan.utils.build_group_obj(\n q_filter_defs=verify_filter_defs, q_option_defs=verify_option_defs\n )\n h = \"Issue an AddObject to add a Group object for this package\"\n verify_group = self._add(obj=add_verify_group, pytan_help=h, **clean_kwargs)\n\n # this didn't work:\n # add_package_obj.verify_group = verify_group\n add_package_obj.verify_group_id = verify_group.id\n add_package_obj.verify_expire_seconds = verify_expire_seconds\n\n # PARAMETERS\n if parameters_json_file:\n add_package_obj.parameter_definition = pytan.utils.load_param_json_file(\n parameters_json_file=parameters_json_file\n )\n\n # FILES\n if file_urls:\n filelist_obj = taniumpy.PackageFileList()\n for file_url in file_urls:\n # if :: is in file_url, split on it and use 0 as\n # download_seconds\n if '::' in file_url:\n download_seconds, file_url = file_url.split('::')\n else:\n download_seconds = 0\n # if 
|| is in file_url, split on it and use 0 as file name\n # else wise get file name from basename of URL\n if '||' in file_url:\n filename, file_url = file_url.split('||')\n else:\n filename = os.path.basename(file_url)\n file_obj = taniumpy.PackageFile()\n file_obj.name = filename\n file_obj.source = file_url\n file_obj.download_seconds = download_seconds\n filelist_obj.append(file_obj)\n add_package_obj.files = filelist_obj\n\n h = \"Issue an AddObject to add a Group object for this package\"\n package_obj = self._add(obj=add_package_obj, pytan_help=h, **clean_kwargs)\n\n m = \"New package {!r} created with ID {!r}, command: {!r}\".format\n self.mylog.info(m(package_obj.name, package_obj.id, package_obj.command))\n return package_obj", "def _pullbundle2extraprepare(pullop, kwargs):", "def _prune_catalog(self) -> cat.Catalog:\n if self._import is None:\n return self._catalog\n\n needed_ids = self._find_needed_control_ids()\n\n # if a control includes controls - only include those that we know are needed\n final_control_ids = self._prune_controls(needed_ids)\n\n # build the needed groups of controls\n group_dict: Dict[str, cat.Group] = {}\n for control_id in final_control_ids:\n group_id, group_title, group_class = self._catalog_interface.get_group_info(control_id)\n group = group_dict.get(group_id)\n control = self._catalog_interface.get_control(control_id)\n if group is None:\n group = cat.Group(id=group_id, title=group_title, class_=group_class, controls=[control])\n group_dict[group_id] = group\n else:\n group_dict[group_id].controls.append(control)\n\n # find all referenced uuids - they should be 1:1 with those in backmatter\n needed_uuid_refs: Set[str] = self._find_all_uuid_refs(final_control_ids)\n\n # prune the list of resources to only those that are needed\n new_resources: Optional[List[common.Resource]] = []\n if self._catalog.back_matter is not None and self._catalog.back_matter.resources is not None:\n for resource in self._catalog.back_matter.resources:\n if resource.uuid in needed_uuid_refs:\n new_resources.append(resource)\n\n new_groups: Optional[List[cat.Group]] = list(group_dict.values())\n\n # should avoid empty lists so set to None if empty\n new_resources = new_resources if new_resources else None\n new_groups = new_groups if new_groups else None\n\n new_cat = cat.Catalog(\n uuid=str(uuid4()),\n metadata=self._catalog.metadata,\n back_matter=common.BackMatter(resources=new_resources),\n groups=new_groups\n )\n\n return new_cat", "def create_package(self):\n return self.create(\"RequestedPackageLineItem\")", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n 
f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )" ]
[ "0.5840583", "0.55044174", "0.5431738", "0.542185", "0.53571826", "0.52159303", "0.51423734", "0.5080695", "0.5045913", "0.499037", "0.4984317", "0.49737558", "0.49271718", "0.4918655", "0.49071163", "0.4905323", "0.4889021", "0.48848778", "0.48737967", "0.4859566", "0.47781584", "0.47777665", "0.47401568", "0.47062543", "0.469875", "0.46947265", "0.4692887", "0.46815014", "0.46784478", "0.4666836" ]
0.73506504
0
Search for an operation matching the given 'domain' in a picking; if it exists, increment its qty (+1), otherwise create it
def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):
    if context is None:
        context = {}

    # if current_package_id is given in the context, we increase the number of items in this package
    package_clause = [('result_package_id', '=', context.get('current_package_id', False))]
    existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause,
                                         context=context)
    todo_operation_ids = []
    if existing_operation_ids:
        if filter_visible:
            todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]
        else:
            todo_operation_ids = existing_operation_ids
    if todo_operation_ids:
        # existing operation found for the given domain and picking => increment its quantity
        operation_id = todo_operation_ids[0]
        op_obj = self.browse(cr, uid, operation_id, context=context)
        qty = op_obj.qty_done
        if increment:
            qty += 1
        else:
            qty -= 1 if qty >= 1 else 0
        if qty == 0 and op_obj.product_qty == 0:
            # we have a line with 0 qty set, so delete it
            self.unlink(cr, uid, [operation_id], context=context)
            return False
        self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)
    else:
        # no existing operation found for the given domain and picking => create a new one
        picking_obj = self.pool.get("stock.picking")
        picking = picking_obj.browse(cr, uid, picking_id, context=context)
        values = {
            'picking_id': picking_id,
            'product_qty': 0,
            'location_id': picking.location_id.id,
            'location_dest_id': picking.location_dest_id.id,
            'qty_done': 1,
        }
        for key in domain:
            var_name, dummy, value = key
            uom_id = False
            if var_name == 'product_id':
                uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id
            update_dict = {var_name: value}
            if uom_id:
                update_dict['product_uom_id'] = uom_id
            values.update(update_dict)
        operation_id = self.create(cr, uid, values, context=context)
    return operation_id
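Illustrative note: a framework-free sketch of the same search-and-increment behaviour using plain dicts; the in-memory operations list and the function signature are assumptions for illustration only, not the Odoo API.

# Bump qty_done on the first line matching the picking and the given criteria,
# drop a scanned-only line that reaches zero, or create a new line at qty_done = 1.
def search_and_increment(operations, picking_id, match, increment=True):
    """Return the matching (or newly created) operation dict, or False if it was deleted."""
    for op in operations:
        if op["picking_id"] == picking_id and all(op.get(k) == v for k, v in match.items()):
            if increment:
                op["qty_done"] += 1
            elif op["qty_done"] >= 1:
                op["qty_done"] -= 1
            if op["qty_done"] == 0 and op["product_qty"] == 0:
                operations.remove(op)  # scanned-only line emptied out: delete it
                return False
            return op
    operations.append({"picking_id": picking_id, "product_qty": 0, "qty_done": 1, **match})
    return operations[-1]

ops = []
search_and_increment(ops, 7, {"product_id": 42})   # creates the line with qty_done = 1
search_and_increment(ops, 7, {"product_id": 42})   # increments it to 2
print(ops)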
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_id(demand_array, old_iter, new_iter):\r\n #функция для первоначального добавления айдишника\r\n #используется в тех случаях, когда зафиксирована продажа,\r\n #но конкретно такого предмета еще нет в demand\r\n #adding item ID\r\n demand_array.append({\"item_id\": old_iter['item_id']})\r\n #ярлык для наполнения массива дополнительными свойствами, если они есть\r\n def search(value):\r\n nonlocal old_iter\r\n nonlocal demand_array\r\n if value in old_iter:\r\n demand_array[len(demand_array)-1].update({value: old_iter[value]})\r\n search('refine')\r\n search('cards')\r\n search('star_crumbs')\r\n search('element')\r\n search('beloved')\r\n #adding price:sold amount info\r\n if isinstance(new_iter, bool):\r\n _position = demand_array[len(demand_array)-1]\r\n _position[old_iter['price']] = old_iter['amount']\r\n else:\r\n _position = demand_array[len(demand_array)-1]\r\n _position[old_iter['price']] = old_iter['amount'] - new_iter['amount']", "def put_nr(nr: Request, svc) -> Request:\n return nr", "def find_or_create(cls, website, values):\n stores = cls.search([\n ('website', '=', website.id),\n ('magento_id', '=', int(values['group_id']))\n ])\n\n if stores:\n return stores[0]\n\n return cls.create([{\n 'name': values['name'],\n 'magento_id': int(values['group_id']),\n 'website': website.id,\n }])[0]", "def add(self, prod1_name, prod2_name, times):\n if prod1_name == prod2_name:\n return\n try:\n self._purchased.update({PROD1: prod1_name, PROD2: prod2_name, TIMES: {'$exists': True}},\n {'$inc': {TIMES: times}},\n True\n )\n self._purchased.update({PROD1: prod2_name, PROD2: prod1_name, TIMES: {'$exists': True}},\n {'$inc': {TIMES: times}},\n True\n )\n print('add: succeeded')\n return True\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)\n print('add: failed')\n return False", "def handle_nr_create(self, nr: Request, svc) -> Request:\n return self.post_nr(nr, svc)", "def test_single_quant_assign_correct_quant(self):\n Quant = self.env[\"stock.quant\"]\n\n # Create a bunch of identical quants in the same location\n quants = Quant.browse()\n for i in range(5):\n quants |= self.create_quant(self.apple.id, self.test_stock_location_01.id, 10)\n self.assertEqual(len(quants), 5)\n\n quant = quants[2]\n pick = quant.create_picking(self.picking_type_pick, confirm=True, assign=True)\n self.assertEqual(pick.state, \"assigned\")\n self.assertEqual(quant.reserved_quantity, 10)", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 
1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def get_incrementor(self, field: str, create_if_missing=False):\n\n # check if field exists\n if self.coll.find_one({'_id': field}) is None:\n if create_if_missing is False:\n raise UnknownField()\n else:\n self.create(field)\n\n # create incrementor function and return it to the caller\n def func():\n doc = self.coll.find_one_and_update(\n {'_id': field},\n {'$inc': {'next': 1}},\n return_document=ReturnDocument.BEFORE\n )\n if doc is None:\n raise UnknownField()\n\n # note that this is the value before incrementing it.\n return doc['next']\n\n return func", "def _get_id(self, num, comp_dict, attr_name='' , change=False) -> int:\n keys = comp_dict.keys()\n for key in keys:\n b_id, min_v, max_v = key\n if min_v <= num and num < max_v:\n if change and attr_name:\n self.counter_change(attr_name, b_id)\n value_count_dict = self._value_count_dict[attr_name]\n\n if num in value_count_dict:\n value_count_dict[num][0] += 1\n else:\n value_count_dict[num] = [1]\n\n return b_id\n # print('did not find: ...')\n # print(num)\n # print(comp_dict)\n return -1", "def save_nr(nr: Request, svc) -> Request:\n nr = svc.save_request(nr)\n # Return the updated name request\n return nr", "def perform_create(self, serializer):\n instance = serializer.save(\n domain=self.org_safe_get(self.request.user, self.kwargs.get('pk')))", "def pluto_handler(self, pluto_entity):\n self.create_node()\n pluto_entity.create_unique_relationship('QUERIES', self.node,\n radd_fields=self.used_fields)", "def create(self, good, quantity):\n self._haves[good] += quantity", "def handle_nr_update(self, nr: Request, svc) -> Request:\n return self.put_nr(nr, svc)", "def get_unfinished_or_create(cls, subdomain, scan_name):\n counter = cls.all().filter('subdomain =', subdomain\n ).filter('scan_name =', scan_name\n ).order('-timestamp').get()\n if not counter or not counter.last_key:\n counter = Counter(subdomain=subdomain, scan_name=scan_name)\n return counter", "def test_creation_of_duplicate_service_in_store(self):\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n response3 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response3.status, \"409 CONFLICT\")\n self.assertIn(\"Sorry. 
Live at the yard already exists in this store.\", str(response3.data))", "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def increment_name(base, existing):\r\n if not base in existing:\r\n return base\r\n n = 1\r\n make_name = lambda: base + str(n)\r\n while make_name() in existing:\r\n n += 1\r\n return make_name()", "def test_find_by_code(self):\n self.assertEqual(len(Promotion.all()), 0)\n codes = ['SAVE15', 'SAVE20', 'SAVE30']\n counts = [10, 15, 2]\n for count, code in zip(counts, codes):\n PromotionFactory.batch_create(count, code=code)\n\n for count, code in zip(counts, codes):\n promotions = Promotion.find_by_code(code)\n self.assertEqual(len(promotions), count)\n for promotion in promotions:\n self.assertEqual(promotion.code, code)", "def test_add_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person_with_duplicate_name = Person(name=\"Andy\", age=80)\n\n with pytest.raises(NotUniqueException):\n test_store.add(person_with_duplicate_name)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def add_to_inv(self, item):\n for obj in self.inv:\n if obj.name == item.name:\n self.inv[obj] += 1\n break\n else:\n self.inv[item] = 1", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = 
shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def xtest_adding_a_new_item_with_no_supply(self): \n print '\\n'\n logger.debug('Add a new item to a current PO via PUT')\n print '\\n'\n \n #Verifying po in database\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n self.assertEqual(self.po.grand_total, Decimal('129.58'))\n self.assertEqual(timezone('Asia/Bangkok').normalize(self.po.order_date).date(), datetime.datetime.now().date())\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.quantity, 10)\n self.assertEqual(item.total, Decimal('121.1'))\n \n modified_po_data = copy.deepcopy(base_purchase_order)\n modified_po_data['items'][1]['unit_cost'] = Decimal('11.99')\n modified_po_data['items'][1]['comments'] = 'test change'\n modified_po_data['items'][1]['description'] = \"test description change\"\n modified_po_data['status'] = 'PROCESSED'\n\n logger.debug(modified_po_data)\n\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po_data)\n \n #Verify the response\n self.assertEqual(resp.status_code, 200, msg=resp)\n po = resp.data\n self.assertEqual(po['id'], 1)\n self.assertEqual(po['supplier']['id'], 1)\n self.assertEqual(po['vat'], 7)\n #self.assertEqual(Decimal(po['grand_total']), Decimal('74.85'))\n self.assertEqual(po['discount'], 0)\n self.assertEqual(po['revision'], 1)\n self.assertEqual(len(po['items']), 2)\n #self.assertEqual(po['status'], 'PAID')\n #Check the new pdf\n #webtbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(po['pdf']['url'])\n \n item1 = po['items'][0]\n logger.debug(item1)\n self.assertEqual(item1['id'], 2)\n self.assertEqual(item1['quantity'], '10.0000000000')\n self.assertEqual(item1['description'], u'Pattern: Maxx, Col: Blue')\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('12.1100'))\n self.assertEqual(Decimal(item1['total']), Decimal('121.10'))\n\n item2 = po['items'][1]\n logger.debug(item2)\n self.assertEqual(item2['id'], 3)\n self.assertEqual(item2['quantity'], '3.0000000000')\n self.assertEqual(item2['comments'], 'test change')\n self.assertEqual(item2['description'], 'test description change')\n self.assertEqual(Decimal(item2['unit_cost']), Decimal('11.99'))\n self.assertEqual(Decimal(item2['total']), Decimal('35.97'))\n \n #Verify database record\n po = PurchaseOrder.objects.get(pk=1)\n \n self.assertEqual(po.supplier.id, 1)\n self.assertEqual(po.status, 'PROCESSED')\n #self.assertEqual(timezone('Asia/Bangkok').normalize(po.order_date), datetime.datetime.now().date())\n self.assertEqual(po.vat, 7)\n self.assertEqual(po.grand_total, Decimal('168.07'))\n self.assertEqual(po.items.count(), 2)\n \n # Check new item in the database\n item2_d = po.items.all().order_by('id')[1]\n self.assertEqual(item2_d.id, 203)\n self.assertEqual(item2_d.description, 'test description change')\n self.assertEqual(item2_d.comments, 'test change')\n self.assertEqual(item2_d.quantity, 3)\n self.assertEqual(item2_d.unit_cost, Decimal('11.99'))\n self.assertEqual(item2_d.total, Decimal('35.97'))\n\n # Check new supply product in the database\n products = SupplyProduct.objects.filter(supply=item2_d.supply, supplier=self.po.supplier)\n self.assertEqual(products.count(), 1)\n product = products.all()[0]\n self.assertEqual(product.supply.id, item2_d.supply.id)\n self.assertEqual(product.supplier.id, self.po.supplier.id)\n self.assertEqual(product.cost, Decimal('11.99'))", "def 
do(data, **kwargs):\n l = kwargs.get('logger')\n do_number = kwargs.get(u'do_number')\n\n l.info(\n u'#{} Do ADD. Segment {}, {} items.'\n .format(\n u'-' * 8, do_number, len(data)\n )\n )\n \n # Processing logic here\n result = sum(data)\n\n return {'items_processed': len(data), 'result': result}", "def update_or_create_delivery(self, orderitem_data):", "def create_correlation_quantity_indexed_docs():\n pn_store = MemoryStore()\n q_store = MemoryStore()\n m_store = MemoryStore()\n with open(os.path.join(CORR_TEST_DIR, \"correlation_propnet_data.json\"), 'r') as f:\n data = json.load(f)\n pn_store.connect()\n pn_store.update(jsanitize(data, strict=True, allow_bson=True))\n sb = SeparationBuilder(pn_store, q_store, m_store)\n r = Runner([sb])\n r.run()\n q_data = list(q_store.query(criteria={}, properties={'_id': False}))\n dumpfn(q_data, os.path.join(CORR_TEST_DIR, \"correlation_propnet_quantity_data.json\"))", "def create_now(self,cr,uid,ids,context=None):\n vals={}\n if context is None:\n context = {}\n trans_brw = self.browse(cr,uid,ids,context=context)\n so_l_obj = self.pool.get('sale.order.line')\n product_obj = self.pool.get('product.uom')\n if context['active_ids']:\n so_l_brw = so_l_obj.browse(cr,uid,context['active_ids'][0],context=context)\n sale_quantity = so_l_brw.product_uom_qty\n for i in trans_brw:\n for line in i.sale_order_line_id:\n quantity = line.quantity1\n diff = round(sale_quantity - quantity,4)\n if diff > 0:\n if line.length1 and line.heigth1:\n vals = {\n 'prod_lot_id':line.lot_id and line.lot_id.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':quantity,\n }\n \n sale_quantity = diff\n current_move = so_l_obj.copy(cr, uid,context['active_ids'][0] , vals, context=context)\n \n if diff == 0 or diff < 0:\n vals = {\n 'prod_lot_id':line.lot_id and line.lot_id.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':line.quantity1,\n }\n \n so_l_obj.write(cr, uid,context['active_ids'][0],vals)\n if diff > 0:\n if line.length1 and line.heigth1:\n pieces = product_obj._compute_pieces2(cr, uid,so_l_brw.product_id.stock_driver, diff, line.length1, line.heigth1, line.width1)\n vals = {\n 'prod_lot_id':False,\n 'pieces': pieces,\n 'product_uom_qty':diff,\n }\n so_l_obj.write(cr, uid,context['active_ids'][0],vals)\n \n return True", "def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):\n if operation_filter_ids is None:\n operation_filter_ids = []\n stock_operation_obj = self.pool.get('stock.pack.operation')\n package_obj = self.pool.get('stock.quant.package')\n stock_move_obj = self.pool.get('stock.move')\n package_id = False\n for picking_id in picking_ids:\n operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]\n if operation_filter_ids != []:\n operation_search_domain.append(('id', 'in', operation_filter_ids))\n operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)\n pack_operation_ids = []\n if operation_ids:\n for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):\n # If we haven't done all qty in operation, we have to split into 2 operation\n op = operation\n if (operation.qty_done < operation.product_qty):\n new_operation = stock_operation_obj.copy(\n cr,\n uid,\n operation.id,\n {'product_qty': operation.qty_done, 'qty_done': operation.qty_done},\n context=context\n )\n stock_operation_obj.write(\n cr,\n uid,\n operation.id,\n {'product_qty': operation.product_qty - operation.qty_done, 'qty_done': 0},\n context=context\n )\n 
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)\n pack_operation_ids.append(op.id)\n if op.product_id and op.location_id and op.location_dest_id:\n stock_move_obj.check_tracking_product(\n cr,\n uid,\n op.product_id,\n op.lot_id.id,\n op.location_id,\n op.location_dest_id,\n context=context\n )\n package_id = package_obj.create(cr, uid, {}, context=context)\n stock_operation_obj.write(\n cr,\n uid,\n pack_operation_ids,\n {'result_package_id': package_id},\n context=context\n )\n return package_id", "def get_or_create(self, cr, uid, dimension_id, value_name, context=None):\n ids = self.search(cr, uid, [('name', '=', value_name), ('dimension_id', '=', dimension_id)])\n if not len(ids):\n id = self.create(cr, uid, {'name': value_name, 'code': value_name, 'dimension_id': dimension_id})\n else:\n id = ids[0]\n return id", "def add_to_items(items, name, size, price):\n index = items_contains_name(items, name)\n if index == 0:\n temp = {'name': name, 'size': size, 'count': 1, 'price': price}\n items.append(temp)\n else:\n items[index]['count'] = items[index]['count'] + 1\n return items" ]
[ "0.46932408", "0.463366", "0.46036744", "0.4586839", "0.4582527", "0.45816582", "0.45693213", "0.45557344", "0.44739816", "0.44731164", "0.44641668", "0.4427717", "0.44171816", "0.44152266", "0.43981063", "0.43791568", "0.43735072", "0.43704206", "0.4368647", "0.43574724", "0.4346654", "0.4345042", "0.43436292", "0.43299672", "0.43260345", "0.43222004", "0.43153837", "0.4309975", "0.42842376", "0.42810097" ]
0.6647632
0
Used by the barcode interface to create a new lot and assign it to the operation
def create_and_assign_lot(self, cr, uid, id, name, context=None):
    obj = self.browse(cr, uid, id, context)
    product_id = obj.product_id.id
    val = {'product_id': product_id}
    new_lot_id = False
    if name:
        lots = self.pool.get('stock.production.lot').search(
            cr, uid,
            ['&', ('name', '=', name), ('product_id', '=', product_id)],
            context=context
        )
        if lots:
            new_lot_id = lots[0]
        val.update({'name': name})
    if not new_lot_id:
        new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)
    self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)
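Illustrative note: a simplified find-or-create sketch of the lot assignment above; the in-memory lots list stands in for stock.production.lot and the function signature is an assumption made purely for illustration.

# Reuse an existing lot matching (name, product) or create a new one, then assign it
# to the operation (illustrative, framework-free version of the logic above).
def create_and_assign_lot(operation, lots, name=None):
    """Assign an existing or freshly created lot to the given operation dict."""
    product_id = operation["product_id"]
    lot = None
    if name:
        lot = next((l for l in lots
                    if l["name"] == name and l["product_id"] == product_id), None)
    if lot is None:
        lot = {"id": len(lots) + 1, "name": name, "product_id": product_id}
        lots.append(lot)
    operation["lot_id"] = lot["id"]
    return lot

lots, op = [], {"product_id": 42}
create_and_assign_lot(op, lots, name="LOT-001")
print(op)   # {'product_id': 42, 'lot_id': 1}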
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def prepare_empty_lot(self, n):\n start_new_cmd = \"create_parking_lot\"\n return self.controller.execute(start_new_cmd, *(n,))", "def create_parking_lot(self, allow_slots):\n allow_slots = int(allow_slots)\n\n if len(self.slots) > 0:\n print(\"Parking Lot is already created\")\n return\n\n if allow_slots < 1:\n print(\"Number of slot: %s provided is incorrect.\" % allow_slots)\n return\n\n for i in range(1, allow_slots + 1):\n self.slots[i] = Slot(slot_no=i, available=True)\n print(\"Created a parking lot with %s slots\" % allow_slots)", "def test_execute_start_new(self):\n # Setup params\n n_slots = 10\n start_new_cmd = \"create_parking_lot\"\n start_new_args = (n_slots,)\n\n # Verify command is able to execute start new parking lot\n _, output = self.controller.execute(start_new_cmd, *start_new_args)\n self.assertEqual(output, n_slots)", "def new_object(self):\r\n\t\tpass", "def lot_assigned(self,cr,uid,ids,context=None):\n vals={}\n if context is None:\n context = {}\n trans_brw = self.browse(cr,uid,ids,context=context)\n so_l_obj = self.pool.get('sale.order.line')\n so_obj = self.pool.get('sale.order')\n product_obj = self.pool.get('product.uom')\n lot_obj = self.pool.get('stock.production.lot')\n so_line = []\n if context['active_ids']:\n so_brw = so_obj.browse(cr,uid,context['active_ids'][0],context=context)\n for i in trans_brw:\n for line in i.sale_order_line_id:\n lot_brw = lot_obj.browse(cr,uid,line.lot_id.id,context=context)\n res = 
so_l_obj.product_id_change( cr, uid, ids, so_brw.pricelist_id.id, lot_brw.product_id.id, qty=line.quantity1,\n uom=False, qty_uos=0, uos=False, name='', partner_id=so_brw.partner_id.id,\n lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False)\n \n so_line.append((0,0,{'product_id':lot_brw.product_id and lot_brw.product_id.id,\n 'prod_lot_id':lot_brw and lot_brw.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':line.quantity1,\n 'product_uom':lot_brw.product_id.uom_id and lot_brw.product_id.uom_id.id,\n 'name':lot_brw.product_id and lot_brw.product_id.name,\n 'price_unit':res.values()[2]['price_unit'],\n 'delay':res.values()[2]['delay'],\n 'type':res.values()[2]['type'],\n }))\n so_obj.write(cr,uid,context['active_ids'],{'order_line':so_line},context=context)", "def create_parking_lot(data):\n size = int(data['size'])\n PARKING_LOT[0] = size\n for i in range(1, size + 1):\n PARKING_LOT.append(SlotVehicleDriverMapping())\n return 'Created parking of {} slots'.format(size)", "def create(self, request):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project()\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n project.lotId = lot\n #projectNote=projectNote\n\n\n try:\n project.save()\n serializer = ProjectSerializer(project, context={'request': request}) #converting data into json\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "def do_produce(self, cr, uid, ids, context=None):\r\n production_id = context.get('active_id', False)\r\n assert production_id, \"Production Id should be specified in context as a Active ID.\"\r\n data = self.browse(cr, uid, ids[0], context=context)\r\n lot_id = self.pool.get('stock.production.lot').create(cr,uid,{'name':data.lot_str,'product_id':data.product_id.id})\r\n self.write(cr,uid,ids,{'lot_id':lot_id})\r\n self.pool.get('mrp.production').action_produce(cr, uid, production_id,\r\n data.product_qty, data.mode, data, context=context)\r\n return {}", "def lot_serial_nbr(self, lot_serial_nbr):\n\n self._lot_serial_nbr = lot_serial_nbr", "def create(self):", "def create_work_item(self):", "def create_now(self,cr,uid,ids,context=None):\n vals={}\n if context is None:\n context = {}\n trans_brw = self.browse(cr,uid,ids,context=context)\n so_l_obj = self.pool.get('sale.order.line')\n product_obj = self.pool.get('product.uom')\n if context['active_ids']:\n so_l_brw = so_l_obj.browse(cr,uid,context['active_ids'][0],context=context)\n sale_quantity = so_l_brw.product_uom_qty\n for i in trans_brw:\n for line in i.sale_order_line_id:\n quantity = line.quantity1\n diff = round(sale_quantity - quantity,4)\n if diff > 0:\n if line.length1 and line.heigth1:\n vals = {\n 'prod_lot_id':line.lot_id and line.lot_id.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':quantity,\n }\n \n sale_quantity = diff\n current_move = so_l_obj.copy(cr, uid,context['active_ids'][0] , vals, context=context)\n \n if diff == 0 or diff < 0:\n vals = {\n 'prod_lot_id':line.lot_id and line.lot_id.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':line.quantity1,\n }\n \n so_l_obj.write(cr, uid,context['active_ids'][0],vals)\n if diff > 0:\n if line.length1 and line.heigth1:\n pieces = product_obj._compute_pieces2(cr, uid,so_l_brw.product_id.stock_driver, diff, line.length1, 
line.heigth1, line.width1)\n vals = {\n 'prod_lot_id':False,\n 'pieces': pieces,\n 'product_uom_qty':diff,\n }\n so_l_obj.write(cr, uid,context['active_ids'][0],vals)\n \n return True", "def newInsertionNeedleSet(self):\r\n # productive #onButton\r\n profbox()\r\n widget = slicer.modules.NeedleFinderWidget\r\n if widget.newInsertionButton:\r\n dialog = qt.QDialog()\r\n messageBox = qt.QMessageBox.information(dialog, 'Information', 'You are creating a new set of needles')\r\n self.round += 1\r\n widget.newInsertionButton.setText('Start a new set of needles - Round ' + str(self.round + 1) + '?')\r\n widget.deleteNeedleButton.setText('Delete Needles from round ' + str(self.round))", "def onchange_begin_transaction(self,cr,uid,ids,lot_id,pieces,length,heigth,width,context=None):\n if context is None:\n context = {}\n res = {'value':{}}\n \n if lot_id:\n \n lot_obj = self.pool.get('stock.production.lot')\n product_obj = self.pool.get('product.uom')\n lot_brw = lot_obj.browse(cr,uid,lot_id,context=context)\n area = lot_brw.virtual\n \n if lot_brw.product_id.stock_driver == 'normal' :\n res['value'].update({'factor': 3})\n if lot_brw.product_id.stock_driver == 'tile' :\n res['value'].update({'factor': 2})\n if lot_brw.product_id.stock_driver == 'slab' :\n res['value'].update({'factor': 1})\n if lot_brw.product_id.stock_driver == 'block' :\n res['value'].update({'factor': 0})\n \n res['value'].update({'length':lot_brw.length})\n res['value'].update({'length1':lot_brw.length})\n res['value'].update({'heigth':lot_brw.heigth})\n res['value'].update({'heigth1':lot_brw.heigth})\n res['value'].update({'width':lot_brw.width})\n res['value'].update({'width1':lot_brw.width})\n \n if lot_brw.product_id.stock_driver == 'tile' :\n if pieces == False:\n pieces = product_obj._compute_pieces2(cr, uid,lot_brw.product_id.stock_driver, lot_brw.virtual, lot_brw.length, lot_brw.heigth, lot_brw.width)\n else:\n area = product_obj._compute_area(cr, uid,lot_brw.product_id.stock_driver, pieces, lot_brw.length, lot_brw.heigth, lot_brw.width)\n res['value'].update({'length':lot_brw.length})\n res['value'].update({'length1':lot_brw.length})\n res['value'].update({'heigth':lot_brw.heigth})\n res['value'].update({'heigth1':lot_brw.heigth})\n res['value'].update({'width':lot_brw.width})\n res['value'].update({'width1':lot_brw.width})\n res['value'].update({'pieces_qty':pieces})\n res['value'].update({'pieces_qty1':pieces})\n res['value'].update({'quantity':area})\n res['value'].update({'quantity1':area})\n\n if lot_brw.product_id.stock_driver in ('slab','block'):\n pieces = 1\n area = product_obj._compute_area(cr, uid,lot_brw.product_id.stock_driver, pieces,length,heigth,width)\n res['value'].update({'quantity': area})\n res['value'].update({'quantity1': area})\n \n if lot_brw.virtual == 0:\n raise osv.except_osv(_('Processing Error'), _('The lot specified is not available in the stock')\\\n ) \n return res", "def create(self):\n ...", "def action_active_lot(self, cr, uid, ids, context=None):\n context = context or {}\n wol_obj = self.pool.get('mrp.workorder.lot')\n consume = self.browse(cr, uid, ids, context=context)[0]\n wol_obj.write(cr, uid, consume.wo_lot_id.id,\n {'state': 'picking'}, context=context)\n return True", "def new(self):\n self.__buttons.setDisabled(False)\n self.__service = None\n self.name.setFocus()\n self.name.setText(\"\")\n self.threadable.setChecked(False)\n self.min_cores.setValue(100)\n self.max_cores.setValue(100)\n self.min_memory.setValue(3276)\n self.min_gpu_memory.setValue(self.gpu_min_mb)\n 
self.timeout.setValue(0)\n self.timeout_llu.setValue(0)\n self.min_memory_increase.setValue(2048)\n self._tags_w.set_tags(['general'])", "def newInsertionNeedleSet(self):\n #productive #onButton\n profbox()\n widget = slicer.modules.NeedleFinderWidget\n if widget.newInsertionButton:\n dialog = qt.QDialog()\n messageBox = qt.QMessageBox.information( dialog, 'Information','You are creating a new set of needles')\n self.round +=1\n widget.newInsertionButton.setText('Start a new set of needles - Round ' + str(self.round+1)+'?')\n widget.deleteNeedleButton.setText('Delete Needles from round ' + str(self.round))", "def action_consume(self, cr, uid, ids, context=None):\n context = context or {}\n wol_obj = self.pool.get('mrp.workorder.lot')\n # TODO check this method. the super is apply but the res is not return\n super(MrpConsume, self).action_consume(\n cr, uid, ids, context=context)\n if context.get('active_model', False) == 'mrp.workorder.lot':\n wol_id = context.get('active_id', False)\n if wol_id:\n wol_obj.write(cr, uid, wol_id, {'state': 'open'},\n context=context)\n else:\n raise osv.except_osv(\n _('Error!'),\n _('No valid operation. no work order lot active_id.')\n )\n\n # refresh kaban view\n view_id, search_view_id, action_help = \\\n self._get_kanban_view_data(cr, uid, context=context)\n\n return {\n 'view_id': view_id,\n 'view_type': 'form',\n 'view_mode': 'kanban',\n 'views': [(view_id, 'kanban')],\n 'search_view_id': search_view_id,\n 'res_model': 'mrp.workorder.lot',\n 'type': 'ir.actions.act_window',\n 'target': 'inlineview',\n 'context': {'search_default_wol_picking': True},\n 'help': action_help\n }", "def __init__(self):\n\n self.operations = {}", "def make_operation_space():\n operation_space = {}\n\n # Set integInfo and integBranch\n operation_space['prepare_delenv'] = rmdmod.PrepareDelEnvOperation()\n\n # Call p4 integ for delete revisions\n operation_space['call_p4_integ'] = rmdmod.CallIntegOperation()\n\n # checkout README and place into a pending cln\n operation_space['create_changelist'] = rmdmod.CreateChangelistOperation()\n\n # open file for edit within changelist\n operation_space['reopen'] = rmdmod.ReopenOperation()\n\n # list history of deleted files\n operation_space['list_history'] = rmdmod.ListDelHistoryOperation()\n\n return operation_space", "def btn_create_order_pro(self):\n\t\tprint()\n\t\tprint('treatment - btn_create_order_pro')\n\n\t\t# Search Partner\n\t\tpartner = tre_funcs.get_partner(self, self.patient.name)\n\n\t\t# Search pricelist\n\t\tpricelist = tre_funcs.get_pricelist(self)\n\n\t\t# Search product\n\t\t# Create Product tuple\n\t\tproduct_tup = []\n\t\t#for service in self.service_all_ids:\n\t\tfor service in self.service_ids:\n\t\t\t#print()\n\t\t\t#print('* Create Product tuple')\n\t\t\t#print(service)\n\t\t\t#print(service.service)\n\t\t\t#print(service.service.name)\n\t\t\t#print(service.qty)\n\t\t\t#print(service.service.list_price)\n\t\t\t\n\t\t\t# Init\n\t\t\tproduct_template = service.service\n\t\t\tname = service.service.name\n\t\t\tqty = service.qty\n\t\t\tprice = service.service.list_price\n\t\t\t\n\t\t\t# Check Exceptions\n\t\t\ttry:\n\t\t\t\tprice_list = '2019'\n\t\t\t\tproduct = tre_funcs.get_product_product(self, name, price_list)\n\t\t\t\tproduct_tup.append((product, qty, price))\n\n\t\t\texcept Exception:\n\t\t\t\tprint('ERROR - Treatment - Product not in 2019 price_list !')\n\t\t\t\tprint('Search in other price_lists')\n\n\t\t\t\ttry:\n\t\t\t\t\tprice_list = False\n\t\t\t\t\tproduct = tre_funcs.get_product(self, name, 
price_list)\n\t\t\t\t\tprint(product)\n\t\t\t\t\tproduct_tup.append((product, qty, price))\n\n\t\t\t\texcept Exception:\n\t\t\t\t\tprint('ERROR - Treatment - Product Not Available at all !!!!!')\n\n\t\t\t#else:\n\t\t\t#\tprint('jx - Else !')\n\t\t\t\t#pass\n\n\n\t\t\t# Check \n\t\t\ttre_funcs.check_product(self, '2019', product, product_template)\n\t\t\n\t\t# Create order \n\t\torder = pl_creates.create_order(self, partner.id, pricelist.id, product_tup)\n\t\tprint(order)\n\n\t\t# Open Order\n\t\treturn action_funcs.open_order(order)", "def _create_petition_(self):\n self.__weather = create(self.__latitude, self.__longitude)", "def create_stock(self):\n if len(dummy_stock) > 0:\n self.product_name, self.product_id, self.availability, self.stock, self.price = dummy_stock.pop()\n else:\n raise StopLocust\n\n create_stock_respone = self.client.post(\"/stock/item/create/\", data=json.dumps({\n 'product_name': self.product_name, 'price': self.price}), headers={'content-type': 'application/json'})\n\n stock_add_response = None\n try:\n if create_stock_respone:\n if json.loads(create_stock_respone.content)['success']:\n product_id = json.loads(create_stock_respone.content)['product_id']\n stock_add_response = self.client.post(\"/stock/add/{0}/{1}\".format(product_id, self.stock),\n headers={'content-type': 'application/json'})\n logging.info('%s added to stock', product_id)\n\n else:\n logging.info('Failed to add to stock')\n else:\n logging.info('ERROR_HERE' + json.loads(create_stock_respone.content)['message'])\n except JSONDecodeError as jde:\n logging.info('ERROR_HERE' + str(jde.doc))\n\n try:\n if stock_add_response:\n if json.loads(stock_add_response.content)['success']:\n created_ids['product_ids'] += [str(product_id)]\n logging.info('Created %s products %s with id= %s ', self.stock, self.product_name, product_id)\n else:\n logging.info('Failed to add products with product id= %s', product_id)\n else:\n logging.info('ERROR_HERE' + json.loads(create_stock_respone.content)['message'])\n except JSONDecodeError as jde:\n logging.info('ERROR_HERE' + str(jde.doc))", "def newLayer(self):\n self.currentLayerData = Layer.LayerData()\n self.layers.append(self.currentLayerData)\n self.addRow(self.currentLayerData)\n self.selected = self.jobRow.child(self.getCurrentRow() + 1)\n self.setSelectedFromItem(self.selected)\n self.updateDependLabels()", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create_order(self):\n\tprint()\n\tprint('OH - pl_create_order')\n\n\t# Search Partner\n\tprint()\n\tprint('Search partner')\n\tpartner = self.env['res.partner'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', self.patient.name),\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='appointment_date desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,)\n\n\t# Search Pl\n\tprint()\n\tprint('Search pricelist')\n\tpricelist = self.env['product.pricelist'].search([\n\t\t\t\t\t\t\t\t\t\t\t#('active', 'in', [True]),\n\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t#order='x_serial_nr asc',\n\t\t\t\t\t\t\t\t\t\t\tlimit=1,\n\t\t\t\t\t\t\t\t\t\t)\n\tprint(pricelist)\n\n\t# Create Order\n\torder = self.env['sale.order'].create({\n\t\t\t\t\t\t\t\t\t\t\t\t\t'state':'draft',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_doctor': self.physician.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'partner_id': self.partner_id.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'partner_id': partner.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_ruc': self.partner_id.x_ruc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_dni': 
self.partner_id.x_dni,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'patient': self.patient.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc': self.patient.x_id_doc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc_type': self.patient.x_id_doc_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_family': 'procedure',\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'treatment': self.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'pricelist_id': pricelist.id,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t#print(order)\n\n\n\n\t# Create Order Lines\n\tfor cart_line in self.shopping_cart_ids:\n\n\t\tproduct = cart_line.product\n\n\t\t#print(product)\n\t\t#print(product.name)\n\n\t\t# Create Order Line\n\t\tol = order.order_line.create({\n\t\t\t\t\t\t\t\t\t\t'name': \t\tproduct.name,\n\t\t\t\t\t\t\t\t\t\t'product_id': \tproduct.id,\n\t\t\t\t\t\t\t\t\t\t'price_unit': \tcart_line.price,\n\t\t\t\t\t\t\t\t\t\t'product_uom_qty': cart_line.qty,\n\t\t\t\t\t\t\t\t\t\t'order_id': \torder.id,\n\t\t\t\t\t\t\t\t\t})\n\treturn order" ]
[ "0.6130646", "0.59913564", "0.59538823", "0.58391386", "0.57997227", "0.5788583", "0.5747826", "0.5601751", "0.5560619", "0.5560485", "0.554676", "0.5544482", "0.55205005", "0.5441034", "0.5437646", "0.5388805", "0.53761446", "0.5363241", "0.53601325", "0.5335545", "0.5300332", "0.5286971", "0.5238215", "0.52371687", "0.52263707", "0.52252", "0.52220803", "0.52220803", "0.52220803", "0.5205104" ]
0.6627396
0
Check ~/.planning.domains exists, and is not a file
def checkExists(pd_dir): if os.path.isfile(pd_dir): print("Fatal error: need to store settings in {0}, but there is a file with that name".format(pd_dir)) exit(1) if not os.path.isdir(pd_dir): print(""" == Pre-release client for planning.domains == This is pre-release software, for accessing the content on api.planning.domains. It is released without warranty (including the implied warranties of merchantability or fitness for a particular purpose). Send bug reports to Andrew Coles ([email protected]) or Christian Muise ([email protected]) """) print("Making directory {0}...\n".format(pd_dir)) try: os.mkdir(pd_dir) except OSError: print("Cannot make directory") exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkDNSInstallDir(self):\n try:\n dnsDir = self.getDNSInstallDir()\n except IOError:\n dnsDir = None\n if not dnsDir:\n fatal_error('no valid DNSInstallDir found, please repair in Config program or Configuration GUI')\n pass", "def server_has_non_default_configuration(self):\n if not os.path.isdir(YPSERV_DIR_PATH):\n return False\n\n return any(f not in YPSERV_DEFAULT_FILES for f in os.listdir(YPSERV_DIR_PATH))", "def in_maintenance_mode():\n return os.path.exists(\"maintenance.txt\")", "def inHome(resname):\n prv_filename = os.path.join(os.getenv(\"HOME\"), \".aphla\", resname)\n if os.path.exists(prv_filename):\n return True\n else:\n return False", "def check_file_exist(self):\n return False", "def test_get_predefined_config_path_domain_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"object_detection\",\n )", "def security_vars_exists():\n return os.path.exists(SECURITY_PATH)", "def test_not_in_domain(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._empty_domain_list = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def check_draft_exist(draft_location):\n if os.path.isfile(draft_location):\n return True\n else:\n return False", "def test_validate_domain_ok():\n domain = 'python.org'\n assert howisresolved.validate_domain(domain) is None", "def is_valid_production_root(path: pathlib.Path) -> bool:\n if not path.is_absolute():\n return False\n if not path.exists():\n return False\n if not path.is_dir():\n return False\n config_file_path = get_production_config_file_path(path)\n return config_file_path.exists()", "def in_dataflow():\n if os.environ.get(\"HOME\") == \"/home/dataflow\":\n return True\n return False", "def __check_config(self):\n if not os.path.exists(self.__config_path):\n return False\n else:\n return True", "def get_valid_domains():\n msg = ''\n import glob\n validDomains = []\n for f in glob.glob('{}/*'.format(OPT_MANAGER_RESOURCES_PGAAS)):\n try:\n with open(f, \"r\") as fp:\n try:\n tmpdata = json.load(fp)\n if 'pubkey' in tmpdata:\n validDomains.append(os.path.basename(f))\n except: # pylint: disable=bare-except\n pass\n except: # pylint: disable=bare-except\n pass\n if len(validDomains) == 0:\n msg += '\\nNo valid PostgreSQL cluster information was found'\n else:\n msg += '\\nThese are the valid PostgreSQL cluster domains found on this manager:'\n for v in validDomains:\n msg += '\\n\\t\"{}\"'.format(v)\n return msg", "def file_exist() -> bool:\n pass", "def user_conf_dir_exists(self):\n return os.path.exists(self.user_conf_dir())", "def is_config_exist(self) -> bool:\n return True", "def checkEnvVar(self):\n for path in self.config.options('ENV'):\n if (self.config.get('ENV', path)).startswith('/'):\n print (\"Checking path for \"+path).ljust(65, '.'),\n if not os.path.exists(self.config.get('ENV', path)):\n print \"[ Failed ]\"\n print \"\\n***ERROR: %s not found. 
Check the config file.\" % path\n sys.exit()\n else:\n print \"[ OK ]\"", "def validate_configdir(configdir):\r\n if (configdir and configdir != '/' and\r\n configdir != '~' and\r\n configdir != os.path.abspath(os.path.expanduser('~'))):\r\n return True\r\n\r\n return False", "def task_dir_is_valid(task_dir: str) -> bool:\n return True", "def config_env_var_verify():\n with open('skywalking/config.py', 'r') as config_file:\n data = config_file.read().replace('\\n', '')\n for each in OPTIONS.keys():\n if f'_{each.upper()}' not in data:\n raise Exception(f'Environment variable for {each.upper()} is not found in config.py\\n'\n f'This means you have a mismatch of config.py variable and env var name')", "def test_robots_txt_can_be_loaded(self): \n # main domain robots.txt\n robots_url = \"/robots.txt/\"\n \n # robots.txt for each project, which by bots can be seen as seperate\n # domain beacuse we use dubdomains to designate projects\n robots_url_project = reverse(\"comicsite_robots_txt\",\n kwargs={\"site_short_name\":self.testproject.short_name})\n \n self._test_url_can_be_viewed(None,robots_url) # None = not logged in\n self._test_url_can_be_viewed(None,robots_url_project) # None = not logged in", "def test_6_1_3_etc_group_exists(host):\n assert host.file(ETC_PASSWD_DASH).exists", "def check_execution_path():\n file_name = \"LICENSE\"\n if not os.path.exists(file_name):\n logging.error(\n \"Don't execute the script from a sub-directory. \"\n \"Switch to the root of the project folder\"\n )\n return False\n return True", "def is_config_exist(self) -> bool:\n pass", "def __check_in_autonotes_dir():\n if not os.path.isfile('master.tex'):\n cli.log.error(f'I can\\'t find a {emph(\"master.tex\")} file, '\n 'are you inside an autonotes directory?')\n exit(3)", "def check_config_file(self, file):\n (var1, var2) = file.split(\".\")\n try:\n f = os.path.join(self.config[var1][\"directory\"],\n self.config[var1][var2])\n if os.path.exists(f) or os.path.lexists(f):\n if os.path.islink(f) is False:\n raise ProfileCheckError(\"'%s' is in a bad config\" % f)\n\n except KeyError:\n raise ProfileKeyError(\"no value for %s.%s\" % (var1, var2))", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)", "def exists(_env):\n return True", "def exists(_env):\n return True" ]
[ "0.5942702", "0.5712539", "0.56750417", "0.5662834", "0.5592347", "0.55438226", "0.5541979", "0.5524914", "0.5514341", "0.549784", "0.54763305", "0.5436257", "0.5427554", "0.5411611", "0.540421", "0.5377908", "0.5346322", "0.53399336", "0.53397995", "0.5331371", "0.5325626", "0.5311302", "0.53039587", "0.5292448", "0.52576876", "0.52495736", "0.5240575", "0.5236869", "0.52278507", "0.52278507" ]
0.605463
0
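Illustrative only, not a dataset field: a minimal driver for the checkExists helper in the row above, assuming that function is in scope. The path construction is an assumption taken from the query text ("~/.planning.domains"); everything else comes from the record itself.

import os

# Hypothetical caller: build the settings path and make sure the directory exists.
pd_dir = os.path.join(os.path.expanduser("~"), ".planning.domains")
checkExists(pd_dir)  # prints the pre-release notice and creates the directory on first run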
Show an object of type sub that matches the id arg.
def show(sub, arg): arg = int(arg) if sub == 'collection': res = api.get_collection(arg) elif sub == 'domain': res = api.get_domain(arg) elif sub == 'problem': res = api.get_problem(arg) elif sub == 'plan': res = api.get_plan(arg) else: print("Error: Unrecognized sub-command, {0}".format(sub)) exit(1) pprint.pprint(res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info(self, id):", "def ShowObject(object_id):\n return ShowObjects(object_id)==1", "def get_object(id):", "def show(self,id, **kw):\n r = validate_get(id)\n return dict(name=name, namepl=namepl, record=r)", "def get_object(self, id, **args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)", "def help_show(self):\n print(\"print an instance based on the class name and id\")", "def sub_id(self, sub_id):\n\n self._sub_id = sub_id", "def sub_id(self, sub_id):\n\n self._sub_id = sub_id", "def get(self, _id):", "def __str__(self):\n return self.id", "def __str__(self):\n return self.id", "def get_show_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/show.json', **kwargs)", "def __str__(self):\n\n return '%s<%x>' % (self.__class__.__name__, self.id,)", "def showId(self):\n extent = self.getExtent()\n id = self.parent_id\n levels = self.getLevels()\n prefix = settings.NESTED_TAXONOMY_PREFIX\n \n # name = prefix,id,levels,extent\n \n name = '%s:%s:%s:%s' %(prefix,id,levels,extent)\n return name", "def show(self, item_id):\n pass", "def id(obj):\n return obj", "def __str__(self):\n return str(self.id)", "def __str__(self):\n return str(self.id)", "def __str__(self):\n return str(self.id)", "def __str__(self):\n return str(self.id)", "def do_show(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n print(objects[args])", "def __str__(self) -> str:\n return self.id", "def getElementBySId(self, *args):\n return _libsbml.Submodel_getElementBySId(self, *args)", "def get_title_by_id(id):\n\n # your code", "def __repr__(self):\n\t\treturn self.id", "def __init__(self, id: str):\n self.id = id", "def __displayID(self): # displayID is a private method\n print(\"ID:\", self.ID)", "def show(args, syn):\n \n ent = syn.get(args.id, downloadFile=False)\n syn.printEntity(ent)", "def id(self, *args, **kwargs) -> Any:\n pass", "def get_record(self, id: uplink.Path):\n pass" ]
[ "0.6170138", "0.61571515", "0.6098876", "0.59050685", "0.5701581", "0.5683588", "0.5617816", "0.5617816", "0.5584747", "0.5561041", "0.5561041", "0.5537727", "0.5529539", "0.5524066", "0.55181515", "0.5504681", "0.5497895", "0.5497895", "0.5497895", "0.5497895", "0.5454298", "0.54471946", "0.5418129", "0.5392053", "0.5391765", "0.5390851", "0.53607786", "0.5356354", "0.5352401", "0.53441596" ]
0.6315935
0
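Illustrative only, not a dataset field: sample calls for the show dispatcher in the row above. The sub-command names ("collection", "domain", "problem", "plan") come from the record; the numeric ids are placeholders, and the api / pprint modules are assumed to be imported by the surrounding script.

# Hypothetical usage: each call fetches one API object and pretty-prints it.
show("domain", "17")      # arg is converted to int inside show()
show("problem", "2143")
show("collection", "3")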
Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation.
def average_distributed_scalar(scalar, args): if args.local_rank == -1: return scalar scalar_t = torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size() torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM) return scalar_t.item()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute(self, node, input_vals):\r\n assert len(input_vals) == 1\r\n if node.const_attr!=None:\r\n return np.array(np.mean(input_vals[0], node.const_attr))\r\n else:\r\n return np.array(np.mean(input_vals[0]))", "def mean(self):\n return self.sum / self.sum_weights", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor", "def reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor", "def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)", "def avg_net(self) -> float:\n return torch.mean(self.units.net)", "def _eed_compute(sentence_level_scores: List[Tensor]) ->Tensor:\n if len(sentence_level_scores) == 0:\n return tensor(0.0)\n average = sum(sentence_level_scores) / tensor(len(sentence_level_scores))\n return average", "def ensemble_mean(self):\n return self.mean(dim='mem')", "def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / float(self._cur_elem_count)\n self._mtx.release()\n return avg", "def average(self):\n s = self.sum()\n flat_shape = self.flatten_shape(self.shape)\n num_of_elements = fct.reduce(opr.mul, flat_shape, 1)\n average = s / num_of_elements\n return average", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def mean(self):\n mean = sum(self.data)/self.size\n return mean", "def average_impurity(self):\n children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),\n squeeze_dims=[1])\n is_leaf = tf.equal(LEAF_NODE, children)\n leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))\n counts = tf.gather(self.variables.node_sums, leaves)\n impurity = self._weighted_gini(counts)\n return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0)", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def availability(self):\n if len(self.nodes) == 0:\n return 0.0\n values = map(lambda n: n.availability, self.nodes)\n return mean(values)", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def _mean(items):\n return sum(items) / len(items)", "def conditional_mean(self, gp):\n raise NotImplementedError", "def average(self):\n return self.summation() / self.count()", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def _global_avg_pool(x):\n with tf.name_scope('global_avg_pool'):\n assert x.get_shape().ndims == 4\n return tf.reduce_mean(x, [1, 2])", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def mean(items):\n\n return float(sum(items)) / len(items)", "def mean(self) -> float:\n return self._data.mean()" ]
[ "0.6868551", "0.66323113", "0.65663683", "0.6506279", "0.6506279", "0.64611053", "0.64454865", "0.6427829", "0.6404392", "0.6344673", "0.63312757", "0.6280638", "0.6280638", "0.6280638", "0.6280638", "0.6280638", "0.6259122", "0.62401956", "0.6236642", "0.62141514", "0.61956006", "0.6185763", "0.61844254", "0.6157675", "0.6127196", "0.61143744", "0.61106354", "0.6105246", "0.6103279", "0.6094722" ]
0.7250768
0
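Illustrative only, not a dataset field: a sketch of the averaging helper above inside an evaluation loop. It assumes args.local_rank and args.device are set as the record's code expects, and that batch_losses is a placeholder list of per-batch losses on the current process.

# Hypothetical evaluation step: average a per-process metric across all ranks.
local_loss = float(sum(batch_losses)) / max(len(batch_losses), 1)
global_loss = average_distributed_scalar(local_loss, args)
if args.local_rank in (-1, 0):
    print("eval loss: %.4f" % global_loss)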
Add special tokens to the tokenizer and the model if they have not already been added.
def add_special_tokens_(model, tokenizer, update_model=True): orig_num_tokens = len(tokenizer) num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there #print("coab::",len(tokenizer.vocab)) if (num_added_tokens > 0 and update_model): model.encoder.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens) model.decoder.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens) #print(model.encoder.embeddings.word_embeddings.weight.shape) #print(model.decoder.bert.embeddings.word_embeddings.weight.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_special_tokens_(model, tokenizer):\n orig_num_tokens = len(tokenizer.encoder)\n num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there\n if num_added_tokens > 0:\n model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)", "def add_tokens(self, sample):\n # Text\n inputs = self._tokenizer.encode_plus(sample['text'],\n add_special_tokens=True,\n max_length=self._max_text_length,\n padding='max_length', # TODO padding here or in model (together with item_glove)?\n truncation=True, # truncate to 512 (added for MSNBC dataset)\n return_attention_mask=True)\n # TODO warn if text was truncated\n #if len(TODO) > self._max_text_length:\n # self._logger.info(f'Truncate long input sentence ({len(TODO)} tokens) to {self._max_text_length}')\n sample['text_tokenized'] = inputs['input_ids']\n sample['text_attention_mask'] = inputs['attention_mask']\n # Item name (mention/surface form)\n inputs = self._tokenizer.encode(sample['item_name'],\n add_special_tokens=False)\n sample['item_name_tokenized'] = inputs", "def special_tokens(self, ):\n\n if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:\n special_tokens = self.tokenizer.build_inputs_with_special_tokens([])\n special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)\n self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids\n\n special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token\n return special_tokens", "def add_special_tokens(self, special_tokens_dict: dict) -> int:\n num_tokens_added = self.tokenizer.add_special_tokens(special_tokens_dict)\n\n if num_tokens_added > 0:\n logging.info(f'{num_tokens_added} special tokens added, resize your model accordingly.')\n for k in self.tokenizer.SPECIAL_TOKENS_ATTRIBUTES:\n setattr(self, k, getattr(self.tokenizer, k, None))\n return num_tokens_added", "def add_special_tokens(self) -> bool:\n return True", "def add_token(self,token):\n\t\tif not token:\n\t\t\tlogging.error(\"Token cannot be empty!\")\n\t\t\texit()\n\n\t\tself.tokens.append(token.lower())\n\t\t#self.user_defined_token = token.lower()", "def set_special_tokens(self, special_tokens):\n if not special_tokens:\n self.special_tokens = {}\n self.special_tokens_decoder = {}\n return\n self.special_tokens = dict((tok, len(self.encoder) + i)\n for i, tok in enumerate(special_tokens))\n self.special_tokens_decoder = {\n v: k\n for k, v in self.special_tokens.items()\n }", "def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]", "def process_new_tokens(tokens,processed_tokens_set, model, dictionary):\n if hasattr(model, 'using_pretrained') and model.using_pretrained is not None:\n processed_tokens_set.update(tokens)\n update_embedding_layer(processed_tokens_set, model, dictionary)", "def override_special_tokens(self, opt: Opt):\n # now override\n self.start_token = self.hf_tokenizer.cls_token\n self.end_token = self.hf_tokenizer.sep_token\n self.null_token = self.hf_tokenizer.pad_token\n self.unk_token = self.hf_tokenizer.unk_token\n\n self._unk_token_idx = self.hf_tokenizer.unk_token_id\n\n self.start_idx = self[self.start_token]\n self.end_idx = self[self.end_token]\n self.null_idx = self[self.null_token]", "def tokenize_pretraining(self, inputs):\n\n ref_ids = prepare_ref([inputs], self.tokenizer_ltp, 
self.tokenizer_cn)\n\n tokens = self.tokenizer_cn.tokenize(inputs)\n\n if len(tokens) > self.max_seq_length - 2:\n tokens = tokens[:(self.max_seq_length - 2)]\n ref_ids = ref_ids[:(self.max_seq_length - 2)]\n\n ref_ids = cn_whole_word_mask(tokens, ref_ids[0])\n tokens, labels = random_word_wwm(tokens, ref_ids, self.tokenizer_cn)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n lm_label_ids = ([-100] + labels + [-100])\n\n input_ids = self.tokenizer_cn.convert_tokens_to_ids(tokens)\n\n attention_mask = [1] * len(input_ids)\n token_type_ids = [0] * len(input_ids)\n\n while len(input_ids) < self.max_seq_length:\n input_ids.append(0)\n attention_mask.append(0)\n token_type_ids.append(0)\n lm_label_ids.append(-100)\n\n assert len(input_ids) == self.max_seq_length\n assert len(attention_mask) == self.max_seq_length\n assert len(token_type_ids) == self.max_seq_length\n assert len(lm_label_ids) == self.max_seq_length\n\n\n outputs = {'input_ids': tf.constant(input_ids), 'attention_mask': tf.constant(attention_mask), \n 'token_type_ids': tf.constant(token_type_ids), 'lm_label_ids': tf.constant(lm_label_ids)}\n\n return outputs", "def _add_non_empty_token(self, token: str):\n if token != \"\":\n self._tokens.append(token)", "def add_special_tokens_single_sentence(self, token_ids):\r\n return [self.token_to_id(self.cls_token)] + token_ids + [self.token_to_id(self.sep_token)]", "def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):\r\n sep = [self.token_to_id(self.sep_token)]\r\n cls = [self.token_to_id(self.cls_token)]\r\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep", "def add_special_tokens_single_sentence(self, token_ids):\n sep = [self._convert_token_to_id(self.sep_token)]\n cls = [self._convert_token_to_id(self.cls_token)]\n return token_ids + sep + cls", "def custom_tokenizer(nlp, infix_reg):\n return Tokenizer(nlp.vocab, infix_finditer=infix_reg.finditer)", "def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):\n sep = [self._convert_token_to_id(self.sep_token)]\n cls = [self._convert_token_to_id(self.cls_token)]\n return token_ids_0 + sep + token_ids_1 + sep + cls", "def add_tokens(self, tokens):\n self.result.extend([d for d in tokens])", "def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "def tokenize(self, text, **kwargs):\n def split_on_token(tok, text):\n result = []\n split_text = text.split(tok)\n for i, sub_text in enumerate(split_text):\n if i == 0 and not sub_text:\n result += [tok]\n elif i == len(split_text) - 1:\n if sub_text:\n result += [sub_text]\n else:\n pass\n else:\n if sub_text:\n result += [sub_text]\n result += [tok]\n return result\n\n def split_on_tokens(tok_list, text):\n if not text:\n return []\n if not tok_list:\n return self._tokenize(text, **kwargs)\n\n tokenized_text = []\n text_list = [text]\n for tok in tok_list:\n tokenized_text = []\n for sub_text in text_list:\n if sub_text not in self.added_tokens_encoder \\\n and sub_text not in self.all_special_tokens:\n tokenized_text += split_on_token(tok, sub_text)\n else:\n tokenized_text += [sub_text]\n text_list = tokenized_text\n\n return sum((self._tokenize(token, **kwargs) if token not \\\n in self.added_tokens_encoder and token not in self.all_special_tokens \\\n else [token] for token in tokenized_text), [])\n\n added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens\n tokenized_text = 
split_on_tokens(added_tokens, text)\n return tokenized_text", "def add_special_tokens_single_sentence(self, token_ids):\n return [self.cls_token_id] + token_ids + [self.sep_token_id]", "def setOwnTokens(self):\n\t\tself.removeOwnPunctuation()\n\t\tself.removeOwnStopWords()", "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n _cls = [self.cls_token_id]\n _sep = [self.sep_token_id]\n return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep", "def additional_special_tokens(self):\r\n if self._additional_special_tokens is None:\r\n logger.error(\"Using additional_special_tokens, but it is not set yet.\")\r\n return self._additional_special_tokens", "def init_tokens(self):\n raise NotImplementedError('Abstract method.')", "def _resize_token_embeddings(\n cls, model: PreTrainedModel, tokenizer_wrapper: TokenizerWrapper\n ):\n if tokenizer_wrapper.num_added_special_tokens > 0:\n model.resize_token_embeddings(\n new_num_tokens=len(tokenizer_wrapper.tokenizer)\n )", "def build_tokens(self):\n self.advance()\n while self.__token != \"\":\n self.__tokens.append(self.token_type())\n self.advance()", "def _tokenize(self, raw_text):\n\n doc = self.nlp(raw_text.strip())\n\n # Loop through tokens and find known entities aren't already marked\n for token in doc:\n # Is this word in our known_entities, but is not recognized by the spaCy parser?\n if token.text.lower() in self.known_entities and token.ent_type not in self.entities:\n # We need to set the new entity to doc.ents directly (I believe the getter for doc.ents does\n # some important massaging. However, counter to the online docs, setting doc.ents wipes out\n # all of the previously recognized ents, so we stash the value, then we combine and reset.\n stash = doc.ents\n doc.ents = [(token.text.title(), doc.vocab.strings['PERSON'], token.i, token.i + 1)]\n doc.ents = doc.ents + stash\n\n # Find proper noun n-grams: (a) find a known entity, (b) is the next word also a known entity?,\n # (c) merge, (d) repeat\n # TODO: Joining multi-word named entities sometimes causes us trouble.\n doc_len = len(doc) # Helps us know when to exit the 'for loop' (since we change the # of items via merge)\n for token in doc:\n # if we're not at the end of the loop, and we recognize this as a proper noun and it's not a stop word\n # and the token isn't a space...\n if token.i + 1 < doc_len and token.ent_type in self.entities and \\\n token.text.lower() not in self.stop_words and token.text not in ' ':\n next_token = doc[token.i + 1]\n # keep looping while we're not at the end of the loop and this token has the same entity type as\n # the previous token and it's not a stop word or a space.\n while token.i + 1 < doc_len and next_token.ent_type == token.ent_type and \\\n next_token.text.lower() not in self.stop_words and next_token.text not in ' ':\n n_gram = doc[token.i:token.i + 2]\n n_gram.merge()\n doc_len -= 1 # the merge changes the list length, so we just shrunk the list!\n # print(x)\n if token.i + 1 >= doc_len:\n break\n\n return doc", "def prepare_for_model(\n self,\n text: Union[TextInput, PreTokenizedInput],\n text_pair: Optional[PreTokenizedInput] = None,\n boxes: Optional[List[List[int]]] = None,\n word_labels: Optional[List[int]] = None,\n add_special_tokens: bool = True,\n padding: Union[bool, str, PaddingStrategy] = False,\n truncation: Union[bool, str, TruncationStrategy] = None,\n max_length: Optional[int] = None,\n stride: int = 0,\n 
pad_to_multiple_of: Optional[int] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n return_token_type_ids: Optional[bool] = None,\n return_attention_mask: Optional[bool] = None,\n return_overflowing_tokens: bool = False,\n return_special_tokens_mask: bool = False,\n return_offsets_mapping: bool = False,\n return_length: bool = False,\n verbose: bool = True,\n prepend_batch_axis: bool = False,\n **kwargs,\n ) -> BatchEncoding:\n\n # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'\n padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n pad_to_multiple_of=pad_to_multiple_of,\n verbose=verbose,\n **kwargs,\n )\n\n tokens = []\n pair_tokens = []\n token_boxes = []\n pair_token_boxes = []\n labels = []\n\n if text_pair is None:\n if word_labels is None:\n # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)\n for word, box in zip(text, boxes):\n if len(word) < 1: # skip empty words\n continue\n word_tokens = self.tokenize(word)\n tokens.extend(word_tokens)\n token_boxes.extend([box] * len(word_tokens))\n else:\n # CASE 2: token classification (training)\n for word, box, label in zip(text, boxes, word_labels):\n if len(word) < 1: # skip empty words\n continue\n word_tokens = self.tokenize(word)\n tokens.extend(word_tokens)\n token_boxes.extend([box] * len(word_tokens))\n if self.only_label_first_subword:\n # Use the real label id for the first token of the word, and padding ids for the remaining tokens\n labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))\n else:\n labels.extend([label] * len(word_tokens))\n else:\n # CASE 3: document visual question answering (inference)\n # text = question\n # text_pair = words\n tokens = self.tokenize(text)\n token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]\n\n for word, box in zip(text_pair, boxes):\n if len(word) < 1: # skip empty words\n continue\n word_tokens = self.tokenize(word)\n pair_tokens.extend(word_tokens)\n pair_token_boxes.extend([box] * len(word_tokens))\n\n # Create ids + pair_ids\n ids = self.convert_tokens_to_ids(tokens)\n pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None\n\n # Compute the total size of the returned encodings\n pair = bool(pair_ids is not None)\n len_ids = len(ids)\n len_pair_ids = len(pair_ids) if pair else 0\n total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)\n\n # Truncation: Handle max sequence length\n overflowing_tokens = []\n overflowing_token_boxes = []\n overflowing_labels = []\n if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:\n (\n ids,\n token_boxes,\n pair_ids,\n pair_token_boxes,\n labels,\n overflowing_tokens,\n overflowing_token_boxes,\n overflowing_labels,\n ) = self.truncate_sequences(\n ids,\n token_boxes,\n pair_ids=pair_ids,\n pair_token_boxes=pair_token_boxes,\n labels=labels,\n num_tokens_to_remove=total_len - max_length,\n truncation_strategy=truncation_strategy,\n stride=stride,\n )\n\n if return_token_type_ids and not add_special_tokens:\n raise ValueError(\n \"Asking to return token_type_ids while setting add_special_tokens to False \"\n \"results in an undefined behavior. 
Please set add_special_tokens to True or \"\n \"set return_token_type_ids to None.\"\n )\n\n # Load from model defaults\n if return_token_type_ids is None:\n return_token_type_ids = \"token_type_ids\" in self.model_input_names\n if return_attention_mask is None:\n return_attention_mask = \"attention_mask\" in self.model_input_names\n\n encoded_inputs = {}\n\n if return_overflowing_tokens:\n encoded_inputs[\"overflowing_tokens\"] = overflowing_tokens\n encoded_inputs[\"overflowing_token_boxes\"] = overflowing_token_boxes\n encoded_inputs[\"overflowing_labels\"] = overflowing_labels\n encoded_inputs[\"num_truncated_tokens\"] = total_len - max_length\n\n # Add special tokens\n if add_special_tokens:\n sequence = self.build_inputs_with_special_tokens(ids, pair_ids)\n token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)\n token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]\n if pair_token_boxes:\n pair_token_boxes = pair_token_boxes + [self.sep_token_box]\n if labels:\n labels = [self.pad_token_label] + labels + [self.pad_token_label]\n else:\n sequence = ids + pair_ids if pair else ids\n token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])\n\n # Build output dictionary\n encoded_inputs[\"input_ids\"] = sequence\n encoded_inputs[\"bbox\"] = token_boxes + pair_token_boxes\n if return_token_type_ids:\n encoded_inputs[\"token_type_ids\"] = token_type_ids\n if return_special_tokens_mask:\n if add_special_tokens:\n encoded_inputs[\"special_tokens_mask\"] = self.get_special_tokens_mask(ids, pair_ids)\n else:\n encoded_inputs[\"special_tokens_mask\"] = [0] * len(sequence)\n\n if labels:\n encoded_inputs[\"labels\"] = labels\n\n # Check lengths\n self._eventual_warn_about_too_long_sequence(encoded_inputs[\"input_ids\"], max_length, verbose)\n\n # Padding\n if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:\n encoded_inputs = self.pad(\n encoded_inputs,\n max_length=max_length,\n padding=padding_strategy.value,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n )\n\n if return_length:\n encoded_inputs[\"length\"] = len(encoded_inputs[\"input_ids\"])\n\n batch_outputs = BatchEncoding(\n encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis\n )\n\n return batch_outputs", "def setup_special_ids(tokenizer):\n FLAGS.vocab_size = tokenizer.get_vocab_size()\n tf.logging.info(\"Set vocab_size: %d.\", FLAGS.vocab_size)\n for sym, sym_id_str in special_symbols_mapping.items():\n try:\n sym_id = tokenizer.get_token_id(sym)\n setattr(FLAGS, sym_id_str, sym_id)\n tf.logging.info(\"Set %s to %d.\", sym_id_str, sym_id)\n except KeyError:\n tf.logging.warning(\"Skip %s: not found in tokenizer's vocab.\", sym)" ]
[ "0.7694922", "0.6634113", "0.6582269", "0.64322776", "0.64064205", "0.6263845", "0.5975597", "0.59564984", "0.59484524", "0.5922455", "0.5899315", "0.58885926", "0.5843837", "0.5829498", "0.5825403", "0.57836765", "0.575896", "0.5743932", "0.57355046", "0.5733764", "0.57210046", "0.567992", "0.5668568", "0.5642767", "0.563662", "0.56323296", "0.55800265", "0.5546004", "0.5490504", "0.5478709" ]
0.7371381
1
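Illustrative only, not a dataset field: a sketch of wiring the add_special_tokens_ helper above into a Hugging Face setup. The checkpoint name is a placeholder, and both ATTR_TO_SPECIAL_TOKEN and the encoder-decoder model object are assumed to be defined alongside the record's helper, since the record only references them.

from transformers import BertTokenizer

# Hypothetical wiring; `model` must expose .encoder and .decoder with
# resize_token_embeddings, as the record's helper assumes.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
add_special_tokens_(model, tokenizer, update_model=True)
print(len(tokenizer))  # vocabulary size after any special tokens were added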
Equal comparison Two pilots are defined equal, if and only if their first and last names are equal.
def __eq__(self, other): return self.last_name == other.last_name and self.first_name == other.first_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other: 'Pair') -> bool:\n return self.names == other.names", "def equal(self, other):\n return self.slots == other.slots", "def same_player(self, other):\n return self.name == other.name \\\n and self.color == other.color", "def is_equal_to(self, another_labyrinth):\n \n if self.equals_list_nodes(self.list_empty_nodes, another_labyrinth.list_empty_nodes) and \\\n self.equals_list_nodes(self.list_wall_nodes, another_labyrinth.list_wall_nodes) and \\\n self.start_point.position_is_equal_to(another_labyrinth.start_point) and \\\n self.exit_point.position_is_equal_to(another_labyrinth.exit_point):\n return True\n \n else:\n return False", "def __eq__(self, other) -> bool:\n return self.Firstname == other.Firstname and self.LastName == other.LastName", "def are_equal(self, sp1, sp2):\n return True", "def is_equal(self, other):\n return (other.__class__ == self.__class__\n and other.subscript == self.subscript\n and other.swept_inames == self.swept_inames)", "def are_equal(self, sp1, sp2):\n return", "def ParsedDataEqual(self, other):\n return (self.parsed_coeff == other.parsed_coeff and\n self.parsed_name == other.parsed_name)", "def __eq__(self, second):\r\n\t\treturn self.x == other.x and self.y == other.y", "def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True", "def __eq__(self, other):\n return self.points == other.points", "def pequal(self, other):\n if not isinstance(other, plist):\n return False\n if len(self) != len(other):\n return False\n try:\n for x, y in zip(self, other):\n if not x.pequal(y):\n return False\n except Exception:\n for x, y in zip(self, other):\n if x != y:\n return False\n return True", "def __eq__(self, other):\n return sorted(self.points) == sorted(other.points)", "def __eq__(self, other: 'PairwiseInfo') -> bool:\n\n return (\n self.their_did == other.their_did and\n self.their_verkey == other.their_verkey and\n self.my_did == other.my_did and\n self.my_verkey == other.my_verkey and\n self.metadata == other.metadata)", "def __eq__(self, other):\n\n return self.name == other.name and self.price_range == other.price_range", "def __eq__(self, other):\n if type(self) != type(other):\n return False\n else:\n return ((self.name == other.name) and (self.value == other.value)\n and (self.time == other.time))", "def __eq__(self, l):\n return l.point == self.point and l.angle == self.angle", "def are_equal(self, sp1, sp2):\n return sp1 == sp2", "def __lt__(self, other: 'Pair') -> bool:\n return self.names < other.names", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, secondPoint):\n return self.xCoordinate == secondPoint.xCoordinate and self.yCoordinate == secondPoint.yCoordinate", "def __eq__(self, other):\n return self.point == other.point", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def __eq__(self, rhs):\n return self.x == rhs.x and self.y == rhs.y", "def _is_equal(x, y):\n return x[0] == y", "def __eq__(self: 'TOAHModel', other: 'TOAHModel') -> bool:\n return self.stool_lst == other.stool_lst", "def 
are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass", "def _io_similar(lhs, rhs):\n ldecl = lhs.decl()\n rdecl = rhs.decl()\n if not ldecl[::2] == rdecl[::2]: # names are the same\n return False\n size = len(ldecl)\n return all(ldecl[i] is rdecl[i] for i in range(1, size, 2))" ]
[ "0.6627373", "0.63435644", "0.6153186", "0.6149348", "0.6088684", "0.602681", "0.6011238", "0.5975594", "0.5950189", "0.5947117", "0.59188914", "0.58683014", "0.58377177", "0.5823649", "0.58202285", "0.58163077", "0.5816294", "0.5756619", "0.5748799", "0.5720211", "0.5704944", "0.5696138", "0.5694921", "0.56943715", "0.5680851", "0.56754637", "0.56741226", "0.5642345", "0.56400657", "0.5633242" ]
0.6386746
1
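Illustrative only, not a dataset field: a minimal stand-in class showing the intended behaviour of the equality method in the row above. The class name Pilot and its constructor are assumptions; only the two name attributes matter for the comparison.

# Hypothetical carrier class for the record's __eq__.
class Pilot:
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

    def __eq__(self, other):
        return (self.last_name == other.last_name
                and self.first_name == other.first_name)

print(Pilot("Amelia", "Earhart") == Pilot("Amelia", "Earhart"))  # True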
Less comparison by last name and first name
def __lt__(self, other): return( (self.last_name, self.first_name) < (other.last_name, other.first_name) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __lt__(self, other):\n return self.first_name < other.first_name", "def __lt__(self, other):\n if self.last_name == other.last_name:\n return self.name < other.name\n return self.last_name < other.last_name", "def __lt__(self, other):\r\n if self.lastName == other.lastName:\r\n return self.name < other.name\r\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\n if self.lastName == other.lastName:\n return self.name < other.name\n return self.lastName < other.lastName", "def __lt__(self, other):\r\n print 'eaating shit from Person'\r\n if self.lastName == other.lastName :\r\n return self.name < other.name\r\n return self.lastName < other.lastName", "def __eq__(self, other):\n return self.last_name == other.last_name and self.first_name == other.first_name", "def test_first_last_name(self):\n\t\tformatted_name = get_formatted_name('janos', 'jk')\n\t\tself.assertEqual(formatted_name, 'Janos Jk')", "def first_last_name(obj):\n return '%s %s' % (obj.first_name, obj.last_name)", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n # Asserting that formatted_name equals 'Janis Joplin'\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')", "def combine_name(self):\n if self.first_name.isalpha() and self.last_name.isalpha():\n username = self.first_name + \" \" + self.last_name\n return username\n return 'Names must be alphabets'", "def get_user_firstname_lastname(self, record):\n lower_first_name, lower_last_name = self.clean_user_names(record)\n\n #No first name and last name check email\n if lower_first_name is None and lower_last_name is None:\n\n lower_first_name, lower_last_name = \\\n self.extract_name_from_email(record)\n\n return lower_first_name, lower_last_name", "def get_short_name(self):\n last_name = self.last_name\n first_name = self.first_name\n if (not (last_name and not last_name.isspace())):\n \"\"\" If last name is empty or none then return first name\"\"\"\n return first_name\n else:\n return last_name", "def test_last_name_first_name(self):\n current_resume = resume.objects.first()\n expected = 'Bielinski, Nicholas'\n case = current_resume.last_name_first_name()\n self.assertEqual(case, expected)", "def name_comparator(last_name):\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == 
last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score", "def __lt__(self, other):\n # If total amount is strictly less than, sort by it\n if self.total_donations() < other.total_donations():\n return True\n elif self.total_donations() == other.total_donations(): # Otherwise, sort by last name\n return self.name.split()[-1] < other.name.split()[-1]\n else:\n return False", "def clean_user_names(record):\n if 'first_name' in record and 'last_name' in record:\n #Remove all special characters from first_name/last name\n lower_first_name = record['first_name'].replace('-', '')\\\n .replace('_', '').replace('[', '')\\\n .replace(']', '').replace(' ', '')\\\n .lower()\n lower_last_name = record['last_name'].replace('-', '')\\\n .replace('_', '').replace('[', '')\\\n .replace(']', '').replace(' ', '')\\\n .lower()\n return lower_first_name, lower_last_name\n else:\n return None, None", "def _first_name_sql(self, first_name, tolerance=1):\n nicknames = self._lookup_name(first_name)\n first_name_selects = []\n first_name_conditions = []\n for i, name in enumerate(nicknames):\n col_name = \"match_first_name_{}\".format(i)\n select = \" lower('{}') as {} \".format(name, col_name)\n first_name_selects.append(select)\n edit_distance = \"\"\"\n (levenshtein(lower(first_name), {col}) <= {tolerance}\n OR levenshtein(lower(nickname), {col}) <= {tolerance})\n \"\"\".format(col=col_name, tolerance=tolerance)\n first_name_conditions.append(edit_distance)\n name_select = \", \".join(first_name_selects)\n name_conditions = \" OR \".join(first_name_conditions)\n return name_select, name_conditions", "def __lt__(self, other):\n return self.name.lower() < other.name.lower()", "def __lt__(self, other):\n return self.name.lower() < other.name.lower()", "def test_first_last_name(self):\n formatted_name = get_formatted_name('david', 'Malan')\n self.assertEqual(formatted_name, 'David Malan')", "def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')", "def test_first_last(self):\n\n full_name = get_full_name(\"pony\", \"cat\")\n self.assertEqual(full_name, \"Pony Cat\")\n\n full_name = get_full_name(\"goat\", \"cat\")\n self.assertEqual(full_name, \"Goat Cat\")", "def name_first(twitter_data, a, b):\r\n \r\n a_name = twitter_data[a][\"name\"]\r\n b_name = twitter_data[b][\"name\"]\r\n if a_name < b_name:\r\n return -1\r\n if a_name > b_name:\r\n return 1\r\n return username_first(twitter_data, a, b)" ]
[ "0.75604546", "0.742028", "0.7111031", "0.7088123", "0.7088123", "0.7088123", "0.7088123", "0.7088123", "0.67837274", "0.6554892", "0.64998496", "0.6478273", "0.64664423", "0.6444121", "0.6444121", "0.6406543", "0.63803595", "0.6337073", "0.63360804", "0.62972367", "0.6284565", "0.61704636", "0.61634654", "0.6159262", "0.61287", "0.61287", "0.6098971", "0.6081002", "0.60782814", "0.60634696" ]
0.7713619
0
Generate default user name of the form '``first_name``.\ ``last_name``'
def generateUsername(self): retval= "{0}.{1}".format( self.first_name.split()[0].lower(), self.last_name.split()[-1].lower() ) return toAscii(retval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)", "def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def combine_name(self):\n if self.first_name.isalpha() and self.last_name.isalpha():\n username = self.first_name + \" \" + self.last_name\n return username\n return 'Names must be alphabets'", "def first_name_and_initial(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name[0])", "def nice_name(self):\n if self.first_name or self.last_name:\n return \"%s %s\" % (self.first_name, self.last_name)\n else:\n key = \"profile.nice_name\"\n cache_key = \"%s.%s.%s\" % (settings.SITE_CACHE_KEY, key, self.pk) \n cached = cache.get(cache_key)\n if cached is None:\n cached = self.user.username\n cache.set(cache_key, cached)\n return cached", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def get_formated_name(first_name,last_name):\n\tfull_name = first_name + '' + last_name\n\treturn full_name.title()", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def full_name_short(self):\n return \"{}. 
{}\".format(str(self.user.first_name)[:1], self.user.last_name)", "def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)", "def generate_name():\n\n first_name = \"\"\n\n for letter in range(5):\n if letter % 2 == 0:\n first_name += CONSONANTS[randint(0,20)]\n else: # The letter is even\n first_name += VOWELS[randint(0,4)]\n\n last_name = \"\"\n for letter in range(5):\n if letter == 1 or letter == 3:\n last_name += CONSONANTS[randint(0, 20)]\n elif letter == 4:\n last_name += VOWELS[randint(0, 4)]\n else:\n last_name += VOWELS[randint(0, 4)] * 2\n\n last_name = last_name[0].upper() + last_name[1:]\n first_name = first_name[0].upper() + first_name[1:]\n username = first_name + last_name\n\n return username", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)", "def account_name_generator():\n return 'jdoe-' + str(uuid()).lower()[:16]", "def get_formatted_name(first_name, last_name): \r\n full_name = f\"{first_name} {last_name}\"\r\n return full_name.title()", "def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)", "def get_full_name(self):\n return f'{self.first_name} {self.last_name}'", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def make_full_name(first_name, last_name, middle_initial=\"\"):\n if middle_initial:\n return f\"{first_name} {middle_initial[0:1]}. {last_name}\"\n else:\n return f\"{first_name} {last_name}\"", "def get_user_name(user: User) -> str:\n user_name = user.get(\"display_name\")\n if not user_name:\n user_name = user[\"fullname\"]\n if not user_name:\n user_name = user[\"name\"]\n return user_name", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def display_name(self):\n if self.email is None:\n if self.first_name is None and self.last_name is None:\n return \"\"\n\n if self.first_name is None and self.last_name is None:\n return self.email\n\n if self.last_name is None:\n return self.first_name\n\n if self.first_name is None:\n return self.last_name\n\n return \"{} {}\".format(self.first_name, self.last_name)" ]
[ "0.78407025", "0.77698755", "0.7629761", "0.7615957", "0.75311977", "0.75153714", "0.73975754", "0.7373969", "0.73408616", "0.73127294", "0.72625107", "0.72523916", "0.7252209", "0.7217108", "0.7209429", "0.71537197", "0.7150995", "0.7134499", "0.7072816", "0.70415425", "0.7041541", "0.7039586", "0.70206106", "0.70059407", "0.70059407", "0.70059407", "0.7002636", "0.70000243", "0.69975615", "0.69768536" ]
0.8386236
0
Gets field from comment. Comment fields are strings of the format '``key`` = ``value``'
def getCommentField(self, key): if not self.comments: return None pattern= re.compile(key + r"\s*=\s*'(.+)'") match= pattern.search(self.comments) if not match: return None return match.group(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])", "def get_field(fieldname):\n m = re.search(\"(^|\\\\n)%s\\\\s(.*?)\\n\" % fieldname, s, re.I)\n if not m:\n return None\n else:\n return Unquote(m.group(2))", "def get_field_from_sbmfielddesc(hgf_field):\n\tq = \"\"\"SELECT * FROM sbmFIELDDESC where name='%s'\"\"\" %(hgf_field)\n\treturn run_sql(q)[0]", "def get_field(self, bib_entry, field):\n output = bib_entry.fields[field] if field in bib_entry.fields else \"\"\n return self.strip_braces(output)", "def get_field(self, field_name):\n for f in self.fields:\n if f.name.lower() == field_name.lower():\n return f\n return None", "def get_field(self, field_name):\n all_fields = self._fields.items(self._fields.root)\n print(\"all_fields\", all_fields)\n for name, field in all_fields:\n print(name, field_name)\n if name == field_name:\n return field", "def _getfield(self, block, name):\n\n # First, get the field from the class, if defined\n block_field = getattr(block.__class__, name, None)\n if block_field is not None and isinstance(block_field, Field):\n return block_field\n\n # Not in the class, so name\n # really doesn't name a field\n raise KeyError(name)", "def get_field(self, key):\n return Field.deserialize(self._get_single('fields', {'key': key}))", "def get_field(self, field):\n idx = self._keys.index(field)\n return self._data[idx]", "def get_field_property(dt, fieldname, property):\n\tfield = webnotes.conn.sql(\"\"\"\n\t\tselect name, `%s` \n\t\tfrom tabDocField \n\t\twhere parent=%s and fieldname=%s\"\"\" % (property, '%s', '%s'), (dt, fieldname))\n\t\t\n\tprop = webnotes.conn.sql(\"\"\"\n\t\tselect value \n\t\tfrom `tabProperty Setter` \n\t\twhere doc_type=%s and field_name=%s and property=%s\"\"\", (dt, fieldname, property))\n\tif prop: \n\t\treturn prop[0][0]\n\telse:\n\t\treturn field[0][1]", "def get_field(self, name):\n for field_name, field in self._all_fields.iteritems():\n if name == self._sanitize_field_name(field_name):\n return field", "def get_field(key, obj):\n\n val = obj\n\n for subkey in key.split('.'):\n val = val[subkey]\n\n return val", "def getfield(form, fieldname):\n try:\n return form[fieldname]\n except KeyError:\n return None", "def field_by_name(self, name):\r\n return self._by_name[name]", "def get_field(self, link_id, field):\n key = self.link_key(link_id)\n \n result = self.connection.hget(key, field)\n \n self.link_messenger.viewed_field(link_id, field)\n \n return result", "def get_field_by_key(field, key, val, session):\n sql = select([field]).where(key == val)\n value = session.execute(sql).scalar()\n return value", "def get_field(self, field):\n return self._dict.get(field)", "def __getitem__(self, field_name):\n\n if field_name in self._module._fields.keys():\n try:\n return self._fields[field_name]\n except KeyError:\n if self['id'] == '':\n # If this is a new entry, the 'id' field is yet undefined.\n return ''\n else:\n # Retrieve the field from the SugarCRM connection.\n \n q_str = \"%s.id='%s'\" % (self._module._table, self['id'])\n res = self._module._connection.get_entry_list(\n self._module._name, q_str,\n '', 0, [field_name], 1, 0)\n\n nvl = res['entry_list'][0]['name_value_list']\n for attribute in nvl:\n if attribute == field_name:\n value = nvl[attribute]['value']\n if value:\n self._fields[attribute] = \\\n HTMLParser().unescape(\n nvl[attribute]['value'])\n else:\n self._fields[attribute] = ''\n\n 
return self._fields[attribute]\n\n else:\n raise AttributeError", "def get_comment(self, attribute_name, default=None):\n return getattr(self, '%s__comment' % attribute_name, default)", "def get_comment(self, attribute_name, default=None):\n return getattr(self, '%s__comment' % attribute_name, default)", "def _get_field(self, section, field):\n if not self._configparser.has_option(section, field):\n return None\n return self._configparser.get(section, field).strip()", "def get_field(cls, name):\n if name not in cls.get_field_names():\n # - check field name first, next: column name -\n name = cls.get_field_name(name)\n return getattr(cls, name, None)", "def get_field(self, field_name):\n for attr_name, field in self:\n if field_name == attr_name:\n return field\n\n raise errors.FieldNotFound('Field not found', field_name)", "def get_field(cls, line, column_name):\n\n # FUTURE this might be useful for other Parser implementations\n # refactor to generic solution?\n fields = line.split(cls.DELIMITER)\n index = cls.FIELD_NAME_TO_INDEX[column_name]\n try:\n field = fields[index]\n converter = cls.FIELD_CONVERTERS[column_name]\n except IndexError as i_err:\n logging.error(\"can't parse line for %s, index is missing: %s\\n\\t%s\"\n % (column_name, i_err, line))\n raise i_err\n except KeyError as k_err:\n logging.error(\"can't parse line for %s, converter is missing: %s\"\n % (column_name, k_err))\n raise k_err\n return converter(field)", "def getdocfield(fieldname):\t\t\n\tl = [d for d in doctype_dl if d.doctype=='DocField' and d.fieldname==fieldname]\n\treturn l and l[0] or None", "def lookup(self, name):\n return self.fieldDict[name]", "def get_req_from_comment(comment, ans):\n for req_name in [r for r in ans.results.keys() if r.startswith('REQUEST_')]:\n if Adams.evaluate_exp(f'{req_name}.comment').lower() == comment.lower():\n return ans.results.get(req_name)\n\n raise AviewError(f'No request found with comment {comment}!')", "def get_field(entry, field):\n\n if field.name in entry.field_dict:\n if field.choices:\n return getattr(entry.object, \"get_%s_display\" % field.name)()\n return entry.field_dict[field.name]\n else:\n return settings.TEMPLATE_STRING_IF_INVALID", "def get_field(self, field):\n return self.extra_fields[field]", "def get_field(resource, field):\n fields = field.split('.', 1)\n if len(fields) == 1:\n sep2_field = getattr(resource, field, None)\n else:\n meta_field = getattr(resource, fields[0], None)\n sep2_field = EndDevice.get_field(meta_field, fields[1]) if meta_field else None\n return sep2_field" ]
[ "0.60989666", "0.60174024", "0.59364283", "0.5774984", "0.5718621", "0.5715055", "0.57144874", "0.5686602", "0.5627017", "0.56223005", "0.56118613", "0.5563498", "0.55319387", "0.55255896", "0.5520792", "0.5506498", "0.5476555", "0.5475833", "0.5456248", "0.5456248", "0.5425322", "0.54207647", "0.5412386", "0.5391364", "0.5381642", "0.5359057", "0.53526163", "0.5345528", "0.533265", "0.5323951" ]
0.76645404
0
Set comment field. Comment fields are strings of the format '``key`` = ``value``'
def setCommentField(self, key, value): if not key: raise KeyError() comment= "" if value: comment= "{0}='{1}'".format(key, value) if not self.comments: self.comments= comment return pattern= re.compile(key + r"s*=\s*'.+'") match= pattern.search(self.comments) if match: #key exists -> replace self.comments= ( self.comments[0:match.start(0)].strip() + comment + self.comments[match.end(0):] ).strip() else: self.comments+= "; " + comment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_comment(self, comment):\n self.comment_text = str(comment)", "def set_comment(self, comment):\n\t\tself.comment_ = comment", "def comment(self, value: str):\n self._comment = value", "def comment(self, comment):\n\n self.logger.debug(\"In 'comment' setter.\")\n\n self._comment = comment", "def comment(self, comment):\n self.logger.debug(\"In 'comment' setter.\")\n\n if len(comment) > 512:\n raise Exception(\"Comment is too long, must be less than 512 characters.\")\n\n self._comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def set_attribute(self, name, value, comment):\n setattr(self, '%s__' % name, value_or_none(value))\n setattr(self, '%s__comment' % name, value_or_none(comment))", "def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment): # type: (str) -> None\n self._tmp_comment = comment", "def comment(self, comment):\r\n\r\n core.FW_conf['connection'].comment(comment)", "def edit_comment():\n # Implement me!\n\n logger.info(\"vars: %r\" % request.vars)\n logger.info(\"vars_comment_text: %r\" % request.vars.comment_text)\n logger.info(\"vars id: %r\" % request.vars.comment_id)\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n\n #comment.comment_text = request.vars.comment_text\n #comment.edited_on = datetime.datetime.utcnow()\n db(db.Comments.id == request.vars.comment_id).update(comment_text=request.vars.comment_text, edited_on=datetime.datetime.utcnow())\n db.commit()\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n return \"ok\"", "def set_comment(node_handle, comment):\n content_type = ContentType.objects.get_for_model(NodeHandle)\n object_pk = node_handle.pk\n user = get_user()\n site_id = django_settings.SITE_ID\n c = Comment(content_type=content_type, object_pk=object_pk, user=user, site_id=site_id, comment=comment)\n c.save()", "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n doc.comment = comment\n else:\n raise CardinalityError('Document::Comment')", "def comment(self, uuid, comment):\n # TODO: add overwrite (false by default) and append options\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n UPDATE experiments\n SET comment = ?\n WHERE uuid = ?\n \"\"\", [comment, uuid])\n cur.close()\n self.conn.commit()", "def testComment(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"comment\")\n\n self.util.stringPropertyTest(self, attr, \"comment\")", "def set_comment_editor_value(self, comment_id, new_body):\r\n self._find_within(\"#comment_{} .wmd-input\".format(comment_id)).fill(new_body)", "def set_comment(self, obj, cursor):\n if isinstance(obj, typedesc.T):\n obj.comment = cursor.brief_comment\n return", "def set_doc_comment(self, 
doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n if validations.validate_doc_comment(comment):\n doc.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Document::Comment')\n else:\n raise CardinalityError('Document::Comment')", "def set_snippet_comment(self, doc, comment):\n self.assert_snippet_exists()\n if not self.snippet_comment_set:\n self.snippet_comment_set = True\n if validations.validate_snip_comment(comment):\n doc.snippet[-1].comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Snippet::SnippetComment')\n else:\n raise CardinalityError('Snippet::SnippetComment')" ]
[ "0.7819822", "0.7701546", "0.75566393", "0.7124005", "0.7091031", "0.7003119", "0.7003119", "0.69408345", "0.69243276", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6875771", "0.6860937", "0.67275405", "0.6671038", "0.6584545", "0.65106696", "0.64922255", "0.6473931", "0.64185214", "0.6406104", "0.6388448", "0.6354242" ]
0.79708326
0
A function to remove beats that fall beyond the bounds of the phrase
def trim_timings(phrase_length, timings): extra_hits = np.argwhere(np.cumsum(timings) > int(phrase_length)).ravel() if len(extra_hits) != 0: all_to_end = np.min(extra_hits) del timings[all_to_end:] return timings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc", "def filter_subsumed(ngrams):\n\n remove = set()\n for ng in ngrams:\n if len(ng) == 1:\n continue\n shorter = ng[:-1]\n if shorter in ngrams and ngrams[shorter] <= ngrams[ng]*1.3:\n remove.add(shorter)\n shorter = ng[1:]\n if shorter in ngrams and ngrams[shorter] <= ngrams[ng]*1.3:\n remove.add(shorter)\n for ng in remove:\n del ngrams[ng]\n return ngrams", "def filter_hot_words(self, hot_words: list):\n\n to_remove = []\n for hot_word_item in hot_words:\n hot_word = hot_word_item['hot_word']\n\n # True if the hot word is in the subtitles of it's range. (E.g. hot word 'hello' which is said at 00:42, when the subtitles at 00:52-00:56 is 'that is how you say hello')\n is_hot_word_falty = any(\n [hot_word in i['subtitles'].split() for i in hot_words if i != hot_word_item\n and abs(i['start'] - hot_word_item['start']) < Constants.DELAY_RADIUS\n and abs(i['end'] - hot_word_item['end']) < Constants.DELAY_RADIUS\n ])\n if(is_hot_word_falty):\n to_remove.append(hot_word_item)\n\n # Can't remove them in the loop because then it will cause problems\n for item in to_remove:\n hot_words.remove(item)\n\n return hot_words", "def _reject_subspans(spans):\n filtered = []\n for i, span in enumerate(spans):\n subspan = False\n for j, other in enumerate(spans):\n if i == j:\n continue\n\n if span[0] >= other[0] and span[1] <= other[1]:\n subspan = True\n break\n if subspan is False:\n filtered.append(span)\n return filtered", "def filter_chants_without_word_boundary(chants, logger=None):\n constains_word_boundary = chants.volpiano.str.contains('---')\n return chants[constains_word_boundary]", "def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy", "def remove_false_positives(headlines,exclusions):\r\n for headline in headlines:\r\n for word in exclusions:\r\n if headline.lower().find(word) != -1: #If headline contains exclusionary word.\r\n headlines.remove(headline)\r\n break\r\n return headlines", "def remove_longer_words(text):\n return \" \".join([word for word in str(text).split() if len(word) <= 12])", "def remove_below_lower_length_limit(self) -> None:\n for column_name in self.data:\n threshold_executor = TrimUtils.remove_text_below_lower_length_threshold(\n self.config[f'{column_name}_lower_length_limit']\n )\n self.data = self.data[self.data[column_name].map(threshold_executor)]\n self.data.reset_index(drop=True, inplace=True)", "def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertain',\n 'estimate',\n 'link',\n 'and',\n 'more',\n 'fetch',\n 'be',\n 'there',\n 'do',\n 'you',\n 'have',\n 'any',\n 'is',\n 'my',\n 'on',\n 'can',\n 'i',\n 'get',\n 'some',\n 'am',\n 'look',\n 'for',\n 'the',\n 'to',\n 'share',\n 'me',\n 'of',\n 'please',\n 'a',\n 'very',\n 'at',\n 'with',\n 'relate',\n 'sorry'\n ]]\n return ' '.join(word)", "def removeBounded(self, bounds):\n if bounds==None or len(bounds)!=4:\n return\n x1,y1,x2,y2 = bounds\n if x1>x2 :\n temp=x1;x1=x2;x2=temp\n if y1>y2:\n temp=y1;y1=y2;y2=temp\n lst=[]\n for i in range(0,self.length()):\n x=self.x[i]; y=self.y[i]\n if (x>x1 and x<x2) and 
(y>y1 and y<y2): \n lst.append(i)\n self.removeMultiple(lst)\n return", "def remove_unsuitable_words(words):\n\n max_length = Board.SIZE\n return [word for word in words if word and \"-\" not in word and len(word) <= max_length]", "def remove_overlapping_ems(mentions):\n to_remove = set()\n new_mentions = []\n length = len(mentions)\n for i in range(length):\n start_r = mentions[i]['start']\n end_r = mentions[i]['end']\n for j in range(length):\n if i != j and j not in to_remove:\n start = mentions[j]['start']\n end = mentions[j]['end']\n if start_r >= start and end_r <= end:\n to_remove.add(i)\n for i in range(length):\n if i not in to_remove:\n new_mentions.append(mentions[i])\n return new_mentions", "def remove_stuck(traj,size):\n from numpy import sqrt, where\n \n r_min = traj.groupby('particle').first()\n r_max = traj.groupby('particle').last()\n\n pos_columns = ['x','y']\n dist = r_min[pos_columns] - r_max[pos_columns]\n dist_eu = sqrt(dist['x']**2+dist['y']**2)\n\n index_remove = dist_eu.index[where(dist_eu < size)]\n \n traj_new = traj\n for i in range(len(index_remove)):\n traj_new = traj_new[(traj_new['particle'] != index_remove[i])]\n \n return traj_new", "def prune(words, sos, eos):\n start_index = 0\n end_index = len(words)\n if sos in words:\n start_index = np.where(words == sos)[0][0] + 1\n if eos in words:\n end_index = np.where(words == eos)[0][0]\n return words[start_index:end_index]", "def cut_text(text):\n for phrase in TERMINALS:\n if phrase in text:\n return text[:text.index(phrase)]\n\n SavedSource(label=LABEL, subject='cut_text', body=text).put()\n return text", "def remove_citation_overlaps(text, possible_markers):\n return [(m, start, end) for m, start, end in possible_markers\n if not any((e.start <= start and e.end >= start)\n or (e.start <= end and e.end >= end)\n or (start <= e.start and end >= e.end)\n for e in internal_citations(text))]", "def find_clumps(text, k, len_win, t):\n\n patterns = []\n len_text = len(text)\n for i in range(len_text - len_win + 1):\n window = text[i:i + len_win]\n freq_map = frequency_table(window, k)\n for key in freq_map.keys():\n if freq_map[key] >= t and key not in patterns:\n patterns.append(key)\n return patterns", "def make_bag(txt, stopw):\n bow = re.split('\\s',txt.lower())\n new_bow=[]\n for word in bow:\n if word not in stopw and len(word)>0 and not re.search('\\d',word):\n new_bow.append(word)\n return(new_bow)", "def condenseGappyAlignment(a, thresh=0.9):\n\n a = padAlignment(a)\n smat = align2mat(a)\n gapSiteInd = np.mean(smat == b'-', axis=0) >= thresh\n keepSeqInd = np.all(smat[:, gapSiteInd] == b'-', axis=1)\n print('Removing %d of %d sites and %d of %d sequences from the alignment.' 
% (gapSiteInd.sum(), smat.shape[1], (~keepSeqInd).sum(), smat.shape[0]))\n\n smat = smat[keepSeqInd,:]\n smat = smat[:, ~gapSiteInd]\n \n return seqmat2align(smat, index=a.index[keepSeqInd])", "def filterPossibleWords(self): \r\n filledInSpaces = []\r\n for i in range(len(self.currentBoard)):\r\n if self.currentBoard[i] != '_':\r\n filledInSpaces.append( (i, self.currentBoard[i]) )\r\n \r\n self.wordList = list(filter(lambda word: self.viableWord(word, filledInSpaces), self.wordList))", "def filterFunction(region):\n inset = abs(region.stopGene.location[1] - region.stopGene.location[0])/2 if region.stopGene else 0\n return region.stop + inset - region.start > minLength", "def search_trimmers(seq: str) -> str:\n return [seq[i:i+3] for i in range(len(seq)-2)]", "def trim_region(self, start, stop):\n if stop > len(self.bases):\n sys.stderr.write(\"Sequence.trim called on sequence that is too short; doing nothing.\\n\")\n return\n # Remove any genes that are overlap the trimmed region\n genes_to_remove = [g for g in self.genes if overlap([start, stop], g.indices)]\n self.genes = [g for g in self.genes if g not in genes_to_remove]\n # Remove bases from sequence\n self.bases = self.bases[:start - 1] + self.bases[stop:]\n # Adjust indices of remaining genes\n bases_removed = stop - start + 1\n for g in self.genes:\n g.adjust_indices(-bases_removed, start)\n return genes_to_remove", "def filterDict(lettersGrid, dictionary, maxLen):\n letters = set([letter for row in lettersGrid for letter in row])\n return set([word for word in dictionary if (len(word) <= maxLen) and\n (len(word) >= 3) and (len(set(word) - letters) == 0)])", "def DeGap(consensus,cutoff = 0.95):\n newCon = []\n for i in consensus:\n if i[0][0] == '-' and i[1] > cutoff:\n print i\n continue\n newCon.append(i)\n return newCon", "def _filter_typos(typos, char_vocab): \n\n new_typos = dict()\n\n for key,values in typos.items():\n new_values = list()\n for v in values: \n \n invalid_chars = [c for c in v if c not in char_vocab]\n if len(invalid_chars) > 0:\n continue\n\n new_values.append(v)\n\n if len(new_values) > 0:\n new_typos[key] = new_values \n\n return new_typos", "def chars_to_preserve(\n self,\n sentence: str,\n ) -> str:\n try:\n tokenized = re.findall(self.whitelist, sentence, re.IGNORECASE)\n return \" \".join(tokenized)\n except Exception as error:\n print(\n textwrap.dedent(\n f\"\"\"\n Bad characters range {self.whitelist},\n {error}\n \"\"\"\n )\n )\n raise", "def remove_garbage(word_list, garbage_method = 'char_confidence', garbage_threshold = 0.85, height = None, **kwargs):\n \n if garbage_method is None:\n return word_list\n elif garbage_method == 'char_confidence':\n #Caluclate average character confidence\n for word in word_list:\n word['char_confidence'] = (word['confidence'] + 0.005) ** (1 / len(word['text']))\n\n #Remove words with low average confidence\n result = []\n for word in word_list:\n if word['char_confidence'] > garbage_threshold:\n result.append(word)\n return result\n else:\n raise ValueError('Method ' + grabage_method + ' not implemented.')", "def prune_ratios(ratios, bad_words):\n for word in bad_words:\n ratios.pop(word, None)" ]
[ "0.60071754", "0.5904355", "0.5902717", "0.58244514", "0.58123296", "0.57975394", "0.5666994", "0.5652277", "0.56520176", "0.56004584", "0.5580919", "0.5541433", "0.55284405", "0.5460393", "0.5398652", "0.53874314", "0.5385925", "0.53792715", "0.5347592", "0.5344827", "0.5343971", "0.5316253", "0.5316009", "0.52854145", "0.52798724", "0.5278071", "0.5270542", "0.5269119", "0.5246649", "0.52433425" ]
0.593318
1
compute overall precision, recall and FB1 (default values are 0.0); if percent is True, return 100 * original decimal value
def calcMetrics(TP, P, T, percent=True): precision = TP / P if P else 0 recall = TP / T if T else 0 FB1 = 2 * precision * recall / (precision + recall) if precision + recall else 0 if percent: return 100 * precision, 100 * recall, 100 * FB1 else: return precision, recall, FB1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_precision(self):\r\n return round(f1_score(self.actual, self.predicted),2)", "def f1_score(precision, recall):\n if precision + recall == 0:\n return 0\n return 2 * precision * recall / (precision + recall)", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def calc_pr_rc_F1(GTD, block_occ):\r\n true_pos = ((GTD + block_occ)==2).sum()\r\n false_pos = sum([1 if BI_val==1 and GTD[i]==0 else 0 for i, BI_val in enumerate(block_occ.values)])\r\n precision = float(true_pos/(true_pos+false_pos))\r\n recall = float(true_pos/(GTD.sum()))\r\n if precision == 0:\r\n F1 = 0\r\n else:\r\n F1 = float(2 * (precision * recall) / (precision + recall))\r\n \r\n return precision, recall, F1", "def f1(gold_labels, predicted_labels):\n \n precision_value = float(precision(gold_labels, predicted_labels))\n recall_value = float(recall(gold_labels, predicted_labels))\n top = float(2 * precision_value * recall_value)\n bottom = precision_value + recall_value\n\n if(top == 0):\n return 0\n else:\n return float(top / bottom)", "def calc_f1(precision: float, recall: float) -> float:\r\n return 2 * (precision * recall) / (precision + recall)", "def percent_b(self) -> float:\n return self._percent_b", "def pct(self):\n\t\treturn self.bottle.pct()", "def precision(self):\n self.overall_precision = precision_score(\n self.y_true, self.y_pred, average = self.average_type).round(self.digits_count_fp)\n self.classes_precision = precision_score(\n self.y_true, self.y_pred, average = None).round(self.digits_count_fp)", "def test_case_09_one_percent_precision(self):\n self.assertEquals(self.func(1, 1, 1.4142135623730951, precision=100), 'Isosceles Triangle')\n self.assertEquals(self.func(1, 1, 1.4142135623730951, precision=2), 'Right Isosceles Triangle')", "def precision(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return conf_matrix[0][0]/(conf_matrix[0][0] + conf_matrix[1][0])", "def comp_f1(precision, recall):\n return 2. * recall * precision / (recall + precision) \\\n if (recall + precision) > 0. 
else 0.", "def result(self):\n prec_value = self.precision.result()\n recall_value = self.recall.result()\n return 2 * math_ops.div_no_nan(prec_value * recall_value,\n prec_value + recall_value)", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def _prec_recall_f1_score(pred_items, gold_items):\n common = Counter(gold_items) & Counter(pred_items)\n num_same = sum(common.values())\n if num_same == 0:\n return 0, 0, 0\n precision = 1.0 * num_same / len(pred_items)\n recall = 1.0 * num_same / len(gold_items)\n f1 = (2 * precision * recall) / (precision + recall)\n return precision, recall, f1", "def per(a):\n return a * 100", "def round_perc(value):\n return round(value, dev_fan.round_perc)", "def percent(num):\n return round(num * 100, 1)", "def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)", "def percent_rating(value):\n value = Decimal(value)\n value = round(value / 3, 2) * 100\n return value", "def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def stretch_pct(cube,out,pct):\n\n try:\n isis.percent(from_=cube, to=out, percentage=pct)\n val = isis.getkey(from_=out, grpname=\"Results\", keyword=\"Value\").decode().replace('\\n', '')\n except ProcessError as e:\n val = None\n if val:\n return float(val)\n else:\n return None", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def get_precision_recall_f1(gt, pred):\n tp, tn, fp, fn = calc_pred_stats(gt, pred)\n prec = precision(tp, fp)\n rec = recall(tp, fn)\n f1_score = f1(prec, rec)\n return prec, rec, f1_score", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def of_num(number: float, percent: float) -> float:\n return number / 100 * percent", "def percentage(a, b):\n return (a * 100.0) / b", "def test_decimal_number(self):\r\n given_n = 2.5\r\n total_n = 10.5\r\n expected_given_percent = 23.80952\r\n expected_other_percent = 76.19047\r\n result = n_percent(given_n, total_n)\r\n\r\n self.assertAlmostEqual(expected_given_percent, result['given_percent'], 4)\r\n self.assertAlmostEqual(expected_other_percent, result['other_percent'], 4)", "def precision(ground_truth, prediction):\n ground_truth = remove_duplicates(ground_truth)\n prediction = remove_duplicates(prediction)\n precision_score = count_a_in_b_unique(prediction, ground_truth) / float(len(prediction))\n assert 0 <= precision_score <= 1\n return precision_score" ]
[ "0.70171875", "0.6893242", "0.6893009", "0.68919694", "0.68863714", "0.68086755", "0.6713527", "0.6683036", "0.6610096", "0.65929633", "0.657033", "0.6569705", "0.65341216", "0.65330094", "0.648091", "0.6473839", "0.6464411", "0.6462978", "0.64594454", "0.6454277", "0.6442183", "0.64409447", "0.6418599", "0.6408407", "0.64070207", "0.64063185", "0.6406023", "0.6404409", "0.6398236", "0.6391089" ]
0.7153003
0
Split chunk tag into IOB tag and chunk type; return (iob_tag, chunk_type)
def splitTag(chunkTag, oTag = "O", raw = False): if chunkTag == "O" or chunkTag == oTag: tag, type_ = "O", None elif raw: tag, type_ = "B", chunkTag else: try: # split on first hyphen, allowing hyphen in type tag, type_ = chunkTag.split('-', 1) except ValueError: tag, type_ = chunkTag, None return tag, type_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_tag(chunk_tag):\n if chunk_tag == 'O':\n return ('O', None)\n return chunk_tag.split('-', maxsplit=1)", "def split_chunk(chunk):\n if not sentinel_d.get(\"repatt2\"):\n patt2 = r\"<(t(?:ag)?)\\s*([^>]*)>([^>]*)</t(?:ag)?>\"\n sentinel_d.update(repatt2=re.compile(patt2, flags=re.IGNORECASE))\n # Chunk = collections.namedtuple('Chunk', 'tag attrs text')\n if chunk.lower().startswith(\"<t\") and chunk.endswith(\"/>\"):\n chunk_split = chunk.split(None, 1) # [1][:-2]\n tag, attrs = chunk_split[0][1:], chunk_split[1][:-2]\n options_d, font_d, case = parse_tag_attrs(attrs) # , attr=text_s) #\n text = options_d.pop(text_s, \"\")\n new_attrs = gen_tag_attrs(options=options_d, font=font_d, case=case)\n chunk = \"<{tag} {new_attrs}>{text}</{tag}>\".format(\n tag=tag, new_attrs=new_attrs, text=text\n )\n matches = sentinel_d[\"repatt2\"].findall(chunk)\n result = (\n Chunk(*matches[0])\n if len(matches) == 1\n else Chunk(\"\", \"\", chunk)\n if chunk\n else ()\n )\n return result", "def startOfChunk(prevTag, tag, prevType, type_):\r\n chunkStart = ((prevTag == \"B\" and tag == \"B\") or\r\n (prevTag == \"B\" and tag == \"B\") or\r\n (prevTag == \"I\" and tag == \"B\") or\r\n (prevTag == \"O\" and tag == \"B\") or\r\n (prevTag == \"O\" and tag == \"I\") or\r\n\r\n (prevTag == \"E\" and tag == \"E\") or\r\n (prevTag == \"E\" and tag == \"I\") or\r\n (prevTag == \"O\" and tag == \"E\") or\r\n (prevTag == \"O\" and tag == \"I\") or\r\n\r\n (tag != \"O\" and tag != \".\" and prevType != type_) or\r\n (tag == \"]\" or tag == \"[\"))\r\n # corrected 1998-12-22: these chunks are assumed to have length 1\r\n\r\n #print(\"startOfChunk?\", prevTag, tag, prevType, type)\r\n #print(chunkStart)\r\n return chunkStart", "def _parse_tokens(chunk, format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n tokens = []\n # Only process <chunk> and <chink> elements, \n # text nodes in between return an empty list.\n if not (chunk.tag == XML_CHUNK or chunk.tag == XML_CHINK):\n return []\n type = chunk.get(XML_TYPE, \"O\")\n if type == \"PNP\":\n # For, <chunk type=\"PNP\">, recurse all the child chunks inside the PNP.\n for ch in chunk:\n tokens.extend(_parse_tokens(ch, format))\n # Tag each of them as part of the PNP.\n if PNP in format:\n i = format.index(PNP)\n for j, token in enumerate(tokens):\n token[i] = (j==0 and \"B-\" or \"I-\") + \"PNP\"\n # Store attachments so we can construct anchor id's in parse_string().\n # This has to be done at the end, when all the chunks have been found.\n a = chunk.get(XML_OF).split(_UID_SEPARATOR)[-1]\n if a:\n _attachments.setdefault(a, [])\n _attachments[a].append(tokens)\n return tokens\n # For <chunk type-\"VP\" id=\"1\">, the relation is VP-1.\n # For <chunk type=\"NP\" relation=\"OBJ\" of=\"1\">, the relation is NP-OBJ-1.\n relation = _parse_relation(chunk, type)\n # Process all of the <word> elements in the chunk, for example:\n # <word type=\"NN\" lemma=\"pizza\">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza]\n for word in filter(lambda n: n.tag == XML_WORD, chunk):\n tokens.append(_parse_token(word, chunk=type, relation=relation, format=format))\n # Add the IOB chunk tags:\n # words at the start of a chunk are marked with B-, words inside with I-.\n if CHUNK in format:\n i = format.index(CHUNK)\n for j, token in enumerate(tokens):\n token[i] = token[i] != \"O\" and ((j==0 and \"B-\" or \"I-\") + token[i]) or \"O\"\n # The chunk can be the anchor of one or more PNP chunks.\n # Store anchors so we can construct anchor id's in parse_string().\n a = 
chunk.get(XML_ANCHOR, \"\").split(_UID_SEPARATOR)[-1]\n if a: \n _anchors[a] = tokens\n return tokens", "def get_entity_bio( seq):\n chunks = []\n chunk = [-1, -1, -1]\n for indx, tag in enumerate(seq):\n # if not isinstance(tag, str):\n # tag = id2label[tag]\n if tag.startswith(\"B-\"):\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n chunk[1] = indx\n chunk[0] = tag.split('-')[1]\n chunk[2] = indx\n if indx == len(seq) - 1:\n chunks.append(chunk)\n elif tag.startswith('I-') and chunk[1] != -1:\n _type = tag.split('-')[1]\n if _type == chunk[0]:\n chunk[2] = indx\n\n if indx == len(seq) - 1:\n chunks.append(chunk)\n else:\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n return chunks", "def endOfChunk(prevTag, tag, prevType, type_):\r\n return ((prevTag == \"B\" and tag == \"B\") or\r\n (prevTag == \"B\" and tag == \"O\") or\r\n (prevTag == \"I\" and tag == \"B\") or\r\n (prevTag == \"I\" and tag == \"O\") or\r\n\r\n (prevTag == \"E\" and tag == \"E\") or\r\n (prevTag == \"E\" and tag == \"I\") or\r\n (prevTag == \"E\" and tag == \"O\") or\r\n (prevTag == \"I\" and tag == \"O\") or\r\n\r\n (prevTag != \"O\" and prevTag != \".\" and prevType != type_) or\r\n (prevTag == \"]\" or prevTag == \"[\"))\r\n # corrected 1998-12-22: these chunks are assumed to have length 1\r", "def get_entity_bio(seq,id2label):\n chunks = []\n chunk = [-1, -1, -1]\n for indx, tag in enumerate(seq):\n if not isinstance(tag, str):\n tag = id2label[tag]\n if tag.startswith(\"B-\"):\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n chunk[1] = indx\n chunk[0] = tag.split('-')[1]\n chunk[2] = indx\n if indx == len(seq) - 1:\n chunks.append(chunk)\n elif tag.startswith('I-') and chunk[1] != -1:\n _type = tag.split('-')[1]\n if _type == chunk[0]:\n chunk[2] = indx\n\n if indx == len(seq) - 1:\n chunks.append(chunk)\n else:\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n return chunks", "def iob2iobes(sentence_iob):\n sentence_iobes = copy.deepcopy(sentence_iob)\n for tag in ['chunk', 'ner']:\n sentence_iobes = I2S(sentence_iob, sentence_iobes, tag)\n sentence_iobes = I2B(sentence_iob, sentence_iobes, tag)\n sentence_iobes = I2E(sentence_iob, sentence_iobes, tag)\n sentence_iobes = B2S(sentence_iob, sentence_iobes, tag)\n return sentence_iobes", "def splitTag(my_tag):\n my_split = re.findall(r'(\\d+)(\\D+)', my_tag)\n return ((int(x[0]), x[1]) for x in my_split)", "def start_of_chunk(prev_tag, tag, prev_type, type_):\n chunk_start = False\n\n if tag == 'B': chunk_start = True\n if tag == 'S': chunk_start = True\n\n if prev_tag == 'E' and tag == 'E': chunk_start = True\n if prev_tag == 'E' and tag == 'I': chunk_start = True\n if prev_tag == 'S' and tag == 'E': chunk_start = True\n if prev_tag == 'S' and tag == 'I': chunk_start = True\n if prev_tag == 'O' and tag == 'E': chunk_start = True\n if prev_tag == 'O' and tag == 'I': chunk_start = True\n\n if tag != 'O' and tag != '.' 
and prev_type != type_:\n chunk_start = True\n\n return chunk_start", "def split_tag(elem, tags):\n splited_tag = elem.split(TAG_SEP)\n if len(splited_tag) > 1:\n tag_prefix, tag = splited_tag\n assert tag in tags.tags\n assert tag_prefix in tags.iob\n else:\n tag = elem\n tag_prefix = None\n assert tag == tags.default\n return tag_prefix, tag", "def readchunk(self):\n chunksize = self.readdword()\n chunktype = ChunkType(self.readword())\n chunkdata = self.readbytearr(chunksize - 6)\n return {\n \"type\": chunktype,\n \"data\": _ParseChunk(chunktype, chunkdata, self.PIXELSIZE),\n }", "def get_chunks(seq, tags):\n default = tags[NONE]\n idx_to_tag = {idx: tag for tag, idx in tags.items()}\n chunks = []\n chunk_type, chunk_start = None, None\n for i, tok in enumerate(seq):\n # End of a chunk 1\n if tok == default and chunk_type is not None:\n # Add a chunk.\n chunk = (chunk_type, chunk_start, i)\n chunks.append(chunk)\n chunk_type, chunk_start = None, None\n\n # End of a chunk + start of a chunk!\n elif tok != default:\n tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)\n if chunk_type is None:\n chunk_type, chunk_start = tok_chunk_type, i\n elif tok_chunk_type != chunk_type or tok_chunk_class == \"B\":\n chunk = (chunk_type, chunk_start, i)\n chunks.append(chunk)\n chunk_type, chunk_start = tok_chunk_type, i\n else:\n pass\n\n # end condition\n if chunk_type is not None:\n chunk = (chunk_type, chunk_start, len(seq))\n chunks.append(chunk)\n\n return chunks", "def get_entity_bieos(seq,id2label):\n chunks = []\n chunk = [-1, -1, -1]\n for indx, tag in enumerate(seq):\n if not isinstance(tag, str):\n tag = id2label[tag]\n\n if tag.startswith(\"S-\"):\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n chunk[1] = indx\n chunk[2] = indx\n chunk[0] = tag # chunk [S-a,3,3]\n chunks.append(chunk)\n chunk = (-1, -1, -1)\n\n if tag.startswith(\"B-\"):\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n chunk[1] = indx\n chunk[0] = tag.split('-')[-1] # chunk [type,0,-1]\n\n elif tag.startswith('I-') and chunk[1] != -1:\n _type = tag.split('-')[-1]\n if _type == chunk[0]: # 如果后缀相同,则记录下来\n #chunk[2] = indx\n continue\n else:\n chunk = [-1,-1,-1]\n\n elif tag.startswith('E-'):\n _type = tag.split('-')[-1]\n if _type == chunk[0]:\n chunk[2] = indx\n if indx == len(seq) - 1 and chunk[2] != -1: # 如果是最后一个标签了,就直接append\n chunks.append(chunk)\n else:\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n return chunks", "def end_of_chunk(prev_tag, tag, prev_type, type_):\n chunk_end = False\n\n if prev_tag == 'E': chunk_end = True\n if prev_tag == 'S': chunk_end = True\n\n if prev_tag == 'B' and tag == 'B': chunk_end = True\n if prev_tag == 'B' and tag == 'S': chunk_end = True\n if prev_tag == 'B' and tag == 'O': chunk_end = True\n if prev_tag == 'I' and tag == 'B': chunk_end = True\n if prev_tag == 'I' and tag == 'S': chunk_end = True\n if prev_tag == 'I' and tag == 'O': chunk_end = True\n\n if prev_tag != 'O' and prev_tag != '.' 
and prev_type != type_:\n chunk_end = True\n\n return chunk_end", "def get_entity_bios(seq,id2label):\n chunks = []\n chunk = [-1, -1, -1]\n for indx, tag in enumerate(seq):\n if not isinstance(tag, str):\n tag = id2label[tag]\n\n if tag.startswith(\"S-\"):\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n chunk[1] = indx\n chunk[2] = indx\n chunk[0] = tag.split('-')[1] #chunk [type,3,3]\n chunks.append(chunk)\n chunk = (-1, -1, -1)\n\n if tag.startswith(\"B-\"):\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n chunk[1] = indx\n chunk[0] = tag.split('-')[1] #chunk [type,0,-1]\n\n elif tag.startswith('I-') and chunk[1] != -1:\n _type = tag.split('-')[1]\n if _type == chunk[0]: #如果后缀相同,则记录下来\n chunk[2] = indx\n if indx == len(seq) - 1: #如果是最后一个标签了,就直接append\n chunks.append(chunk)\n else:\n if chunk[2] != -1:\n chunks.append(chunk)\n chunk = [-1, -1, -1]\n return chunks", "def GetTags(tag, btype, indent):\n assert tag in COLOR_SCHEME\n assert btype in ['match', 'diff']\n fbegin = BEGIN_TAG % COLOR_SCHEME[tag][btype]\n bbegin = BEGIN_TAG % COLOR_SCHEME[tag]['bckgrnd']\n lend = END_TAG\n nl_plus_indent = '\\n'\n if indent > 0:\n nl_plus_indent += bbegin + cgi.escape(\" \"*indent) + lend\n return fbegin, lend, nl_plus_indent", "def _readtag(self):\n tag = Tag()\n tag.tag = self.reader.readint(1)\n tag.len = self.reader.readint(2)\n\n if tag.len > 0:\n tag.data = self.reader.read(tag.len)\n return tag", "def readNamedTag(bstream):\r\n #print(\"Reading Named Tag\\n\")\r\n tbyte = bstream.read(1)[0] # read 1 byte and get its numerical value #read 1 byte, switch type generated depending (stream-reader type 'abstract?' factory\r\n #print(\"Byte read: %d\" % tbyte)\r\n tname = TAG_String(bstream).value\r\n #print(\"Name read: %s\" % tname)\r\n #print(\"RNamedT - name is %s\" %tname)\r\n tpayload = TAGLIST[tbyte](bstream)\r\n tpayload.name = tname\r\n return (tname, tpayload)\r\n #object type = bleh based on the number 0-255 you just read. Which should be a 10... 
for TAG_Compound.\r", "def df2chunkset(df, chunktag='chunktag', guesstag='guesstag'):\n go, ge = set(), set()\n if df.iloc[0][chunktag][0] not in 'BOS':\n raise ValueError('Invalid chunktag on first token.')\n if df.iloc[0][guesstag][0] not in 'BOS':\n raise ValueError('Invalid guesstag on first token.')\n chunk_go = [(0, df.iloc[0][chunktag])]\n chunk_ge = [(0, df.iloc[0][guesstag])]\n for tid, r in df.iloc[1:].iterrows():\n if r[chunktag][0] in 'BOS':\n # start new\n go.add(tuple(chunk_go))\n chunk_go = [(tid, r[chunktag])]\n else:\n # continue chunk\n chunk_go.append((tid, r[chunktag]))\n if r.guesstag[0] in 'BOS':\n # start new\n ge.add(tuple(chunk_ge))\n chunk_ge = [(tid, r[guesstag])]\n else:\n # continue chunk\n chunk_ge.append((tid, r[guesstag]))\n\n if chunk_ge:\n ge.add(tuple(chunk_ge))\n if chunk_go:\n go.add(tuple(chunk_go))\n\n return go, ge", "def iobes2iob(sentence_iobes):\n sentence_iob = copy.deepcopy(sentence_iobes)\n for tag in ['chunk', 'ner']:\n sentence_iob = E2I(sentence_iobes, sentence_iob, tag)\n sentence_iob = S2I(sentence_iobes, sentence_iob, tag)\n sentence_iob = B2I(sentence_iobes, sentence_iob, tag)\n sentence_iob = S2B(sentence_iobes, sentence_iob, tag)\n return sentence_iob", "def ensure_iob2(tags):\n tags = list(tags)\n for i, tag in enumerate(tags):\n if tag == 'O':\n continue\n split = tag.split('-')\n if len(split) != 2 or split[0] not in ['I', 'B']:\n return False\n if split[0] == 'B':\n continue\n elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n elif tags[i - 1][1:] == tag[1:]:\n continue\n else: # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n return tags", "def _parse_token(word, chunk=\"O\", pnp=\"O\", relation=\"O\", anchor=\"O\", \n format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n tags = []\n for tag in format:\n if tag == WORD : tags.append(xml_decode(word.value))\n elif tag == POS : tags.append(xml_decode(word.get(XML_TYPE, \"O\")))\n elif tag == CHUNK : tags.append(chunk)\n elif tag == PNP : tags.append(pnp)\n elif tag == REL : tags.append(relation)\n elif tag == ANCHOR : tags.append(anchor)\n elif tag == LEMMA : tags.append(xml_decode(word.get(XML_LEMMA, \"\")))\n else:\n # Custom tags when the parser has been extended, see also Word.custom_tags{}.\n tags.append(xml_decode(word.get(tag, \"O\")))\n return tags", "def tags(self):\n # See also. 
Sentence.__repr__().\n ch, I,O,B = self.chunk, INSIDE+\"-\", OUTSIDE, BEGIN+\"-\"\n tags = [OUTSIDE for i in range(len(self.sentence.token))]\n for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]\n if tag == WORD:\n tags[i] = encode_entities(self.string)\n elif tag == POS and self.type:\n tags[i] = self.type\n elif tag == CHUNK and ch and ch.type:\n tags[i] = (self == ch[0] and B or I) + ch.type\n elif tag == PNP and self.pnp:\n tags[i] = (self == self.pnp[0] and B or I) + \"PNP\"\n elif tag == REL and ch and len(ch.relations) > 0:\n tags[i] = [\"-\".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations]\n tags[i] = \"*\".join(tags[i])\n elif tag == ANCHOR and ch:\n tags[i] = ch.anchor_id or OUTSIDE\n elif tag == LEMMA:\n tags[i] = encode_entities(self.lemma or \"\")\n elif tag in self.custom_tags:\n tags[i] = self.custom_tags.get(tag) or OUTSIDE\n return tags", "def _do_chunk(self, type, role=None, relation=None, iob=None):\n if (type is None or type == OUTSIDE) and \\\n (role is None or role == OUTSIDE) and (relation is None or relation == OUTSIDE):\n return\n if iob != BEGIN \\\n and self.chunks \\\n and self.chunks[-1].type == type \\\n and self._relation == (relation, role) \\\n and self.words[-2].chunk is not None: # \"one, two\" => \"one\" & \"two\" different chunks.\n self.chunks[-1].append(self.words[-1])\n else:\n ch = Chunk(self, [self.words[-1]], type, role, relation)\n self.chunks.append(ch)\n self._relation = (relation, role)", "def split_chunk(chunk, *a, **kw):\n return split_chunk(chunk, *a, **kw)", "def _load_chunk_from_tag(self, chunk, tag):\n\n level = tag[\"Level\"]\n\n # These fromstring() calls are designed to raise if there are any\n # issues, but still be speedy.\n\n # Loop through the sections and unpack anything that we find.\n for tag in level[\"Sections\"].tags:\n index = tag[\"Y\"].value\n section = Section()\n section.blocks = array(\"B\")\n section.blocks.fromstring(tag[\"Blocks\"].value)\n section.metadata = array(\"B\", unpack_nibbles(tag[\"Data\"].value))\n section.skylight = array(\"B\",\n unpack_nibbles(tag[\"SkyLight\"].value))\n chunk.sections[index] = section\n\n chunk.heightmap = array(\"B\")\n chunk.heightmap.fromstring(level[\"HeightMap\"].value)\n chunk.blocklight = array(\"B\",\n unpack_nibbles(level[\"BlockLight\"].value))\n\n chunk.populated = bool(level[\"TerrainPopulated\"])\n\n if \"Entities\" in level:\n for tag in level[\"Entities\"].tags:\n try:\n entity = self._load_entity_from_tag(tag)\n chunk.entities.add(entity)\n except KeyError:\n log.msg(\"Unknown entity %s\" % tag[\"id\"].value)\n log.msg(\"Tag for entity:\")\n log.msg(tag.pretty_tree())\n\n if \"TileEntities\" in level:\n for tag in level[\"TileEntities\"].tags:\n try:\n tile = self._load_tile_from_tag(tag)\n chunk.tiles[tile.x, tile.y, tile.z] = tile\n except KeyError:\n log.msg(\"Unknown tile entity %s\" % tag[\"id\"].value)\n log.msg(\"Tag for tile:\")\n log.msg(tag.pretty_tree())\n\n chunk.dirty = not chunk.populated", "def split_input_target(chunk):\n input_text = chunk[:-shift]\n target_text = chunk[shift:]\n return input_text, target_text", "def iob2(tags):\n for i, tag in enumerate(tags):\n if tag == \"O\":\n continue\n split = tag.split(\"-\")\n if len(split) != 2 or split[0] not in [\"I\", \"B\"]:\n return False\n if split[0] == \"B\":\n continue\n elif i == 0 or tags[i - 1] == \"O\": # conversion IOB1 to IOB2\n tags[i] = \"B\" + tag[1:]\n elif tags[i - 1][1:] == tag[1:]:\n continue\n else: # 
conversion IOB1 to IOB2\n tags[i] = \"B\" + tag[1:]\n return True", "def split_tag(image_name):\n image = image_name.split(\":\", maxsplit=1)\n if len(image) > 1:\n image_repo = image[0]\n image_tag = image[1]\n else:\n image_repo = image[0]\n image_tag = None\n return image_repo, image_tag" ]
[ "0.713737", "0.69007325", "0.6209524", "0.60984844", "0.6082831", "0.58778906", "0.57449347", "0.5672539", "0.5591809", "0.55803895", "0.5574942", "0.5501076", "0.547124", "0.5446082", "0.5443957", "0.54422784", "0.5439464", "0.5370299", "0.53504926", "0.5332012", "0.53270584", "0.5294447", "0.52878356", "0.52760583", "0.5263097", "0.5178603", "0.5165946", "0.51561415", "0.51453376", "0.5133391" ]
0.7466575
0
Process input in given format and count chunks using the last two columns; return correctChunk, foundGuessed, foundCorrect, correctTags, tokenCounter
def countChunks(args,inputFile): boundary = "-X-" # sentence boundary # delimiter = args.delimiter # raw = args.raw # oTag = args.oTag #inputFile=args.inputFile delimiter = args["delimiter"] raw = args["raw"] oTag = args["oTag"] fileIterator=open(inputFile) correctChunk = defaultdict(int) # number of correctly identified chunks foundCorrect = defaultdict(int) # number of chunks in corpus per type foundGuessed = defaultdict(int) # number of identified chunks per type tokenCounter = 0 # token counter (ignores sentence breaks) correctTags = 0 # number of correct chunk tags lastType = None # temporary storage for detecting duplicates inCorrect = False # currently processed chunk is correct until now lastCorrect, lastCorrectType = "O", None # previous chunk tag in corpus lastGuessed, lastGuessedType = "O", None # previously identified chunk tag for line in fileIterator: # each non-empty line must contain >= 3 columns features = line.strip().split(delimiter) #print(features) if not features or features[0] == boundary: features = [boundary, "O", "O"] elif len(features) < 3: raise IOError("conlleval: unexpected number of features in line %s\n" % line) # extract tags from last 2 columns guessed, guessedType = splitTag(features[-1], oTag=oTag, raw=raw) correct, correctType = splitTag(features[-2], oTag=oTag, raw=raw) # 1999-06-26 sentence breaks should always be counted as out of chunk firstItem = features[0] if firstItem == boundary: guessed, guessedType = "O", None # decide whether current chunk is correct until now if inCorrect: endOfGuessed = endOfChunk(lastCorrect, correct, lastCorrectType, correctType) endOfCorrect = endOfChunk(lastGuessed, guessed, lastGuessedType, guessedType) if (endOfGuessed and endOfCorrect and lastGuessedType == lastCorrectType): inCorrect = False correctChunk[lastCorrectType] += 1 elif ( endOfGuessed != endOfCorrect or guessedType != correctType): inCorrect = False startOfGuessed = startOfChunk(lastGuessed, guessed, lastGuessedType, guessedType) startOfCorrect = startOfChunk(lastCorrect, correct, lastCorrectType, correctType) if (startOfCorrect and startOfGuessed and guessedType == correctType): inCorrect = True if startOfCorrect: foundCorrect[correctType] += 1 if startOfGuessed: foundGuessed[guessedType] += 1 if firstItem != boundary: if correct == guessed and guessedType == correctType: correctTags += 1 tokenCounter += 1 lastGuessed, lastGuessedType = guessed, guessedType lastCorrect, lastCorrectType = correct, correctType if inCorrect: correctChunk[lastCorrectType] += 1 return correctChunk, foundGuessed, foundCorrect, correctTags, tokenCounter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_chunks(self) -> int:", "def mistake_counts(filename, statsfile):\n #The data dat will be used comes from the mistakes_dataframe-function.\n data = mistakes_dataframe(filename, statsfile)[0]\n\n #For all the types of information, a list will be made, for every \n #false positive and negative, a seperate list will be made.\n fp_token_list = []\n fp_lemma_list = []\n fp_UPOS_list = []\n fp_XPOS_list = []\n fp_DepRel_list = []\n fp_head_list = []\n fp_PrevPOS_list = []\n fp_NextPOS_list = []\n fp_counter = 0 #A counter to count the false positives\n \n fn_token_list = []\n fn_lemma_list = []\n fn_UPOS_list = []\n fn_XPOS_list = []\n fn_DepRel_list = []\n fn_head_list = []\n fn_PrevPOS_list = []\n fn_NextPOS_list = []\n fn_counter = 0 #The false negatives are also counted\n \n for data in data:\n #For all the False Positives, their tokens, lemmas, UPOS-labels, \n #XPOS-Labels, DepRel-labels, heads, PrevPOS-Labels, NextPOS-Labels\n #are appended to separate lists.\n if data['Mistake-type'] == 'FalsePositive': \n fp_counter += 1\n fp_token_list.append(data['Token'])\n fp_lemma_list.append(data['lemma'])\n fp_UPOS_list.append(data['UPOS'])\n fp_XPOS_list.append(data['XPOS'])\n fp_DepRel_list.append(data['DepRel'])\n fp_head_list.append(data['head'])\n fp_PrevPOS_list.append(data['PrevPOS'])\n fp_NextPOS_list.append(data['NextPOS'])\n #The same happens for the False Negatives.\n elif data['Mistake-type'] == 'FalseNegative': \n fn_counter += 1\n fn_token_list.append(data['Token'])\n fn_lemma_list.append(data['lemma'])\n fn_UPOS_list.append(data['UPOS'])\n fn_XPOS_list.append(data['XPOS'])\n fn_DepRel_list.append(data['DepRel'])\n fn_head_list.append(data['head'])\n fn_PrevPOS_list.append(data['PrevPOS'])\n fn_NextPOS_list.append(data['NextPOS'])\n #The mistakes are printed\n print(f\"In total, this classifier made {fp_counter} False Positive and {fn_counter} False Negative mistakes\")\n\n #With the help of a counter dictionary, all instances are counted and printed below\n print(\"-TOKEN-\")\n print(\"FP:\\n\", collections.Counter(fp_token_list))\n print(\"FN:\\n\", collections.Counter(fn_token_list))\n print('\\n\\n')\n print(\"-LEMMA-\")\n print(\"FP:\\n\", collections.Counter(fp_lemma_list))\n print(\"FN:\\n\", collections.Counter(fn_lemma_list))\n print('\\n\\n')\n print(\"-UPOS-\")\n print(\"FP:\\n\", collections.Counter(fp_UPOS_list))\n print(\"FN:\\n\", collections.Counter(fn_UPOS_list))\n print('\\n\\n')\n print(\"-XPOS-\")\n print(\"FP:\\n\", collections.Counter(fp_XPOS_list)) \n print(\"FN:\\n\", collections.Counter(fn_XPOS_list))\n print('\\n\\n')\n print(\"-DepRel-\")\n print(\"FP:\\n\", collections.Counter(fp_DepRel_list))\n print(\"FN:\\n\", collections.Counter(fn_DepRel_list))\n print('\\n\\n')\n print(\"-head-\")\n print(\"FP:\\n\", collections.Counter(fp_head_list))\n print(\"FN:\\n\", collections.Counter(fn_head_list))\n print('\\n\\n')\n print(\"-PrevPos-\")\n print(\"FP:\\n\", collections.Counter(fp_PrevPOS_list))\n print(\"FN:\\n\", collections.Counter(fn_PrevPOS_list))\n print('\\n\\n')\n print(\"-NextPOS-\")\n print(\"FP:\\n\", collections.Counter(fp_NextPOS_list))\n print(\"FN:\\n\", collections.Counter(fn_NextPOS_list))", "def _count_chunks(matches):\n i = 0\n chunks = 1\n while (i < len(matches) - 1):\n if (matches[i + 1][0] == matches[i][0] + 1) and (matches[i + 1][1] == matches[i][1] + 1):\n i += 1\n continue\n i += 1\n chunks += 1\n return chunks", "def convert_chunk_into_number(chunk_of_string):\n # TODO: Daniel\n my_file = 
open(\"C:/Users/Triqk/github/RSAProject1/TestFile.txt\", \"r\")\n test_message = my_file.readlines()\n number = int(text_message)\n return number", "def process_chunk(chunk):\n count = 0\n summation = 0.0\n for float_str in chunk.split():\n try:\n float_num = float(float_str)\n count += 1\n summation += float_num\n except ValueError:\n print \"Invalid floating number encountered: %s\" % float_str\n sys.exit(1)\n\n return (count, summation)", "def parse_chunks(self):\n logger.info('parse_chunks()')\n\n while (self.replay.pos < len(self.replay)):\n chunk_type = self.replay.read_uint32()\n chunk_size = self.replay.read_int32()\n offset = self.replay.bytepos\n\n if chunk_type == ChunkTypes.CHECKPOINT.value:\n self.parse_checkpoint()\n\n elif chunk_type == ChunkTypes.EVENT.value:\n self.parse_event()\n\n elif chunk_type == ChunkTypes.REPLAYDATA.value:\n self.parse_replaydata()\n\n elif chunk_type == ChunkTypes.HEADER.value:\n self.parse_header(chunk_size)\n\n self.replay.bytepos = offset + chunk_size", "def countChaptersVerses(filename):\n # Modes that the usfm scanner is in (parsing mode)\n NORMAL = 0 # regular Bible text\n MARKER = 1 # USFM marker\n PREFIX = 2 # file header info\n GLOSSARY = 3 # within a \\w ... \\w* section\n mode = PREFIX\n newParagraph = False\n usfmCode = \"\"\n markerPattern = r'\\\\(\\S+)'\n markerPatternCompiled = regex.compile(markerPattern) # looking for a usfm \\marker\n # The following markers are ones we just \"delete\" from the text because they are\n # glossary or formatting markers. NOTE: The next line of code is critical. If there\n # is a marker that I have not seen before, I may lose words from the original USFM\n # and verses can appear to be truncated. Watch out for this in the future.\n markersToIgnore = ['li', 'q1', 'q2', 'qt', 'm', 'w', 'pi', 'pi2', 'b', 'nb', 'mi']\n # The current word list\n wordlist = []\n try:\n # If you do not have utf_8_sig, the byte-order-mark ef bb bf messes up\n # the initial \\id line so it does not match \\\\id below. 
This decoding\n # method dumps the BOM if it is present.\n file = open(filename, 'r', encoding='utf_8_sig')\n except IOError:\n # File does not exist...ignore...lets us pass wrong parameters like *.sfm *.usfm *.SFM and not worry\n return\n debug(f\"Processing file {filename}\")\n\n for lineno, line in enumerate(file):\n # Ignore blank lines\n if not line.strip():\n continue;\n\n debug(\"DEBUG1: \" + line)\n\n # Disregard line/verse boundaries so that repeats can cross lines/verses\n words = line.split()\n debug(\"DEBUG2: \" + \"::\".join(words))\n\n # Handle USFM codes (by noting them or dropping them)\n while words:\n word = words.pop(0)\n debug(f\"DEBUG3: Processing chunk ::{word}:: with length {len(word)}\")\n markerMatch = markerPatternCompiled.search(word)\n #print(\"DEBUG2: \" + \"Word=\" + word + \" \" + ' '.join(words))\n # Capture context of book chapter:verse\n if (word == \"\\\\id\"):\n debug(f\"DEBUG4: Processing id\")\n bookid = words.pop(0)\n debug(f\"DEBUG5: Found book id {bookid}\")\n # We don't process the glossary book\n if (bookid == \"XXA\" or bookid == \"XXB\" or bookid == \"FRT\" or bookid == \"GLO\" or \n bookid == \"XXC\" or bookid == \"XXD\" or bookid == \"INT\" or bookid == \"BAK\" or\n bookid == \"XXE\" or bookid == \"XXF\" or bookid == \"XXG\"):\n file.close()\n return\n book = bookid # instead of changing to any other naming system, keep it same\n debug(\"DEBUG6: Set Book = {book}\")\n elif (word == \"\\\\c\"):\n if not words:\n error(f\"Missing chapter number in {filename}:{lineno}\")\n chapter = words.pop(0)\n debug(f\"DEBUG7: Chapter {chapter}\")\n verse = 0 # restart verse numbering\n mode = NORMAL # move out of PREFIX mode\n elif (word == \"\\\\v\"):\n if not words:\n error(f\"Missing verse number in {filename}:{lineno}\")\n verse = words.pop(0)\n debug(f\"DEBUG8: Verse {verse}\")\n # Verse numbers should be monotonically increasing by one every time from the previous one\n try: chapter\n except NameError: \n error(f\"Missing chapter in {book} or verse number {verse} is not within a chapter??\")\n exit(1)\n prevVerse = int(verseDict.get((book, chapter), 0))\n if (\"-\" in verse):\n # Special case: we have a verse range, like 17-18\n verses = verse.split(\"-\")\n verse1 = int(verses[0])\n verse2 = int(verses[1])\n if (prevVerse+1 != verse1):\n error(f\"Verse number {verse1} in range {verse} is out of sequence in {book} {chapter}, last verse was {prevVerse}\")\n if ((verse2 - verse1) > 0):\n # We have a range of verses like \\v 1-4 or \\v 1-3 or \\v 5-6\n verse = verse2 # move to the end of the range\n prevVerse = verse2 - 1 # set up for below check; we know it is OK, but it doesn't!\n else:\n error(f\"Verse number {verse1} in range {verse} is greater than the end of the range in {book} {chapter}, last verse was {prevVerse}\")\n else:\n # Just a regular single verse, like \\v 4\n pass\n # Now carry on as if no verse range was found\n if (prevVerse+1 != int(verse)):\n error(f\"Verse number {verse} is out of sequence in {book} {chapter}, last verse was {prevVerse}\")\n verseDict[(book, chapter)] = verse\n\n file.close()", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def _parse_tokens(chunk, format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n tokens = []\n # Only process <chunk> and <chink> elements, \n # text nodes in between return an empty list.\n if not (chunk.tag == XML_CHUNK or chunk.tag == XML_CHINK):\n return []\n type = 
chunk.get(XML_TYPE, \"O\")\n if type == \"PNP\":\n # For, <chunk type=\"PNP\">, recurse all the child chunks inside the PNP.\n for ch in chunk:\n tokens.extend(_parse_tokens(ch, format))\n # Tag each of them as part of the PNP.\n if PNP in format:\n i = format.index(PNP)\n for j, token in enumerate(tokens):\n token[i] = (j==0 and \"B-\" or \"I-\") + \"PNP\"\n # Store attachments so we can construct anchor id's in parse_string().\n # This has to be done at the end, when all the chunks have been found.\n a = chunk.get(XML_OF).split(_UID_SEPARATOR)[-1]\n if a:\n _attachments.setdefault(a, [])\n _attachments[a].append(tokens)\n return tokens\n # For <chunk type-\"VP\" id=\"1\">, the relation is VP-1.\n # For <chunk type=\"NP\" relation=\"OBJ\" of=\"1\">, the relation is NP-OBJ-1.\n relation = _parse_relation(chunk, type)\n # Process all of the <word> elements in the chunk, for example:\n # <word type=\"NN\" lemma=\"pizza\">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza]\n for word in filter(lambda n: n.tag == XML_WORD, chunk):\n tokens.append(_parse_token(word, chunk=type, relation=relation, format=format))\n # Add the IOB chunk tags:\n # words at the start of a chunk are marked with B-, words inside with I-.\n if CHUNK in format:\n i = format.index(CHUNK)\n for j, token in enumerate(tokens):\n token[i] = token[i] != \"O\" and ((j==0 and \"B-\" or \"I-\") + token[i]) or \"O\"\n # The chunk can be the anchor of one or more PNP chunks.\n # Store anchors so we can construct anchor id's in parse_string().\n a = chunk.get(XML_ANCHOR, \"\").split(_UID_SEPARATOR)[-1]\n if a: \n _anchors[a] = tokens\n return tokens", "def process(self):\n\n count = 0\n total = 0\n\n while total < 200 and count < 10:\n digits = self._stream.read(2)\n if len(digits) < 2:\n break\n \n number = int(digits)\n \n total += number\n \n count += 1\n\n return count", "def part_two(rows):\n\n cmds = [int(cmd) for cmd in rows]\n\n count = 0\n next_counter = 0\n\n while True:\n\n try:\n next_counter = process_commands(cmds, next_counter)\n count += 1\n except IndexError:\n break\n\n return count", "def __call__(self, example):\n para_counter = data.count_tokens(example['context_tokens'] if not self._iterate_over_example\n else [c for tkn in example['context_tokens'] for c in tkn])\n ques_counter = data.count_tokens(example['ques_tokens'] if not self._iterate_over_example\n else [c for tkn in example['ques_tokens'] for c in tkn])\n counter = para_counter + ques_counter\n return list(counter.items())", "def test_count_groups(test_input, expected):\n token_count = sp.count_groups(test_input)\n assert token_count == expected", "def solve(input_file: typing.IO) -> typing.Generator[str, None, None]:\n data = split_into_groups([parse_line(line.strip()) for line in input_file])\n yield str(sum([count_anyone_answered(group) for group in data]))\n yield str(sum([count_everyone_answered(group) for group in data]))", "def processData(content):\n\n csv_file = csv.reader(content)\n line_count = 0\n image_count = 0\n hour_count = 0\n\n chrome = ['Google Chrome', 0]\n explorer = ['Internet Explorer', 0]\n mozilla = ['Firefox', 0]\n safari = ['Safari', 0]\n \n for line in csv_file:\n line_count += 1\n if re.search(\"firefox\", line[2], re.I):\n mozilla[1] += 1\n elif re.search(r\"MSIE\", line[2]):\n explorer[1] += 1\n elif re.search(r\"Chrome\", line[2]):\n chrome[1] += 1\n elif re.search(r\"Safari\", line[2]):\n safari[1] += 1\n if re.search(r\"jpe?g|JPE?G|png|PNG|gif|GIF\", line[0]):\n image_count += 1\n\n image_percentage = 
(float(image_count) / line_count) * 100\n\n browser_count = [chrome, explorer, mozilla, safari]\n\n browser_popularity = 0\n top_browser = ' '\n for b in browser_count:\n if b[1] > browser_popularity:\n browser_popularity = b[1]\n top_browser = b[0]\n else:\n continue\n\n message1 = ('There were {:,} total page hits today.').format(line_count)\n message2 = ('Hits on images accounted for {}% of all hits.').format(image_percentage)\n message3 = ('{} had the most hits with {:,}.').format(top_browser, browser_popularity)\n\n print message1\n print message2\n print message3", "def test_chunks(year, day, part_number):\n chunks = []\n chunk_index = -1\n data_file_lines(part_number).each do |line|\n if line[0] == '#'\n chunk_index += 1\n chunks[chunk_index] = [line[1..-1].strip, []]\n elsif chunk_index >= 0\n chunks[chunk_index][1] << line\n end\n end\n chunks", "def parse_input(parts):\n\n \"\"\"\n Begin in state A.\n Perform a diagnostic checksum after 6 steps.\n \"\"\"\n turing = {}\n metadata_part = parts[0].split('\\n')\n start_state = metadata_part[0][-2]\n checksum_after = 12302209\n\n metadata = (start_state, checksum_after)\n\n for part in parts[1:]:\n lines = part.split('\\n')\n state = lines[0][-2]\n state_num = int(lines[1][-2])\n # print(\"PART N: \", state, state_num)\n # - Write the value X.\n write_val = int(lines[2][-2])\n move = '>' if lines[3][-6:-1] == 'right' else '<'\n next_state = lines[4][-2]\n turing[(state, state_num)] = (write_val, move, next_state)\n\n state_num = int(lines[5][-2])\n # print(\"PART N: \", state, state_num)\n write_val = int(lines[6][-2])\n move = '>' if lines[7][-6:-1] == 'right' else '<'\n next_state = lines[8][-2]\n turing[(state, state_num)] = (write_val, move, next_state)\n\n # print(turing)\n\n return turing, metadata", "def test_parsing(self):\n truth = self.generate_fake_pos()\n batch_size = 4\n records = []\n for i in range(batch_size):\n record = b''\n for j in range(2):\n record += self.v4_record(*truth)\n records.append(record)\n\n parser = ChunkParser(ChunkDataSrc(records),\n shuffle_size=1,\n workers=1,\n batch_size=batch_size)\n batchgen = parser.parse()\n data = next(batchgen)\n\n batch = (np.reshape(np.frombuffer(data[0], dtype=np.float32),\n (batch_size, 112, 64)),\n np.reshape(np.frombuffer(data[1], dtype=np.int32),\n (batch_size, 1858)),\n np.reshape(np.frombuffer(data[2], dtype=np.float32),\n (batch_size, 3)),\n np.reshape(np.frombuffer(data[3], dtype=np.float32),\n (batch_size, 3)))\n\n fltplanes = truth[1].astype(np.float32)\n fltplanes[5] /= 99\n for i in range(batch_size):\n data = (batch[0][i][:104],\n np.array([batch[0][i][j][0] for j in range(104, 111)]),\n batch[1][i], batch[2][i], batch[3][i])\n self.assertTrue((data[0] == truth[0]).all())\n self.assertTrue((data[1] == fltplanes).all())\n self.assertTrue((data[2] == truth[2]).all())\n scalar_win = data[3][0] - data[3][-1]\n self.assertTrue(np.abs(scalar_win - truth[3]) < 1e-6)\n scalar_q = data[4][0] - data[4][-1]\n self.assertTrue(np.abs(scalar_q - truth[4]) < 1e-6)\n\n parser.shutdown()", "def total_chunks(self) -> global___Expression:", "def num_tokens(self, index):\r\n raise NotImplementedError", "def process_content(cmd, content=u''):\n global NR, NF\n content = content.strip()\n\n begin, normal, end = parse_cmd(cmd)\n logging.debug('BEGIN: %s, NORMAL: %s, END: %s' % (begin, normal, end))\n\n do_begin(begin)\n\n NR = 0\n for record in content.split(RS):\n NR += 1\n\n ##### Fields of current record\n # $i <=> fields[i]\n # $0: the whole record\n # $1-$n: the actual 
field\n fields = [record] + list(multi_split(record, FS))\n logging.debug(u'fields: %s' % unicode(fields))\n\n NF = len(fields) - 1\n\n do_normal(normal, fields)\n\n do_end(end)", "def get_analyze_per_file(self):\n \"\"\"Exclude tags, exclude binary (img), count words without non literal characters and digits\"\"\"\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n df_tmp = pd.DataFrame(columns=['word', 'cnt', 'word_low'])\n w_cnt = 0\n word_counter = {}\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n for word in word_list:\n\n if word not in word_counter:\n word_counter[word] = 1\n else:\n word_counter[word] = word_counter[word] + 1\n w_cnt += 1\n\n for word, occurance in word_counter.items():\n df_tmp = df_tmp.append({'word': '{:15}'.format(word), 'cnt': '{:3}'.format(occurance),\n 'word_low': '{:15}'.format(word).lower()}, ignore_index=True)\n df_tmp = df_tmp.sort_values(by='word_low')\n df_tmp.loc[(df_tmp.word != df_tmp.word_low), 'word'] = df_tmp.cnt\n df_tmp.loc[(df_tmp.word == df_tmp.cnt), 'cnt'] = 0\n df_tmp.loc[(df_tmp.word == df_tmp.word_low), 'word'] = 0\n df_tmp['word'] = df_tmp.word.astype(int)\n df_tmp['cnt'] = df_tmp.cnt.astype(int)\n df_tmp = df_tmp.groupby(['word_low'])['cnt', 'word'].sum().reset_index()\n conn = sqlite3.connect('for_python_ht.db')\n try:\n try:\n sqlite_for_ht.CreateTableSingle.delete_table(f_3, self.filename)\n print(datetime.now(), '-', self.filename, 'Table deleted at the start point')\n except Exception:\n print(datetime.now(), '-', 'Something went wrong')\n traceback.print_exc()\n df_tmp.to_sql(name=self.filename, con=conn, index=False)\n print(datetime.now(), '-', self.filename, 'Table created and filled with data')\n except Exception:\n print(datetime.now(), '-', 'file with name {} already exists'.format(self.filename))\n traceback.print_exc()\n print(datetime.now(), '-', 'word analyse for', self.filename, 'done')\n sqlite_for_ht.HandleTemp.update_table(f_2, 'status', 'Done', self.filename)\n return None", "def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for 
q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")", "def main ():\n fio = FileIo(\"../input2.txt\")\n text = fio.getInput()\n p = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n out = filter(None, p.split(text))\n #print out[2]\n #print len(out)\n wc = 0\n\n for s in out:\n text = nltk.word_tokenize(s)\n wc += wordCount( text )\n print wc", "def cleanCsv(): \n\n count_neutral = 0\n count_sad = 0\n count_angry = 0\n count_happy = 0\n\n count_session_neutral = 0 \n\n for column_values in raw_data:\n\n if significant_data.fieldnames is None:\n dh = dict((h, h) for h in raw_data.fieldnames)\n significant_data.fieldnames = raw_data.fieldnames\n significant_data.writerow(dh)\n\n if column_values['AOI[Sad_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Left]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Right]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Left]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Right]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Sad_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Right]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Left]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n return {\n 'count_neutral': count_neutral,\n 'count_sad': count_sad,\n 'count_angry': count_angry,\n 'count_happy': count_happy,\n }", "def analyze(filename):\n\n start = datetime.datetime.now()\n found = 0\n new_ones = []\n\n # read file into a generator\n lines_generator = (line for line in open(filename, 
encoding=\"ISO-8859-1\"))\n\n # read generator into a list comprehension\n lists_generator = (l.split(\",\") for l in lines_generator)\n\n for line in lists_generator:\n if 'ao' in line[6]:\n found += 1\n lrow: List[str] = list(line)\n if lrow[5] > '00/00/2012':\n new_ones.append((lrow[5], lrow[0]))\n print(f\"'ao' was found {found}, times\")\n end = datetime.datetime.now()\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n # create yyyy from tuple, start at char(6) and grab to end of string\n # for each yyyy, add 1 yyyy if yyyy 2013-2017\n for new in new_ones:\n if new[0][6:] == '2013':\n year_count[\"2013\"] += 1\n if new[0][6:] == '2014':\n year_count[\"2014\"] += 1\n if new[0][6:] == '2015':\n year_count[\"2015\"] += 1\n if new[0][6:] == '2016':\n year_count[\"2016\"] += 1\n if new[0][6:] == '2017':\n year_count[\"2017\"] += 1\n if new[0][6:] == '2018':\n year_count[\"2017\"] += 1\n print(year_count)\n return start, end, year_count, found", "def get_result(self, maxlen=0):\n out = \"\"\n out_len = 0\n for token in self.tokens:\n chunk, chunk_len = token.converter(token.match, token.userdata, maxlen)\n out = out + chunk\n out_len = out_len + chunk_len\n if maxlen:\n maxlen = maxlen - chunk_len\n if maxlen <= 0:\n return out, out_len, 1\n return out, out_len, 0", "def _get_token_cnts(self, doc, doc_type):\n tokenized_doc = self.nlp(doc)\n self.stat_dict[doc_type][0].append(len([s for s in tokenized_doc.sents]))\n doc_len = len([t for t in tokenized_doc])\n self.stat_dict[doc_type][1].append(doc_len)\n if doc_len < 50 and doc_type == \"answer\":\n print(\"Document less than 50 tokens:\", url)", "def count_regexp():\r\n # Here's an example regular expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")", "def _parse(self):\n adds_factor = 1\n pattern = re.compile(\"(\\d+)?([kKdDbB])(\\d+)?\")\n\n for chunk in self.chunks:\n if chunk == \"+\":\n adds_factor = 1\n elif chunk == \"-\":\n adds_factor = -1\n elif chunk == \">\":\n # next factor will be target difficulty!\n adds_factor = 0\n elif chunk.isdigit():\n self.bonus += int(chunk) * adds_factor # anything + 0 remains unchanged\n if adds_factor == 0:\n self.target = 0\n break # difficulty always be the last chunk!\n else:\n m = pattern.match(chunk)\n\n # This part is dubious\n if m is not None:\n t_die_letter = m.group(2).upper()\n t_dice = 0\n t_keep = 0\n if 
m.group(1) is not None:\n t_dice = int(m.group(1))\n if m.group(3) is not None:\n t_keep = int(m.group(3))\n\n if \"D\" == t_die_letter:\n if adds_factor == 1:\n self.dice += t_dice\n self.keeps += t_dice\n elif adds_factor == -1:\n self.keeps -= t_dice\n elif \"B\" == t_die_letter :\n if adds_factor == 1:\n self.dice += t_dice\n # you cannot do -xB\n else: #well, that's a Keep\n self.dice += t_dice * adds_factor\n self.keeps += t_keep * adds_factor\n\n if self.keeps > self.dice:\n self.keeps = self.dice" ]
[ "0.577075", "0.5582318", "0.5535175", "0.54834586", "0.5468226", "0.53948593", "0.5361013", "0.53123224", "0.53054005", "0.5288631", "0.5288206", "0.5273744", "0.5239814", "0.5214098", "0.5131974", "0.51176083", "0.5099485", "0.509863", "0.5094825", "0.50480515", "0.50278896", "0.50216585", "0.5019713", "0.50191164", "0.50148374", "0.5014427", "0.50100327", "0.5003416", "0.49822876", "0.4978872" ]
0.75956017
0
Creates an index for a dictionary. The index is built as a dictionary object and written to a file named dictfile.index with contents in the following format: A=1 B=2000 ... (e.g. en_US.index)
def create_index(self, dictfile): self.dictionary_file = dictfile self.index_file = os.path.join(dictfile.split(".")[0] + ".index") self.fp = codecs.open(self.dictionary_file, "r", encoding="utf-8") self.op = codecs.open(self.index_file, "w", encoding="utf-8") # loop untill entire file is not finished while True: item = self.fp.readline() if not item: break # if the alphabet is currently not indexed then index it # with current value of byte offset else increase the offset # by the byte length of currently read word till you get # new alphaet which is not indexed if len(item) > 0 and not item[0] in self.dictionary.keys(): self.dictionary[item[0]] = self.offset self.offset = self.offset + len(item.encode("utf-8")) #print "Index for " + self.dictionary_file + " is created " for index in self.dictionary: value = self.dictionary.get(index, None) if not value: self.op.write(index + "=%d\n" % value) # Clean up self.fp.close() self.op.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_index(self, filename, tri):\n self.index = {'name': filename, \"bi\": {}}\n if tri:\n self.index[\"tri\"] = {}\n fichero = open(filename, 'r').read()\n fichero = fichero.replace(\";\",\".\")\n fichero = fichero.replace(\"\\n\\n\",\".\")\n fichero = fichero.replace(\",\",\".\")\n fichero = fichero.replace(\"?\",\".\")\n fichero = fichero.replace(\"!\",\".\")\n fichero = fichero.lower()\n\n for frase in fichero.split('.'):\n frase = self.r2.sub(\" \", frase)\n frase = \"$ \" + frase + \" $\"\n Monkey.index_sentence(self, frase, tri)\n\n #sort_index(self, self.index['bi'])\n if tri:\n sort_index(self, self.index['tri'])\n\n extension = filename.find('.')\n aux = filename[:extension] \n new_filename = aux + 'index'\n\n with open(new_filename, 'w') as fh:\n #print(self.index['bi'].items())\n for nombre, valor in self.index['bi'].items():\n fh.write(\"%s %s\\n\" %(nombre, valor))", "def load_index(self, dictfile):\n\n self.index_file = os.path.join(self.path,\n dictfile.split(\".\")[0] + \".index\")\n try:\n self.fp = codecs.open(self.index_file, \"r\",\n encoding=\"utf-8\", errors=\"ignore\")\n except IOError:\n self.create_index(dictfile)\n\n self.fp = codecs.open(self.index_file, \"r\", encoding=\"utf-8\")\n self.dictionary = {}\n while True:\n text = unicode(self.fp.readline())\n if text:\n line = text.split(\"=\")\n if len(line) == 2:\n index = line[0]\n value = line[1]\n self.dictionary[index] = value\n else:\n break\n\n self.fp.close()\n return self.dictionary", "def create_index():", "def create_index_dict(vcb_file):\n index_dict = {}\n vcb = open(vcb_file).readlines()\n for line in vcb:\n line = line.split()\n index_dict[int(line[0])] = line[1]\n return index_dict", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def build_single_file_index(cls, index_path, d):\n index = json.load(open(index_path))\n info_list = cls.list_from_index_path(index_path)\n\n sub_d = d\n for entry in info_list:\n if entry[0] not in sub_d:\n sub_d[entry[0]] = {}\n if entry[1] not in sub_d[entry[0]]:\n sub_d[entry[0]][entry[1]] = {}\n sub_d = sub_d[entry[0]][entry[1]]\n\n current_dir = os.path.dirname(index_path)\n rel_dirname = os.path.relpath(current_dir, paths.db_root)\n if 'files' in index:\n for name, file in list(index['files'].items()):\n sub_d[name] = os.path.join(rel_dirname, file)\n if 'info' in index:\n sub_d.update(index['info'])", "def load_index(index_file):\n index_dict = {}\n with open(index_file) as f:\n for line in f:\n title, path = line.strip().split()\n index_dict[title] = path\n return index_dict", "def __createIndexFile(self, dimensions):\n target = os.path.join(self.workingDir, self.get( 'index_filename'))\n self.info(\"Creating index file {}\".format(target))\n text = \"\"\n for i in range(0,dimensions):\n text+=\"1 \"\n\n util.createScript(target, text)\n return target", "def myhtable_create_index(files):\n res_buckets = htable(4011)\n for id, file in enumerate(files):\n if file[-4:] == '.txt':\n word_list = words(get_text(file))\n for word in word_list:\n value = htable_get(res_buckets, word)\n if value == None:\n htable_put(res_buckets, word, {id})\n else:\n value.add(id)\n htable_put(res_buckets, word, value)\n return res_buckets", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex 
for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "def build_index():\n pass", "def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n\n maxInt = sys.maxsize\n\n while True:\n # decrease the maxInt value by factor 10 \n # as long as the OverflowError occurs.\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt/10)\n\n #Dicitionary for saving the normalized weights for document vector\n lengths = dict()\n\n #Number of docs read from csv\n total_docs = 1\n max_docs = 1000\n\n #Data stored in csv read file line by line and save columns data\n with open(os.path.join(in_dir), 'r', encoding=\"utf8\") as data_csv:\n reader = csv.DictReader(data_csv)\n #each line corresponds to a document\n for doc in reader:\n\n #if(total_docs > max_docs):\n # break\n\n #If line is blank, just skip\n if doc is None:\n continue\n \n #save the different columns of the doc\n doc_id = int(doc[\"document_id\"])\n #Remove punctuation in title and content\n doc_title = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"title\"])\n doc_content = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"content\"])\n doc_date = doc[\"date_posted\"]\n doc_year = doc_date[0:4]\n doc_court = doc[\"court\"]\n\n #The dictionaryies are updated, postings lists are updated or new terms added\n update_terms_zones_dictionary(doc_id, doc_title, \".title\")\n update_terms_zones_dictionary(doc_id, doc_content, \".content\")\n update_date_field_dictionary(doc_id, doc_year)\n update_court_field_dictionary(doc_id, doc_court)\n\n total_docs += 1\n\n data_csv.close()\n\n #This section stores the Log TF using the word counts in the postings in the dictionary\n #It saves the Log TF in an auxiliary dictionary named lengths\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n\n for docID_termF in postings_list:\n #Get the vector for the doc, where the docId is docID_termF[0]\n #If there is no vector for this doc, then create a new dict\n #I am using dictionaries as the vector for the word only for the calculations\n doc_vector = lengths.get(docID_termF[0], dict())\n #I add the logarithmic term frequency to that document vector\n doc_vector[word] = 1 + math.log(docID_termF[1], 10)\n #Save that to its corresponding doc\n lengths[docID_termF[0]] = doc_vector\n\n #This section normalizes the Log TFs \n for doc_vector in lengths.values():\n #We store each of the values in a list and then use:\n #np.linalg.norm to do the normalization = sqrt(sum(values^2))\n weights = doc_vector.values()\n #We get the vectors magnitude\n magnitude = np.linalg.norm(np.array(list(weights)))\n for word in doc_vector.keys():\n #For every word entry in the vector \n #normalize by dividing the weight by the magnitude\n doc_vector[word] = doc_vector[word] / magnitude\n\n #This section replaces the word count in the tuple of the dictionary with the Normalized Log TF\n #It also sorts the postings list by doc ID\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n new_postings_list = list()\n for docID_termF in postings_list:\n docID_termF = ( docID_termF[0], lengths[docID_termF[0]][word] )\n new_postings_list.append(docID_termF)\n new_postings_list.sort()\n dictionary[word] = new_postings_list\n\n ''' \n with open('ugly_dictionary.txt', 'w') as fp:\n 
json.dump(dictionary, fp)\n '''\n #Determine the relevance of each doc by the court that it has in its court field\n #Save the relevant docs and their relevance\n relevant_courts_dict = { \"SG Court of Appeal\":2, \"SG Privy Council\":2, \"UK House of Lords\":2, \"UK Supreme Court\":2,\n \"High Court of Australia\":2, \"CA Supreme Court\":2, \"SG High Court\":1.5, \"Singapore International Commercial Court\":1.5,\n \"HK High Court\": 1.5, \"HK Court of First Instance\": 1.5, \"UK Crown Court\": 1.5, \"UK Court of Appeal\": 1.5, \"UK High Court\": 1.5, \n \"Federal Court of Australia\": 1.5, \"NSW Court of Appeal\": 1.5, \"NSW Court of Criminal Appeal\": 1.5, \"NSW Supreme Court\": 1.5}\n\n relevant_docs = dict()\n \n for court_name in relevant_courts_dict:\n court_postings_list = court_dictionary.get(court_name, -1)\n if(court_postings_list != -1):\n for docid in court_postings_list:\n #save a dictionary of docID and its relevance (2 or 1.5) according to its court\n relevant_docs[docid] = relevant_courts_dict[court_name]\n\n #This section traverse each word (key) in the dictionary, get its postings list and save it in a different file \n postings_list_file = open(out_postings, \"wb\") \n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n dictionary[word] = (document_frequency, postings_list_position)\n for date in date_dictionary:\n #Get postings list for the date\n postings_list = date_dictionary[date]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n date_dictionary[date] = (document_frequency, postings_list_position)\n for court in court_dictionary:\n #Get postings list for the date\n postings_list = court_dictionary[court]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n court_dictionary[court] = (document_frequency, postings_list_position)\n #Close the postings lists file\n postings_list_file.close() \n\n #Now open the dictionary file and save the three dictionaries\n with open(out_dict, 'wb') as dictionary_file:\n pickle.dump(total_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(date_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(court_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(relevant_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n \n '''\n The structure we have is:\n\n dictionary.txt: Has three dictionaries\n {word.zone : [doc_freq, pointer], word.zone: [doc_freq, pointer], ...}\n {date : [doc_freq, pointer], date: [doc_freq, pointer], ...}\n {court : [doc_freq, pointer], court: [doc_freq, pointer], ...}\n\n postings.txt: Has the postings for the three dictionaries\n For the dictionary 
postings:\n [[docID,termFrequency],[docID,termFrequency]]\n [[docID,termFrequency]] ...\n For the date_dictionary postings:\n [docId, docId, docId, docId]\n For the court_dictionary postings:\n [docId, docId, docId, docId]\n ...\n\n Both documents together would be:\n { word.zone: [doc_freq, [[docID,termFrequency], ... ]], \n word.zone: [doc_freq, [[docID,termFrequency].}, ...]] }\n { date: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n { court: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n\n lengths.txt\n [document: [word: weight, word: weight, ...], document: [word: weight, word: weight, ...]]\n Decided to make it like this to keep control of which weights correspond to which words\n Although for a document I will traverse all the weights to get the score\n If the word is not in the document vector [which in my case is a dictionary], then its weight is 0\n This way I am no using a sparse matrix\n\n '''", "def load_word_to_index(dict_word_to_index_file_name):\n with open(dict_word_to_index_file_name, 'r') as f:\n word_to_index = json.load(f)\n _LAST_INDEX = len(word_to_index) - 2 # Why - 2? Open issue?\n print(\"word_to_index dict restored from '{}'.\".format(dict_word_to_index_file_name))\n word_to_index = defaultdict(lambda: _LAST_INDEX, word_to_index)\n\n return word_to_index", "def create_index(self, vocabulary=[]) -> dict:\n try:\n out = {}\n for word in vocabulary:\n if word in out:\n out[word] += 1\n else: \n out[word] = 1\n return(out)\n except Exception as error:\n print(f\"Error: self.create_index([...]) -> {error}\")", "def index_writer_init(idx_dir=\"Wiki_index\"):\n try:\n assert type(idx_dir) is str\n except AssertionError:\n raise TypeError\n\n try:\n assert idx_dir != \"\"\n except AssertionError:\n raise ValueError\n\n # Creazione dello schema dei documenti da indicizzare\n schema: Schema = Schema(title=TEXT(stored=True),\n identifier=ID(stored=True, unique=True),\n content=TEXT(stored=True, analyzer=StemmingAnalyzer()))\n\n # Verifica dell'esistenza della cartella dell'indice\n if not path.exists(idx_dir):\n # In caso la cartella non esista viene creata\n mkdir(idx_dir)\n\n # Creazione dell'indice all'interno della cartella designata\n index = create_in(idx_dir, schema)\n\n # La funzione restituisce un oggetto in grado di inserire (scrivere) documenti all'interno dell'indice\n return index.writer()", "def create_idx(for_this_file, put_here):\n file_name = for_this_file.split('/')[-1]\n idx_dir = '/uufs/chpc.utah.edu/common/home/horel-group/archive/' + put_here\n if not os.path.exists(idx_dir):\n os.makedirs(idx_dir)\n idx_name = idx_dir + file_name + '.idx'\n os.system('wgrib2 ' + for_this_file + ' -t -var -lev -ftime > ' + idx_name)\n print \"created idx file:\", idx_name", "def writetofile(invertedindex, filename):\n file = open(filename + '.txt', 'w', encoding='utf-8')\n for word in invertedindex.keys():\n file.write(word)\n file.write(' : ')\n for docid in invertedindex[word][0]:\n file.write(str(docid) + ' ')\n file.write('\\n')", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def construct_dict(self):\n 
i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def create_index(input_path, output_path, category_string, generate_index_cardinality_list: bool):\n file = open(input_path, 'r', encoding=\"utf-8\")\n index = dict()\n doc_id = -1\n look_for_id = False\n line = file.readline()\n while line != '':\n if '<page>' in line:\n look_for_id = True\n if '<id>' in line and look_for_id:\n doc_id = re.sub('^.*<id>', '', re.sub('</id>.*$', '', line, flags=re.DOTALL), flags=re.DOTALL)\n look_for_id = False\n if f'[[{category_string}' in line and '<' not in line:\n categories = categories_from_line(line, category_string)\n for category in categories:\n if category not in index:\n index[category] = list()\n index[category].append(doc_id)\n line = file.readline()\n\n with open(output_path, 'w', encoding=\"utf-8\") as outFile:\n outFile.write(json.dumps(index, sort_keys=True, indent=' ', ensure_ascii=False))\n\n if generate_index_cardinality_list:\n index_list = list()\n for key, value in index.items():\n temp = [key, len(value)]\n index_list.append(temp)\n index_list.sort(key=lambda x: x[1], reverse=True)\n with open(re.sub('\\\\.json$', '.txt', output_path), 'w', encoding=\"utf-8\") as outFile:\n for category in index_list:\n outFile.write(\"%s: %d\\n\" % (category[0], category[1]))", "def buildindex4(invertedindex, index):\n sortedbycount = sorted(invertedindex.items(), key=lambda x: x[1][1])\n startindex = math.floor((2*len(sortedbycount))/100) + 1\n for word, count in sortedbycount[startindex:]:\n index[word] = count\n return", "def create_index(self, db_name):\n\t\tindex_func_path = self._get_index_func_filepath(db_name)\n\t\t\n\t\tif os.path.isfile(index_func_path):\n\t\t\t# create index request payload from predefined file\t\n\t\t\twith open(index_func_path, 'r') as content_file:\n\t\t\t\tpayload = content_file.read()\n\t\t\n\t\t\tprint (\"Create index using function in: {}\".format(index_func_path))\n\t\t\turl = \"https://{}/{}/_design/view\".format(\n\t\t\t\tself.cloudanthost, db_name)\n\t\t\tresponse = self.r.put(url, data=payload)\n\t\t\tassert response.status_code == 201", "def _index_document(index_list):\n if isinstance(index_list, abc.Mapping):\n raise TypeError(\"passing a dict to sort/create_index/hint is not \"\n \"allowed - use a list of tuples instead. 
did you \"\n \"mean %r?\" % list(index_list.items()))\n elif not isinstance(index_list, (list, tuple)):\n raise TypeError(\"must use a list of (key, direction) pairs, \"\n \"not: \" + repr(index_list))\n if not len(index_list):\n raise ValueError(\"key_or_list must not be the empty list\")\n\n index = SON()\n for (key, value) in index_list:\n if not isinstance(key, str):\n raise TypeError(\"first item in each key pair must be a string\")\n if not isinstance(value, (str, int, abc.Mapping)):\n raise TypeError(\"second item in each key pair must be 1, -1, \"\n \"'2d', 'geoHaystack', or another valid MongoDB \"\n \"index specifier.\")\n index[key] = value\n return index", "def save_byte_index(index, fp):\n encoded_index = dict()\n for key, offset in index.items():\n encoded_index[key.decode(\"utf8\")] = offset\n json.dump(encoded_index, fp)\n return fp", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def index(self, dictfile):\n\n print 'Start index.'\n for word in open(dictfile):\n word = word.strip()\n sig = hash(''.join(sorted(list(word.lower()))))\n # Calculate a \"signature\"\n\n try:\n self._anagrams[sig].append(word)\n except KeyError:\n self._anagrams[sig] = [word]\n\n print 'End index,found',len(self._anagrams),'anagrams.'", "def buildIndex(filename, currentTime, baseDir):\n pathToFolder = baseDir + 'Collections/IndriIndices/'\n if not os.path.exists(pathToFolder):\n os.makedirs(pathToFolder)\n INDRI_BUILD_INDEX = '/mnt/bi-strg3/v/zivvasilisky/ziv/env/indri/indri/bin/IndriBuildIndex'\n CORPUS_PATH = filename\n CORPUS_CLASS = 'trectext'\n MEMORY = '1G'\n INDEX = pathToFolder + currentTime\n STEMMER = 'krovetz'\n run_bash_command(INDRI_BUILD_INDEX + ' -corpus.path='+CORPUS_PATH + ' -corpus.class='+CORPUS_CLASS + ' -index='+INDEX + ' -memory='+MEMORY + ' -stemmer.name=' + STEMMER)\n return INDEX", "def build_inverted_index():\r\n # vacabulary list (with out common_words)\r\n file_read = read_file()\r\n vacabulary_list = []\r\n common_words = read_common_words()\r\n for key in file_read:\r\n for element in file_read[key]:\r\n if (element not in vacabulary_list) & (element not in common_words):\r\n vacabulary_list.append(element)\r\n\r\n # word list of each file\r\n content = remove_common_words(file_read, common_words) # content = stopping()\r\n\r\n # generate direction to save result\r\n inverted_index = {}\r\n for item in vacabulary_list:\r\n inverted_index[item] = {}\r\n\r\n for file_id in content.keys():\r\n frequency = Counter(\r\n content[file_id]) # the frequency of words in a file : {'slipstream': 5, 'lift': 4, 'wing': 3}\r\n for word in frequency.keys():\r\n inverted_index[word][file_id] = frequency[word]\r\n\r\n inverted_index = sorted(inverted_index.items(), key=lambda d: d[0], reverse=False)\r\n inverted_index = dict(inverted_index)\r\n return inverted_index", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def write_index_to_file(output_file, items): \n \n file = open(output_file, 'w')\n for item in items: \n str0 = str(item[0])\n str1 = ' '.join(str(x) for x in item[1])\n file.write( str0 + ' ' + str1 + '\\n') \n # file.write(item)\n print ('An inverted index has been writted in file')\n file.close()" ]
[ "0.7095544", "0.7029676", "0.7002316", "0.67770976", "0.67745614", "0.67199093", "0.66818196", "0.6662767", "0.6655858", "0.6651296", "0.6531438", "0.6508011", "0.6504394", "0.6454348", "0.6287616", "0.62688965", "0.6261858", "0.6243503", "0.6228945", "0.6199209", "0.61885613", "0.61562574", "0.61112195", "0.61018485", "0.60524553", "0.60472304", "0.6045986", "0.60421574", "0.6038604", "0.6037273" ]
0.8214013
0
This function reads the index file and loads its contents into a dictionary object. If the file doesn't exist, this will create the index file and then read it.
def load_index(self, dictfile): self.index_file = os.path.join(self.path, dictfile.split(".")[0] + ".index") try: self.fp = codecs.open(self.index_file, "r", encoding="utf-8", errors="ignore") except IOError: self.create_index(dictfile) self.fp = codecs.open(self.index_file, "r", encoding="utf-8") self.dictionary = {} while True: text = unicode(self.fp.readline()) if text: line = text.split("=") if len(line) == 2: index = line[0] value = line[1] self.dictionary[index] = value else: break self.fp.close() return self.dictionary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_index(index_file):\n index_dict = {}\n with open(index_file) as f:\n for line in f:\n title, path = line.strip().split()\n index_dict[title] = path\n return index_dict", "def get_index(self):\n with open(self.index_path, \"r\") as f:\n return json.load(f)", "def _load_index(self):\n try:\n with open(self._index_path, \"rb\") as f:\n version = pickle.load(f)\n data = f.read()\n except EnvironmentError as e:\n # Index doesn't exist yet?\n if e.errno in (errno.ENOENT,):\n return {}\n raise\n if version != self._version:\n # This is another version. Avoid trying to unpickling the\n # rest of the stream, as that may fail.\n return {}\n stamp, overloads = pickle.loads(data)\n _cache_log(\"[cache] index loaded from %r\", self._index_path)\n if stamp != self._source_stamp:\n # Cache is not fresh. Stale data files will be eventually\n # overwritten, since they are numbered in incrementing order.\n return {}\n else:\n return overloads", "def __loadIndex( self ):\n\n assert self.mCreateMode == False, \"asked to read from database opened for writing\"\n\n if self.mMethod == \"uncompressed\":\n self.mDatabaseFile = open( self.mDbname, \"r\" )\n elif self.mMethod == \"dictzip\":\n import dictzip\n self.mDatabaseFile = dictzip.GzipFile( self.mNameDb)\n elif self.mMethod == \"lzo\":\n import lzo\n self.mDatabaseFile = Uncompressor( self.mNameDb, lzo.decompress )\n elif self.mMethod == \"gzip\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, gzip_demangler )\n elif self.mMethod == \"zlib\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, zlib.decompress )\n elif eslf.mMethod == \"bz2\":\n self.mDatabaseFile = bz2.BZ2File( self.mNameDb )\n elif self.mMethod == \"debug\":\n self.mDatabaseFile = Uncompressor( self.mDbname + \".debug\", lambda x: x ) \n\n self.mIndex = {}\n\n for line in open(self.mNameIndex, \"r\"):\n\n if line.startswith(\"#\"): continue\n data = line[:-1].split(\"\\t\")\n\n # index with random access points\n if len(data) > 4:\n (identifier, pos_id, block_size, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n points = map(int, data[3:-1])\n self.mIndex[int(identifier)] = (pos_id, block_size, lsequence, points)\n else:\n (identifier, pos_id, pos_seq, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n self.mIndex[int(identifier)] = (pos_id, pos_seq, lsequence) \n \n self.mIsLoaded = True", "def _load_index(self):\n try:\n with open(self._index_path, \"rb\") as f:\n version = pickle.load(f)\n data = f.read()\n except FileNotFoundError:\n # Index doesn't exist yet?\n return {}\n if version != self._version:\n # This is another version. Avoid trying to unpickling the\n # rest of the stream, as that may fail.\n return {}\n stamp, overloads = pickle.loads(data)\n _cache_log(\"[cache] index loaded from %r\", self._index_path)\n if stamp != self._source_stamp:\n # Cache is not fresh. 
Stale data files will be eventually\n # overwritten, since they are numbered in incrementing order.\n return {}\n else:\n return overloads", "def create_index(self, dictfile):\n\n self.dictionary_file = dictfile\n self.index_file = os.path.join(dictfile.split(\".\")[0] + \".index\")\n\n self.fp = codecs.open(self.dictionary_file, \"r\", encoding=\"utf-8\")\n self.op = codecs.open(self.index_file, \"w\", encoding=\"utf-8\")\n\n # loop untill entire file is not finished\n while True:\n item = self.fp.readline()\n if not item:\n break\n\n # if the alphabet is currently not indexed then index it\n # with current value of byte offset else increase the offset\n # by the byte length of currently read word till you get\n # new alphaet which is not indexed\n\n if len(item) > 0 and not item[0] in self.dictionary.keys():\n self.dictionary[item[0]] = self.offset\n self.offset = self.offset + len(item.encode(\"utf-8\"))\n\n #print \"Index for \" + self.dictionary_file + \" is created \"\n\n for index in self.dictionary:\n value = self.dictionary.get(index, None)\n if not value:\n self.op.write(index + \"=%d\\n\" % value)\n\n # Clean up\n self.fp.close()\n self.op.close()", "def load_index(self, index_path: str = \"annoy_index.bin\"):\n if index_path and os.path.exists(index_path):\n corpus_emb_json_path = index_path + \".json\"\n logger.info(f\"Loading index from: {index_path}, corpus embedding from: {corpus_emb_json_path}\")\n super().load_index(corpus_emb_json_path)\n if self.index is None:\n self.create_index()\n self.index.load(index_path)\n else:\n logger.warning(\"No index path given. Index not loaded.\")", "def load_index():\n\tprint \"Offline Wikipedia: Loading Index\\nThis may take a bit...\"\n\tindex = {}\n\tnum_entries = 0\n\tstart_time = time.time()\n\n\twith open(wikipedia_index_file) as index_file:\n\t\tcsvreader = csv.reader(index_file, delimiter=',')\n\n\t\tfor line in csvreader:\n\t\t\tindex[line[0].lower()] = join(wikipedia_base_directory, line[1])\n\t\t\tnum_entries += 1\n\n\tprint \"Loaded \" + str(num_entries) + \" index entries in \" + \\\n\t\t\tstr(time.time() - start_time) + \" seconds.\"\n\treturn index", "def load_index(self, index_path: str = \"hnswlib_index.bin\"):\n if index_path and os.path.exists(index_path):\n corpus_emb_json_path = index_path + \".json\"\n logger.info(f\"Loading index from: {index_path}, corpus embedding from: {corpus_emb_json_path}\")\n super().load_index(corpus_emb_json_path)\n if self.index is None:\n self.create_index()\n self.index.load_index(index_path)\n else:\n logger.warning(\"No index path given. 
Index not loaded.\")", "def test_read_index(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n TroveIndexBuilder(\"test/short.dat\", out=indexfile)\n\n index = TroveIndex()\n index.reload(indexfile)\n\n docs = sorted([doc for doc in index.documents])\n self.assertEquals(10, len(docs))\n\n self.assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], docs)\n\n doc = index.get_document(1)\n ref = {u\"id\":\"1\",u\"titleName\":u\"Hello\"}\n self.assertNotEquals(None, doc, \"Document not found for id 1\")\n self.assertDictEqual(ref, doc)\n\n doc = index.get_document(10)\n ref = {\"id\":\"10\",\"titleName\":\"Hello\"}\n self.assertNotEquals(None, doc)\n self.assertDictEqual(ref, doc)", "def load_index_config(index_dir):\n with open(os.path.join(index_dir, 'index.json')) as data_file:\n data = json.load(data_file)\n return data", "def __load_index(self):\n import os\n if not os.path.exists(self.__dir):\n filename=os.path.join(MY_STORE,self.__dir,INTERNAL_DB_FILE)\n else:\n filename=os.path.join(self.__dir,INTERNAL_DB_FILE)\n try:\n self.__handle = open(filename,self.__mode)\n except IOError, e:\n print 'Cannot create status file. Ensure you have permission to write'\n return False\n\n fcntl.flock(self.__handle.fileno(), fcntl.LOCK_EX)\n internal_db = dbm.open(filename, 'c', 0644 )\n self.__storage = shelve.Shelf(internal_db)\n return True", "def load_index_from_cache(self):\n cache = open(self.cache_path_index, 'r')\n json_index = cache.read()\n self.index = json.loads(json_index)", "def build_single_file_index(cls, index_path, d):\n index = json.load(open(index_path))\n info_list = cls.list_from_index_path(index_path)\n\n sub_d = d\n for entry in info_list:\n if entry[0] not in sub_d:\n sub_d[entry[0]] = {}\n if entry[1] not in sub_d[entry[0]]:\n sub_d[entry[0]][entry[1]] = {}\n sub_d = sub_d[entry[0]][entry[1]]\n\n current_dir = os.path.dirname(index_path)\n rel_dirname = os.path.relpath(current_dir, paths.db_root)\n if 'files' in index:\n for name, file in list(index['files'].items()):\n sub_d[name] = os.path.join(rel_dirname, file)\n if 'info' in index:\n sub_d.update(index['info'])", "def load_existing_index(self, path: str) -> bool:\n if path.endswith(\"/\"):\n path = path[:-1]\n try:\n with open(os.path.join(\n INDEX_DIR, path.replace(\"/\", \"_\") + \".pkl\"\n ), \"rb\") as f:\n self.file_index, self.modified_time = pickle.load(f)\n except FileNotFoundError:\n self.file_index, self.modified_time = [], 0\n return False\n return True", "def test_read_index_swift(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n TroveSwiftIndexBuilder(\"short.dat\", out=indexfile)\n\n index = TroveSwiftIndex()\n index.reload(indexfile)\n\n docs = sorted([doc for doc in index.documents])\n self.assertEquals(10, len(docs))\n\n self.assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], docs)\n\n doc = index.get_document(1)\n ref = {\"id\":\"1\",\"titleName\":\"Hello\"}\n self.assertDictEqual(ref, doc)\n\n doc = index.get_document(10)\n ref = {\"id\":\"10\",\"titleName\":\"Hello\"}\n self.assertNotEquals(None, doc)\n self.assertDictEqual(ref, doc)", "def load_word_to_index(dict_word_to_index_file_name):\n with open(dict_word_to_index_file_name, 'r') as f:\n word_to_index = json.load(f)\n _LAST_INDEX = len(word_to_index) - 2 # Why - 2? 
Open issue?\n print(\"word_to_index dict restored from '{}'.\".format(dict_word_to_index_file_name))\n word_to_index = defaultdict(lambda: _LAST_INDEX, word_to_index)\n\n return word_to_index", "def load_index(self, fn):\n # print('Load ', fn)\n # if fn[len(fn)-4:] == '.pkl':\n # fn = fn[0:len(fn)-4]\n fn = 'idx_bench'\n inverted_index = utils.load_obj(fn)\n return inverted_index", "def index_file(self, file_name):\n self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()", "def load_index(self, fn):\n name = fn.split('.pkl')[0]\n return utils.load_obj(name)", "def load_word_index(path):\n word_index = open(path + '/word_index.pickle', 'rb')\n word_index = pickle.load(word_index)\n print('Word Index Pickle load successful\\n')\n return word_index", "def create_index_dict(vcb_file):\n index_dict = {}\n vcb = open(vcb_file).readlines()\n for line in vcb:\n line = line.split()\n index_dict[int(line[0])] = line[1]\n return index_dict", "def load_file(self, fn, index_mapping_dir: Optional[str] = None):\n logging.info(f\"Loading {fn}\")\n idx_fn = _index_fn(fn, index_mapping_dir)\n\n # create data map\n mdata = np.memmap(fn, dtype=np.uint8, mode=\"r\")\n\n if _index_file_exists(idx_fn):\n # load index file into memory map\n midx = np.load(idx_fn + \".npy\", allow_pickle=True, mmap_mode=\"r\")\n # test for header\n if len(midx) < self._header_lines:\n raise RuntimeError(f\"Missing header, expected 
{self._header_lines} header lines\")\n\n # load meta info\n idx_info_dict = pickle.load(open(idx_fn + \".info\", \"rb\"))\n # test for mismatch in expected newline_int\n if \"newline_int\" in idx_info_dict:\n newline_int = idx_info_dict[\"newline_int\"]\n if self._newline_int != newline_int:\n logging.warning(\n f\"Mismatch in newline_int, expected = {self._newline_int} but loaded {newline_int}\"\n )\n\n # test for version mismatch (useful to force recreation of index files)\n idx_version = idx_info_dict.get(\"version\", \"0.0\")\n if __idx_version__ != idx_version:\n raise RuntimeError(\n f\"Version mismatch: Please delete existing '.{__idx_suffix__}' files. Expected version = {__idx_version__}, but file version = {idx_version}. File path = {idx_fn}\"\n )\n else:\n raise ValueError(\n f\"Memory Map for {fn} is not found, missing one or more of files: {idx_fn}.{{.npy,.info}}\"\n )\n\n return (mdata, midx)", "def index_file(self, anns_file):\n print \"Indexing \" + anns_file + \"... \",\n\n with open(anns_file, 'rb') as tsvfile:\n reader = csv.reader(tsvfile, delimiter=\"\\t\", quoting=csv.QUOTE_NONE)\n file_dict = defaultdict(list)\n # Reads tsv lines\n for line in reader:\n doc_id, en = line[0], line[7]\n file_dict[doc_id].append(en)\n\n for doc_id, en_list in file_dict.iteritems():\n contents = self.__get_lucene_contents(doc_id, en_list)\n self.lucene.add_document(contents)\n\n print \"done\"", "def index():\n\n return dict()", "def read(ffindex_data, ffindex_db=None, encoding=None):\n\n if ffindex_db is None:\n if _is_string(ffindex_data):\n ffindex_db = ffindex_data + \".index\"\n else:\n raise Exception(\"When ffindex_data is passed as a file-like object, ffindex_db is required\")\n\n f_db = _to_file(ffindex_db, \"r\")\n f_data = _to_file(ffindex_data, \"r+b\")\n\n m_data = mmap.mmap(f_data.fileno(), 0)\n\n for l_db in f_db:\n filename, start, length = l_db.strip().split(\"\\t\")\n\n yield FFIndexContent(m_data, int(start), int(length) - 1, filename, encoding)\n\n m_data.close()\n\n if _is_string(ffindex_db):\n f_db.close()\n\n if _is_string(ffindex_data):\n f_data.close()", "def get_index_data(dir):\n db = IndexDb(dir)\n result = db.read_from_index()\n return result", "def _read_module_index(str_or_file):\n yaml_content = syaml.load(str_or_file)\n index = {}\n yaml_index = yaml_content[\"module_index\"]\n for dag_hash, module_properties in yaml_index.items():\n index[dag_hash] = ModuleIndexEntry(\n module_properties[\"path\"], module_properties[\"use_name\"]\n )\n return index", "def get_or_build_author_index(index_file=INDEX_FILE_AUTHORS):\n return (\n SqliteDict(index_file, flag=\"r\")\n if os.path.exists(INDEX_FILE_AUTHORS)\n else index_authors()\n )", "def read_from_index(self):\n self.__mode = self.READ_MODE\n if not self.__storage:\n self.__load_index()\n\n try:\n tmp=dict(self.__storage)\n except Exception,e:\n print e\n self.__storage = None\n return None\n \n self.__close_storage()\n return tmp" ]
[ "0.79177237", "0.6884474", "0.6872445", "0.68435687", "0.67888933", "0.6783612", "0.6745491", "0.673843", "0.66856533", "0.6683366", "0.66697305", "0.6482477", "0.64072615", "0.64012605", "0.6378176", "0.6347014", "0.6316539", "0.6304061", "0.6221651", "0.6221545", "0.61547756", "0.6073499", "0.6048676", "0.6037838", "0.6032884", "0.60174096", "0.6012794", "0.5988359", "0.5986348", "0.5976547" ]
0.7745487
1
Returns a normalized domain on rating.rating to select the records to include in the count, avg, ... computations of the current model.
def _rating_domain(self):
    return ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', self.ids), ('consumed', '=', True), ('website_published', '=', True)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_average_rating(self):\n queryset = ArticleRating.objects.filter(article_id=self.get_object())\n return queryset.aggregate(Avg('rate')).get(\"rate__avg\")", "def average_ratings(self):\n return get_average_rate(\n model=Rating,\n article=self.pk\n )", "def dist_by_rating(self):\n return ratings_distribution", "def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df", "def all_ratings(self):\n return {\n 'average': self.average_rating(),\n 'total': self.proto.aggregateRating.ratingsCount,\n 'oneStar': self.proto.aggregateRating.oneStarRatings,\n 'twoStar': self.proto.aggregateRating.twoStarRatings,\n 'threeStar': self.proto.aggregateRating.threeStarRatings,\n 'fourStar': self.proto.aggregateRating.fourStarRatings,\n 'fiveStar': self.proto.aggregateRating.fiveStarRatings,\n }", "def aggregate_rating(self) -> object:\n return self._aggregate_rating", "def get_ratings():\n query = \"\"\"\n SELECT DISTINCT rating\n FROM film\n \"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n\n return pd.DataFrame(result, columns=['Rating'])", "def select_features(self):\r\n customer_vendor_ratings = self.customer_vendor_full[\r\n ['customer_id', 'vendor_id', 'order_rating']]\r\n customer_vendor_ratings = customer_vendor_ratings.groupby(['customer_id', 'vendor_id']) \\\r\n .mean().reset_index() # 69814 remained\r\n customer_vendor_ratings.rename(columns={'order_rating': 'mean_rating'}, inplace=True)\r\n return customer_vendor_ratings", "def dist_by_year(self):\n return ratings_by_year", "def get_queryset(self):\n return ArticleRating.objects.filter(article=self.get_object())", "def rating(self):\n average = self.review.all().aggregate(Avg('rating'))['rating__avg']\n if not average:\n return 0\n return average", "def all(self):\n ratings = []\n for i in range (1, self.pages()+1):\n ratings.extend(self.page(i))\n \n self._set_attrs_to_values({'ratings': ratings})\n return ratings", "def normalize_ratings_per_archetype(ratings):\n archetype_cols = [c for c in ratings.columns if c != \"name\"]\n n_cards = len(ratings[\"monored\"])\n for arch_col in archetype_cols:\n ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)\n return ratings", "def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)", "def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)", "def predict_rating(self, movie):\n\n #\n # option 1: SQLAlchemy ORM\n #\n UserMovies = db.aliased(Rating)\n MovieUsers = db.aliased(Rating)\n\n query = (db.session.query(Rating.user_id, Rating.score, UserMovies.score, MovieUsers.score)\n .join(UserMovies, UserMovies.movie_id == Rating.movie_id)\n .join(MovieUsers, Rating.user_id == MovieUsers.user_id)\n .filter(UserMovies.user_id == self.user_id)\n .filter(MovieUsers.movie_id == movie.movie_id))\n\n #\n # option 2: raw SQL\n #\n # sql = \"\"\"\n # SELECT ratings.user_id, ratings.score, user_movies.score, movie_users.score\n # FROM ratings AS user_movies\n # JOIN ratings\n # ON (user_movies.movie_id = ratings.movie_id)\n # JOIN ratings AS movie_users\n # ON 
(ratings.user_id = movie_users.user_id)\n # WHERE user_movies.user_id = :user_id\n # AND movie_users.movie_id = :movie_id\n # \"\"\"\n #\n # query = db.session.execute(sql, dict(user_id=self.user_id, movie_id=movie.movie_id))\n #\n\n known_ratings = {}\n paired_ratings = defaultdict(list)\n\n for rating_user_id, rating_score, user_movie_score, movie_user_score in query:\n paired_ratings[rating_user_id].append((user_movie_score, rating_score))\n known_ratings[rating_user_id] = movie_user_score\n\n similarities = []\n\n for _id, score in known_ratings.items():\n similarity = correlation.pearson(paired_ratings[_id])\n if similarity > 0:\n similarities.append((similarity, score))\n\n if not similarities:\n return None\n\n numerator = sum([score * sim for sim, score in similarities])\n denominator = sum([sim for sim, score in similarities])\n\n return numerator / denominator", "def mrr(self):\n _test = self.drop_bad_ratings()\n merged = pd.merge(left=_test, right=self.predict, on=['user', 'item'], how='right')[\n ['user', 'item', 'rating_x', 'rating_y']]\n nott = np.vectorize(lambda x: not x)\n mrrs = []\n for user in merged.user.unique():\n frame = merged[merged.user == user].sort_values(by='rating_y', ascending=False)\n true_ratings = frame.rating_x.values\n positions = np.where(nott(np.isnan(true_ratings)))[0]\n if len(positions) > 0:\n mrrs.append(1 / (positions[0] + 1))\n else:\n mrrs.append(0)\n\n return sum(mrrs) / len(mrrs)", "def get_ratings(self):\n return self.ratings", "def get_ratings(self):\n return self.ratings", "def average_rating(self):\n return self.proto.aggregateRating.starRating", "def findRatings():\n if request.method == 'POST':\n connector = appEngine.connect()\n rating = int(request.form['rating'])\n joinTable = connector.execute(\"SELECT movie.movieName, actor.actorName, rating.rating FROM movie INNER JOIN rating ON movie.movieID=rating.movie_ID INNER JOIN movie_actor ON movie.movieID=movie_actor.movie_ID INNER JOIN actor ON movie_actor.actor_ID=actor.actorID WHERE rating.rating >= (?);\", (rating))\n result = {'data': [dict(zip(tuple(joinTable.keys()), i)) for i in joinTable.cursor]}\n return result\n return render_template('rating_search.html')", "def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query", "def validate_rating(self):\n\n ok = (self.allteams['divname'] == 'JP Mixed (4/3)') & \\\n (self.allteams['season'] == 'Spring') & \\\n (self.allteams['type'] == 'Hat') & \\\n (self.allteams['year'] == 2011)\n sph2011 = self.allteams[ok]\n dfratings = []\n for i in tqdm(sph2011.index):\n team_id = sph2011.loc[i, 'teamid']\n dfratings.append(self.predict_team(str(team_id)))\n dfratings = pd.concat(dfratings)\n return dfratings", "def expose(self, rating):\n k = self.mu / self.sigma\n return rating.mu - k * rating.sigma", "def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()", "def user_ratings(user_id):\n return _fetch_records(f\"SELECT item_id, rating_type FROM ratings WHERE user_id = {user_id}\")", "def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None", "def normalizeRatings(Y, R):\n m, n = Y.shape\n Ymean = np.zeros((m, 1))\n Ynorm = np.zeros(Y.shape)\n for i in range(m):\n idx = np.where(R[i] == 1)[0]\n Ymean[i, 0] = Y[i, idx].mean()\n Ynorm[i, idx] = Y[i, idx]-Ymean[i, 0]\n return Ynorm, Ymean", "def 
get_meanrates(self):\n return np.asarray([ n.meanrate for n in self.alln.values() ])", "def average_rating(self):\n ratings = AttractionRating.objects.filter(attraction=self)\n total_rating = 0\n for rating in ratings:\n total_rating += rating.rating\n\n # If there are no rating, then we set the average to 0\n # otherwise we calculate the average\n try:\n avg = total_rating / len(ratings)\n except ZeroDivisionError:\n avg = total_rating\n\n return avg" ]
[ "0.55893254", "0.55250335", "0.54229623", "0.53542584", "0.533851", "0.53040886", "0.5296353", "0.5279337", "0.5270756", "0.52280354", "0.5141364", "0.5106121", "0.5101962", "0.5076106", "0.5063996", "0.5055503", "0.50505793", "0.50104856", "0.50104856", "0.4995368", "0.49942774", "0.4900818", "0.48769522", "0.48697034", "0.486877", "0.48640272", "0.48594335", "0.48473522", "0.4830442", "0.48187816" ]
0.6925912
0
Creates an empty list and then appends a random number to the list 'count' times. A CPU-heavy operation!
def list_append(count, id, out_list):
    for i in range(count):
        out_list.append(random.random())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newList(self):\n lst = []\n count = 0\n while count < 52:\n lst.append(randint(1, 1500))\n count += 1\n return lst", "def create_list(self):\n\n\t\trandom_list = random.sample(range(0, 500), 10)\n\n\t\treturn random_list", "def populate_empty_list():\n\n from random import randint, seed\n seed(56)\n l = []\n for i in range(100):\n l.append(randint(0, 100))\n print(l[34:56])", "def sample(self, count: int = 1) -> 'List':\n if count < 0:\n raise ValueError('The number of random items should be positive')\n\n return List(sample(self, count))", "def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]", "def rand_list(n, limit):\n g = []\n while n > 0:\n g.append(random.randrange(limit))\n n -= 1\n return g", "def generate_list(size):\n items = [randint(0, MAX_NUM) for i in range(size)]\n return items", "def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst", "def random_num():\n my_list = [i for i in range(10)]\n num_list = random.sample(my_list, 4)\n while num_list[0] == 0:\n num_list = random.sample(my_list, 4)\n \n return num_list", "def generate(count):\n return unpack_random_animals(generate_animals_randomly(count))", "def generate_list(length: int) -> list:\n\n return [randint(0, length + 1) for _ in range(length)]", "def generate(count):\n lst = []\n with open('data.txt', 'w+') as f:\n for i in range(0, count):\n st = str(random.random())\n f.write(st+\"\\n\")\n lst.append(st)\n return lst", "def generatoze(b):\r\n l = []\r\n for i in range(b):\r\n k = random.randint(0, 100)\r\n l.append(k)\r\n return l", "def random_ints(count=20, min=1, max=50):\n import random\n return [random.randint(min, max) for _ in range(count)]", "def list_gen(value):\n\n sample_list = random.sample(xrange(1, (value + 1)), value)\n return sample_list", "def generate_numbers():\n\n return random.sample(range(100), 10)", "def replicate(self, count):\n return map(lambda x: x.clone(),[self]*count)", "def get_rand_list(alist, th=.5):\n nlist = []\n counter = math.ceil( len(alist)*th )\n \n while(counter):\n num = nprand.randint(0,len(alist))\n assert num < len(alist)\n nlist.append( alist[num] )\n alist.pop(num)\n # del alist[num]\n counter = counter - 1\n \n return [alist, nlist]", "def random(self, n=1):\n # self.num_generated += n", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def get_me_random_list(n):\n a_list = list(range(n))\n random.shuffle(a_list)\n return(a_list)", "def generatesack(nmbrofitems):\n while nmbrofitems != 0:\n newitem = Sackitem(random.randint(1,10),random.randint(1,20))\n itemlist.append(newitem)\n nmbrofitems -= 1\n return itemlist", "def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def init_list(no_elements):\n\ti = 0\n\tnumbers\t= []\n\twhile i < no_elements:\n\t\tnumbers.append(i)\n\n\t\ti += 1\n\n\t# return initialized array\n\treturn numbers", "def generate_random_list(self, n):\n return [self.generate_random_payload((int, str, float, bool, list, dict)) for i in range(n)]", "def get_unsorted_list(size,MaxN=1000,MinN=0):\n return [random.randint(MinN,MaxN) for i in xrange(size)]", "def create_shared_noise(count):\n seed = 123\n noise = np.random.RandomState(seed).randn(count).astype(np.float32)\n return noise", 
"def generateRandomList(minval, maxval, size):\n return [random.randint(minval, maxval) for _ in range(size)]", "def create_wild_lists(amount,length):\r\n box = []\r\n\r\n k = 0\r\n while k < amount:\r\n sublist = []\r\n j = 0\r\n while j < length:\r\n num = random.randint(1, 100)\r\n sublist.append(num)\r\n j += 1\r\n box.append(sublist)\r\n k += 1\r\n\r\n if amount == 1:\r\n return sublist\r\n\r\n return box", "def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = [np.array(arr) for arr in zip(*batch)]\n\n return batch" ]
[ "0.7182441", "0.70022523", "0.6790341", "0.6682189", "0.6658717", "0.6651417", "0.6616092", "0.65375924", "0.64722615", "0.6330074", "0.6265442", "0.62584394", "0.6244736", "0.6236094", "0.6146634", "0.61383426", "0.6136133", "0.61209166", "0.6118684", "0.61057687", "0.6022139", "0.5993405", "0.59424907", "0.59208447", "0.584946", "0.58346075", "0.5833304", "0.57923144", "0.57808924", "0.5769924" ]
0.7692458
0
Get the output of a conv layer.
def conv_output(model, layer_name, img):
    # this is the placeholder for the input images
    input_img = model.input
    try:
        # this is the placeholder for the conv output
        out_conv = model.get_layer(layer_name).output
    except:
        raise Exception('Not layer named {}!'.format(layer_name))
    # get the intermediate layer model
    intermediate_layer_model = Model(inputs=input_img, outputs=out_conv)
    # get the output of intermediate layer model
    intermediate_output = intermediate_layer_model.predict(img)
    return intermediate_output[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_output(self, **kwargs):\n with tf.variable_scope(self.layer_scope):\n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(conv2d_transpose2d(incoming, W=self.W, output_shape=self.output_shape,\n strides=self.strides, padding=self.padding,\n data_format=self.data_format) + self.b)\n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n # Perform convolution\n conv = conv2d(incoming, self.W, strides=self.strides, padding=self.padding,\n dilation_rate=self.dilation_rate)\n \n # Add bias\n if self.b is not None:\n conv += self.b\n \n # Apply activation function\n self.out = self.a(conv)\n \n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming[..., 0::3] * 0.299 + incoming[..., 1::3] * 0.587 + incoming[..., 2::3] * 0.114\n return self.out", "def layerResult(self,layer,X,pos):\n if(layer[\"l_type\"]==\"conv\"):\n w=kernels[layer[\"kernel\"]]\n hparam=layer[\"hparams\"]\n return vecConv(X[0,:,:,:],w[:,:,:,pos[0]:pos[1]],hparam)\n else:\n hparam=layer[\"hparams\"]\n mode=layer[\"l_type\"]\n #batch size of 1\n return Pooling(X[0,:,:,pos[0]:pos[1]],hparam,mode)", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def getOutput(self):\r\n return self._output", "def GetOutput(self):\r\n return self._reader.GetOutput()", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming * self.factor\n return self.out", "def get_output(self):\n return self._output", "def get_output(self):\n return self.output", "def get_output(self):\n return self.output", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.reshape(incoming, self.shape)\n \n return self.out", "def get_output(self, **kwargs):\n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(incoming)\n \n return self.out", "def get_penultimate_layer(self):\n return self.model.outputs[1]", "def get_output(self):\r\n return 
self._api.get_output()", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n self.out = tf.tile(self.incoming, multiples=self.multiples)\n \n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = avgpool2D(incoming, ksize=self.ksize, strides=self.strides, padding=self.padding,\n data_format=self.data_format)\n return self.out", "def get_embedding_output(self):\n return self.embedding_output", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if (len(incoming.shape) > 2 and self.flatten_input) or (len(incoming.shape) > 3):\n # Flatten all but first dimension (e.g. flat seq_pos and features)\n X = tf.reshape(incoming, self.incoming_shape)\n else:\n X = incoming\n net = dot_product(X, self.W)\n if self.b is not None:\n net += self.b\n self.out = self.a(net)\n \n return self.out", "def get_layer_output(keras_model, layer_name, input_data, batch_size=32):\n\n intermediate_layer_model = Model(inputs=keras_model.get_input_at(0),\n outputs=keras_model.get_layer(name=layer_name).get_output_at(0))\n intermediate_output = intermediate_layer_model.predict(input_data, batch_size=batch_size)\n\n return intermediate_output", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(tf.add_n(incomings) / len(incomings))\n \n return self.out", "def kernel_output(self):\n\t\treturn self.kernel_shape_param('O')", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.stop_gradient(incoming)\n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.incoming_shape == self.scale_size:\n self.out = incoming\n else:\n self.out = resize2d(incoming, size=self.scale_size, method=self.method,\n align_corners=self.align_corners)\n if self.method_name == 'AREA':\n self.out = tf.stop_gradient(self.out)\n \n return self.out", "def pixelcnn_2d_output(input):\n if not isinstance(input, PixelCNN2DOutput):\n raise TypeError('`input` is not an instance of `PixelCNN2DOutput`: '\n 'got {!r}'.format(input))\n return input.horizontal", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = maxpool2D(incoming, ksize=self.ksize, strides=self.strides, padding=self.padding,\n 
data_format=self.data_format)\n return self.out" ]
[ "0.74672776", "0.6889656", "0.67878616", "0.6749953", "0.67351806", "0.6727751", "0.6727751", "0.6727751", "0.6722555", "0.6673674", "0.66717416", "0.65963495", "0.6580103", "0.6580103", "0.65754825", "0.65445495", "0.64597344", "0.6435824", "0.63882506", "0.6363812", "0.63456935", "0.6342576", "0.6335147", "0.63145834", "0.63058186", "0.6281657", "0.6249213", "0.6247857", "0.62346923", "0.623216" ]
0.78071994
0