query stringlengths 9-9.05k | document stringlengths 10-222k | metadata dict | negatives sequencelengths 30 | negative_scores sequencelengths 30 | document_score stringlengths 4-10 | document_rank stringclasses 2 values |
---|---|---|---|---|---|---|
Build a Query object from a set of facets, then call build() on it. | def from_facets(*args, **kwargs):
facets = Facets(self._default_library, *args, **kwargs)
filter = Filter(facets=facets)
qu = MockQuery("query string", filter=filter)
built = qu.build(search)
# Return the rest to be verified in a test-specific way.
return built | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n queryset = queryset.date_facet(field, **options)\n\n for field, options in applicable_filters[\"query_facets\"].items():\n queryset = queryset.query_facet(field, **options)\n\n return queryset",
"def _build_query(self, types=(), paths=(), depth=None, query=None, filterPermissions=True, globFilters=None):\n available_indexes = self.model_catalog.searcher.get_indexes()\n not_indexed_user_filters = {} # Filters that use not indexed fields\n\n user_filters_query = None\n types_query = None\n paths_query = None\n permissions_query = None\n\n partial_queries = []\n\n if query:\n \"\"\"\n # if query is a dict, we convert it to AdvancedQuery\n # @TODO We should make the default query something other than AdvancedQuery\n subqueries = []\n if isinstance(query, dict):\n for attr, value in query.iteritems():\n if isinstance(value, str) and '*' in value:\n subqueries.append(MatchGlob(attr, value))\n else:\n subqueries.append(Eq(attr, value))\n query = And(*subqueries)\n partial_queries.append(query)\n \"\"\"\n partial_queries.append(self._parse_user_query(query))\n\n # Build query from filters passed by user\n if globFilters:\n for key, value in globFilters.iteritems():\n if key in available_indexes:\n if user_filters_query:\n user_filters_query = And(query, MatchRegexp(key, '*%s*' % value))\n else:\n user_filters_query = MatchRegexp(key, '*%s*' % value)\n else:\n not_indexed_user_filters[key] = value\n\n if user_filters_query:\n partial_queries.append(user_filters_query)\n\n # Build the objectImplements query\n if not isinstance(types, (tuple, list)):\n types = (types,)\n types_query_list = [ Eq('objectImplements', dottedname(t)) for t in types ]\n if types_query_list:\n if len(types_query_list) > 1:\n types_query = Or(*types_query_list)\n else:\n types_query = types_query_list[0]\n\n partial_queries.append(types_query)\n\n # Build query for paths\n if paths is not False: # When paths is False we dont add any path condition\n if not paths:\n paths = ('/'.join(self.context.getPhysicalPath()) + '*', )\n elif isinstance(paths, basestring):\n paths = (paths,)\n\n \"\"\" OLD CODE. Why this instead of In? What do we need depth for?\n q = {'query':paths}\n if depth is not None:\n q['depth'] = depth\n paths_query = Generic('path', q)\n \"\"\"\n paths_query = In('path', paths)\n partial_queries.append(paths_query)\n\n # filter based on permissions\n if filterPermissions and allowedRolesAndGroups(self.context):\n permissions_query = In('allowedRolesAndUsers', allowedRolesAndGroups(self.context))\n partial_queries.append(permissions_query)\n\n # Put together all queries\n search_query = And(*partial_queries)\n return (search_query, not_indexed_user_filters)",
"def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n program_id = self.request.META.get('HTTP_X_SVMS_PROGRAM_ID')\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n level = self.request.GET.get(\"level\")\n description = self.request.GET.get(\"description\")\n status = self.request.GET.get(\"status\")\n job_tag = self.request.GET.get(\"job_tag\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(program_id=query) |\n Q(category=query) |\n Q(title__icontains=query) |\n #Q(category__category_name__icontains=query) |\n Q(description__icontains=query) |\n Q(job_tag__tag__in=str(query).split(\",\"))\n ), Q.OR)\n\n if query.isnumeric():\n q_object.add(\n Q(level__icontains=int(query)), Q.OR)\n\n q_object.add(Q(status=strtobool(query)), Q.OR) if query in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n else:\n if program_id:\n q_object.add(\n Q(program_id=program_id),\n Q.AND)\n\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n if description:\n q_object.add(\n Q(description__icontains=description), Q.AND)\n\n if job_tag:\n q_object.add(\n Q(job_tag__tag__in=str(job_tag).split(\",\")),\n Q.AND)\n\n if level:\n if level.isnumeric():\n q_object.add(\n Q(level__icontains=int(level)),\n Q.AND)\n else:\n raise Exception(\n ErrorMessage.WRONG_FIELD_TYPE.value.format(\"level\",\n \"numeric\"))\n\n q_object.add(Q(status=strtobool(status)), Q.AND) if status in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n return q_object",
"def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(category__category_name__icontains=query) |\n Q(title__icontains=query) \n ), Q.OR)\n\n else:\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n return q_object",
"def make_query(recid='', endpoint='_search', theq='', terms=None, facets=None, should_terms=None, consistent_order=True, **kwargs):\n q = deepcopy(theq)\n if recid and not recid.endswith('/'):\n recid += '/'\n if isinstance(q, dict):\n query = q\n if 'bool' not in query['query']:\n boolean = {'bool': {'must': []}}\n boolean['bool']['must'].append(query['query'])\n query['query'] = boolean\n if 'must' not in query['query']['bool']:\n query['query']['bool']['must'] = []\n elif q:\n query = {\n 'query': {\n 'bool': {\n 'must': [\n {'query_string': {'query': q}}\n ]\n }\n }\n }\n else:\n query = {\n 'query': {\n 'bool': {\n 'must': [\n {'match_all': {}}\n ]\n }\n }\n }\n\n if facets:\n if 'facets' not in query:\n query['facets'] = {}\n for k, v in facets.items():\n query['facets'][k] = {\"terms\": v}\n\n if terms:\n boolean = {'must': []}\n for term in terms:\n if not isinstance(terms[term], list):\n terms[term] = [terms[term]]\n for val in terms[term]:\n obj = {'term': {}}\n obj['term'][term] = val\n boolean['must'].append(obj)\n if q and not isinstance(q, dict):\n boolean['must'].append({'query_string': {'query': q}})\n elif q and 'query' in q:\n boolean['must'].append(query['query'])\n query['query'] = {'bool': boolean}\n\n # FIXME: this may only work if a term is also supplied above - code is a bit tricky to read\n if should_terms is not None and len(should_terms) > 0:\n for s in should_terms:\n if not isinstance(should_terms[s], list):\n should_terms[s] = [should_terms[s]]\n query[\"query\"][\"bool\"][\"must\"].append({\"terms\": {s: should_terms[s]}})\n\n sort_specified = False\n for k, v in kwargs.items():\n if k == '_from':\n query['from'] = v\n elif k == 'sort':\n sort_specified = True\n query['sort'] = v\n else:\n query[k] = v\n if \"sort\" in query:\n sort_specified = True\n\n if not sort_specified and consistent_order:\n query['sort'] = [{\"id\": {\"order\": \"asc\"}}]\n\n # print json.dumps(query)\n return query",
"def build_query_structure(self):\n query_list = list()\n filter_list = list()\n for key, val in self.q_dict.items():\n if key in self.es_query_keys:\n query_list.append(\n {\"match\": {\".\".join(key.split(\"_\")): val[0]}})\n elif key in self.es_date_keys:\n filter_list.append(\n {\"range\": {\".\".join(key.split(\"_\")): val}})\n elif \":\" in val[0]:\n #for handling queries like dd_dct=gte:1\n range_val = val[0].split(\":\")\n filter_list.append({\"range\": {\".\".join(key.split(\"_\")): {\n range_val[0]: int(range_val[1])}}})\n else:\n filter_list.append(\n {\"terms\": {\".\".join(key.split(\"_\")): val}})\n return query_list, filter_list",
"def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n category_name = self.request.GET.get(\"category_name\")\n o_net_soc_code = self.request.GET.get(\"o_net_soc_code\")\n description = self.request.GET.get(\"description\")\n job_title = self.request.GET.get(\"job_title\")\n level = self.request.GET.get(\"level\", '')\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(category__category_name__icontains=query) |\n Q(category__o_net_soc_code__icontains=query) |\n Q(category__description__icontains=query) |\n Q(category__job_title__description__icontains=query) |\n Q(category__job_title__title__icontains=query)\n ), Q.OR)\n\n if query.isnumeric():\n q_object.add(\n Q(category__job_title__level__icontains=int(query)), Q.OR)\n\n else:\n if category_name:\n q_object.add(\n Q(category__category_name__icontains=category_name),\n Q.AND)\n\n if o_net_soc_code:\n q_object.add(\n Q(category__o_net_soc_code__icontains=o_net_soc_code),\n Q.AND)\n\n if description:\n q_object.add((\n Q(category__description__icontains=description) |\n Q(\n category__job_title__description__icontains=description)\n ), Q.AND)\n\n if job_title:\n q_object.add(\n Q(category__job_title__title__icontains=job_title),\n Q.AND)\n\n if level:\n if level.isnumeric():\n q_object.add(\n Q(category__job_title__level__icontains=int(level)),\n Q.AND)\n else:\n raise Exception(\n ErrorMessage.WRONG_FIELD_TYPE.value.format(\"level\",\n \"numeric\"))\n\n return q_object",
"def set_choices_from_facets(self, facets):\n # borrowed from ppa-django;\n # populate facet field choices from current facets\n for key, facet_dict in facets.items():\n # restructure dict to set values of each key to tuples of (label, count)\n if key == \"type\":\n # for doctype, label should be translated, so use doctype object\n facet_dict = {\n label: (\n DocumentType.objects_by_label.get(label, _(\"Unknown type\")),\n count,\n )\n for (label, count) in facet_dict.items()\n }\n else:\n # for other formfields, label == facet name\n facet_dict = {\n label: (label, count) for (label, count) in facet_dict.items()\n }\n # use field from facet fields map or else field name as is\n formfield = self.solr_facet_fields.get(key, key)\n # for each facet, set the corresponding choice field\n if formfield in self.fields:\n self.fields[formfield].populate_from_facets(facet_dict)",
"def facets(self, facets):\n\n self._facets = facets",
"async def query(self, app_id, namespace, index_name, query, projection_fields,\n sort_expressions, limit, offset, cursor, keys_only,\n auto_discover_facet_count, facet_requests, facet_refinements,\n facet_auto_detect_limit):\n index_schema = await self._get_schema_info(app_id, namespace, index_name)\n # Convert Search API query to Solr query with a list of fields to search.\n query_options = query_converter.prepare_solr_query(\n query, index_schema.fields, index_schema.grouped_fields\n )\n # Process GAE projection fields\n solr_projection_fields = self._convert_projection(\n keys_only, projection_fields, index_schema\n )\n # Process GAE sort expressions\n solr_sort_fields = self._convert_sort_expressions(\n sort_expressions, index_schema\n )\n # Process GAE facet-related parameters\n refinement_filter = None\n if facet_refinements:\n # Determine if we need to filter by refinement.\n refinement_filter = facet_converter.generate_refinement_filter(\n index_schema.grouped_facet_indexes, facet_refinements\n )\n facet_items, stats_items = await self._convert_facet_args(\n auto_discover_facet_count, facet_auto_detect_limit, facet_requests,\n index_schema, query_options, refinement_filter\n )\n stats_fields = [stats_line for solr_field, stats_line in stats_items]\n\n # DO ACTUAL QUERY:\n solr_result = await self.solr.query_documents(\n collection=index_schema.collection,\n query=query_options.query_string, offset=offset, limit=limit,\n cursor=cursor, fields=solr_projection_fields, sort=solr_sort_fields,\n def_type=query_options.def_type, query_fields=query_options.query_fields,\n facet_dict=dict(facet_items) if facet_items else None,\n stats_fields=stats_fields or None, filter_=refinement_filter\n )\n\n # Convert Solr results to unified models\n docs = [_from_solr_document(solr_doc)\n for solr_doc in solr_result.documents]\n # Read stats results\n stats_results = []\n for solr_field, stats_line in stats_items:\n stats_info = solr_result.stats_results[solr_field.solr_name]\n stats_results.append((solr_field.gae_name, stats_info))\n # Convert facet results from Solr facets and stats\n facet_results = facet_converter.convert_facet_results(\n solr_result.facet_results, stats_results\n )\n result = SearchResult(\n num_found=solr_result.num_found, scored_documents=docs,\n cursor=cursor, facet_results=facet_results\n )\n return result",
"def buildReport(cls, queryList):\n boxList = list()\n for dslString,filterList in queryList:\n data = cls.__dataRequest(dslString[0])\n if data != '{}':\n for filter in filterList:\n try:\n if filter:\n filterObj = filter()\n filterObj.loadData(data)\n boxList.extend(filterObj.createBoxList())\n except Exception as e:\n devLogger.error(\"Could not create Filter object: \" + str(e))\n return boxList",
"def build_filters(self, view, filters=None):\n query_builder = self.get_query_builder(backend=self, view=view)\n return query_builder.build_query(**(filters if filters else {}))",
"def build_query(self, query, templates, registries):\n # build query builder\n query_builder = self.query_builder(query, self.sub_document_root)\n\n if type(templates) is str:\n templates = json.loads(templates)\n\n if type(registries) is str:\n registries = json.loads(registries)\n\n # if registries, check if activated\n list_activated_registry = list(\n oai_registry_api.get_all_activated_registry().values_list(\n \"id\", flat=True\n )\n )\n if len(registries) > 0:\n activated_registries = [\n activated_registy_id\n for activated_registy_id in registries\n if activated_registy_id in list_activated_registry\n ]\n else:\n activated_registries = list_activated_registry\n\n if len(templates) > 0:\n # get list of template ids\n list_template_ids = [template[\"id\"] for template in templates]\n # get all metadata formats used by the registries\n list_metadata_format = (\n oai_harvester_metadata_format_api.get_all_by_list_registry_ids(\n activated_registries\n )\n )\n # Filter metadata formats that use the given templates\n list_metadata_formats_id = [\n str(x.id)\n for x in list_metadata_format\n if x.template is not None\n and str(x.template.id) in list_template_ids\n ]\n query_builder.add_list_metadata_formats_criteria(\n list_metadata_formats_id\n )\n else:\n # Only activated registries\n query_builder.add_list_registries_criteria(activated_registries)\n\n # do not include deleted records\n query_builder.add_not_deleted_criteria()\n # create a raw query\n return query_builder.get_raw_query()",
"def facets(self, *args, **kwargs) -> Any:\n pass",
"def search_disjunctive_faceting(self, query, disjunctive_facets, params = {}, refinements = {}):\n if not(isinstance(disjunctive_facets, str)) and not(isinstance(disjunctive_facets, list)):\n raise AlgoliaException(\"Argument \\\"disjunctive_facets\\\" must be a String or an Array\")\n if not(isinstance(refinements, dict)):\n raise AlgoliaException(\"Argument \\\"refinements\\\" must be a Hash of Arrays\")\n\n if isinstance(disjunctive_facets, str):\n disjunctive_facets = disjunctive_facets.split(',')\n\n disjunctive_refinements = {}\n for key in refinements.keys():\n if (key in disjunctive_facets):\n disjunctive_refinements[key] = refinements[key]\n\n queries = []\n filters = []\n\n for key in refinements:\n r = list(map(lambda x: key + \":\" + x, refinements[key]))\n\n if (str(key) in disjunctive_refinements):\n filters.append(r)\n else:\n filters += r\n params[\"indexName\"] = self.index_name\n params[\"query\"] = query\n params[\"facetFilters\"] = filters\n queries.append(dict(params))\n for disjunctive_facet in disjunctive_facets:\n filters = []\n\n for key in refinements:\n if key != disjunctive_facet:\n r = list(map(lambda x: key + \":\" + x, refinements[key]))\n\n if (str(key) in disjunctive_refinements):\n filters.append(r)\n else:\n filters += r\n\n params[\"indexName\"] = self.index_name\n params[\"query\"] = query\n params[\"facetFilters\"] = filters\n params[\"page\"] = 0\n params[\"hitsPerPage\"] = 0\n params[\"attributesToRetrieve\"] = []\n params[\"attributesToHighlight\"] = []\n params[\"attributesToSnippet\"] = []\n params[\"facets\"] = disjunctive_facet\n params[\"analytics\"] = False\n queries.append(dict(params))\n answers = self.client.multiple_queries(queries)\n\n aggregated_answer = answers['results'][0]\n aggregated_answer['disjunctiveFacets'] = {}\n for i in range(1, len(answers['results'])):\n for facet in answers['results'][i]['facets']:\n aggregated_answer['disjunctiveFacets'][facet] = answers['results'][i]['facets'][facet]\n if (not facet in disjunctive_refinements):\n continue\n for r in disjunctive_refinements[facet]:\n if aggregated_answer['disjunctiveFacets'][facet][r] == None:\n aggregated_answer['disjunctiveFacets'][facet][r] = 0\n return aggregated_answer",
"def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeClass = kwargs.get(\"rspSubtreeClass\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n orderBy = kwargs.get(\"orderBy\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeClass is not None:\n opts+= \"&rsp-subtree-class=%s\" % rspSubtreeClass\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n if orderBy is not None:\n opts+= \"&order-by=%s\" % orderBy\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts",
"def add_facet_query(self, query_string):\n solr = self._clone()\n solr.params['facet'] = 'true'\n solr.params['facet.field'].append(query_string)\n return solr",
"def _build_query_filters(self, query: dict, filters: list) -> dict:\n\n for filter_tuple in filters:\n if not isinstance(filter_tuple, tuple) or len(filter_tuple) != 3:\n LOG.error(\"polling_filters tuple %s : invalid format or does not contain 3 elements - skipping this filter\", filter_tuple)\n continue\n if isinstance(filter_tuple[2], list) :\n # If \"value\" is a list of values then create a rule (json object) for each \n # value and use \"OR\" condition.\n condition = {'condition': \"OR\",\n 'rules': []}\n for value in filter_tuple[2]:\n rule = {}\n # Prepend fieldname with \"table.\" string\n rule['field'] = f\"table.{filter_tuple[0]}\"\n rule['operator'] = filter_tuple[1]\n rule['value'] = value\n condition['rules'].append(rule)\n query['rules'].append(condition)\n else:\n # Create a single rule for this tuple\n rule = {}\n field_name = f\"table.{filter_tuple[0]}\"\n rule['field'] = field_name\n rule['operator'] = filter_tuple[1]\n rule['value'] = filter_tuple[2]\n query['rules'].append(rule)\n return query",
"def build_query(self):\r\n # build query from base class add required field for joining with parent\r\n query = super().build_query()\r\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\r\n\r\n # get the remaining query parameters\r\n asset_class = request.args.get('asset_class')\r\n model_name = request.args.get('model_name')\r\n pricing_library = request.args.get('pricing_library')\r\n\r\n # process each parameter and, if valid, add as a query condition\r\n if asset_class is not None:\r\n query = query.filter(QuantModelMetric.asset_class == asset_class)\r\n if model_name is not None:\r\n query = query.filter(QuantModelMetric.model_name == model_name)\r\n if pricing_library is not None:\r\n query = query.filter(QuantModelMetric.pricing_library == pricing_library)\r\n return query",
"def build(self, query, resource, filters, subfilters, embeds=None,\n offset=None, limit=None, sorts=None, strict=True,\n stack_size_limit=100, dialect_override=None):\n # apply filters\n try:\n query = self.apply_filters(\n query,\n resource.model,\n filters=filters,\n nested_conditions=resource.get_required_nested_filters,\n whitelist=resource.whitelist,\n stack_size_limit=stack_size_limit,\n convert_key_names_func=resource.convert_key_name,\n gettext=resource.context.get(\"gettext\", None))\n except InvalidMqlException as exc:\n self._handle_filter_errors(\n resource=resource,\n exc=exc)\n query = resource.apply_required_filters(query)\n if subfilters or embeds:\n # more complex process.\n # don't apply offset/limit/sorts here\n # will need to be taken care of by apply_subquery_loads\n query = self.apply_subquery_loads(\n query=query,\n resource=resource,\n subfilters=subfilters,\n embeds=embeds,\n offset=offset,\n limit=limit,\n sorts=sorts,\n strict=strict,\n dialect_override=dialect_override\n )\n else:\n # simple query, apply offset/limit/sorts now\n if not sorts and offset is not None:\n sorts = []\n for key in resource.schema.id_keys:\n attr = resource.schema.fields.get(key).data_key or key\n sorts.append(SortInfo(attr=attr))\n if sorts:\n for sort in sorts:\n if not isinstance(sort, SortInfo):\n raise TypeError(\"Each sort must be of type SortInfo.\")\n try:\n query = self.apply_sorts(\n query, [sort], resource.convert_key_name)\n except AttributeError:\n if strict:\n raise resource.make_error(\n \"invalid_sort_field\", field=sort.attr)\n try:\n query = self.apply_offset(query, offset)\n except ValueError:\n if strict:\n raise resource.make_error(\n \"invalid_offset_value\", offset=offset)\n try:\n query = self.apply_limit(query, limit)\n except ValueError:\n if strict:\n raise resource.make_error(\n \"invalid_limit_value\", limit=limit)\n return query",
"def _build_filters(self, criteria: Q):\n composed_query = query.Q()\n\n if criteria.connector == criteria.AND:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query & self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query & ~lookup.as_expression()\n else:\n composed_query = composed_query & lookup.as_expression()\n else:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query | self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query | ~lookup.as_expression()\n else:\n composed_query = composed_query | lookup.as_expression()\n\n return composed_query",
"def test_facet_query_criteria(cbcsdk_mock):\n api = cbcsdk_mock.api\n facet_q = api.select(ResultFacet).run_id(1).set_device_os([\"WINDOWS\"]).set_device_ids([1, 2, 3]) \\\n .set_device_names([\"Win7x64\", \"Win10\"]).set_policy_ids([1, 2]).set_policy_names([\"default\", \"policy2\"]) \\\n .set_statuses([\"not_started\", \"matched\"])\n assert facet_q._build_request(rows=100) == {\"criteria\": {\n \"device.os\": [\"WINDOWS\"],\n \"device.id\": [1, 2, 3],\n \"device.name\": [\"Win7x64\", \"Win10\"],\n \"device.policy_id\": [1, 2],\n \"device.policy_name\": [\"default\", \"policy2\"],\n \"status\": [\"not_started\", \"matched\"]\n }, \"query\": \"\", \"terms\": {\"fields\": [], \"rows\": 100}}",
"def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts",
"async def _convert_facet_args(self, auto_discover_facet_count,\n facet_auto_detect_limit, facet_requests,\n index_schema, query_options, refinement_filter):\n # Process Facet params\n facet_items = []\n stats_items = []\n if auto_discover_facet_count:\n # Figure out what facets are specified for greater number of documents.\n atom_facets_stats = await self._get_facets_stats(\n index_schema, query_options, refinement_filter\n )\n # Add auto-discovered facets to the list.\n auto_facet_items, auto_stats_items = facet_converter.discover_facets(\n atom_facets_stats, auto_discover_facet_count,\n facet_auto_detect_limit\n )\n facet_items += auto_facet_items\n stats_items += auto_stats_items\n if facet_requests:\n # Add explicitly specified facets to the list.\n explicit_facet_items, explicit_stats_items = (\n facet_converter.convert_facet_requests(\n index_schema.grouped_facet_indexes, facet_requests\n )\n )\n facet_items += explicit_facet_items\n stats_items += explicit_stats_items\n return facet_items, stats_items",
"def select(self,\n query_dict,\n groups=False,\n facets=False,\n stats=False,\n **kwargs\n ):\n\n if kwargs:\n query_dict.update(kwargs)\n\n response = self.client.post(\n self._get_collection_url('select'),\n body=json.dumps({'params': query_dict})\n )\n\n data = {}\n if groups and 'grouped' in response:\n data['groups'] = response['grouped']\n\n if facets and 'facet_counts' in response:\n data['facets'] = response['facet_counts']\n\n if stats and 'stats' in response:\n data['stats'] = response['stats']\n\n if 'response' in response and 'docs' in response['response']:\n response_data = response['response']\n data['docs'] = response_data['docs']\n data['total'] = response_data.get('numFound', len(data['docs']))\n\n return data",
"def make_slicer_query(\n self,\n base_table: Table,\n joins: Sequence[Join] = (),\n dimensions: Sequence[Field] = (),\n metrics: Sequence[Field] = (),\n filters: Sequence[Filter] = (),\n orders: Sequence = (),\n ) -> Type[QueryBuilder]:\n query = self.query_cls.from_(base_table, immutable=False)\n elements = flatten([metrics, dimensions, filters])\n\n # Add joins\n join_tables_needed_for_query = find_required_tables_to_join(elements, base_table)\n\n for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):\n query = query.join(join.table, how=join.join_type).on(join.criterion)\n\n # Add dimensions\n for dimension in dimensions:\n dimension_term = self.transform_field_to_query(dimension, self.trunc_date)\n query = query.select(dimension_term)\n\n if dimension.groupable:\n query = query.groupby(dimension_term)\n\n # Add filters\n for fltr in filters:\n query = query.having(fltr.definition) if fltr.is_aggregate else query.where(fltr.definition)\n\n # Add metrics\n metric_terms = [self.transform_field_to_query(metric) for metric in metrics]\n if metric_terms:\n query = query.select(*metric_terms)\n\n # In the case that the orders are determined by a field that is not selected as a metric or dimension, then it needs\n # to be added to the query.\n select_aliases = {el.alias for el in query._selects}\n for (orderby_field, orientation) in orders:\n orderby_term = self.transform_field_to_query(orderby_field)\n query = query.orderby(orderby_term, order=orientation)\n\n if orderby_term.alias not in select_aliases:\n query = query.select(orderby_term)\n\n return query",
"def build(self, pf_query, search_field=\"keywords\"):\n stack = Stack()\n\n if len(pf_query) == 1:\n stack.push(Q('match', **{search_field: pf_query[0][1]}))\n\n for token in pf_query:\n if token in bool_values:\n q1 = stack.pop()\n q2 = stack.pop()\n\n result = q1 & q2 if token == 'AND' else q1 | q2\n stack.push(result)\n else:\n q = None\n if token[0] == 'KEYWORD':\n q = Q('match', **{search_field: token[1]})\n else:\n q = Q('match', **{search_field: \" \".join(token[1])})\n stack.push(q)\n\n return stack.pop()",
"def facet(self, expr):\n return DataCube.from_expr(self, expr)",
"def facets(self, fieldlist, max_rows=0):\n if not all((field in USBDeviceQuery.VALID_FACET_FIELDS) for field in fieldlist):\n raise ApiError(\"One or more invalid term field names\")\n request = self._build_request(0, -1, False)\n del request[\"rows\"]\n request[\"terms\"] = {\"fields\": fieldlist, \"rows\": max_rows}\n url = self._build_url(\"/_facet\")\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n return result.get(\"terms\", [])",
"def build_query(self):\r\n\r\n # this filter is required\r\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\r\n\r\n # get query parameters (parameters which are not here are ignored)\r\n is_active = request.args.get('is_active')\r\n frequency = request.args.get('frequency')\r\n threshold_type = request.args.get('threshold_type')\r\n sort = request.args.get('sort')\r\n\r\n # process each parameter, and if valid add it as a query condition\r\n if is_active is not None:\r\n is_active = is_active.lower() == 'true'\r\n query = Metric.query.filter_by(is_active=is_active)\r\n if frequency is not None:\r\n try:\r\n frequency = Frequency.from_name(frequency)\r\n except ValueError as e:\r\n msg = f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(frequency=frequency)\r\n if threshold_type is not None:\r\n try:\r\n threshold_type = ThresholdType.from_name(threshold_type)\r\n except ValueError as e:\r\n msg = f\"Invalid 'threshold_type': {threshold_type}. Use one of \" \\\r\n f\"{ThresholdType.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(threshold_type=threshold_type)\r\n if sort is not None and sort.lstrip(\"-\") == 'metric_id':\r\n query = query.order_by(Metric.metric_id.desc())\r\n else:\r\n query = query.order_by(Metric.metric_id)\r\n\r\n return query"
] | [
"0.6496745",
"0.6315449",
"0.6258068",
"0.60086054",
"0.5911915",
"0.57755864",
"0.573584",
"0.5670942",
"0.56401163",
"0.5602675",
"0.5568983",
"0.5543967",
"0.553353",
"0.54699177",
"0.5458636",
"0.54096997",
"0.5407078",
"0.5323815",
"0.53104126",
"0.52941847",
"0.52759683",
"0.5264871",
"0.5213126",
"0.5193002",
"0.5132204",
"0.5109384",
"0.51071876",
"0.51043326",
"0.50813097",
"0.50769997"
] | 0.7220995 | 0 |
Validate the 'easy' part of the sort order -- the tiebreaker fields. Return the 'difficult' part. | def validate_sort_order(filter, main_field):
# The tiebreaker fields are always in the same order, but
# if the main sort field is one of the tiebreaker fields,
# it's removed from the list -- there's no need to sort on
# that field a second time.
default_sort_fields = [
{x: "asc"} for x in ['sort_author', 'sort_title', 'work_id']
if x != main_field
]
assert default_sort_fields == filter.sort_order[1:]
return filter.sort_order[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_admin_sort_by(sort_on):\n try:\n sort_attributes = ['title', 'md_pub_date', 'summary']\n if sort_on in sort_attributes:\n return sort_on\n else:\n return 'title'\n except Exception as e:\n print \"Exception: \" + str(e)",
"def sorting_by_criteria(self, result):\r\n\t\tresult = sorted(result, key=lambda r: r[0])\r\n\t\tflag = False\r\n\t\tm = result[0][0]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][0] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" in prewin status, compare useful_amount only \"\"\"\r\n\t\tif (result[0][0] == 0):\r\n\t\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\t\t\ttest = \"\"\r\n\t\t\tfor r in result:\r\n\t\t\t\ttest += \"[{0}, {1}, {2}, {3}], \".format(r[0], r[1], r[2], r[3])\r\n#\t\t\tprint \"prewin status: {0}\".format(test)\r\n\t\t\tself.current_best_state = [result[0][0], result[0][1], result[0][2]]\r\n\t\t\treturn result[0][3]\r\n\r\n\t\t\"\"\" sort by score (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[2], reverse=True)\r\n\t\tflag = False\r\n\t\tm = result[0][2]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][2] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" sort by useful card amount (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\r\n\t\t\"\"\" choose one to discard \"\"\"\r\n\t\tdcard = result[0][3]\r\n\t\tm = result[0][1]\r\n\t\tbest = result[0]\r\n\t\tfor r in result:\r\n\t\t\tif (r[1] != m): break\r\n\t\t\tctype = GameBoard.CardType(r[3])\r\n\t\t\tif (ctype == 4) and (self.word_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\t\tif (ctype == 5) and (self.wind_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\tself.current_best_state = [r[0], r[1], r[2]]\r\n\t\treturn dcard",
"def part_2(rules: Rules) -> int:\n\n rules_with_myself = add_myself(rules)\n happiness, _ = max(generate_arrangements(rules_with_myself))\n print(f\"part 2: optimal arrangement (including myself) brings {happiness} happiness\")\n return happiness",
"def answer_sorter(thing):\r\n try:\r\n return float(thing[0])\r\n except ValueError:\r\n # Put all non-numerical answers first.\r\n return float('-inf')",
"def check_sortable_fields(fields, result):\n sortable_fields = get_sortable_fields(result, verbose=False)\n for field in fields:\n if field not in sortable_fields:\n err_str = \"The field %s is not a sortable field for \" % (field)\n err_str += \"result %s\" % (result)\n raise ValueError(err_str)",
"def part_1(rules: Rules) -> int:\n\n happiness, _ = max(generate_arrangements(rules))\n print(f\"part 1: optimal arrangement brings {happiness} happiness\")\n return happiness",
"def validated(self, base):\n return sorted(self.decisions, key=_sort_key, reverse=True)",
"def compare(self, other):\n # First, compare sections\n if (self.section != \"\" or other.section != \"\") and self.section != other.section:\n if self.section == \"\" and other.section != \"\":\n return -1\n elif self.section != \"\" and other.section == \"\":\n return 1\n else:\n if self.section > other.section:\n return 1\n else:\n return -1\n\n # Next, compare topics\n if self.topic != other.topic:\n stopic = _split(self.topic)\n otopic = _split(other.topic)\n if stopic[0] != otopic[0]:\n if stopic[0] > otopic[0]:\n return 1\n else:\n return -1\n if float(stopic[1]) > float(otopic[1]):\n return 1\n else:\n return -1\n\n # Then sub-topics\n if self.sub_topic != other.sub_topic:\n result = _compare(self.sub_topic, other.sub_topic)\n if result != 0:\n return result\n\n # Then cutters\n if self.cutter != other.cutter:\n result = _compare(self.cutter, other.cutter)\n if result != 0:\n return result\n\n # Then normal after-effects in V-Y-O-C priority\n if self.version != other.version:\n if self.version > other.version:\n return 1\n return -1\n\n if self.year != other.year:\n if self.year > other.year:\n return 1\n return -1\n\n # We must take the work letter into account\n if self.work_letter != other.work_letter:\n if self.work_letter > other.work_letter:\n return 1\n return -1\n\n # If any unknown additions are present, try to guess at those.\n if self.other != other.other:\n # TODO: Try to guess numbers vs words and such\n if self.other > other.other:\n return 1\n return -1\n\n # Copy is always evaluated last\n if self.copy != other.copy:\n if self.copy > other.copy:\n return 1\n return -1\n\n return 0 # All else fails, we must be equal.",
"def clean(self):\n if self.hours_played < 0:\n raise ValidationError(\"Hours played cannot be negative.\")\n\n zelda_oot = Game.objects.filter(game_name='The Legend of Zelda: Ocarina of Time')\n if zelda_oot:\n zelda_oot = zelda_oot[0]\n if self != zelda_oot and self.hours_played > zelda_oot.hours_played:\n raise ValidationError(\"Liar.\")\n\n super(Game, self).clean()",
"def validate_doi_view_sort_by(sort_on):\n try:\n sort_attributes = ['title', 'md_pub_date', 'summary', 'assigned_doi_ark']\n if sort_on in sort_attributes:\n return sort_on\n else:\n return 'title'\n except Exception as e:\n print \"Exception: \" + str(e)",
"def input_user_choice_sorting(self):\r\n try:\r\n user_choice = input(\"Classer par\\n Ordre alphabétique (entrez '1')\\n Classement ELO (entrez '2')\\n\")\r\n if user_choice == '1' or user_choice == '2':\r\n return user_choice\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n print(\"Veuillez choisir 1 ou 2\")\r\n return self.input_user_choice_sorting()",
"def selection_sort(book_array, sorting):\r\n if sorting == \"author\":\r\n for i in range(len(book_array)):\r\n min_index = i\r\n\r\n for b in range(i + 1, len(book_array)):\r\n author1 = str(book_array[min_index].author)\r\n author2 = str(book_array[b].author)\r\n author1 = author1.split(\" \")\r\n author2 = author2.split(\" \")\r\n if author1[len(author1) - 1] > author2[len(author2) - 1]:\r\n min_index = b\r\n\r\n book_array[i], book_array[min_index] = book_array[min_index], book_array[i]\r\n\r\n elif sorting == \"title\":\r\n for i in range(len(book_array)):\r\n min_index = i\r\n\r\n for b in range(i + 1, len(book_array)):\r\n title1 = str(book_array[min_index].title)\r\n title2 = str(book_array[b].title)\r\n title1 = title1.split(\" \")\r\n title2 = title2.split(\" \")\r\n if title1[0] == \"The\" and title2[0] == \"The\":\r\n if str(title1[1]) > str(title2[1]):\r\n min_index = b\r\n elif title1[0] == \"The\" and title2[0] != \"The\":\r\n if str(title1[1]) > str(book_array[b].title):\r\n min_index = b\r\n elif title1[0] != \"The\" and title2[0] == \"The\":\r\n if str(book_array[min_index].title) > str(title2[1]):\r\n min_index = b\r\n elif str(book_array[min_index].title) > str(book_array[b].title):\r\n min_index = b\r\n\r\n book_array[i], book_array[min_index] = book_array[min_index], book_array[i]",
"def sort():\n return -1",
"def sort_hgf_fields(config,doctype,inst):\n\torder = config[\"order\"] # get all field defined under config['order'] (1.)\n\torder_index = 2\n\tdefault_set = config[\"default_form\"]\n\tdefault_order = {} # dict with fields from default_form and proper order (2.)\n\tfor k in (default_set.keys()):\n\t\tif k in order.keys(): #field in config[\"order\"]\n\t\t\tdefault_order[k] = order[k]\n\t\tif k in default_set.keys() and default_set[k][order_index] !=\"-\" : #field in config[\"default_form\"] and not \"-\"\n\t\t\tdefault_order[k] = default_set[k][order_index] \n\t\t\n\tif doctype in config[inst].keys(): #get the institutional changes (3.)\n\t\tinst_changes = config[inst][doctype]\n\telse:\n\t\tinst_changes = {}\n\t\n\t\n\tinst_order = {}\n\tfor key in inst_changes.keys():\n\t\tif inst_changes[key] == \"None\":\n\t\t\tif key in default_order.keys(): #delete fields from institutional changes which are set \"None\" and in default_form\n\t\t\t\tdel default_order[key]\n\t\t\tcontinue\n\t\tif inst_changes[key][order_index] == \"-\": #we take the default\n\t\t\tif key in default_order.keys(): pass #already assigned by default_order\n\t\t\telse: \n\t\t\t\tif key in order.keys(): #get the order from config['order']\n\t\t\t\t\tinst_order[key] = order[key]\n\t\t\t\telse: warning(\"Please define the order (config['order']) for field %s in doctype: %s\" %(key,doctype))\n\t\t\tcontinue\n\t\tinst_order[key] = inst_changes[key][order_index] #institutional changes\t\t\t\n\t\t\n\tfinal_order = {}\n\t#get institutional changes in order\n\tmax_score = max(map(int,default_order.values() + inst_order.values())) #get all order values as string, convert strings to int and get the max value\n\tfor k in (default_order.keys() + inst_changes.keys()): \t\n\t\tif k in inst_changes.keys():\n\t\t\tif inst_changes[k] == \"None\": \n\t\t\t\tcontinue\n\t\t\tif inst_changes[k][order_index] == \"-\":\n\t\t\t\tif k in default_order.keys(): #take the default_order\n\t\t\t\t\tfinal_order[k] = default_order[k]\n\t\t\t\telse: \n\t\t\t\t\tif k in order.keys():\n\t\t\t\t\t\tfinal_order[k] = order[k]\n\t\t\t\t\telse: #no default. sort this field to the end\n\t\t\t\t\t\twarning(\"The field %s in doctype: %s is sorted to the end of the form\" %(k,doctype))\n\t\t\t\t\t\tfinal_order[k] = max_score\n\t\t\t\t\t\tmax_score +=1\n\t\t\telse: final_order[k] = inst_changes[k][order_index] #take order from institutional changes\n\t\t\t\n\t\telse: \n\t\t\tfinal_order[k] = default_order[k] # take order from default_form\n\t\n\tfinal_order[\"hgf_end\"] = max_score\n\t\n\tnew_order = sorted(final_order.items(),key=lambda x: int(x[1])) #create list with tuples sorted by value\n\thidden_fields = get_hidden_fields(config) #get hidden fields\n\t\n\tsorted_hgf_fields = []\t\t\n\tfor i in new_order:\n\t\tsorted_hgf_fields.append(i[0])\n\t\n\t# add all hidden fields\n\tfor i in hidden_fields:\n\t\tif i in sorted_hgf_fields: continue\n\t\tsorted_hgf_fields.append(i)\n\treturn sorted_hgf_fields",
"def _check_empty_and_sort_cost_pairs(self, pair_description, pairs):\n\n if pairs is None or len(pairs) == 0:\n raise ValueError(f\"Empty {pair_description} are provided.\")\n\n # sort based on power output\n pairs.sort(key=lambda p: p[0])\n\n return",
"def solve_sort(self):\n if self.k < 0 or self.k > len(self.numbers):\n return None\n\n self.numbers.sort() # in place\n return self.numbers[-self.k]",
"def _validate_ordering_customer_50A(self, val):\n return val",
"def get_sorting_option_from_user():\n while True:\n try:\n sorting_option = int(input(\"Select the type of song you want to download:\\n\"\n \"1. New\\n\"\n \"2. Top\\n\"\n \"3. Most Difficult\\n\\n\"\n \"Enter: \"))\n except ValueError:\n print(\"\\nSorry, that is not a number.\\n\")\n continue\n\n valid_sorting_options = ['new', 'top', 'most-difficult']\n if sorting_option in range(1, 4):\n return valid_sorting_options[sorting_option - 1]\n\n print(f\"\\nSorry, {sorting_option} is not a valid option.\")",
"def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True",
"def _validate_ordering_customer_50K(self, val):\n return val",
"def valid_select(difficulty):\r\n\t\r\n\tif difficulty == \"easy\":\r\n\t\tchoice = \"valid\" \r\n\telif difficulty == \"medium\":\r\n\t\tchoice = \"valid\" \r\n\telif difficulty == \"hard\":\r\n\t\tchoice = \"valid\" \r\n\telse:\r\n\t\tchoice = \"unvalid\"\r\n\t\t\r\n\treturn choice",
"def _validate_ordering_customer_50F(self, val):\n return val",
"def _validate_ordering_institution_52D(self, val):\n return val",
"def _validate_ordering_institution_52A(self, val):\n return val",
"def role_reorder_valid_roles_sort_key(item):\n return item[1]",
"def min_parts():\n # you must replace this with your own value\n return -1",
"def test_fields_effort_time_units_required(self, _mock_check):\n field = EffortField()\n errors = field.check()\n self.assertEqual(len(errors), 1)\n error = errors[0]\n self.assertEqual(\n error.msg, \"Effort fields must define a 'time_units' attribute.\"\n )\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1010\")",
"def _check_difficulty_parameters(difficulty, model):\n max_value = difficulty.shape[1] + 1\n\n if model in [\"grm\"]:\n # Check that all the arguments are sorted\n if not np.all(difficulty[:, :-1] < difficulty[:, 1:]):\n raise AssertionError(\"Difficulty Parameters must be \"\n \"in ascending order\")\n\n elif model in ['gum']:\n # Parameters must be odd\n if max_value % 2:\n raise AssertionError(\"There must be an odd number of \"\n \"difficulty parameters\")\n\n # Parameters must be skew-symmetric about the center point\n middle_index = (difficulty.shape[1] - 1) // 2\n adjusted_difficulty = (difficulty -\n difficulty[:, middle_index][:, None])\n\n if not np.all(np.abs(adjusted_difficulty.sum(axis=1)) < 1e-7):\n raise AssertionError(\"Difficulty Parameters must be \"\n \"symmetric about offset\")\n\n max_value = middle_index + 1\n\n return max_value",
"def human_sort( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n alphanum_key = None\n try:\n l.sort( key=alphanum_key )\n except TypeError:\n l.sort()\n return l",
"def clean(self):\n cleaned_data = super().clean()\n hora_inicio = cleaned_data.get(\"hora_inicio\")\n hora_final = cleaned_data.get(\"hora_final\")\n if hora_inicio > hora_final or hora_inicio == hora_final:\n raise forms.ValidationError(\"Error. La hora de inicio debe ser menor que la hora final\")"
] | [
"0.5422345",
"0.51779574",
"0.51326853",
"0.51216465",
"0.5098696",
"0.49896",
"0.498799",
"0.49557397",
"0.49503902",
"0.49242207",
"0.48612294",
"0.48536846",
"0.48222044",
"0.48191088",
"0.48098657",
"0.4788777",
"0.47594061",
"0.47372675",
"0.4728122",
"0.47202504",
"0.47033507",
"0.470034",
"0.4699107",
"0.4688657",
"0.46721342",
"0.46489784",
"0.46413743",
"0.461262",
"0.46050286",
"0.4588063"
] | 0.6764114 | 0 |
Verify that `filter` is a boolean filter that matches one of a number of possibilities. Return those possibilities. | def dichotomy(filter):
assert "bool" == filter.name
assert 1 == filter.minimum_should_match
return filter.should | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_filter_mixed_function(self):\n for none_type in (False, True):\n for all_type in (False, True):\n for any_type in (False, True, None):\n result = none_type is False and all_type is True \\\n and (any_type is None or any_type is True)\n self._test_filter(none_type, all_type, any_type, result)",
"def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt",
"def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))",
"def _validate_filter(filter):\n\n if filter.HasField('composite_filter'):\n for sub_filter in filter.composite_filter.filters:\n _validate_filter(sub_filter)\n elif filter.HasField('property_filter'):\n if filter.property_filter.op in UNSUPPORTED_OPERATORS:\n raise ValueError('Query cannot have any inequality filters.')\n else:\n pass",
"def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt",
"def _ApplyTestFilter(testfilter, bot_spec):\n if testfilter:\n return [(botname, set(testfilter) | (tests & set(['compile'])))\n for botname, tests in bot_spec]\n else:\n return bot_spec",
"def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]",
"def _test_filter(self, none_type, all_type, any_type, result):\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(lambda x: none_type, ftype='none')\n self.es.register_filter(lambda x: all_type, ftype='all')\n if any_type is not None:\n self.es.register_filter(lambda x: any_type, ftype='any')\n self.assertEqual(self.es.streamfilter(self.data), result,\n 'Test EventStreams filter mixed function failed for\\n'\n \"'none': {}, 'all': {}, 'any': {}\\n\"\n '(expected {}, given {})'\n .format(none_type, all_type, any_type,\n result, not result))",
"def match(self, filter):\n return filter in self.tags or filter in self.memo",
"def filtered(filter, xy):\n try:\n x, y = xy\n return bool(filter[x][y])\n except IndexError:\n return False",
"def pass_filters(device):\n if opts.filter_on_group:\n if device.owningTeam not in opts.filter_on_group:\n return False\n if opts.filter_on_type:\n if device.deviceType not in opts.filter_on_type:\n return False\n\n return True",
"def apply_filter(atom, isofilters):\n if 'None' in isofilters[0][0]:\n return True\n\n functionfilters = [isofilter for isofilter in isofilters if not isofilter[-1] == 'None']\n functionfilters = ['{}(atom.{}){}={}'.format(f[3], f[0], f[2], f[1]).replace('True', '=').replace('False', '!') for\n f in functionfilters]\n\n if all(getattr(atom, isofilter[0]) == isofilter[1] for isofilter in isofilters if\n isofilter[2] == 'True' and isofilter[-1] == 'None'):\n if all(getattr(atom, isofilter[0]) != isofilter[1] for isofilter in isofilters if\n isofilter[2] == 'False' and isofilter[-1] == 'None'):\n for functionfilter in functionfilters:\n if not eval(functionfilter):\n return False\n return True\n else:\n return False",
"def _CheckFilter(self, filter, values):\n try:\n match = Query.FILTER_REGEX.match(filter)\n if not match:\n raise datastore_errors.BadFilterError(\n 'Could not parse filter string: %s' % str(filter))\n except TypeError:\n raise datastore_errors.BadFilterError(\n 'Could not parse filter string: %s' % str(filter))\n\n property = match.group(1)\n operator = match.group(3)\n if operator is None:\n operator = '='\n\n if isinstance(values, tuple):\n values = list(values)\n elif not isinstance(values, list):\n values = [values]\n if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):\n raise datastore_errors.BadValueError(\n 'Filtering on %s properties is not supported.' % typename(values[0]))\n\n if operator in self.INEQUALITY_OPERATORS:\n if self.__inequality_prop and property != self.__inequality_prop:\n raise datastore_errors.BadFilterError(\n 'Only one property per query may have inequality filters (%s).' %\n ', '.join(self.INEQUALITY_OPERATORS))\n elif len(self.__orderings) >= 1 and self.__orderings[0][0] != property:\n raise datastore_errors.BadFilterError(\n 'Inequality operators (%s) must be on the same property as the '\n 'first sort order, if any sort orders are supplied' %\n ', '.join(self.INEQUALITY_OPERATORS))\n\n if (self.__kind is None and\n property != datastore_types._KEY_SPECIAL_PROPERTY):\n raise datastore_errors.BadFilterError(\n 'Only %s filters are allowed on kindless queries.' %\n datastore_types._KEY_SPECIAL_PROPERTY)\n\n if property in datastore_types._SPECIAL_PROPERTIES:\n if property == datastore_types._KEY_SPECIAL_PROPERTY:\n for value in values:\n if not isinstance(value, Key):\n raise datastore_errors.BadFilterError(\n '%s filter value must be a Key; received %s (a %s)' %\n (datastore_types._KEY_SPECIAL_PROPERTY, value, typename(value)))\n\n return match",
"def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True",
"def filter_all(_):\n return True",
"def test_boolean_filter(self):\n self.assertEqual(len(self.collection.results()), 6)\n\n result = get_filter_items(\n self.collection_uid, \"exclude_from_nav\", cache_enabled=False\n )\n\n self.assertEqual(len(result), 3)\n self.assertEqual(get_data_by_val(result, \"all\")[\"count\"], 6)\n self.assertEqual(get_data_by_val(result, \"all\")[\"selected\"], True)\n self.assertEqual(get_data_by_val(result, True)[\"count\"], 2)\n self.assertEqual(get_data_by_val(result, False)[\"count\"], 4)\n\n # test narrowed down results\n narrowed_down_result = get_filter_items(\n self.collection_uid,\n \"exclude_from_nav\",\n request_params={\"exclude_from_nav\": True},\n narrow_down=True,\n show_count=True,\n cache_enabled=False,\n )\n\n self.assertEqual(\n len(narrowed_down_result), 2, msg=\"narrowed result length should be 2\"\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, True)[\"selected\"],\n True, # noqa\n msg=\"Test that 'Yes' is selected, matching the query\",\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, \"all\")[\"count\"],\n 6,\n msg=\"Test that there are 3 results if unselected\",\n )\n\n # test narrowed down results\n narrowed_down_result = get_filter_items(\n self.collection_uid,\n \"exclude_from_nav\",\n request_params={\"exclude_from_nav\": False},\n narrow_down=True,\n show_count=True,\n cache_enabled=False,\n )\n\n self.assertEqual(\n len(narrowed_down_result), 2, msg=\"narrowed result length should be 2\"\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, False)[\"selected\"],\n True, # noqa\n msg=\"Test that 'No' is selected, matching the query\",\n )\n self.assertEqual(\n get_data_by_val(narrowed_down_result, \"all\")[\"count\"],\n 6,\n msg=\"Test that there are 3 results if unselected\",\n )",
"def evaluate_boolean_filters(isovar_result, filter_flags):\n filter_values = OrderedDict()\n for boolean_filter_name in filter_flags:\n if boolean_filter_name.startswith(\"not_\"):\n boolean_field_name = boolean_filter_name[4:]\n negate = True\n else:\n boolean_field_name = boolean_filter_name\n negate = False\n if hasattr(isovar_result, boolean_field_name):\n field_value = getattr(isovar_result, boolean_field_name)\n else:\n raise ValueError(\n \"IsovarResult does not have field name '%s'\" % boolean_field_name)\n if field_value is None:\n field_value = False\n elif field_value not in {True, False}:\n raise ValueError(\"Expected filter '%s' to be boolean but got %s\" % (\n boolean_filter_name,\n field_value))\n filter_values[boolean_filter_name] = (\n not field_value if negate else field_value\n )\n return filter_values",
"def or_filter(self, filters: List[Union[Tuple, BinaryExpression]]) -> B[B, E]:\n pass",
"def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True",
"def true_false_both_filter(request, items, parameter):\n if parameter in request['args']:\n test = request['args'][parameter].lower()\n if test == 'true':\n items = [item for item in items if item[parameter]]\n elif test == 'false':\n items = [item for item in items if not item[parameter]]\n elif test == 'both':\n # Otherwise return both true and false values\n pass\n else:\n raise UserException(ERROR_TRUE_FALSE_BOTH_REQUIRED % parameter)\n\n return items",
"def any_of(*conditions):\n def check():\n for c in conditions:\n if c():\n return True\n return False\n return check",
"def all_of(*conditions):\n def check():\n for c in conditions:\n if not c():\n return False\n return True\n return check",
"def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))",
"def matches_filters(self, filters: List[Dict[str, Any]]) -> bool:\n return any(\n # If any of consumer filters matches the header\n all(\n # Match: all consumer filter fields match the task header\n self.headers.get(bind_key) == bind_value\n for bind_key, bind_value in task_filter.items()\n )\n for task_filter in filters\n )",
"def check_request_type_filter(\n field_names: list,\n request_type_filter: dict,\n has_start_time: bool,\n has_end_time: bool,\n has_start_date: bool,\n has_end_date: bool,\n has_date: bool,\n has_SP: bool,\n has_year: bool,\n has_month: bool,\n has_week: bool\n):\n\n filter_str = f'\\n\\nFilter:\\n{request_type_filter}\\n\\nField Names:\\n{\", \".join(field_names)}'\n\n assert {(False, True): True, (False, False): False, (True, True): False, (True, False): False}[(has_year, has_month)] == False, 'Cannot provide a month without a year' + filter_str\n assert {(False, True): True, (False, False): False, (True, True): False, (True, False): False}[(has_year, has_week)] == False, 'Cannot provide a week without a year' + filter_str\n assert has_start_time + has_end_time != 1, 'Only one of start/end time was provided' + filter_str\n assert has_start_date + has_end_date != 1, 'Only one of start/end date was provided' + filter_str\n assert (has_SP + has_date != 1) or (has_start_date + has_end_date == 2), 'Only one of date/SP was provided' + filter_str\n assert sum(request_type_filter.values()) == 1, 'Request type could not be determined\\n\\nFilter' + filter_str\n\n return",
"def has_filter(self) -> bool:\n return self.filter_client_reference_id or self.filter_mhr_number or self.filter_registration_type or \\\n self.filter_reg_start_date or self.filter_status_type or self.filter_submitting_name or \\\n self.filter_username",
"def _check_filters(self, level):\n if(self.filters == Filters.NoFilter):\n return True\n else:\n return (self.filters & level.filters == 0)",
"def is_valid_model_filters(model, filters):\n for key in filters.keys():\n if not hasattr(model, key):\n return False\n return True",
"def is_valid_model_filters(model, filters):\n for key in filters.keys():\n if not hasattr(model, key):\n return False\n return True",
"def guess_filter_type(filter_func):\r\n if hasattr(filter_func, 'contextfilter') or \\\r\n hasattr(filter_func, 'environmentfilter'):\r\n return JINJA2, False\r\n\r\n args = inspect.getargspec(filter_func)\r\n if len(args[0]) - (len(args[3]) if args[3] else 0) > 2:\r\n return JINJA2, False\r\n\r\n if hasattr(filter_func, 'needs_autoescape'):\r\n return DJANGO, True\r\n\r\n # Looks like your run of the mill Python function, which are\r\n # easily convertible in either direction.\r\n return False, True"
] | [
"0.6741921",
"0.6474875",
"0.6376854",
"0.6179683",
"0.6159572",
"0.59450597",
"0.5884499",
"0.5780944",
"0.5735032",
"0.5734875",
"0.5713773",
"0.5666095",
"0.56029093",
"0.5585088",
"0.55647486",
"0.55589753",
"0.55378103",
"0.55280507",
"0.55214506",
"0.5489386",
"0.54835516",
"0.54759735",
"0.54536533",
"0.5444215",
"0.5430816",
"0.542956",
"0.5427011",
"0.54230297",
"0.54230297",
"0.5419601"
] | 0.67647254 | 0 |
Verify that a filter only matches when there is no value for the given field. | def assert_matches_nonexistent_field(f, field):
assert (
f.to_dict() ==
{'bool': {'must_not': [{'exists': {'field': field}}]}}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True",
"def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: True, ftype='none')\n self.assertFalse(self.es.streamfilter(self.data))",
"def subfields_none(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') == req_val:\n return False\n return True",
"def fields_not_empty(self, request, fields):\r\n for field in fields:\r\n if request.form.get(field) == \"\":\r\n return True",
"def test_apply_filter_none(app):\n with app.app_context():\n users = User.query\n users = apply_filter(users, User, {})\n assert users.whereclause is None",
"def filter_is_not_null(self, queryobject):\n raise NotImplementedError()",
"def empty_filter(item, *args, **kwargs):\n return True",
"def _validate_filter(filter):\n\n if filter.HasField('composite_filter'):\n for sub_filter in filter.composite_filter.filters:\n _validate_filter(sub_filter)\n elif filter.HasField('property_filter'):\n if filter.property_filter.op in UNSUPPORTED_OPERATORS:\n raise ValueError('Query cannot have any inequality filters.')\n else:\n pass",
"def validate_empty_field(self, field, value):\n self.value = value\n self.field = field\n if self.value == \"\":\n message = \"{} field cannot be blank!\".format(self.field)\n raise GraphQLError(message)",
"def test_filter_params_invalid_fields(self):\n filter_params = {\"invalid\": \"param\"}\n serializer = OCIFilterSerializer(data=filter_params)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def test_filter_false(self):\n self.es.register_filter(foo=False)\n self.assertFalse(self.es.streamfilter(self.data))",
"def _check_filters(self, level):\n if(self.filters == Filters.NoFilter):\n return True\n else:\n return (self.filters & level.filters == 0)",
"def testValidate_None(self):\n def action(field_class):\n # Optional.\n field = field_class(1)\n field.validate(None)\n\n # Required.\n field = field_class(1, required=True)\n self.assertRaisesWithRegexpMatch(messages.ValidationError,\n 'Required field is missing',\n field.validate,\n None)\n\n # Repeated.\n field = field_class(1, repeated=True)\n field.validate(None)\n self.assertRaisesWithRegexpMatch(\n messages.ValidationError,\n 'Repeated values for %s may '\n 'not be None' % field_class.__name__,\n field.validate,\n [None])\n self.assertRaises(messages.ValidationError,\n field.validate,\n (None,))\n self.ActionOnAllFieldClasses(action)",
"def not_empty(verifield, required):\n if not required: return True\n return not not verifield and verifield is not None",
"def _check_filter_value(self, cleaned_data, expected):\n self.assertEqual(cleaned_data, expected)",
"def _test_bad_request_empty_field(self, user, fields, empty_field, zendesk_mock_class, datadog_mock):\r\n altered_fields = fields.copy()\r\n altered_fields[empty_field] = \"\"\r\n resp = self._build_and_run_request(user, altered_fields)\r\n self._assert_bad_request(resp, empty_field, zendesk_mock_class, datadog_mock)",
"def test_no():\n errors = generate_errors(10, 5)\n assert NoFiltering().filter(errors) == errors",
"def test_no_filter(self):\r\n\r\n d1 = {\"% IDENTITY\": \"97.6\"}\r\n d2 = {\"% IDENTITY\": \"0.0\"}\r\n d3 = {\"% IDENTITY\": \"100.0\"}\r\n\r\n self.assertTrue(no_filter(d1))\r\n self.assertTrue(no_filter(d2))\r\n self.assertTrue(no_filter(d3))",
"def test_simplelistfilter_with_none_returning_lookups(self):\n modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0]\n self.assertEqual(len(filterspec), 0)",
"def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100",
"def is_arbitrary(self):\n return 'conditions' not in type(self)._fields",
"def filter_is_null(self, queryobject):\n raise NotImplementedError()",
"def test_query_filter_field(self):\n obj = self.provision_single_asset()\n # TODO: Write a positive test for this\n ret = self.get('widget', 200,\n params={'__filter': [\n {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})\n assert len(ret['objects']) == 0",
"def check_filter(self, filter):\n if filter is None:\n return True\n if not _valid_filter(filter):\n raise ValueError(filter)\n elif not self._filter_supported(filter):\n msg = \"{} not indexed for filter: '{}'.\"\n raise RuntimeError(msg.format(type(self).__name__, filter))",
"def has_filter(self) -> bool:\n return self.filter_client_reference_id or self.filter_mhr_number or self.filter_registration_type or \\\n self.filter_reg_start_date or self.filter_status_type or self.filter_submitting_name or \\\n self.filter_username",
"def test_optional_filter_params(self):\n del self.internal_filter['max']\n del self.external_filter['max']\n\n # Serialize\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)\n\n # Deserialize\n serializer = serializers.FilterSerializer(data=self.external_filter)\n self.assertTrue(serializer.is_valid())\n self.assertDictEqual(serializer.validated_data, self.internal_filter)",
"def test_no_errors(self):\n try:\n field_name_validator('good_field_name')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')",
"def test_filter_messages_empty_data(self):\n pass",
"def test_filter_value(self):\n self.es.register_filter(foo=10)\n self.assertFalse(self.es.streamfilter(self.data))",
"def _entry_field_values_are_not_empty(entry: _LexiconEntry) -> None:\n empty_fields = [f for f in _REQUIRED_FIELDS if not entry[f]]\n\n if empty_fields:\n field_str = \", \".join(sorted(empty_fields))\n raise InvalidLexiconEntryError(\n f\"Entry fields have empty values: '{field_str}'\")"
] | [
"0.6997897",
"0.65928715",
"0.65643317",
"0.6421701",
"0.64098537",
"0.6297924",
"0.6291485",
"0.62772375",
"0.6188193",
"0.6145177",
"0.6119406",
"0.6065322",
"0.60579246",
"0.6054443",
"0.60046804",
"0.5999436",
"0.59982795",
"0.5972854",
"0.59515387",
"0.59507996",
"0.59500664",
"0.5925417",
"0.58926505",
"0.58866286",
"0.58452636",
"0.5832678",
"0.58279836",
"0.581946",
"0.581833",
"0.58180904"
] | 0.7020142 | 0 |
A mock of _chain_filters so we don't have to check test results against supercomplicated Elasticsearch filter objects. Instead, we'll get a list of smaller filter objects. | def _mock_chain(self, filters, new_filter):
if filters is None:
# There are no active filters.
filters = []
if isinstance(filters, elasticsearch_dsl_query):
# An initial filter was passed in. Convert it to a list.
filters = [filters]
filters.append(new_filter)
return filters | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_apply_filter(mocker):\n list_of_filter_dict_keys = [\n 'EqualTo',\n 'Contains',\n 'ContainsAll',\n 'ContainsAny',\n 'ContainsIgnoreCase',\n 'DoesNotContain',\n 'GreaterThan',\n 'GreaterThanOrEqualTo',\n 'DoesNotContainIgnoreCase',\n 'In',\n 'LessThan',\n 'LessThanOrEqualTo',\n 'ContainsNone',\n 'ContainsNone',\n 'NotIn',\n 'NotEqualTo',\n 'StartsWith',\n 'StartsWithIgnoreCase',\n ]\n mocked_query_builder = mock.Mock()\n\n for f in list_of_filter_dict_keys:\n apply_filter(mocked_query_builder, {'foo': {'operator': f, 'value': 'bar'}})\n assert mocked_query_builder.Where.call_count == 18\n assert mocked_query_builder.Where().EqualTo.call_count == 1\n assert mocked_query_builder.Where().LessThan.call_count == 1\n assert mocked_query_builder.Where().StartsWithIgnoreCase.call_count == 1",
"def filter(self, filters):",
"def test_default_filter(self):\n request = RequestFactory().get('/?foo=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.data.getlist('status'), ['active', 'paused'])\n self.assertEquals(filter.data.getlist('tags'), ['foo'])\n self.assertEquals(filter.data.getlist('foo'), ['bar'])",
"def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)",
"def get_filters(self):",
"def assert_filter_builds_to(self, expect, filter, _chain_filters=None):\n final_query = {'bool': {'must_not': [RESEARCH.to_dict()]}}\n\n if expect:\n final_query['bool']['must'] = expect\n main, nested = filter.build(_chain_filters)\n assert final_query == main.to_dict()\n\n return main, nested",
"def test_tag_filter(self):\n request = RequestFactory().get('/?search=foobar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['name__icontains'], 'foobar')\n self.assertEquals(filter.qs.filters['status__startswith'], 'foobar')",
"def process_filters(self, filters, queryset, view):\n return filters",
"def filter(self, **kwargs):\n new_filters = self.nested_filter_calls + [kwargs]\n return MockSearch(\n self, self._query, new_filters, self.order,\n self._script_fields\n )",
"def test_no_op(self):\n request = RequestFactory().get('/?search=&tags=&status=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters, {})",
"def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)",
"def test_tag_filter(self):\n request = RequestFactory().get('/?tags=foo&tags=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['tags__slug__in'], ['foo', 'bar'])",
"def testUsingFilterTool(self):\n pass",
"def test_filter(self):\n credentials = Mock(base_url=\"\")\n manager = Manager('contacts', credentials)\n\n uri, params, method, body, headers, singleobject = manager._filter(\n order=\"LastName\",\n page=2,\n offset=5,\n since=datetime.datetime(2014, 8, 10, 15, 14, 46),\n Name=\"John\")\n\n self.assertEqual(method, 'get')\n self.assertFalse(singleobject)\n\n expected_params = {\n \"order\": \"LastName\",\n \"page\": 2,\n \"offset\": 5,\n \"where\": 'Name==\"John\"'\n }\n self.assertEqual(params, expected_params)\n\n expected_headers = {\n \"If-Modified-Since\": \"Sun, 10 Aug 2014 15:14:46 GMT\"\n }\n self.assertEqual(headers, expected_headers)\n\n # Also make sure an empty call runs ok\n uri, params, method, body, headers, singleobject = manager._filter()\n self.assertEqual(params, {})\n self.assertIsNone(headers)\n\n manager = Manager('invoices', credentials)\n uri, params, method, body, headers, singleobject = manager._filter(\n **{'Contact.ContactID': '3e776c4b-ea9e-4bb1-96be-6b0c7a71a37f'})\n\n self.assertEqual(\n params,\n {'where': 'Contact.ContactID==Guid(\"3e776c4b-ea9e-4bb1-96be-6b0c7a71a37f\")'}\n )",
"def test_list_filtering(self):\n # Test the \"all\" response.\n url = '/api/users/?all=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertContains(response, self.shared.email)\n # Test filtering by ad_deleted.\n url = '/api/users/?ad_deleted=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n url = '/api/users/?ad_deleted=false'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, self.del_user.email)\n self.assertContains(response, self.user1.email)\n # Test filtering by email (should return only one object).\n url = '/api/users/?email={}'.format(self.user1.email)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by GUID (should return only one object).\n url = '/api/users/?ad_guid={}'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by cost centre (should return all, inc. inactive and contractors).\n url = '/api/users/?cost_centre={}'.format(self.cc2.code)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user2.email)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n self.assertNotContains(response, self.shared.email) # Belongs to CC1.\n # Test filtering by O365 licence status.\n self.user1.o365_licence = True\n self.user1.save()\n url = '/api/users/?o365_licence=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)",
"def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100",
"def filters(self):\n\t\treturn self.local_filter",
"def recursive_filter(filters, tiddlers):\n if len(filters) == 0:\n return tiddlers\n filter = filters.pop(0)\n try:\n return recursive_filter(filters, filter(tiddlers))\n except AttributeError, exc:\n raise FilterError('malformed filter: %s' % exc)",
"def _split_filters(self, filters):\n # specifying ancestor_location returns an ANDFilter and does not have a column name\n # assume that it should go into inner filters\n complex_filters = [f for f in filters if not hasattr(f, 'column_name')]\n simple_filters = [f for f in filters if hasattr(f, 'column_name')]\n inner_filters = [f for f in simple_filters if f.column_name not in self.AGGREGATE_FILTERS]\n outer_filters = [f for f in simple_filters if f.column_name in self.AGGREGATE_FILTERS]\n return {\n 'inner': inner_filters + complex_filters,\n 'outer': outer_filters,\n }",
"def filters(self):\n return self.__filters",
"def filters(self):\n return self._filters",
"def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))",
"def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order",
"def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters",
"def test_filters_with_extra_extraction(self) -> None:\n\n # pylint: disable=too-many-locals\n\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n def add_named_library(in_dict: Dict[str, Any]) -> Dict[str, Any]:\n out_dict = deepdict(in_dict)\n out_dict[\"libraries\"].append({\n \"name\": \"abcdef\",\n \"milkyway techfile\": \"test/abcdef.tf\"\n })\n return out_dict\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, add_named_library)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n def filter_func(lib: hammer_tech.Library) -> bool:\n return lib.milkyway_techfile is not None\n\n def paths_func(lib: hammer_tech.Library) -> List[str]:\n assert lib.milkyway_techfile is not None\n return [lib.milkyway_techfile]\n\n def extraction_func(lib: hammer_tech.Library, paths: List[str]) -> List[str]:\n assert len(paths) == 1\n if lib.name is None:\n name = \"\"\n else:\n name = str(lib.name)\n return [json.dumps({\"path\": paths[0], \"name\": name}, cls=HammerJSONEncoder, indent=4)]\n\n def sort_func(lib: hammer_tech.Library):\n assert lib.milkyway_techfile is not None\n return lib.milkyway_techfile\n\n test_filter = LibraryFilter.new(\"metatest\", \"Test filter that extracts metadata\",\n is_file=True, filter_func=filter_func,\n paths_func=paths_func,\n extraction_func=extraction_func,\n sort_func=sort_func)\n\n database = hammer_config.HammerDatabase()\n tech.set_database(database)\n raw = tech.process_library_filter(pre_filts=[], filt=test_filter,\n must_exist=False,\n output_func=hammer_tech.HammerTechnologyUtils.to_plain_item)\n\n # Disable false positive from pylint\n outputs = list(map(lambda s: json.loads(s), raw)) # pylint: disable=unnecessary-lambda\n self.assertEqual(outputs,\n [\n {\"path\": tech.prepend_dir_path(\"test/abcdef.tf\"), \"name\": \"abcdef\"},\n {\"path\": tech.prepend_dir_path(\"test/coconut\"), \"name\": \"\"},\n {\"path\": tech.prepend_dir_path(\"test/soy\"), \"name\": \"\"}\n ])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)",
"def make_mock_filter_method(cls, counts=None):\n if counts is None:\n counts = {}\n\n def filter_method(*args, **kwargs):\n count_val = 0\n\n try:\n q_obj = args[0]\n if isinstance(q_obj, Q):\n filter_kwargs = {k:v for k, v in q_obj.children}\n except IndexError:\n filter_kwargs = kwargs\n\n for kwarg, val in filter_kwargs.items():\n try:\n count_val = counts[kwarg][val]\n break\n except KeyError:\n pass\n \n mqs = mock.MagicMock(spec=QuerySet)\n mqs.count.return_value = count_val\n return mqs\n\n return filter_method",
"def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))",
"def _test_filter(self, none_type, all_type, any_type, result):\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(lambda x: none_type, ftype='none')\n self.es.register_filter(lambda x: all_type, ftype='all')\n if any_type is not None:\n self.es.register_filter(lambda x: any_type, ftype='any')\n self.assertEqual(self.es.streamfilter(self.data), result,\n 'Test EventStreams filter mixed function failed for\\n'\n \"'none': {}, 'all': {}, 'any': {}\\n\"\n '(expected {}, given {})'\n .format(none_type, all_type, any_type,\n result, not result))",
"def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude",
"def test_filter(self):\n\n # Set a global filter for all items\n self.site.filter(r\"(.*)\", lambda item: item)\n # Set another filter on the index item\n self.site.filter(r\"index.html\", lambda item: item)\n\n self.assertEqual(2, len(self.site.items[\"index.html\"].filters))\n self.assertEqual(1, len(self.site.items[\"test/test.html\"].filters))"
] | [
"0.65804183",
"0.6574269",
"0.6492736",
"0.63977313",
"0.6358299",
"0.631415",
"0.6254892",
"0.6224293",
"0.620702",
"0.6090167",
"0.6076573",
"0.6039105",
"0.59843934",
"0.5979323",
"0.59743553",
"0.5895422",
"0.58411616",
"0.5840082",
"0.57979",
"0.5793634",
"0.5764596",
"0.5761781",
"0.5759962",
"0.5755838",
"0.5754985",
"0.57292074",
"0.57031953",
"0.5672721",
"0.5665088",
"0.5662941"
] | 0.8190246 | 0 |
Clears the model directory and only maintains the latest `checkpoints` number of checkpoints. | def clear_model_dir(self, checkpoints, logger):
files = os.listdir(self.model_dir)
last_modification = [(os.path.getmtime(os.path.join(self.model_dir, f)), f) for f in files]
# Sort the list by last modified.
last_modification.sort(key=itemgetter(0))
# Delete everything but the last 10 files.
ckpnt_no = 0
for time, f in last_modification[:-checkpoints]:
ckpnt_no += 1
os.remove(os.path.join(self.model_dir, f))
msg = "Deleted %d checkpoints" % (ckpnt_no)
logger.debug(msg)
print(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_checkpoints(self):\n if tf.gfile.Exists(str(self.info.checkpoint_path)):\n tf.gfile.DeleteRecursively(str(self.info.checkpoint_path))",
"def clear_model_checkpoints(self):\n if self.file_prefix is None:\n return\n\n with os.scandir() as path_list:\n for entry in path_list:\n if entry.is_file() and entry.name.startswith(self.file_prefix) and entry.name.endswith(\".h5\"):\n print(\"{}: Removing {}\".format(self.MODEL_NAME, entry.path))\n os.remove(entry.path)",
"def clear_checkpoint(checkpoint_dir):\n filelist = [f for f in os.listdir(checkpoint_dir) if f.endswith(\".pth.tar\")]\n for f in filelist:\n os.remove(os.path.join(checkpoint_dir, f))\n\n print(\"Checkpoint successfully removed\")",
"def clean_up(model_path):\n cmds = [\"rm */grad*.pickle\",\n \"rm -r checkpoints\",\n \"rm */train_len\",\n \"rm log_human_read.csv\",\n \"rm */log_human_read.csv\",\n \"rm -r best_model\",\n \"rm */*epoch*\"]\n\n for cmd in cmds:\n os.system(\"cd {} && {}\".format(model_path, cmd))",
"def clear_checkpoints(save_path):\n dir_name = os.path.dirname(save_path)\n for file_name in os.listdir(dir_name):\n if re.search(constants.CHECKPOINT_MARK, file_name):\n os.remove(os.path.join(dir_name, file_name))",
"def _delete_old_checkpoints(checkpoint_path: str):\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)\n if not latest_checkpoint:\n return\n\n checkpoint_path = pathlib.Path(checkpoint_path)\n for p in checkpoint_path.iterdir():\n if p.match(str(checkpoint_path / 'checkpoint')):\n continue\n elif p.match(latest_checkpoint + '*'):\n continue\n else:\n p.unlink()",
"def clear_save_name():\n clear_dir(MODEL_SAVE_DIR)\n clear_dir(SUMMARY_SAVE_DIR)\n clear_dir(IMG_SAVE_DIR)",
"def _post_training_cleanup(self):\n tf.reset_default_graph()\n self.sess.close()\n os.chdir(\"../../\")",
"def checkpoint_unset():\n unwind(checkpoints.pop())",
"def ClearModels(self):\n self._modelFileNames = []\n self._models = []\n self.Modified(readAgain=True)",
"def clear_brain():\n\n if os.path.exists(os.path.abspath(\"papaya_data\")):\n shutil.rmtree(os.path.abspath(\"papaya_data\"))",
"def clean_up(self, early_stopping, current_epoch):\n\n early_stopping: EarlyStopping = early_stopping\n\n if early_stopping.enable_stopping:\n lower_limit = early_stopping.best_loss_index - 1\n else:\n lower_limit = current_epoch - self.config.model_files_stored - 1\n\n for file in listdir(self.training_model_path):\n\n try:\n epoch_of_file = int(file.split('.')[0].split('-')[-1])\n if epoch_of_file <= lower_limit:\n os.remove(self.training_model_path + file)\n except ValueError:\n pass\n except Exception as e:\n print(e)",
"def save(model_dir,\n model,\n model_name,\n global_step,\n max_to_keep=8,\n keep_latest=True):\n\n # prevent save incomplete checkpoint due to key interrupt\n with DelayedKeyboardInterrupt():\n ckpt_info_path = Path(model_dir) / \"checkpoints.json\"\n ckpt_filename = \"{}-{}.tckpt\".format(model_name, global_step)\n ckpt_path = Path(model_dir) / ckpt_filename\n if not ckpt_info_path.is_file():\n ckpt_info_dict = {'latest_ckpt': {}, 'all_ckpts': {}}\n else:\n with open(ckpt_info_path, 'r') as f:\n ckpt_info_dict = json.loads(f.read())\n ckpt_info_dict['latest_ckpt'][model_name] = ckpt_filename\n if model_name in ckpt_info_dict['all_ckpts']:\n ckpt_info_dict['all_ckpts'][model_name].append(ckpt_filename)\n else:\n ckpt_info_dict['all_ckpts'][model_name] = [ckpt_filename]\n all_ckpts = ckpt_info_dict['all_ckpts'][model_name]\n\n torch.save(model.state_dict(), ckpt_path)\n # check ckpt in all_ckpts is exist, if not, delete it from all_ckpts\n all_ckpts_checked = []\n for ckpt in all_ckpts:\n ckpt_path_uncheck = Path(model_dir) / ckpt\n if ckpt_path_uncheck.is_file():\n all_ckpts_checked.append(str(ckpt_path_uncheck))\n all_ckpts = all_ckpts_checked\n if len(all_ckpts) > max_to_keep:\n if keep_latest:\n ckpt_to_delete = all_ckpts.pop(0)\n else:\n # delete smallest step\n get_step = lambda name: int(name.split('.')[0].split('-')[1])\n min_step = min([get_step(name) for name in all_ckpts])\n ckpt_to_delete = \"{}-{}.tckpt\".format(model_name, min_step)\n all_ckpts.remove(ckpt_to_delete)\n #os.remove(str(Path(model_dir) / ckpt_to_delete))\n try:\n os.remove(ckpt_to_delete)\n except FileNotFoundError:\n print(ckpt_to_delete)\n\n all_ckpts_filename = _ordered_unique([Path(f).name for f in all_ckpts])\n ckpt_info_dict['all_ckpts'][model_name] = all_ckpts_filename\n with open(ckpt_info_path, 'w') as f:\n f.write(json.dumps(ckpt_info_dict, indent=2))",
"def __purge_old_files(self):\n\n chkpts = self.checkpointer.sorted_checkpoints()\n p_chkpts = []\n e_chkpts = []\n for c in chkpts:\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.PERIODIC_PREFIX):\n p_chkpts.append(c)\n\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.EPOCH_PREFIX):\n e_chkpts.append(c)\n\n # Delete periodic checkpoints\n if self.max_files is not None and len(p_chkpts) > self.max_files:\n for c in p_chkpts[self.max_files:]:\n log.debug(\"CheckpointingCallback deleting {}\".format(c))\n self.checkpointer.delete(c)\n\n # Delete older epochs\n if self.max_epochs is not None and len(e_chkpts) > self.max_epochs:\n for c in e_chkpts[self.max_epochs:]:\n log.debug(\"CheckpointingCallback deleting (epoch) {}\".format(c))\n self.checkpointer.delete(c)",
"def _purge_stale_checkpoints(self):\n if len(self._checkpoint_files) > self.max_checkpoints:\n purge_files = self._checkpoint_files[: -self.max_checkpoints]\n self._checkpoint_files = self._checkpoint_files[-self.max_checkpoints:]\n for chk in purge_files:\n silent_try(chk.purge_values)",
"def clear():\n\t\tModel.counter = 0",
"def reset(self):\n # Clear mutable data, but leave the immutables intact\n self.train_data = {}\n self.val_data = {}\n self.test_data = {}\n self.model_files = []\n self.custom_data = {}\n # Remove all the physical assets\n for item in os.scandir(self.root_path):\n os.remove(item.path)\n # Reserialize\n self.serialize()",
"def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None",
"def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None",
"def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, self.test_df = None, None, None",
"def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None",
"def shutdown(self):\n del self.model\n del self.train_dataset\n del self.test_dataset",
"def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()",
"def reset_train(self):\n\n self.model.apply(self._reset_weights)\n self.epoch_loss.reset()\n self.epoch = 0\n del self.batch_process\n self.batch_process = None",
"def deleteCheckpoint(self):\n if len(self.__stack) == 0:\n raise EmptyStackException()\n self.__stack = self.__stack[:-1]",
"def reset(self):\n checkpoint = torch.load(\n 'model_lr_finder.pth.tar',\n map_location=self.device)\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.to(self.device)\n self.model.train()",
"def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()",
"def remove_old_ckpts_dir(model_dir, reverse=False):\n ckpts = os.listdir(join(model_dir, 'ckpt'))\n score_list = [float(ckpt.split('-')[-1]) for ckpt in ckpts]\n ckpts_score_sorted = sorted(zip(score_list, ckpts), key=lambda p: p[0], reverse=reverse)\n _, ckpts_sorted = zip(*ckpts_score_sorted)\n for ckpt in ckpts_sorted[3:]:\n shutil.rmtree(join(model_dir, 'ckpt', ckpt))\n #os.remove(join(model_dir, 'ckpt', ckpt))\n logging.info(\"Best model: {}\".format(join(model_dir, 'ckpt', ckpts_sorted[0])))\n #print(\"Best model: {}\".format(join(model_dir, 'ckpt', ckpts_sorted[0])))",
"def reset(self):\n # from pathlib import Path\n # import pickle as pkl\n # path_traj = Path.home() / 'TmrlData' / 'reward' / 'traj.pkl'\n # with open(path_traj, 'wb') as file_traj:\n # pkl.dump(self.traj, file_traj)\n\n self.cur_idx = 0\n self.step_counter = 0\n self.failure_counter = 0\n\n # self.traj = []",
"def cleanUp(self):\r\n # Close any open models\r\n openModels = getAllModels()\r\n if len(openModels):\r\n for model in openModels:\r\n setCurrentModel(model)\r\n performAction(\"FileClose\")\r\n # Wait \r\n time.sleep(1)"
] | [
"0.7887548",
"0.78405684",
"0.70502526",
"0.7037212",
"0.69053566",
"0.6885986",
"0.64904153",
"0.6437186",
"0.64033055",
"0.6368597",
"0.6348156",
"0.63442576",
"0.6341169",
"0.6286433",
"0.6260607",
"0.62325686",
"0.6117156",
"0.6105783",
"0.60879576",
"0.6058965",
"0.6050128",
"0.60275936",
"0.6019192",
"0.6002371",
"0.59942746",
"0.5993123",
"0.59901416",
"0.5969217",
"0.59414524",
"0.59260964"
] | 0.8535333 | 0 |
Rebuilds the surfaces based on the original positions and alpha value. This can be used to reset the states of buttons after returning to a Menu a second time. | def reset(self):
self.x = self.x_original
self.alpha = self.alpha_original
# Button "background" - active
self.active_background_surface.set_alpha(self.alpha)
# Button "background" - inactive
self.inactive_background_surface.set_alpha(self.alpha)
# active
self.active_text_surface = self.active_font.render(self.text, True, self.color_text)
self.active_textRect = self.active_text_surface.get_rect()
# inactive
self.inactive_text_surface = self.inactive_font.render(self.text, True, self.color_text)
self.inactive_textRect = self.inactive_text_surface.get_rect()
if self.text_alignment == 'CENTER':
self.active_textRect.center = ((self.x + (self.rect.w / 2)), (self.y + (self.rect.h / 2)))
self.inactive_textRect.center = ((self.x + (self.rect.w / 2)), (self.y + (self.rect.h / 2)))
elif self.text_alignment == 'RIGHT':
self.active_textRect.centery = self.y + (self.rect.h / 2)
self.active_textRect.right = self.x + self.w - 15 # padding of 15
self.inactive_textRect.centery = self.y + (self.rect.h / 2)
self.inactive_textRect.right = self.x + self.w - 15 # padding of 15
else: # LEFT (or invalid)
self.active_textRect.centery = self.y + (self.rect.h / 2)
self.active_textRect.left = self.x + 15 # padding of 15
self.inactive_textRect.centery = self.y + (self.rect.h / 2)
self.inactive_textRect.left = self.x + 15 # padding of 15 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)",
"def resetOpacity(self):\n opa = (0,)\n for i in range(1,256):\n opa += (i,)\n if self._displayPjt:\n self._displayPjt.setOpacityPalette(opa)\n if self._displayUsr:\n self._displayUsr.setOpacityPalette(opa)\n if self._displayVtk:\n self._displayVtk.setOpacityPalette(opa)",
"def reset(self):\r\n self._p = self._p_init\r\n self._r = self._r_init\r\n self._v = self._v_init\r\n self._w = self._w_init\r\n self._a = self._a_init\r\n self._alpha = self._alpha_init",
"def __init__(self, surface, position):\n\n self.state = 'main' # stores the state of the menu, this is used to choose which menu to render\n self.surface = surface # surface to draw all menu object on to\n self.rect = self.surface.get_rect() # dimensions of the surface\n self.resolution = surface.get_size() # tuple of width and height of surface\n self.rect.center = position # place the surface onto the specified location\n self.center = (self.resolution[0]/2, self.resolution[1]/2) # center for object placement reference\n self.bg_colour = (0, 0, 0) # background colour for the menu surface\n # fonts for use in menus\n self.fonts = {\n # font name, size(pt), bold, italic\n 'regular': pygame.font.SysFont('Courier New', 15, False, False),\n 'heading': pygame.font.SysFont('Courier New', 18, True, False)\n }\n self.text_colour = (240, 240, 240) # colour for all text in menus\n\n # Main menu text |\n self.tx_main_heading = create_text(\n 'GAME!!', self.fonts['heading'], self.text_colour,\n (self.resolution[0]/2, 30)\n )\n self.tx_play = create_text(\n '[ENTER] - PLAY', self.fonts['regular'], self.text_colour,\n self.center)\n self.tx_instructions = create_text(\n '[I] - INSTRUCTIONS', self.fonts['regular'], self.text_colour,\n (self.center[0], self.center[1]+20)\n )\n self.main_menu_text = [\n self.tx_main_heading,\n self.tx_play,\n self.tx_instructions\n ]\n\n # instructions text |\n self.tx_instruct_heading = create_text(\n 'INSTRUCTIONS', self.fonts['heading'], self.text_colour,\n (self.resolution[0]/2, 30)\n )\n self.tx_instruct_exit = create_text(\n '[ESC] - BACK', self.fonts['regular'], self.text_colour,\n (self.center[0], self.resolution[1]-30)\n )\n self.instruction_menu_text = [\n self.tx_instruct_heading,\n self.tx_instruct_exit\n ]\n\n # pause menu text |\n self.tx_paused_heading = create_text(\n 'PAUSED', self.fonts['heading'], self.text_colour,\n (self.center[0], 30)\n )\n self.tx_paused_resume = create_text(\n '[ESC] - RESUME', self.fonts['regular'], self.text_colour,\n (self.center[0], 60)\n )\n self.tx_paused_exit = create_text(\n '[M] - EXIT TO MENU', self.fonts['regular'], self.text_colour,\n (self.center[0], self.resolution[1]-30)\n )\n self.paused_menu_text = [\n self.tx_paused_heading,\n self.tx_paused_resume,\n self.tx_paused_exit\n ]",
"def init_settings_menu(self):\n self.settings_menu_surface = pygame.Surface(self.screen_dimensions)\n self.main_menu_surface = None\n self.pause_menu_surface = None\n self.buttons = []\n self.settings_buttons = []\n style_button = TextButton((0, 450), get_style_name(self.game_data.get_style()), menu_fonts, 30, white, red,\n self.switch_style)\n style_button.center_horizontally(self.screen_dimensions)\n self.settings_buttons.append(style_button)\n self.buttons.append(style_button)\n x_center = center_horizontally(pygame.Surface((0, 0)), self.screen_dimensions)\n music_button = ImageButton((x_center - 50, 510), self.get_music_button_img(), self.get_music_button_img_h(),\n self.toggle_music)\n self.settings_buttons.append(music_button)\n self.buttons.append(music_button)\n sound_button = ImageButton((x_center + 20, 510), self.get_sound_button_img(), self.get_sound_button_img_h(),\n self.toggle_sound)\n self.settings_buttons.append(sound_button)\n self.buttons.append(sound_button)\n main_menu_button = TextButton((0, 550), \"Back to main menu\", menu_fonts, 30, white, red,\n lambda: pygame.event.post(pygame.event.Event(events.BACK_TO_MAIN_MENU, {})))\n main_menu_button.center_horizontally(self.screen_dimensions)\n self.buttons.append(main_menu_button)",
"def render_fading(self):\n self.alpha = self.alpha - self.fading_steps\n self.inactive_background_surface.set_alpha(self.alpha)\n if self.sliding_disappearance:\n self.x -= self.sliding_steps\n self.active_textRect.x -= self.sliding_steps\n\n # Rendering button \"background\"\n self.screen.blit(self.inactive_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates\n if self.alpha > self.alpha_border: # Render button text until its alpha value is reduced by x\n self.screen.blit(self.active_text_surface, self.active_textRect)",
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [self.tree.get_val(i)**-old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def init_main_menu_surface(self):\n if self.level == 0:\n self.level = self.max_level = self.game_data.get_max_level()\n self.main_menu_surface = pygame.Surface(self.screen_dimensions)\n self.pause_menu_surface = None\n self.settings_menu_surface = None\n self.main_menu_surface.fill(black)\n self.buttons = []\n self.level_buttons = []\n level_button = TextButton((0, 450), \"Level \" + str(self.level), menu_fonts, 30, white, white, lambda: None)\n level_button.center_horizontally(self.screen_dimensions)\n self.level_buttons.append(level_button)\n self.buttons.append(level_button)\n level_down_button = ImageButton(left_of(img_arrow_left, level_button, 20),\n img_arrow_left, img_arrow_left_hover, self.level_down, False)\n self.level_buttons.append(level_down_button)\n self.buttons.append(level_down_button)\n level_up_button = ImageButton(right_of(img_arrow_right, level_button, 20),\n img_arrow_right, img_arrow_right_hover, self.level_up, False)\n self.level_buttons.append(level_up_button)\n self.update_level_buttons()\n self.buttons.append(level_up_button)\n # the line of code below creates a MenuButton that contains white text that gets red when hovered,\n # and sends an event to start game mode 0 for the control unit if clicked.\n start_button = TextButton((0, 500), \"Start game\", menu_fonts, 30, white, red,\n self.start_game_mode_0)\n start_button.center_horizontally(self.screen_dimensions)\n self.buttons.append(start_button)\n settings_button = TextButton((0, 550), \"Settings\", menu_fonts, 30, white, red,\n lambda: pygame.event.post(pygame.event.Event(events.OPEN_SETTINGS, {})))\n settings_button.center_horizontally(self.screen_dimensions)\n self.buttons.append(settings_button)\n how_to_button = TextButton((0, 600), \"How to play\", menu_fonts, 30, white, red,\n lambda: pygame.event.post(pygame.event.Event(events.OPEN_HOW_TO, {})))\n how_to_button.center_horizontally(self.screen_dimensions)\n self.buttons.append(how_to_button)\n quit_button = self.create_quit_button(650)\n self.buttons.append(quit_button)",
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [self.tree.get_val(i) ** -old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [self.tree.get_val(i) ** -old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def reset_alpha(self, alpha):\n self.alpha, old_alpha = alpha, self.alpha\n priorities = [(self.tree.get_val(i) + self.__e) ** -\n old_alpha for i in range(self.tree.filled_size())]\n self.priority_update(range(self.tree.filled_size()), priorities)",
"def create_reference_array(self):\r\n self.active = True\r\n self.pxarray = pygame.surfarray.pixels3d(self.surface)\r\n pygame.display.flip()\r\n return",
"def setup_menu(self, surface, background_colour=BLACK):\n # flood fill the surface with the background colour\n surface.fill(background_colour)\n\n # set up the fixed items on the menu\n # Add buttons and labels\n menu_config = [\n (\"Speed\", 6, 6, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Maze\", 122, 6, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Rainbow\", 6, 70, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Golf\", 122, 70, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Pi Noon\", 6, 134, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Obstacle\", 122, 134, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Shooting\", 6, 198, BLUE, WHITE), #, 62, 100, WHITE),\n (\"RC\", 122, 198, BLUE, WHITE), #, 62, 100, WHITE),\n (\"Exit\", 6, 262, BLUE, WHITE), #, 40, 210, WHITE),\n (\"Calibrate\", 122, 262, BLUE, WHITE),\n ]\n\n # perform list comprehension on menu_config, wherein we call\n # make_button with the index, and individual item arguments\n # note *item performs unpacking of the tuple and provides them\n # as individual arguments to make_button\n return [\n self.make_button(index, *item)\n for index, item\n in enumerate(menu_config)\n ]",
"def mainmenu_background():\n surface.fill((40, 0, 40))",
"def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)",
"def setOpenGLState(self):\n # Enable transparency.\n pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA,\n pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)\n pyglet.gl.glEnable(pyglet.gl.GL_BLEND)",
"def init_pause_menu(self):\n self.pause_menu_surface = pygame.Surface(self.screen_dimensions)\n self.pause_menu_surface.set_alpha(240)\n self.pause_menu_surface.fill(black)\n self.main_menu_surface = None\n self.settings_menu_surface = None\n self.buttons = []\n continue_button = TextButton((0, 450), \"Continue\", menu_fonts, 30, white, red,\n lambda: pygame.event.post(pygame.event.Event(events.EXIT_PAUSE, {})))\n continue_button.center_horizontally(self.screen_dimensions)\n self.buttons.append(continue_button)\n main_menu_button = TextButton((0, 500), \"Back to main menu\", menu_fonts, 30, white, red,\n lambda: pygame.event.post(pygame.event.Event(events.BACK_TO_MAIN_MENU, {})))\n main_menu_button.center_horizontally(self.screen_dimensions)\n self.buttons.append(main_menu_button)\n quit_button = self.create_quit_button(550)\n self.buttons.append(quit_button)",
"def draw_rand_toggle():\n # Random Mode\n rand_mode = pygame.Rect(700, 85, 15, 15) # creates a rectangle object\n rand_txt = BTN_FONT.render(RND_TOG, True, BLACK) # render font\n\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, rand_mode) #draw random mode check box\n RENDER_WINDOW.blit(rand_txt, (rand_mode.x, rand_mode.y - 4)) # render text centered on button\n\n # Random Label\n rand_label = pygame.Rect(600, 85, 100, 20) # creates a rectangle object\n rlab_txt = DIR_FONT.render(\"Random Mode\", True, TEXTCOLOR) # render font\n\n pygame.draw.rect(RENDER_WINDOW, BACKGROUNDCOLOR, rand_label) #draw label\n RENDER_WINDOW.blit(rlab_txt, (rand_label.x, rand_label.y)) # render text centered on button\n\n # Normal Mode\n norm_mode = pygame.Rect(700, 125, 15, 15) # creates a rectangle object\n norm_txt = BTN_FONT.render(N_MODE, True, BLACK) # render font\n\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, norm_mode) #draw normal mode check box\n RENDER_WINDOW.blit(norm_txt, (norm_mode.x, norm_mode.y - 4)) # render text centered on button\n\n # Normal Label\n norm_label = pygame.Rect(600, 125, 100, 20) # creates a rectangle object\n nlab_txt = DIR_FONT.render(\"Normal Mode\", True, TEXTCOLOR) # render font\n\n pygame.draw.rect(RENDER_WINDOW, BACKGROUNDCOLOR, norm_label) #draw K value directions\n RENDER_WINDOW.blit(nlab_txt, (norm_label.x, norm_label.y)) # render text centered on button\n\n global R_BTN, N_BTN\n R_BTN = rand_mode\n N_BTN = norm_mode",
"def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r",
"def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True",
"def draw_alpha(self):\n if self.alpha == 255:\n self.current_sprite_alpha = self.current_sprite\n else:\n mask = pygame.Surface(self.current_sprite.get_size(), flags=pygame.SRCALPHA)\n mask.fill((255, 255, 255, self.alpha))\n self.current_sprite_alpha = self.current_sprite.copy()\n self.current_sprite_alpha.blit(mask, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)",
"def invalidate(self):\n self.original_image.fill(BLUE) # transparent\n\n # check crashed\n\n\n #-------------------------------------------------------------------------------------------\n # blit the wheels\n # iterate over wheels\n for i, wheels in enumerate(self.wheels):\n for wheel, cx, cy in wheels:\n\n cx, cy = self.cc2sc(cx, cy)\n\n if (i == 0): # front\n wheel = pygame.transform.rotate(wheel, self.steering)\n\n elif (i == 1): # back\n pass\n\n rect = wheel.get_rect()\n rect.center = (cx, cy)\n x = cx - rect.w * 0.5\n y = cy - rect.h * 0.5\n\n self.original_image.blit(wheel, (x, y))\n\n #-------------------------------------------------------------------------------------------\n # draw body\n if (self.crashed):\n self.color = RED\n pygame.draw.rect(self.original_image, self.color, self.car_rect, border_radius=5)\n\n # draw front window\n fw_points = [self.cc2sc(x, y) for x, y in self.front_window]\n bw_points = [self.cc2sc(x, y) for x, y in self.back_window]\n pygame.draw.polygon(self.original_image, (100, 100, 100), fw_points)\n pygame.draw.polygon(self.original_image, (100, 100, 100), bw_points)\n\n # draw lights\n pygame.draw.line(self.original_image, self.taillights_color,\n self.cc2sc(*self.taillights[0]), self.cc2sc(*self.taillights[1]))\n\n for pos in self.headlights:\n pygame.draw.line(self.original_image, self.headlights_color,\n self.cc2sc(*pos[0]), self.cc2sc(*pos[1]), width=4)\n\n # rotate the surface\n old_center = self.rect.center\n self.image = pygame.transform.rotate(self.original_image, 180. / pi * self.phi)\n self.image.set_colorkey(BLUE)\n self.rect = self.image.get_rect()\n self.rect.center = old_center\n\n # transform coordinates to pygame coords\n self.rect.center = cart2pg(array([self.x, self.y]) * 1000 * self.scale, self.world_h)\n\n #-------------------------------------------------------------------------------------------",
"def prepare_to_visualize(self):\n self.system.hold_structure_changes()\n for surface in self.inactive_surfaces:\n surface.activate_constraint()\n self.system.resume_structure_changes()",
"def initialize_shade(self,shade_name,shade_color,alpha):\n\n self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\n self.shades[shade_name][1].fill(shade_color)\n self.shades[shade_name][1].set_alpha(alpha)",
"def update(self):\r\n if self.opportunity or 'key' in inventory:\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/greenPortal.png\").convert_alpha(),\r\n (50, 75))\r\n self.image.set_colorkey((255, 255, 255))\r\n elif not self.opportunity:\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/redPortal.png\").convert_alpha(),\r\n (50, 75))\r\n self.image.set_colorkey((255, 255, 255))",
"def reset(self):\n self.posXY = (0,0)\n self.magXY = (1.0,1.0)\n self.rot = 0.0\n self.trans = 255\n self.isDone = False\n self.isFirst = True\n\n self.kill()\n self.Group = pyglet.graphics.OrderedGroup(self.order)\n self.isReady = self.check()",
"def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)",
"def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)",
"def transition_in(self, *args):\r\n self.transition_surface.set_alpha(self.alpha)\r\n self.alpha -= c.TRANSITION_SPEED\r\n if self.alpha <= 0:\r\n self.alpha = 0\r\n self.state = c.NORMAL",
"def __init__(self, rect: pygame.Rect, function: Callable, overlay: Union[pygame.Surface, str] = None, button_default_color: Tuple[int, int, int] = (100, 100, 100), button_highlighted_color: Tuple[int, int, int] = (115, 115, 115), button_pressed_color: Tuple[int, int, int] = (90, 90, 90), button_disabled_color: Tuple[int, int, int] = (75, 75, 75), lerp_duration: int = 6, enabled: bool = True):\n self.rect = rect\n self.function = function\n self.overlay = overlay if not isinstance(overlay, str) else ButtonFont.render(overlay, True, KDS.Colors.White)\n self.button_default_color = button_default_color\n self.button_highlighted_color = button_highlighted_color\n self.button_pressed_color = button_pressed_color\n self.button_disabled_color = button_disabled_color\n self.button_old_color = button_default_color if enabled else button_disabled_color\n self.button_color_fade = KDS.Animator.Value(0.0, 1.0, lerp_duration, KDS.Animator.AnimationType.Linear, KDS.Animator.OnAnimationEnd.Loop)\n self.enabled = enabled\n\n \"\"\"Updates and draws the button onto a surface.\n\n Args:\n surface (Surface): The surface the button will be drawn onto.\n mouse_pos (Tuple[int, int]): The SCALED position of the mouse.\n clicked (bool): Determines if the button's function should be executed.\n \"\"\""
] | [
"0.6098649",
"0.5991369",
"0.5905092",
"0.5815281",
"0.5792299",
"0.57288677",
"0.5722896",
"0.5714045",
"0.5713487",
"0.5713487",
"0.57038414",
"0.5668134",
"0.56377494",
"0.5615933",
"0.5568823",
"0.55665016",
"0.5559408",
"0.55072635",
"0.54790586",
"0.54756534",
"0.547195",
"0.5452503",
"0.54497975",
"0.5444322",
"0.54406685",
"0.54345316",
"0.54298913",
"0.5422128",
"0.5407191",
"0.53974473"
] | 0.6623888 | 0 |
Rendering the inactive button onto the screen surface. | def render_inactive(self):
# Rendering button "background"
self.screen.blit(self.inactive_background_surface, (self.x, self.y))
# Rendering button text
self.screen.blit(self.active_text_surface, self.active_textRect) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_active(self):\n # Rendering button \"background\"\n if self.resize_right:\n self.active_background_surface = pygame.Surface((self.w * 1.05, self.h))\n else:\n self.active_background_surface = pygame.Surface((self.w, self.h))\n self.active_background_surface.set_alpha(self.alpha)\n self.active_background_surface.fill(self.color_bg_active)\n self.screen.blit(self.active_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates\n # Rendering button text\n self.screen.blit(self.inactive_text_surface, self.inactive_textRect)",
"def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_img, self.msg_img_rect)",
"def draw_button(self):\r\n self.surface.fill(self.button_color, self.rect)\r\n self.surface.blit(self.msg_image, self.msg_image_rect)",
"def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, self.rect)\n\n # Blit the button's text onto it\n self.screen.blit(self.txt_surface, self.txt_surface_rect)",
"def render_fading(self):\n self.alpha = self.alpha - self.fading_steps\n self.inactive_background_surface.set_alpha(self.alpha)\n if self.sliding_disappearance:\n self.x -= self.sliding_steps\n self.active_textRect.x -= self.sliding_steps\n\n # Rendering button \"background\"\n self.screen.blit(self.inactive_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates\n if self.alpha > self.alpha_border: # Render button text until its alpha value is reduced by x\n self.screen.blit(self.active_text_surface, self.active_textRect)",
"def draw(self):\n # static\n surf = self.surf.copy()\n\n # dynamic\n pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*130), 40)\n self.button_rect = self.button_surf.get_rect(center=pos)\n surf.blit(self.button_surf, self.button_rect)\n # move of button box to correct screen position\n self.button_rect.move_ip(self.xpos, self.ypos)\n\n # screen\n screen.blit(surf, (self.xpos, self.ypos))",
"def draw(self, screen):\n if self.state == self.S_ACTIVE:\n screen.blit(self.image, self.rect)",
"def draw(self, screen: pygame.Surface) -> None:\n page = self.pages[self.current_page]\n # Draw background\n screen.blit(page.background, (0, 0))\n # Draw buttons to screen\n for button in page.buttons:\n if button.image is not None:\n screen.blit(button.image, button.rect)\n screen.blit(button.text, button.rect)\n # Draw highlights if mouse is hovering over button\n if button.tag not in ('display', 'output') and \\\n button.rect.collidepoint(self.mouse_pos):\n surf = create_trans_surf(button.rect.width, button.rect.height, 50, (100, 255, 100))\n screen.blit(surf, button.rect)",
"def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)",
"def testDrawDoesNotCrash(self):\n my_button = buttonsprite.ButtonSprite()\n my_button.rect.size = (64, 24)\n my_button._createImage()\n my_button.setMode('inactive')\n my_button._draw()\n my_button.setMode('highlighted')\n my_button._draw()\n my_button.setMode('pressed')\n my_button._draw()\n my_button.setMode('normal')\n my_button._draw()",
"def sprint(self):\n self.buttons = []\n self.screen.blit(self.background_image, (0, 0))\n self.create_button((self.width // 2 - 257, self.height // 8 - 85), 501, 200, Colors.BLACK, \"20L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 3 - 81), 501, 200, Colors.BLACK, \"40L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 5 - 86), 501, 200, Colors.BLACK, \"100L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 7 - 85), 501, 200, Colors.BLACK, \"1000L\")\n self.show_buttons()\n self.show_text_in_buttons()\n pygame.display.flip()",
"def draw(self, p):\r\n self.active = True\r\n surface = pygame.surfarray.make_surface(p)\r\n self.screen.blit(surface, (0, 0))\r\n pygame.display.flip()\r\n return",
"def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)",
"def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))",
"def draw_sound_button(self):\n if self.settings.sound_on:\n self.screen.blit(self.image_sound_on, self.rect)\n else:\n self.screen.blit(self.image_sound_off, self.rect)",
"def show(self):\n screen_copy = screen.copy()\n if background_chanel.get_busy():\n self.music_btn.image = self.music_on_image\n else:\n self.music_btn.image = self.music_off_image\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n return None\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n pos = event.pos\n pos = (pos[0] - self.rect.x, pos[1] - self.rect.y)\n if self.play_btn.rect.collidepoint(pos):\n return None\n elif self.menu_btn.rect.collidepoint(pos):\n return MAIN_MENU\n elif self.music_btn.rect.collidepoint(pos):\n if background_chanel.get_busy():\n self.music_btn.image = self.music_off_image\n background_chanel.stop()\n else:\n self.music_btn.image = self.music_on_image\n background_chanel.play(background_game_play_music, loops=-1)\n elif self.restart_level_btn.rect.collidepoint(pos):\n return RESTART_LEVEL\n pause_group.draw(self.surface)\n screen_copy.blit(self.surface, self.rect.topleft)\n screen.blit(screen_copy, (0, 0))\n if pygame.mouse.get_focused():\n cursor.show(screen)\n pygame.display.flip()",
"def blit_me(self):\n self.start_button.blit_me()\n self.title.blit_me()\n self.screen.blit(self.unicorn_img, self.rect)",
"def draw_buttons(self):\n for button in self.playing_buttons:\n button.draw(self.screen)",
"def blank_screen_and_hide_control_buttons(self):\n event_logger.debug(\"Blanking display\")\n rpi_utils.toggle_screen_state(\"off\")\n self.hide_control_buttons()",
"def DrawButton(self, dc, wnd, in_rect, button, orientation):\r\n\r\n bitmap_id, button_state = button.id, button.cur_state\r\n \r\n if bitmap_id == AUI_BUTTON_CLOSE:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_close_bmp\r\n elif button_state & AUI_BUTTON_STATE_HOVER:\r\n bmp = self._hover_close_bmp\r\n elif button_state & AUI_BUTTON_STATE_PRESSED:\r\n bmp = self._pressed_close_bmp\r\n else:\r\n bmp = self._active_close_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_LEFT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_left_bmp\r\n else:\r\n bmp = self._active_left_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_RIGHT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_right_bmp\r\n else:\r\n bmp = self._active_right_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_WINDOWLIST:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_windowlist_bmp\r\n else:\r\n bmp = self._active_windowlist_bmp\r\n\r\n else:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = button.dis_bitmap\r\n else:\r\n bmp = button.bitmap\r\n \r\n if not bmp.IsOk():\r\n return\r\n\r\n rect = wx.Rect(*in_rect)\r\n\r\n if orientation == wx.LEFT:\r\n \r\n rect.SetX(in_rect.x)\r\n rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2))\r\n rect.SetWidth(bmp.GetWidth())\r\n rect.SetHeight(bmp.GetHeight())\r\n \r\n else:\r\n \r\n rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(),\r\n ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2),\r\n bmp.GetWidth(), bmp.GetHeight())\r\n \r\n rect = IndentPressedBitmap(rect, button_state)\r\n dc.DrawBitmap(bmp, rect.x, rect.y, True)\r\n\r\n out_rect = rect\r\n\r\n if bitmap_id == AUI_BUTTON_RIGHT:\r\n self._buttonRect = wx.Rect(rect.x, rect.y, 30, rect.height)\r\n \r\n return out_rect",
"def draw(self, screen):",
"def draw(self):\n\n self.state_stack.peek().draw(self.screen)",
"def show_buttons(self):\n for button in self.buttons:\n x = button.starting_x\n y = button.starting_y\n self.screen.fill(button.color, ((x, y), (button.width, button.height)))",
"def display_screen(self):\n self.screen.blit(self.bg, (0, 0))\n pygame.display.update()",
"def _inactive(self):\n self._click()\n if self._last is None and self._touch is not None:\n self._state = STATE_COUNTDOWN\n self._game = Gameplay()\n self._last = self._touch",
"def DrawButton(self, dc, wnd, in_rect, button, orientation):\r\n\r\n bitmap_id, button_state = button.id, button.cur_state\r\n \r\n if bitmap_id == AUI_BUTTON_CLOSE:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_close_bmp\r\n else:\r\n bmp = self._active_close_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_LEFT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_left_bmp\r\n else:\r\n bmp = self._active_left_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_RIGHT:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_right_bmp\r\n else:\r\n bmp = self._active_right_bmp\r\n\r\n elif bitmap_id == AUI_BUTTON_WINDOWLIST:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = self._disabled_windowlist_bmp\r\n else:\r\n bmp = self._active_windowlist_bmp\r\n\r\n else:\r\n if button_state & AUI_BUTTON_STATE_DISABLED:\r\n bmp = button.dis_bitmap\r\n else:\r\n bmp = button.bitmap\r\n \r\n if not bmp.IsOk():\r\n return\r\n\r\n rect = wx.Rect(*in_rect)\r\n\r\n if orientation == wx.LEFT:\r\n \r\n rect.SetX(in_rect.x)\r\n rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2))\r\n rect.SetWidth(bmp.GetWidth())\r\n rect.SetHeight(bmp.GetHeight())\r\n \r\n else:\r\n \r\n rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(),\r\n ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2),\r\n bmp.GetWidth(), bmp.GetHeight())\r\n\r\n self.DrawButtons(dc, rect, bmp, wx.WHITE, button_state)\r\n\r\n out_rect = wx.Rect(*rect)\r\n return out_rect",
"def render_screen(self):\n pygame.display.update(self.screen_rect)\n return",
"def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)",
"def draw(self, surface, offset=(0,0)):\n for button in self.buttons:\n button.draw(surface, offset)",
"def main_background(self):\n self.screen.blit(self.background, (0, 0))"
] | [
"0.824468",
"0.727387",
"0.7242291",
"0.69929683",
"0.69051856",
"0.6741595",
"0.6675783",
"0.6625641",
"0.660019",
"0.6552847",
"0.65396786",
"0.6446757",
"0.6421722",
"0.64049184",
"0.63985157",
"0.6314176",
"0.6296651",
"0.6284545",
"0.62568057",
"0.6159919",
"0.6135134",
"0.61311245",
"0.61271834",
"0.6107575",
"0.61060554",
"0.60982555",
"0.609529",
"0.60861015",
"0.6074039",
"0.6068378"
] | 0.90140796 | 0 |
Rendering the active button onto the screen surface. | def render_active(self):
# Rendering button "background"
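# When resize_right is set, the highlight surface is built roughly 5% wider than the button, so the hovered button appears to stretch to the right; otherwise it matches the button's size exactly.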
if self.resize_right:
self.active_background_surface = pygame.Surface((self.w * 1.05, self.h))
else:
self.active_background_surface = pygame.Surface((self.w, self.h))
self.active_background_surface.set_alpha(self.alpha)
self.active_background_surface.fill(self.color_bg_active)
self.screen.blit(self.active_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates
# Rendering button text
self.screen.blit(self.inactive_text_surface, self.inactive_textRect) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_inactive(self):\n # Rendering button \"background\"\n self.screen.blit(self.inactive_background_surface, (self.x, self.y))\n # Rendering button text\n self.screen.blit(self.active_text_surface, self.active_textRect)",
"def draw_button(self):\r\n self.surface.fill(self.button_color, self.rect)\r\n self.surface.blit(self.msg_image, self.msg_image_rect)",
"def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_img, self.msg_img_rect)",
"def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, self.rect)\n\n # Blit the button's text onto it\n self.screen.blit(self.txt_surface, self.txt_surface_rect)",
"def draw(self):\n # static\n surf = self.surf.copy()\n\n # dynamic\n pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*130), 40)\n self.button_rect = self.button_surf.get_rect(center=pos)\n surf.blit(self.button_surf, self.button_rect)\n # move of button box to correct screen position\n self.button_rect.move_ip(self.xpos, self.ypos)\n\n # screen\n screen.blit(surf, (self.xpos, self.ypos))",
"def draw(self, screen: pygame.Surface) -> None:\n page = self.pages[self.current_page]\n # Draw background\n screen.blit(page.background, (0, 0))\n # Draw buttons to screen\n for button in page.buttons:\n if button.image is not None:\n screen.blit(button.image, button.rect)\n screen.blit(button.text, button.rect)\n # Draw highlights if mouse is hovering over button\n if button.tag not in ('display', 'output') and \\\n button.rect.collidepoint(self.mouse_pos):\n surf = create_trans_surf(button.rect.width, button.rect.height, 50, (100, 255, 100))\n screen.blit(surf, button.rect)",
"def show_buttons(self):\n for button in self.buttons:\n x = button.starting_x\n y = button.starting_y\n self.screen.fill(button.color, ((x, y), (button.width, button.height)))",
"def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)",
"def render_fading(self):\n self.alpha = self.alpha - self.fading_steps\n self.inactive_background_surface.set_alpha(self.alpha)\n if self.sliding_disappearance:\n self.x -= self.sliding_steps\n self.active_textRect.x -= self.sliding_steps\n\n # Rendering button \"background\"\n self.screen.blit(self.inactive_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates\n if self.alpha > self.alpha_border: # Render button text until its alpha value is reduced by x\n self.screen.blit(self.active_text_surface, self.active_textRect)",
"def draw(self, screen):\n if self.state == self.S_ACTIVE:\n screen.blit(self.image, self.rect)",
"def sprint(self):\n self.buttons = []\n self.screen.blit(self.background_image, (0, 0))\n self.create_button((self.width // 2 - 257, self.height // 8 - 85), 501, 200, Colors.BLACK, \"20L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 3 - 81), 501, 200, Colors.BLACK, \"40L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 5 - 86), 501, 200, Colors.BLACK, \"100L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 7 - 85), 501, 200, Colors.BLACK, \"1000L\")\n self.show_buttons()\n self.show_text_in_buttons()\n pygame.display.flip()",
"def draw(self, surface, offset=(0,0)):\n for button in self.buttons:\n button.draw(surface, offset)",
"def draw_buttons(self):\n for button in self.playing_buttons:\n button.draw(self.screen)",
"def draw(self, p):\r\n self.active = True\r\n surface = pygame.surfarray.make_surface(p)\r\n self.screen.blit(surface, (0, 0))\r\n pygame.display.flip()\r\n return",
"def button(msg, font_size, x, y, w, h, color, action):\r\n mouse = pygame.mouse.get_pos() # Grabbing cursor position\r\n click = pygame.mouse.get_pressed() # Mouse button status\r\n \r\n # Check if cursor is on the button\r\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\r\n # Draw the button\r\n pygame.draw.rect(display, color, (x, y, w, h)) \r\n \r\n # Check if we have clicked on the button\r\n if click[0] == 1 and action is not None:\r\n \r\n # Run singleplayer mode\r\n if action == \"Play S\": \r\n mode = \"singleplayer\" # set mode\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # Run multiplayer mode\r\n if action == \"Play M\":\r\n mode = \"multiplayer\" # set mode\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # Quit\r\n if action == \"Quit\":\r\n pygame.quit()\r\n quit()\r\n \r\n # Demo\r\n if action == \"Demo\":\r\n mode = \"demo\"\r\n ctf.ctf_game(mode, selected_map)\r\n \r\n # set display\r\n pygame.display.set_mode((display_width, display_height), pygame.RESIZABLE)\r\n \r\n # Displaying text on the button\r\n font = pygame.font.Font('freesansbold.ttf', font_size)\r\n text_surf, text_rect = text_objects(msg, font)\r\n text_rect.center = ((x+(w/2)), (y+(h/2)))\r\n display.blit(text_surf, text_rect)",
"def show(self):\n screen_copy = screen.copy()\n if background_chanel.get_busy():\n self.music_btn.image = self.music_on_image\n else:\n self.music_btn.image = self.music_off_image\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n return None\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n pos = event.pos\n pos = (pos[0] - self.rect.x, pos[1] - self.rect.y)\n if self.play_btn.rect.collidepoint(pos):\n return None\n elif self.menu_btn.rect.collidepoint(pos):\n return MAIN_MENU\n elif self.music_btn.rect.collidepoint(pos):\n if background_chanel.get_busy():\n self.music_btn.image = self.music_off_image\n background_chanel.stop()\n else:\n self.music_btn.image = self.music_on_image\n background_chanel.play(background_game_play_music, loops=-1)\n elif self.restart_level_btn.rect.collidepoint(pos):\n return RESTART_LEVEL\n pause_group.draw(self.surface)\n screen_copy.blit(self.surface, self.rect.topleft)\n screen.blit(screen_copy, (0, 0))\n if pygame.mouse.get_focused():\n cursor.show(screen)\n pygame.display.flip()",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def draw(self):\n\n self.state_stack.peek().draw(self.screen)",
"def colour_press(self):\n global last_button\n if last_button is None:\n # If there is no \"last button press\", set this as the latest one\n last_button = self\n else:\n # Another button has been pressed before. Switch the colours of the two\n last_button.background_color, self.background_color = self.background_color, last_button.background_color\n # Set their states back to normal and reset the last button pressed\n last_button.state = 'normal'\n self.state = 'normal'\n last_button = None\n # Check if the switch removed any blocks\n points = self.screen.check_removal()\n if points == 0:\n # If nothing has been removed, the player gets one step closer to losing\n self.screen.misses += 1\n else:\n # Give the player the points\n self.screen.points += points\n if self.screen.misses > 3:\n # Player has lost, leave the game\n self.screen.leave()",
"def show_text_in_buttons(self):\n for button in self.buttons:\n self.screen.blit(button.rendered_text, button.get_text_position())",
"def set_active_tool_button(self, active_button):\n\n # button_style = 'font-weight: bold'\n # active_style = \"background-color: blue; color: white\"\n # active_style = \"background-color: rgb(0,49,80); color: white\"\n active_style = \"background-color: rgb(0,112,192); color: white\"\n inactive_style = \"background-color: none; color: none\"\n\n # Reset all button colours\n self.projConfigButton.setStyleSheet(inactive_style)\n self.rawDataButton.setStyleSheet(inactive_style)\n self.dataQualityButton.setStyleSheet(inactive_style)\n self.statsScreeningButton.setStyleSheet(inactive_style)\n self.spectralScreeningButton.setStyleSheet(inactive_style)\n self.histogramsButton.setStyleSheet(inactive_style)\n self.seascatterButton.setStyleSheet(inactive_style)\n self.transFuncsButton.setStyleSheet(inactive_style)\n self.fatigueButton.setStyleSheet(inactive_style)\n\n # Colour active dashboard button\n if active_button == \"config\":\n self.projConfigButton.setStyleSheet(active_style)\n if active_button == \"raw\":\n self.rawDataButton.setStyleSheet(active_style)\n if active_button == \"quality\":\n self.dataQualityButton.setStyleSheet(active_style)\n if active_button == \"stats\":\n self.statsScreeningButton.setStyleSheet(active_style)\n if active_button == \"spectral\":\n self.spectralScreeningButton.setStyleSheet(active_style)\n if active_button == \"histograms\":\n self.histogramsButton.setStyleSheet(active_style)\n if active_button == \"seascatter\":\n self.seascatterButton.setStyleSheet(active_style)\n if active_button == \"tf\":\n self.transFuncsButton.setStyleSheet(active_style)\n if active_button == \"fatigue\":\n self.fatigueButton.setStyleSheet(active_style)",
"def render_button(self):\n return self.widgets.get('button').render()",
"def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r",
"def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)",
"def draw(self, win, outline=None):\n # Call this method to draw the button on the screen\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.width + 4, self.height + 4), 0)\n\n pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height), 0)\n\n if self.text != '':\n font = pygame.font.SysFont('comicsans', 30)\n text = font.render(self.text, 1, black)\n win.blit(text, (self.x + (self.width // 2 - text.get_width() // 2), self.y + (self.height // 2 - text.get_height() // 2)))",
"def display(self):\n\t\tprint('The button in the window was clicked!')",
"def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)",
"def render_screen(self):\n pygame.display.update(self.screen_rect)\n return",
"def show(self):\n screen_copy = screen.copy()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n pos = event.pos\n pos = (pos[0] - self.rect.x, pos[1] - self.rect.y)\n if self.menu_btn.rect.collidepoint(pos):\n return MAIN_MENU\n elif self.restart_level_btn.rect.collidepoint(pos):\n return RESTART_LEVEL\n game_panel_group.draw(self.surface)\n screen_copy.blit(self.surface, self.rect.topleft)\n screen.blit(screen_copy, (0, 0))\n if pygame.mouse.get_focused():\n cursor.show(screen)\n pygame.display.flip()",
"def draw_me(self):\r\n\t\tself.image.fill((100, 200, 100))\r\n\t\tif self.active: pg.draw.rect(self.image, (100, 100, 200), self.frame, 3) #if active => draw frame around selected entity width 3\r\n\t\tself.display_surface.blit(self.image, self.rect)"
] | [
"0.8091009",
"0.73016804",
"0.7288868",
"0.72423387",
"0.7085554",
"0.6943015",
"0.6838501",
"0.6808473",
"0.67748725",
"0.6729412",
"0.664229",
"0.6615208",
"0.6613954",
"0.65206283",
"0.6385157",
"0.63824177",
"0.63584137",
"0.63529414",
"0.62700206",
"0.6234393",
"0.6221679",
"0.62007004",
"0.6195177",
"0.6177959",
"0.6163609",
"0.61335504",
"0.6117873",
"0.61157584",
"0.6112516",
"0.6107687"
] | 0.8646287 | 0 |
Checks whether the mouse is on the button and returns a boolean. | def mouse_on_button(self, mouse) -> bool:
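# True only when the cursor lies strictly inside the button's rectangle: between x and x + w horizontally, and between y and y + h vertically (edges excluded).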
return self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_if_got_pressed(self):\n mouse_x_pos,mouse_y_pos = pg.mouse.get_pos()\n\n if utilitiez.on_object(self.rect.x, self.rect.y, self.rect.width, self.rect.height, mouse_x_pos, mouse_y_pos,\n MOUSE_WIDTH, MOUSE_HEIGHT):\n self.__on_click()",
"def isButtonPressed() -> bool:\n pass",
"def isButtonReleased() -> bool:\n pass",
"def is_pressed(self) -> bool:",
"def is_pressed(self) -> bool:\n return True",
"def is_mouse(self, key):\n return key == curses.KEY_MOUSE",
"def handle_mousedown(self, button, name):\r\n x = widget.Widget.handle_mousedown(self, button, name)\r\n if not self.mouse_on_me():\r\n return False\r\n if not self.get_visible():\r\n return False\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mousedown(button, name):\r\n return True\r\n return x",
"def button_a(self) -> bool:\n return bool(self.pressed & 0x2)",
"def pressed(self) -> bool:\n return self.type == \"JOYBUTTONDOWN\"",
"def get_pressed(self):\n\n self.update()\n\n if self.pressed:\n self.pressed = False\n return True\n\n return False",
"def button_b(self) -> bool:\n return bool(self.pressed & 0x4)",
"def get_mouse_state(self):\n try:\n mouse_state = curses.getmouse()\n return mouse_state\n except:\n self.app.log(get_error_info())\n return False",
"def handle_mousedown(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mousedown(button, name):\r\n return True\r\n return False",
"def handle_mouseup(self, button, name):\r\n x = widget.Widget.handle_mouseup(self, button, name)\r\n if not self.mouse_on_me():\r\n return False\r\n if not self.get_visible():\r\n return False\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mouseup(button, name):\r\n return True\r\n return x",
"def is_button_pressed():\n return btn_pin.value() == 0",
"def mousePressed(self, _evt, _id):\n if not self.is_enabled: return False\n \n self.mouse_icon.mousePressed(_evt, _id)\n return False",
"def was_pressed(self) -> bool:",
"def check_button_hover(self, mouse_pos):\n for button in self.buttons: # type: Button\n if button.is_position_on_button(mouse_pos):\n button.hover()\n else:\n button.un_hover()",
"def was_pressed(self) -> bool:\n return True",
"def is_pressed(self):\n return GPIO.input(self.pin) == self.closed_status",
"def handle_mousehold(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mousehold(button, name):\r\n return True\r\n return False",
"def is_left_click(event):\n return (event.type == pygame.MOUSEBUTTONDOWN\n and event.button == MOUSE_LEFT)",
"def get_button_status(self, button):\n return glfw.get_mouse_button(self.window.context.glfw_window, button)",
"def click(self, mouse_pos: Tuple[int, int]):\n self.clicked = self.img_rect and self.img_rect.collidepoint(\n mouse_pos) and not self.clicked\n return self.clicked",
"def check_clicked(self, events):\n x = self.x\n y = self.y\n xsize = self.xsize\n ysize = self.ysize\n (a, b) = pygame.mouse.get_pos()\n if a>x and b>y and a<x+xsize and b<y+ysize:\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.clickedAction(events)\n self.clicked = True\n return self.clicked",
"def handle_mouseup(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mouseup(button, name):\r\n return True\r\n return False",
"def ispressed(self,key):\n \n if (self.buttons & b0uttondict.get(key,0)): return True\n else: return False",
"def switch(self) -> bool:\n return bool(self.pressed & 0x1)",
"def mouse_button_state():\n x, y = c_int(0), c_int(0)\n bmask = mouse.SDL_GetMouseState(byref(x), byref(y))\n return ButtonState(bmask)",
"def handle_mousehold(self, button, name):\r\n if widget.Widget.handle_mousehold(self, button, name):\r\n app.App.handle_mousehold(self, button, name)\r\n return True\r\n return False"
] | [
"0.76346713",
"0.75949246",
"0.75622696",
"0.74238867",
"0.73842466",
"0.73723227",
"0.73164",
"0.72117794",
"0.71592665",
"0.7093802",
"0.70712423",
"0.70122606",
"0.68814075",
"0.68803525",
"0.685196",
"0.68248737",
"0.682373",
"0.6745367",
"0.6737135",
"0.6720025",
"0.6718927",
"0.6708512",
"0.66992426",
"0.66932",
"0.6688971",
"0.6651477",
"0.66106135",
"0.6569525",
"0.655651",
"0.65155196"
] | 0.8396938 | 0 |
Test that a correct description passes the check and that a dot is added. | def test_description(self):
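# A description supplied without end punctuation is expected to come back with a trailing period appended.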
self.assertEqual(
"Description.",
DescribedModel.parse_obj({"name": "Name", "description": "Description"}).description,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at least three words\n assert len(words) >= 3\n # the first letter should be capitalized\n assert description[0].isupper()\n # the description should end with a period\n assert description.endswith(\".\")\n # the description should not have two periods at the end\n assert not description.endswith(\"..\")\n # the last letter of the first word should be 's'\n assert words[0][-1] == \"s\"\n # enforce set of allowed characters. Must be ascii printable, no pipes (|)\n assert \"|\" not in description\n assert set(description).issubset(set(string.printable))",
"def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")",
"def test_long_description(question):\n assert \"description\" in question[\"instance\"]\n assert \"longDescription\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n longDescription = question[\"instance\"][\"longDescription\"]\n # there shouldn't be whitespace at the beginning or end\n assert longDescription.strip() == longDescription\n words = longDescription.split()\n # we should have at least five words\n assert len(words) >= 5\n # the first letter should be capitalized\n assert longDescription[0].isupper()\n # long description should end with a period\n assert longDescription.endswith(\".\")\n # long description should not have two periods at the end\n assert not longDescription.endswith(\"..\")\n # description should not be the same as long description\n assert longDescription != description",
"def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")",
"def test_description_with_punctuation(self):\n self.assertEqual(\n \"Description?\",\n DescribedModel.parse_obj({\"name\": \"Name\", \"description\": \"Description?\"}).description,\n )",
"def test_descriptions_render_correctly(self):\n # help text in fields\n self.assertContains(\n self.response, \"<td>first name - The person's first name</td>\"\n )\n self.assertContains(\n self.response, \"<td>last name - The person's last name</td>\"\n )\n\n # method docstrings\n self.assertContains(self.response, \"<p>Get the full name of the person</p>\")\n\n link = '<a class=\"reference external\" href=\"/admindocs/models/%s/\">%s</a>'\n markup = \"<p>the related %s object</p>\"\n company_markup = markup % (link % (\"admin_docs.company\", \"admin_docs.Company\"))\n\n # foreign keys\n self.assertContains(self.response, company_markup)\n\n # foreign keys with help text\n self.assertContains(self.response, \"%s\\n - place of work\" % company_markup)\n\n # many to many fields\n self.assertContains(\n self.response,\n \"number of related %s objects\"\n % (link % (\"admin_docs.group\", \"admin_docs.Group\")),\n )\n self.assertContains(\n self.response,\n \"all related %s objects\"\n % (link % (\"admin_docs.group\", \"admin_docs.Group\")),\n )\n\n # \"raw\" and \"include\" directives are disabled\n self.assertContains(\n self.response,\n \"<p>"raw" directive disabled.</p>\",\n )\n self.assertContains(\n self.response, \".. raw:: html\\n :file: admin_docs/evilfile.txt\"\n )\n self.assertContains(\n self.response,\n \"<p>"include" directive disabled.</p>\",\n )\n self.assertContains(self.response, \".. include:: admin_docs/evilfile.txt\")\n out = self.docutils_stderr.getvalue()\n self.assertIn('\"raw\" directive disabled', out)\n self.assertIn('\"include\" directive disabled', out)",
"def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))",
"def test_get_description(self):\n description = get_description(self.work_data[\"description\"])\n expected = \"First in the Old Kingdom/Abhorsen series.\"\n self.assertEqual(description, expected)",
"def testDescription(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"description\")\n\n self.util.stringPropertyTest(self, dis_meta, \"description\")",
"def test_readme_proper_description():\n READMELOOKSGOOD = True\n f = open(\"README.md\", \"r\", encoding=\"utf-8\")\n content = f.read()\n f.close()\n for c in README_CONTENT_CHECK_FOR:\n if c not in content:\n print(c)\n READMELOOKSGOOD = False\n break\n\n assert READMELOOKSGOOD is True, \"You have not described all the functions/class well in your README.md file\"",
"def check(self, docstring: PetscDocStringImpl, section: SectionImpl, loc: SourceRange) -> None:\n name = section.transform(section.name)\n if self.sep != self.expected_sep:\n diag = section.diags.wrong_description_separator\n mess = f\"{name} seems to be missing a description separator; I suspect you may be using '{self.sep}' as a separator instead of '{self.expected_sep}'. Expected '{self.arg} {self.expected_sep} {self.description}'\"\n elif not self.description:\n diag = section.diags.missing_description\n mess = f\"{name} missing a description. Expected '{self.arg} {self.expected_sep} a very useful description'\"\n else:\n return # ok?\n docstring.add_diagnostic_from_source_range(Diagnostic.Kind.ERROR, diag, mess, loc)\n return",
"def validate_description(description):\n if description is None or description == '':\n return None\n # Replace double quotes with single quotes to avoid breaking the docstring\n validated_description = description.replace('\"', \"'\")\n if validated_description != description:\n cmd.print_info('Replaced double quotes with single quotes in class description')\n return validated_description",
"def test_desc(self):\n\t\tself.assertEqual('description', self.filter.get_description())",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.cd, 'desc')\n )\n\n self.assertEqual(\n [],\n self.cc.desc\n )",
"def test_session10_readme_proper_description():\n READMELOOKSGOOD = True\n f = open(\"README.md\", \"r\", encoding=\"utf-8\")\n content = f.read()\n f.close()\n for c in README_CONTENT_CHECK_FOR:\n if c not in content:\n print(c)\n READMELOOKSGOOD = False\n pass\n assert READMELOOKSGOOD == True, \"You have not described all the functions/class well in your README.md file\"",
"def test_clean_description(self):\n text = '!@#$%^&*()_+1234567890-='\n self.assertEqual(sync.clean_description(text),\n '!@#$%^&*()_+1234567890-=')\n\n text = \"Darwin\\u00c2\\u00bfs Bulldog\"\n self.assertEqual(sync.clean_description(text), \"Darwin's Bulldog\")\n\n text = \"\\n\\r\\nSome<BR><br /></BR>Text\"\n self.assertEqual(sync.clean_description(text), \"\\n\\r\\nSome\\n\\nText\")",
"def test_deprecated_in_description() -> None:\n soup = generate_case(\"deprecated\", GenerationConfiguration(deprecated_from_description=True))\n\n tests.html_schema_doc_asserts.assert_property_names(\n soup, [\"deprecated1\", \"deprecated2\", \"deprecated3\", \"deprecated4\", \"not_deprecated\"]\n )\n tests.html_schema_doc_asserts.assert_deprecated(soup, [True, True, True, True, False])",
"def testInvalidDescriptions(self):\n self.assertFalse(self.app._ignore_jobs(\"telecommuting is not an option\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommuting\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommute\"))\n self.assertFalse(self.app._ignore_jobs(\"TELECOMMUTE IS NOT AN OPTION\"))",
"def test_invalid_general_collateral_description():\n collateral = copy.deepcopy(GENERAL_COLLATERAL)\n collateral['description'] = 'XX'\n\n is_valid, errors = validate(collateral, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid",
"def test_with_multiple_descriptions():\n soup = generate_case(\"with_descriptions\")\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Exact address\",\n \"Exact address\",\n \"Delivery info depending on the delivery type\",\n \"The delivery is a gift, no prices displayed\",\n ],\n )",
"def test_descriptions3(self):\n kb = logic.PropKB()\n kb.tell(logic.expr('ISA(c-cat, c-animal)'))\n kb.tell(logic.expr('INSTANCEOF(i-petunia, c-cat)'))\n kb.tell(logic.expr('color(i-petunia, i-gray)'))\n kb.tell(logic.expr('ISA(i-gray, c-color)'))\n kb.tell(logic.expr('alternate-spelling(i-gray, grey)'))\n kb.tell(logic.expr('INSTANCEOF(i-domestic-short-hair, c-species)'))\n kb.tell(logic.expr('species(i-petunia, i-domestic-short-hair)'))\n kb.tell(logic.expr('origin(i-domestic-short-hair, i-egypt)'))\n kb.tell(logic.expr('INSTANCEOF(i-egypt, c-city)'))\n kb.tell(logic.expr('capital(i-egypt, cairo)'))\n # Description of a cat whose color is something with an alternate\n # spelling 'grey' and whose species has an origin whose capital is\n # cairo.\n species_d = logic.Description(\n 'c-species',\n {'origin': logic.Description('c-city', {'capital': 'cairo'})})\n d = logic.Description(\n 'c-cat',\n {'color': logic.Description('c-color', {'alternate-spelling': 'grey'}),\n 'species': species_d})\n self.assertEqual(d.find_all(kb), [logic.expr('i-petunia')])\n self.assertEqual(d.find_instances(kb), [logic.expr('i-petunia')])\n\n self.assertEqual(kb.isa(logic.expr(d), logic.expr('c-animal')), True)\n self.assertEqual(kb.isa(logic.expr(d), logic.expr('c-cat')), True)\n self.assertBindingsEqual(kb.ask(\n logic.expr('ISA')(logic.expr(d), logic.expr('c-animal'))), {})\n self.assertBindingsEqual(kb.ask(\n logic.expr('ISA')(logic.expr(d), logic.expr('c-cat'))), {})",
"def test_short_description(self):\n position = \"European Commissioner for European Commissioner for Economic\" \\\n + \" and Monetary Affairs and the Euro|Economic and Financial Affairs, \" \\\n + \"European Commissioner for Taxation and Customs Union, Audit and Anti-Fr\"\n short_description = \"European Commissioner for European Commissioner for Economic\" \\\n + \" and Monetary Affairs and the Euro|Ec...\"\n political_function = PoliticalFunction(position=position)\n self.assertEquals(political_function.short_description, short_description)",
"def test_description_with_ref() -> None:\n soup = generate_case(\"description_with_ref\")\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup, [\"We should see this\", \"inner description\", \"We should see this too\"]\n )",
"def testDescription(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"description\")\n\n self.util.stringPropertyTest(self, project, \"description\")",
"def test_help_strings_end_with_periods():\n for param in cli.params:\n if isinstance(param, click.core.Option):\n assert hasattr(param, \"help\")\n assert param.help.endswith(\".\")",
"def test_description_from_ref() -> None:\n soup = generate_case(\"description_from_ref\")\n\n tests.html_schema_doc_asserts.assert_descriptions(soup, [\"a filled string\"] * 2)",
"def testDescription(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(place.description, \"\")",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.cc, 'desc')\n )\n\n self.assertEqual(\n [],\n self.cc.desc\n )",
"def test_description(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(type(place.description), str)\n self.assertEqual(place.description, \"\")",
"def test_for_correct_updating_one(self):\r\n assert increase_sentence_count_if_we_should('one. two. three.', 3, 'a') == (4, 'one. two. three.a')"
] | [
"0.7467771",
"0.7183706",
"0.7170313",
"0.69735307",
"0.69044524",
"0.67804605",
"0.66104364",
"0.65192705",
"0.64968574",
"0.6449131",
"0.6411322",
"0.6408082",
"0.63846517",
"0.63669723",
"0.63474953",
"0.6342997",
"0.6311893",
"0.63043153",
"0.6295491",
"0.6244171",
"0.62389034",
"0.62104106",
"0.6190058",
"0.6168498",
"0.6162737",
"0.61591035",
"0.6140814",
"0.61351204",
"0.61096734",
"0.61033124"
] | 0.71865505 | 1 |
Test that a description with punctuation passes the check. | def test_description_with_punctuation(self):
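# A description that already ends in punctuation is expected to be kept as-is, with no extra dot appended.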
self.assertEqual(
"Description?",
DescribedModel.parse_obj({"name": "Name", "description": "Description?"}).description,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at least three words\n assert len(words) >= 3\n # the first letter should be capitalized\n assert description[0].isupper()\n # the description should end with a period\n assert description.endswith(\".\")\n # the description should not have two periods at the end\n assert not description.endswith(\"..\")\n # the last letter of the first word should be 's'\n assert words[0][-1] == \"s\"\n # enforce set of allowed characters. Must be ascii printable, no pipes (|)\n assert \"|\" not in description\n assert set(description).issubset(set(string.printable))",
"def is_punct(self, word, language):",
"def test_clean_row_punctuation(self):\n\t\tobj_ut = sentiment.clean_row(\n\t\t\t'100\\tan \"apple...:\" is it yellow-green, or red/orange?')\n\t\tself.assertEqual(obj_ut[1], \"an apple is it yellowgreen or redorange\")",
"def test_long_description(question):\n assert \"description\" in question[\"instance\"]\n assert \"longDescription\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n longDescription = question[\"instance\"][\"longDescription\"]\n # there shouldn't be whitespace at the beginning or end\n assert longDescription.strip() == longDescription\n words = longDescription.split()\n # we should have at least five words\n assert len(words) >= 5\n # the first letter should be capitalized\n assert longDescription[0].isupper()\n # long description should end with a period\n assert longDescription.endswith(\".\")\n # long description should not have two periods at the end\n assert not longDescription.endswith(\"..\")\n # description should not be the same as long description\n assert longDescription != description",
"def test_drop_punctuation():\n assert TextCleaner().transform([[\"'test!?\"]])[\"corpus\"][0] == \"test\"",
"def test_clean_description(self):\n text = '!@#$%^&*()_+1234567890-='\n self.assertEqual(sync.clean_description(text),\n '!@#$%^&*()_+1234567890-=')\n\n text = \"Darwin\\u00c2\\u00bfs Bulldog\"\n self.assertEqual(sync.clean_description(text), \"Darwin's Bulldog\")\n\n text = \"\\n\\r\\nSome<BR><br /></BR>Text\"\n self.assertEqual(sync.clean_description(text), \"\\n\\r\\nSome\\n\\nText\")",
"def is_punctuation(text):\n return not (text.lower() in AVRO_VOWELS or\n text.lower() in AVRO_CONSONANTS)",
"def descriptionValidator(self, description):\n if type(description) != str:\n API.abort(\n 400, error_messages[13]['int_des'])\n\n # check if the contents of description have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", description) or description.isspace == True:\n API.abort(400, error_messages[14]['wrong_format_des'])\n\n return True",
"def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")",
"def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))",
"def is_punctuation(ch):\n if (ch == '.'): return False\n if (ch >= '!' and ch <= '/'): return True\n if (ch >= ':' and ch <= '@'): return True\n if (ch >= '\\u2010' and ch <= '\\u2014'): return True # various dashes\n if (is_quote_mark(ch)): return True\n return False",
"def validateDescription(description):\n \n if not(description) or len(description.split()) < 5:\n return \"You must supply a description of at least 5 words.\"",
"def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)",
"def test_oneWord(self):\n s = 'This code \"works.\"'\n r = text.splitQuoted(s)\n self.failUnlessEqual(['This', 'code', 'works.'], r)",
"def test_snippet_long_unicode(self):\n message = Message(clean_text=u\"This sentence — pauses a bit\")\n self.assertEqual(\n message.snippet,\n 'This sentence -- paus...'\n )",
"def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')",
"def test_parens_disabled():\n assert get_html(PARENS_TEXT) == \"<p>I am a ((parens)) example.</p>\"",
"def test_legal_names(self):\n gen_prods_split = [p.name.split(\" \")\n for p in generate_products()]\n should_be_adjs = [n[0] for n in gen_prods_split]\n should_be_nouns = [n[1] for n in gen_prods_split]\n\n for a in should_be_adjs:\n self.assertIn(a, ADJECTIVES)\n\n for n in should_be_nouns:\n self.assertIn(n, NOUNS)",
"def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )",
"def test_legal_names(self):\n product_list = generate_products()\n for prod in product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)",
"def negation_check(self,sentence):",
"def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)",
"def testInvalidDescriptions(self):\n self.assertFalse(self.app._ignore_jobs(\"telecommuting is not an option\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommuting\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommute\"))\n self.assertFalse(self.app._ignore_jobs(\"TELECOMMUTE IS NOT AN OPTION\"))",
"def hasPunct(str):\n for c in str:\n if c in string.punctuation:\n return True\n return False",
"def test_get_description_markdown_paragraphs(self):\n description = get_description(\"Paragraph 1\\n\\nParagraph 2\")\n expected = \"<p>Paragraph 1</p>\\n<p>Paragraph 2</p>\"\n self.assertEqual(description, expected)",
"def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)",
"def password_validator(password):\n if list(PUNCTUATIONS) in password:\n \"\"\"\n >>> list(string.punctuation)\n ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.',\n '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`',\n '{', '|', '}', '~']\n >>>\n \"\"\"\n return False\n else:\n return True",
"def punctcheck(word):\r\n remove = string.punctuation\r\n pattern = r\"[{}]\".format(remove)\r\n\r\n while len(word) > 0 and word[0] in pattern:\r\n word = word[1:]\r\n\r\n while len(word) > 0 and word[-1] in pattern:\r\n word = word[:-1]\r\n\r\n return word",
"def check(self, docstring: PetscDocStringImpl, section: SectionImpl, loc: SourceRange) -> None:\n name = section.transform(section.name)\n if self.sep != self.expected_sep:\n diag = section.diags.wrong_description_separator\n mess = f\"{name} seems to be missing a description separator; I suspect you may be using '{self.sep}' as a separator instead of '{self.expected_sep}'. Expected '{self.arg} {self.expected_sep} {self.description}'\"\n elif not self.description:\n diag = section.diags.missing_description\n mess = f\"{name} missing a description. Expected '{self.arg} {self.expected_sep} a very useful description'\"\n else:\n return # ok?\n docstring.add_diagnostic_from_source_range(Diagnostic.Kind.ERROR, diag, mess, loc)\n return",
"def test_not_subset_word():\n message = 'hello world'\n words = ['test']\n assert not message_checker(message, words)\n\n message = 'hello world;'\n words = [';']\n assert not message_checker(message, words)\n\n message = 'hello world.'\n words = ['.']\n assert not message_checker(message, words)\n\n message = 'hello world.'\n words = ['world']\n assert not message_checker(message, words)"
] | [
"0.7259576",
"0.70288295",
"0.67982197",
"0.6772335",
"0.66292626",
"0.6555726",
"0.65338016",
"0.65078914",
"0.6434341",
"0.6406588",
"0.6294614",
"0.620535",
"0.61716986",
"0.61261255",
"0.6119967",
"0.6109077",
"0.6107635",
"0.61050224",
"0.60843354",
"0.6074227",
"0.6065107",
"0.606489",
"0.60341126",
"0.5992712",
"0.59920794",
"0.59320784",
"0.5896204",
"0.58820647",
"0.58785",
"0.5871477"
] | 0.794725 | 0 |
Test that the description is mandatory. | def test_missing_description(self):
self.check_validation_error("description\n field required", name="Name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")",
"def testDescription(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"description\")\n\n self.util.stringPropertyTest(self, project, \"description\")",
"def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at least three words\n assert len(words) >= 3\n # the first letter should be capitalized\n assert description[0].isupper()\n # the description should end with a period\n assert description.endswith(\".\")\n # the description should not have two periods at the end\n assert not description.endswith(\"..\")\n # the last letter of the first word should be 's'\n assert words[0][-1] == \"s\"\n # enforce set of allowed characters. Must be ascii printable, no pipes (|)\n assert \"|\" not in description\n assert set(description).issubset(set(string.printable))",
"def test_missing_description(superuser):\n form = RegisterForm(superuser, name='Client',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('This field is required.') in form.description.errors",
"def test_description(self):\n self.assertEqual(\n \"Description.\",\n DescribedModel.parse_obj({\"name\": \"Name\", \"description\": \"Description\"}).description,\n )",
"def testDescription(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(place.description, \"\")",
"def test_desc(self):\n\t\tself.assertEqual('description', self.filter.get_description())",
"def testInputDesc(self):\n\n self.assertTrue(\n hasattr(self.cd, 'input_desc')\n )\n\n self.assertEqual(\n None,\n self.cd.input_desc\n )",
"def testDescription(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"description\")\n\n self.util.stringPropertyTest(self, dis_meta, \"description\")",
"def testInputDesc(self):\n\n self.assertTrue(\n hasattr(self.cc, 'input_desc')\n )\n\n self.assertEqual(\n None,\n self.cc.input_desc\n )",
"def check_no_description(self):\n context = TestContext(session_context=ducktape_mock.session_context(),\n cls=DummyTestNoDescription, function=DummyTestNoDescription.test_this)\n assert context.description == \"\"",
"def test_description(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(type(place.description), str)\n self.assertEqual(place.description, \"\")",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.cd, 'desc')\n )\n\n self.assertEqual(\n [],\n self.cc.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.cc, 'desc')\n )\n\n self.assertEqual(\n [],\n self.cc.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testInputDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'input_desc')\n )\n\n self.assertEqual(\n None,\n self.node.input_desc\n )",
"def test_empty_description_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={'message': \"Description cannot be empty.\", 'status':\"error\"},\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('Project', '')",
"def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))",
"def validate_description(description):\n if description is None or description == '':\n return None\n # Replace double quotes with single quotes to avoid breaking the docstring\n validated_description = description.replace('\"', \"'\")\n if validated_description != description:\n cmd.print_info('Replaced double quotes with single quotes in class description')\n return validated_description",
"def test_invalid_general_collateral_missing_description():\n collateral = copy.deepcopy(GENERAL_COLLATERAL)\n del collateral['description']\n\n is_valid, errors = validate(collateral, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid",
"def test_long_description(question):\n assert \"description\" in question[\"instance\"]\n assert \"longDescription\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n longDescription = question[\"instance\"][\"longDescription\"]\n # there shouldn't be whitespace at the beginning or end\n assert longDescription.strip() == longDescription\n words = longDescription.split()\n # we should have at least five words\n assert len(words) >= 5\n # the first letter should be capitalized\n assert longDescription[0].isupper()\n # long description should end with a period\n assert longDescription.endswith(\".\")\n # long description should not have two periods at the end\n assert not longDescription.endswith(\"..\")\n # description should not be the same as long description\n assert longDescription != description",
"def testInputDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'input_desc')\n )\n\n self.assertEqual(\n None,\n self.node.input_desc\n )\n\n self.node.input_desc = 'Sunset with an Eizo'\n\n self.assertEqual(\n 'Sunset with an Eizo',\n self.node.input_desc\n )",
"def descriptionValidator(self, description):\n if type(description) != str:\n API.abort(\n 400, error_messages[13]['int_des'])\n\n # check if the contents of description have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", description) or description.isspace == True:\n API.abort(400, error_messages[14]['wrong_format_des'])\n\n return True",
"def test_get_description(self):\n description = get_description(self.work_data[\"description\"])\n expected = \"First in the Old Kingdom/Abhorsen series.\"\n self.assertEqual(description, expected)",
"def test_description_from_ref() -> None:\n soup = generate_case(\"description_from_ref\")\n\n tests.html_schema_doc_asserts.assert_descriptions(soup, [\"a filled string\"] * 2)",
"def test_too_short_description(superuser):\n form = RegisterForm(superuser, name='Client',\n description='C',\n is_confidential=choice([True, False]),\n redirect_uris='http://localhost/',\n default_scopes='read write')\n\n assert form.validate() is False\n assert _('Field must be between 3 and 350 characters long.') in form.description.errors",
"def requires_description(self, requires_description):\n\n self._requires_description = requires_description"
] | [
"0.8143264",
"0.7655286",
"0.7621963",
"0.7548257",
"0.7531445",
"0.74477756",
"0.7441477",
"0.7300264",
"0.72981095",
"0.7288913",
"0.7284006",
"0.7265562",
"0.7217067",
"0.7123357",
"0.7076178",
"0.7076178",
"0.7076178",
"0.7076178",
"0.70645094",
"0.7035895",
"0.7025577",
"0.692971",
"0.6876282",
"0.68451756",
"0.683048",
"0.682752",
"0.67922336",
"0.6746364",
"0.6731523",
"0.6731187"
] | 0.8563048 | 0 |
Test that the description has a nonzero length. | def test_empty_description(self):
self.check_validation_error('description\n string does not match regex ".+"', name="Name", description="") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_no_description(self):\n context = TestContext(session_context=ducktape_mock.session_context(),\n cls=DummyTestNoDescription, function=DummyTestNoDescription.test_this)\n assert context.description == \"\"",
"def test_missing_description(self):\n self.check_validation_error(\"description\\n field required\", name=\"Name\")",
"def test_short_string_zero_length(self):\n self.failUnlessEqual(self.callFunc('encode_shortstr', ''), '\\x00', '0 length short string encoding FAILED...')",
"def test_initial_length_equals_zero(self):\r\n msg_list = messages.MessageList()\r\n self.assertEqual(msg_list.length(), 0)",
"def validateDescription(description):\n \n if not(description) or len(description.split()) < 5:\n return \"You must supply a description of at least 5 words.\"",
"def _check_description_count(self):\n\n for rec in self:\n if rec.description and len(rec.description)>50:\n raise except_orm(_('Warning!'),\n _(\"Description Lenght must be less than or equal to 50. \"))",
"def test_handles_empty_string(self):\n result = encode_run_length(\"\")\n self.assertEqual(result, \"\")",
"def testDescription(self):\n place = Place()\n self.assertTrue(hasattr(place, \"description\"))\n self.assertEqual(place.description, \"\")",
"def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at least three words\n assert len(words) >= 3\n # the first letter should be capitalized\n assert description[0].isupper()\n # the description should end with a period\n assert description.endswith(\".\")\n # the description should not have two periods at the end\n assert not description.endswith(\"..\")\n # the last letter of the first word should be 's'\n assert words[0][-1] == \"s\"\n # enforce set of allowed characters. Must be ascii printable, no pipes (|)\n assert \"|\" not in description\n assert set(description).issubset(set(string.printable))",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'desc')\n )\n\n self.assertEqual(\n [],\n self.node.desc\n )",
"def test_long_string_zero_length(self):\n self.failUnlessEqual(self.callFunc('encode_longstr', ''), '\\x00\\x00\\x00\\x00', '0 length long string encoding FAILED...')",
"def is_empty(self):\n if self.length == 0:\n return True\n else:\n return False",
"def check_empty_desc_file(out):\n return MISSING_RESOURCE in out.lower()",
"def test_long_description(question):\n assert \"description\" in question[\"instance\"]\n assert \"longDescription\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n longDescription = question[\"instance\"][\"longDescription\"]\n # there shouldn't be whitespace at the beginning or end\n assert longDescription.strip() == longDescription\n words = longDescription.split()\n # we should have at least five words\n assert len(words) >= 5\n # the first letter should be capitalized\n assert longDescription[0].isupper()\n # long description should end with a period\n assert longDescription.endswith(\".\")\n # long description should not have two periods at the end\n assert not longDescription.endswith(\"..\")\n # description should not be the same as long description\n assert longDescription != description",
"def test_len_when_empty(self):\n l_list = DoubleLinkedList()\n self.assertEqual(l_list.len(), 0)",
"def test_heads_len_nonzero(repository: Repository) -> None:\n assert 1 == len(repository.heads)",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.cd, 'desc')\n )\n\n self.assertEqual(\n [],\n self.cc.desc\n )",
"def test_has_correct_length(self) -> None:\n assert len(list(ccc.MessageDataset())) == 138737",
"def test_empty(self):\n pass",
"def test_len(self):\n self.assertEqual(len(self.tester), 27)",
"def test_len(self):\n self.assertEqual(len(self.tester), 27)",
"def test_empty_string(self):\n self.assertTrue(all_unique_chars(\"\"))",
"def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False",
"def testDesc(self):\n\n self.assertTrue(\n hasattr(self.cc, 'desc')\n )\n\n self.assertEqual(\n [],\n self.cc.desc\n )",
"def test_empty_string(self):\n self.assertTrue(all_unique_chars_no_set(\"\"))",
"def test_minlength():\n assert has_min_length(None, 8) is None\n assert has_min_length('abcd1234', 8) is None\n assert has_min_length('a', 8)",
"def test_invalid_general_collateral_missing_description():\n collateral = copy.deepcopy(GENERAL_COLLATERAL)\n del collateral['description']\n\n is_valid, errors = validate(collateral, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid"
] | [
"0.6839186",
"0.6832089",
"0.6650879",
"0.66502285",
"0.6583756",
"0.65266544",
"0.6453843",
"0.6399938",
"0.63959414",
"0.63910407",
"0.63910407",
"0.63910407",
"0.63910407",
"0.63842446",
"0.63687706",
"0.636613",
"0.63494056",
"0.6325557",
"0.6310538",
"0.63036925",
"0.6303357",
"0.6289826",
"0.62874365",
"0.62874365",
"0.62842816",
"0.6279541",
"0.6259554",
"0.6254016",
"0.6253595",
"0.6248791"
] | 0.7342262 | 0 |
Wait for clone process to finish | def wait_for_clone(repo, wait_for_ready, http_exc):
start_time = time.time()
while time.time() - start_time < wait_for_ready:
repo.wipe_data()
try:
if repo.is_cloned:
return
except HTTPRequestError:
_mod_log().debug('Failed to get status of the repository %s', repo.rid)
raise SAPCliError(f'Waiting for the repository to be in READY state timed out\n{http_exc}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def wait(self):\n self.Popen.wait()",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def wait_finish(self):\r\n self.proc.join()",
"def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output",
"def do_wait(self):\n pass",
"def wait():\n pass",
"async def wait_until_done(self) -> None:\n ...",
"def wait_progress(self):\n pass",
"def wait_progress(self):\n pass",
"def wait(self):\n self.mainloop().wait()",
"def clone(self):\n out, err, code = self.command( [\"git\", \"clone\", self.repo] )\n\n # find the directory into which the\n self.directory = self.path\n for path in os.listdir(self.path):\n self.directory = os.path.join(self.path,path)\n break",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def waitUntilSuccess():",
"def wait(self):\r\n self.jobs.join()",
"def wait_complete(self):\n self.join()",
"def check_wait_for_copy_complete(jobs):\n for j in jobs:\n stdout, stderr = j.communicate()\n\n if j.returncode != 0:\n output = stdout or stderr or ''\n error = 'COPY FAILED with {0}: {1}'.format(j.returncode, output.strip())\n raise RuntimeError(error)",
"def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()",
"def clone_all_repo(self):\n repo_path = self.cfg['paths']['repo']\n since = datetime.datetime.strptime(self.cfg['filters']['since'], self.cfg['filters']['date_format'])\n threads = []\n # INITIALIZE ALL REPOS\n for repo in self.cfg[\"repos\"]:\n if (not self.cfg['replace_existing_repo']) and (os.path.exists(repo_path + repo['name'])):\n print(self.repo_exist_msg.format(repo_path + repo['name']))\n else:\n if os.path.exists(repo_path + repo['name']):\n # delete directory and contents\n shutil.rmtree(repo_path + repo['name'])\n # creat directory\n os.makedirs(repo_path + repo['name'])\n thread = threading.Thread(target=self.clone_repo, args=(repo_path, since, repo,),\n name=f\"task_{repo['name']}\")\n thread.start()\n threads.append(thread)\n\n # Wait all finished cloning\n for t in threads:\n t.join()\n print(\"Done cloning!\")",
"def clone_experiment(self, id):\n archive_name = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"backup\", str(id)+\".zip\")\n cmd = [\"rclone\", \"copy\", archive_name, \"Team_BUILD:/backup\"]\n retval = self.um.run_process(cmd)\n return \"Process started\"",
"def test_clone_system(self):\n pass",
"def wait():\n time.sleep(1)",
"def test_clone_scenario(self):\n pass",
"def finish(self):\r\n self.start_finish()\r\n self.wait_finish()",
"def collect_finish(self, thread):\n index = thread.index + 1\n self.packaging_wrapper = thread.packaging_wrapper\n self.total_copy_amount = self.packaging_wrapper.copy_files_count\n self.thread_finish(thread)\n if index == len(self.packaging_wrapper.nodes):\n self.packaging_wrapper.modify_nodes_path()\n self.copy_process()",
"def run(self):\n\n # Only create GUI ONCE in callback, so that it will only load when the plugin is started\n if self.first_start == True:\n self.first_start = False\n self.dlg = TMDDialog()\n self.dlg.setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False)\n\n self.dlg.show()\n\n self.git_thread = CloneThread() # This is the thread object\n # Connect the signal from the thread to the finished method\n self.git_thread.signal.connect(self.finished)\n self.git_thread.start()"
] | [
"0.65956825",
"0.65956825",
"0.65956825",
"0.65956825",
"0.6524684",
"0.6290063",
"0.6290063",
"0.6060584",
"0.6007381",
"0.59562814",
"0.59526855",
"0.59149146",
"0.5834006",
"0.5834006",
"0.58271825",
"0.5822418",
"0.57953876",
"0.57722795",
"0.5770641",
"0.5743308",
"0.57270044",
"0.5706751",
"0.5659775",
"0.5653087",
"0.5639585",
"0.5608359",
"0.5575431",
"0.55745924",
"0.5570931",
"0.5565703"
] | 0.6913001 | 0 |
Checks out the given branch in the given repository on the given system | def checkout(connection, branch, rid=None, repo=None):
if repo is None:
repo = Repository(connection, rid)
return repo.checkout(branch) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gitCheckoutBranch(self, path, branch):\r\n\r\n with workInDirectory(path):\r\n fetch_cmd = [\"git\", \"fetch\"]\r\n if self.verbose:\r\n print(\"Runing Command : {}\".format(\" \".join(fetch_cmd)))\r\n\r\n SubProcessUtility.runCommand(fetch_cmd)\r\n\r\n checkout_branch_command = [\"git\", \"checkout\", branch]\r\n if self.verbose:\r\n print(\"Running Command : {}\".format(\" \".join(checkout_branch_command)))\r\n SubProcessUtility.runCommand(checkout_branch_command)",
"def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()",
"def git_dir_checkout_branch(c, org_name, repo_name, remote, branch):\n print('Fetching updates from Git repository')\n c.run('git remote add {remote} [email protected]:{org_name}/{repo_name}.git'.format(remote=remote, org_name=org_name, repo_name=repo_name),\n warn=True)\n c.run('git fetch --all')\n\n print('Checking out {}/{}'.format(remote, branch))\n try:\n c.run('git checkout {}/{}'.format(remote, branch))\n except Failure:\n # probably branch is tag name\n print('Checking out failed. Assuming this is a tag, attempting to checkout without stating remote')\n c.run('git checkout {}'.format(branch))",
"def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)",
"def check_branch(subcommand, branch):\n if subcommand != \"checkout\":\n return\n # first make sure actual branch name was given\n if branch is None:\n return \"Branch name to checkout must be supplied with '-b' option\"\n # next check that the local repo is clean\n cmd = [\"git\", \"status\", \"--untracked-files=no\", \"--porcelain\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, universal_newlines=True)\n if p.stdout.strip():\n return \"Need to have clean working tree to checkout!\\n\\n\" + p.stdout\n # next check that the branch name doesn't already exist\n cmd = [\"git\", \"show-ref\", \"--verify\", \"--quiet\", \"refs/heads/\" + branch]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if not p.returncode:\n return f\"Branch {branch!r} already exists\"",
"def checkout_branch(self, branchname, current_path):\n p = Popen(\n [\"git\", \"checkout\", branchname],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n return {\"code\": p.returncode, \"message\": my_output.decode(\"utf-8\")}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git checkout \" + branchname,\n \"message\": my_error.decode(\"utf-8\"),\n }",
"def git_checkout_branch(name):\n\n if subprocess.call([\"git\", \"diff\", \"--quiet\", \"HEAD\"]) != 0:\n raise Exception(\"Dirty working tree; not checking out %s\" % name)\n\n if subprocess.call([\"git\", \"checkout\", name]) != 0:\n raise Exception(\"Could not checkout %s\" % name)",
"def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' % (app, branch), color='green')",
"def checkout_new_branch(self, branchname, current_path):\n p = Popen(\n [\"git\", \"checkout\", \"-b\", branchname],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n return {\"code\": p.returncode, \"message\": my_output.decode(\"utf-8\")}\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git checkout \" + \"-b\" + branchname,\n \"message\": my_error.decode(\"utf-8\"),\n }",
"def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])",
"def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)",
"def checkout(revision):\n subprocess.run(\n ['git', 'checkout', revision],\n check=True\n )",
"def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))",
"def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))",
"def checkout_nightly_version(branch, spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"checkout\", \"-b\", branch, nightly_version]\n p = subprocess.run(cmd, check=True)",
"def __init__(self, git_repo_path: Path, git_repo_branch: str) -> None:\n self._repo: git.Repo = git.Repo(git_repo_path)\n self._branch: str = git_repo_branch\n if self._repo.head.ref.name != self._branch:\n for branch in self._repo.branches:\n if branch.name == self._branch:\n branch.checkout()\n break\n else:\n raise ValueError(\n f\"Branch {self._branch} doesn't exist in {self._repo.working_dir} repo\"\n )",
"def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)",
"def checkGit(directory):",
"def main(branch):\n try:\n # Ensure that we're in a git repository. This command is silent unless\n # you're not actually in a git repository, in which case, you receive a\n # \"Not a git repository\" error message.\n output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')\n sys.stdout.write(output)\n except subprocess.CalledProcessError:\n # Bail if we're not in a git repository.\n return\n\n # This behavior ensures a better user experience for those that aren't\n # intimately familiar with git.\n ensure_remote_branch_is_tracked(branch)\n\n # Switch to the specified branch and update it.\n subprocess.check_call(['git', 'checkout', '--quiet', branch])\n\n # Pulling is always safe here, because we never commit to this branch.\n subprocess.check_call(['git', 'pull', '--quiet'])\n\n # Checkout the top commit in the branch, effectively going \"untracked.\"\n subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])\n\n # Clean up the repository of Python cruft. Because we've just switched\n # branches and compiled Python files should not be version controlled,\n # there are likely leftover compiled Python files sitting on disk which may\n # confuse some tools, such as sqlalchemy-migrate.\n subprocess.check_call(['find', '.', '-name', '\"*.pyc\"', '-delete'])\n\n # For the sake of user experience, give some familiar output.\n print('Your branch is up to date with branch \\'origin/%s\\'.' % branch)",
"def test_checkout_repository(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.contribtool.checkout_repository(TOOLNAME,username,userpass)",
"def checkout2(repo, branch, overwrite=True):\n cmd = 'git checkout %s' % (branch,)\n out = repo.issue(cmd, error='return')\n if overwrite and out is not None:\n repo._handle_overwrite_error(out)\n repo._handle_abort_merge_rebase(out)\n # Retry\n repo.issue(cmd)",
"def test_branch_commit_get(repository: Repository) -> None:\n branch = repository.head\n assert repository.heads[branch.name] == branch.commit",
"def test_branch_can_be_copied():\n\n setup_org()\n setup_repo()\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/master\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n responses.add(responses.POST, \"https://api.github.com/repos/my-org/my-repo/git/refs\",\n body=my_new_ref,\n content_type='text/json',\n status=201)\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/main\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n token = '__dummy__'\n org = \"my-org\"\n client = GithubRestClient(token)\n new_branch_name = \"main\"\n\n repo = get_repository(client, org, \"my-repo\")\n new_branch = copy_branch(repo, repo.default_branch, new_branch_name)\n assert None is not new_branch",
"def code_checkout(cesm_repo, coderoot, tag):\n\n sandbox = os.path.split(coderoot)[-1]\n\n if os.path.exists(coderoot):\n print('Check for right tag: '+coderoot)\n p = Popen('git status', shell=True, cwd=coderoot, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n stdout = stdout.decode('UTF-8')\n stderr = stderr.decode('UTF-8')\n print(stdout)\n print(stderr)\n if tag not in stdout.split('\\n')[0]:\n raise ValueError('tag does not match')\n\n else:\n stat = check_call(['mkdir', '-p', coderoot])\n if stat != 0: sys.exit(1)\n\n # clone the repo\n p = Popen('git clone '+cesm_repo+' '+sandbox, shell=True,\n cwd=coderoot+'/..', stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n if stdout:\n print(stdout)\n if stderr:\n print(stderr)\n if p.returncode != 0:\n raise Exception('git error')\n\n # check out the right tag\n p = Popen('git checkout %s'%tag, shell=True, cwd=coderoot)\n stdout, stderr = p.communicate()\n if stdout:\n print(stdout)\n if stderr:\n print(stderr)\n if p.returncode != 0:\n raise Exception('git error')\n\n # check out externals\n p = Popen('./manage_externals/checkout_externals -v', shell=True, cwd=coderoot)\n stdout, stderr = p.communicate()\n if stdout:\n print(stdout)\n if stderr:\n print(stderr)\n if p.returncode != 0:\n raise Exception('git error')",
"def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)",
"def checkout(location, rev):\n ensure_dir(location)\n\n logger = utils.get_logger()\n\n with utils.cd(location):\n logger.debug(\n 'Checking out rev: {} at location: {}'.format(rev, location))\n cmd = '/usr/bin/git checkout --force --quiet {}'.format(rev)\n subprocess.check_call(cmd, shell=True)",
"def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)",
"def test_branch_name_get(repository: Repository) -> None:\n branch = repository.branch(repository.head.name)\n assert repository.head.name == branch.name",
"def git_branch():\n result, output = popen('git branch', False, False)\n branch = None\n for line in output:\n if line.startswith('*'):\n branch = line.split('*')[-1].strip()\n break\n return branch",
"def gh_pages_branch(repo):\n repo.git.branch('gh-pages')\n repo.git.checkout('gh-pages')"
] | [
"0.7553268",
"0.72212356",
"0.7122081",
"0.71060395",
"0.7037166",
"0.69275075",
"0.6837741",
"0.6769412",
"0.6685118",
"0.6674647",
"0.6668449",
"0.65646195",
"0.6517232",
"0.6484005",
"0.64520335",
"0.6417905",
"0.6379777",
"0.6373071",
"0.6324217",
"0.6288217",
"0.6272058",
"0.6241507",
"0.6188755",
"0.6171018",
"0.61673355",
"0.6167288",
"0.61582696",
"0.61532843",
"0.60976285",
"0.6083349"
] | 0.728686 | 1 |
Pulls the given repository on the given system | def pull(connection, rid=None, repo=None):
if repo is None:
repo = Repository(connection, rid)
return repo.pull() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pull1(repo, **kwargs):\n ret = do_pull(repo, \"topology.virl\")\n if not ret:\n exit(1)",
"def pull(self):\n origin = self.git_repo.remotes.origin\n origin.pull()",
"def pull(self, remote, branch, *args):\n return self.cmd('pull', remote, branch, *args)",
"def pull(reference, provider):\n try:\n repository = Repository.objects.get(remote_id=int(reference), provider=provider)\n except ValueError:\n owner, name = reference.rsplit(\"/\", 1)\n repository = Repository.objects.get(owner=owner, name=name, provider=provider)\n\n log.info(\"repos.pull\", repo=repository)\n\n with tempfile.TemporaryDirectory() as repo_dir:\n try:\n repo_path = download_repository(repository, repo_dir)\n except (MissingFilesError, RepositoryNotFoundError) as err:\n log.info(\"repos.pull.git_error\", repo=repository, error=err)\n return\n\n repo_analyzers.run_all(repository, repo_path)\n runner.run_checks_and_save_results(AUDITING_CHECKS, repository, repo_path)",
"def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))",
"def pull(self):\n run('git', 'pull', 'origin', 'master')",
"async def pull(ctx):\n author = ctx.author\n if author.id in authorities:\n out = subprocess.Popen(['git', 'pull'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n stdout,stderr = out.communicate()\n stdout = stdout.decode(\"utf-8\")\n msg = '**Output: **{0}\\n'.format(stdout)\n if stderr:\n stderr = stderr.decode(\"utf-8\")\n msg += '**Error: **\\n{0}'.format(stderr)\n await ctx.send(msg)\n else:\n await ctx.send(\"You can't tell me what to do!\")",
"def pull(args):\n do_all_projects(args + [\"pull\"])",
"def pull(repo, **kwargs):\n ret = do_pull(repo, \"topology.yaml\")\n if not ret:\n ret = do_pull(repo, \"topology.virl\")\n if not ret:\n exit(1)",
"def call_git_pull():\n print(\"This will pull the remote repo and overwrite the local notes\")\n call([\"git\", \"pull\"])",
"def pull():\n _with_deploy_env(['git pull'])",
"def pull2(repo, overwrite=True):\n cmd = 'git pull --no-edit'\n out = repo.issue(cmd, error='return')\n if overwrite and out is not None:\n repo._handle_overwrite_error(out)\n # Retry\n repo.issue(cmd)",
"def pull(self, data):\n required = {'token', 'source'}\n api.validate(data, required)\n token = data['token']\n repo = data['source']\n self.credentials_module.authorize(token)\n result = self.docker_module.pull_image(repo)\n # credentials_module.add_image(token, result['image_id'])\n return result",
"def do_pull(self, arg):\n checkLocalGitLocation()\n teamorindividual = input(\"Is this a team or individual (t or i):\")\n if teamorindividual == 'i':\n for student in returnAllStudents():\n os.system(\"cd %s && git pull https://github.ccs.neu.edu/%s && cd ..\" %\n (localgitlocation, 'cs5500/' + student))\n else:\n for team in returnAllTeams():\n os.system(\"cd %s && git pull https://github.ccs.neu.edu/%s/%s && cd ..\" %\n (localgitlocation + '/' + team, githuborg, team))",
"def fpull(var, wrapper, message):\n _git_pull(wrapper)",
"def pull(self, repo, tag):\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)",
"def pull(self, verbose=True):\n fetch_cmd = [\"git\", \"fetch\"]\n if not verbose:\n fetch_cmd.append(\"-q\")\n subprocess.call(fetch_cmd, cwd=self.path)\n checkout_cmd = [\"git\", \"checkout\", \"origin/master\", \"-B\", \"master\"]\n if not verbose:\n checkout_cmd.append(\"-q\")\n return subprocess.call(checkout_cmd, cwd=self.path)",
"def fetch_repo(data):\n repo = Repository.objects.get(**data)\n\n # create a temporary directory\n tmp_dir = util.tmp_dir('github')\n\n # log\n log.info(\"Fetching repo %s to %s\", repo.full_name, tmp_dir)\n\n # clone the repository to the directory\n git.Repo.clone_from(repo.git_url, tmp_dir)\n\n # add the repo path to the database\n repo.local_path = tmp_dir\n repo.save()\n\n # tell workers the repo is available\n publish('github.repo_available', data)",
"def git_pull():\n\n puts(yellow(\"Pull master from GitHub\"))\n with cd(env.source_dir):\n run('git reset --hard HEAD')\n run('git pull')",
"def test_pull_explicit_remote(self, repo):\n dest = os.path.join(self._tmpdir, 'cloned_repo')\n clone(['arg0', repo.path, dest])\n cloned = ComponentTestGitRepository(dest)\n self._check_repo_state(cloned, 'master', ['master'])\n eq_(pull(['argv0', 'origin']), 0)\n assert len(repo.get_commits()) == 1",
"def pulls_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"pulls\", access_token)",
"def update():\n call('git -C ~/norminette+ pull', shell=True)",
"def odoo_repos_pull(version=None, fast=False):\n if version and isinstance(version, list):\n for v in version:\n odoo_repos_pull(v, fast)\n fast = True # only pull internal and paas once\n return\n failed_checkouts = []\n if version:\n failed_checkouts = odoo_repos_checkout([version])\n repos = VERSIONED_REPOS[:]\n if not fast:\n repos += SINGLE_VERSION_REPOS\n for fc in failed_checkouts:\n repos.remove(fc)\n\n def pull(*args, **kwargs):\n kwargs[\"remote\"].pull()\n\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(f\"Pulling {repo_name}\")\n _try_for_all_remotes(repo, pull, raise_on_exception=False)",
"def test_pull_default_remote(self, repo):\n dest = os.path.join(self._tmpdir, 'cloned_repo')\n clone(['arg0', repo.path, dest])\n cloned = ComponentTestGitRepository(dest)\n self._check_repo_state(cloned, 'master', ['master'])\n eq_(pull(['argv0']), 0)\n assert len(repo.get_commits()) == 1",
"def fetch_repo(root, repo, url, destination_temp):\n\n print \"Fetching %s from %s\" % (repo, url)\n\n if root.exists('repos/%s' % repo):\n print \"Repo %s exists, issuing a git pull...\" % repo\n call('cd repos/%s; git pull' % repo, shell=True)\n else:\n print \"Repo %s does not exist, issuing a git clone...\" % repo\n\n # explicitely create dir as implicit creation fails on server\n root.makedir('%s/%s' % (destination_temp, repo))\n call('cd repos; git clone %s %s' % (url, repo), shell=True)\n # call('git clone %s %s/%s > /dev/null 2>&1' % (repo['url'], source, repo['id']), shell=True)",
"def pull(release):\n image = f\"breqwatr/rsyslog:{release}\"\n ecr.pull(image)",
"def infocalypse_pull(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n if opts['hash']:\n # Use FMS to lookup the uri from the repo hash.\n if opts['uri'] != '':\n ui_.warn(\"Ignoring --uri because --hash is set!\\n\")\n if len(opts['hash']) != 1:\n raise util.Abort(\"Only one --hash value is allowed.\")\n params['FMSREAD_HASH'] = opts['hash'][0]\n params['FMSREAD_ONLYTRUSTED'] = bool(opts['onlytrusted'])\n request_uri = get_uri_from_hash(ui_, repo, params, stored_cfg)\n else:\n request_uri = opts['uri']\n\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --uri option.\\n\")\n return\n\n params['REQUEST_URI'] = request_uri\n # Hmmmm... can't really implement rev.\n execute_pull(ui_, repo, params, stored_cfg)",
"def git_pull(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('git pull')\n run('git submodule update')\n collectstatic(where)\n restart(where)",
"def pull_repo(project_path, webhook):\n try:\n final_output = subprocess.check_output(\n # TODO: Rebase without the need to specify the branch\n f'git -C {project_path} pull --rebase origin main',\n stdin=None,\n stderr=None,\n shell=True,\n timeout=Global.GIT_TIMEOUT,\n )\n print(final_output)\n return final_output\n except subprocess.TimeoutExpired:\n final_output = 'Error: Harvey timed out during git pull operation.'\n print(final_output)\n Utils.kill(final_output, webhook)\n except subprocess.CalledProcessError:\n final_output = f'\\nError: Harvey could not pull {Global.repo_full_name(webhook)}.'\n print(final_output)\n Utils.kill(final_output, webhook)",
"def _pull(self) -> None:\n raise NotImplementedError() # pragma: no cover"
] | [
"0.73290116",
"0.717763",
"0.7176306",
"0.7152554",
"0.7016408",
"0.69733423",
"0.69663095",
"0.69643307",
"0.69623786",
"0.69125956",
"0.6869978",
"0.67377836",
"0.66976327",
"0.6685887",
"0.6681624",
"0.666945",
"0.66548884",
"0.6588705",
"0.6575877",
"0.65757114",
"0.65705335",
"0.6512172",
"0.6476095",
"0.6452166",
"0.64465636",
"0.64377964",
"0.63585985",
"0.6344849",
"0.63339555",
"0.6316692"
] | 0.7484018 | 0 |
Get the statistics for all builders. | def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:
print('getting list of builders...')
stats = BuildStats()
for builder in requests.get(BASE_URL).json().keys():
# TODO: maybe filter the builds to the ones we care about
stats += get_builder_stats(builder, time_window )
return stats | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:\n print('Gettings builds for {}...'.format(builder))\n # TODO: can we limit the data we're requesting?\n url = '{}/{}/builds/_all'.format(BASE_URL, builder)\n stats = BuildStats()\n for build, results in requests.get(url).json().items(): \n start_time = datetime.datetime.fromtimestamp(float(results['times'][0]))\n if start_time < time_window:\n continue\n successful = results['text'] == ['build', 'successful']\n stats.add(successful)\n return stats",
"def getAllBuilders(self):\n names = self.status.getBuilderNames(categories=self.categories)\n builders = [self.status.getBuilder(n) for n in names]\n return builders",
"def statistics(self):\n return self.get_statistics()",
"def get_stats(self):\n return self.manager.get_stats(self)",
"def get_statistics(self):\n with self._conn.begin():\n stats = dict(\n self._conn.execute(\n \"SELECT * FROM get_statistics()\"\n ).first().items()\n )\n stats['builds_last_hour'] = {\n row.abi_tag: row.builds\n for row in self._conn.execute(\n \"SELECT * FROM get_builds_last_hour()\"\n )\n }\n return stats",
"def statistics(self):\n return self._queue.statistics(self._name)",
"def stats(self):\n pass",
"def statistics(self):\n return self._statistics",
"def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))",
"def get_stats(self):\n return self.stats",
"def getStats(self):\n\n raise NotImplementedError",
"def get_statistics(self):\n return self.results",
"def get_all_stats():\n\n return get_component(CachingPackage.COMPONENT_NAME).get_all_stats()",
"def stats(self):\n return self._stats",
"def stats(self):",
"def getAllMetrics(self):\n result = self.getReportMetrics()\n result.update(self.getOptimizationMetrics())\n return result",
"def stats(self):\n url = client.build_url('stats')\n _, res_json = client.get(url, headers=self.headers)\n\n return res_json",
"def get_stats():\n datasets = [\n (\"../data_processing/data/page2answer_single_abstractive_summ.json\", \"p2a-single-abs\"),\n (\"../data_processing/data/page2answer_single_extractive_summ.json\", \"p2a-single-ext\"),\n (\"../data_processing/data/section2answer_multi_abstractive_summ.json\", \"s2a-multi-abs\"),\n (\"../data_processing/data/page2answer_multi_extractive_summ.json\", \"p2a-multi-ext\"),\n (\"../data_processing/data/section2answer_single_abstractive_summ.json\", \"s2a-single-abs\"),\n (\"../data_processing/data/section2answer_single_extractive_summ.json\", \"s2a-single-ext\"),\n (\"../data_processing/data/section2answer_multi_extractive_summ.json\", \"s2a-multi-ext\"),\n (\"../data_processing/data/question_driven_answer_summarization_primary_dataset.json\", \"complete_dataset\"),\n ]\n\n stats = SummarizationDataStats()\n for dataset in datasets:\n print(dataset[1])\n stats.load_data(dataset[0], dataset[1])\n stats.iterate_data()",
"def IterBuildStats(\n self) -> Generator[Tuple[str, str, BaseBuildStats], None, None]:\n return self.IterToValueType(BuildStats)",
"def stats(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/stats\" %\n (self.sessionid, self.name))\n return self.server.json_body(resp)",
"def stats(self):\r\n return {}",
"def Load_AllCourseBuildersStatistics(self, data, suffix=''):\n\t\tself.temp[:]=[]\n\t\tfor x in xrange(len(self.active_tournaments)):\n\t\t\tself.temp.append(self.active_tournaments[x])\n\n\t\treturn self.temp",
"def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)",
"def query_builders():\n return allthethings.list_builders()",
"def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)",
"def list_builders(self) -> List[str]:\n return sorted(_iter_builder_names(self._ns2data_dir))",
"def get_stats(self):\n stats = \"\\n\\nBOT STATS: This bot currently knowns \"\n if self.intents:\n categoryqty = 0\n patternqty = 0\n responseqty = 0\n\n for intent in self.intents['intents']:\n categoryqty += 1\n patternqty += len(intent['patterns'])\n responseqty += len(intent['responses'])\n \n stats += str(categoryqty)\n stats += \" Categories with in total \"\n stats += str(patternqty)\n stats += \" Input Patterns and \"\n stats += str(responseqty)\n stats += \" possible Responses\"\n stats += \"\\n\\n\"\n\n return stats",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def get_stats(self):\n # Retrieve all trainers\n session = self._db_session()\n trainers = []\n regular_trainer_list = session.query(RegularTrainer).filter(\n RegularTrainer.type == \"Regular Trainer\").all()\n for trainer in regular_trainer_list:\n trainers.append(trainer)\n gym_leader_list = session.query(GymLeader).filter(\n GymLeader.type == \"Gym Leader\").all()\n for trainer in gym_leader_list:\n trainers.append(trainer)\n session.close()\n\n num_total_trainers = 0\n num_gym_leaders = 0\n num_regular_trainers = 0\n num_trainers_with_partner = 0\n num_trainer_per_location = {}\n\n for trainer in trainers:\n num_total_trainers += 1\n if trainer.type == 'Regular Trainer':\n num_regular_trainers += 1\n if trainer.have_partner is 1:\n num_trainers_with_partner += 1\n else:\n num_gym_leaders += 1\n\n for trainer in trainers:\n if trainer.location in num_trainer_per_location:\n num_trainer_per_location[trainer.location] += 1\n else:\n num_trainer_per_location.update({trainer.location: 1})\n\n stats_output = TrainerStats(\n num_total_trainers, num_gym_leaders, num_regular_trainers,\n num_trainers_with_partner, num_trainer_per_location)\n\n return stats_output",
"def get_stats(self):\n if len(self.customer_agents) > 0:\n waiting = avg([customer.get_waiting_time() for customer in self.customer_agents.values()])\n total = avg(\n [customer.total_time() for customer in self.customer_agents.values() if customer.total_time()])\n else:\n waiting, total = 0, 0\n\n return {\n \"waiting\": \"{0:.2f}\".format(waiting),\n \"totaltime\": \"{0:.2f}\".format(total),\n \"finished\": self.is_simulation_finished(),\n \"is_running\": self.simulation_running,\n }"
] | [
"0.70549256",
"0.6995379",
"0.66598487",
"0.6448039",
"0.6427277",
"0.6422584",
"0.6358498",
"0.63564634",
"0.6297995",
"0.62931466",
"0.62858886",
"0.6255862",
"0.6241165",
"0.6240544",
"0.61738175",
"0.61579835",
"0.61428285",
"0.6082591",
"0.60764337",
"0.60596114",
"0.6024094",
"0.60085577",
"0.60025907",
"0.59758246",
"0.5952665",
"0.59494984",
"0.5935941",
"0.58854914",
"0.58640414",
"0.5817364"
] | 0.7632881 | 0 |
Get the statistics for one builder. | def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:
print('Gettings builds for {}...'.format(builder))
# TODO: can we limit the data we're requesting?
url = '{}/{}/builds/_all'.format(BASE_URL, builder)
stats = BuildStats()
for build, results in requests.get(url).json().items():
start_time = datetime.datetime.fromtimestamp(float(results['times'][0]))
if start_time < time_window:
continue
successful = results['text'] == ['build', 'successful']
stats.add(successful)
return stats | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:\n print('getting list of builders...')\n stats = BuildStats()\n for builder in requests.get(BASE_URL).json().keys():\n # TODO: maybe filter the builds to the ones we care about\n stats += get_builder_stats(builder, time_window )\n return stats",
"def statistics(self):\n return self.get_statistics()",
"def get_stats(self):\n return self.manager.get_stats(self)",
"def get_stats(self):\n return self.stats",
"def get_statistics(self):\n with self._conn.begin():\n stats = dict(\n self._conn.execute(\n \"SELECT * FROM get_statistics()\"\n ).first().items()\n )\n stats['builds_last_hour'] = {\n row.abi_tag: row.builds\n for row in self._conn.execute(\n \"SELECT * FROM get_builds_last_hour()\"\n )\n }\n return stats",
"def statistics(self):\n return self._statistics",
"def statistics(self):\n return self._queue.statistics(self._name)",
"def stats(self):\n return self._stats",
"def getStats(self):\n\n raise NotImplementedError",
"def get_statistics(self):\n\t\treturn Job(SDK.PrlSrv_GetStatistics(self.handle)[0])",
"def get_statistics(self):\n return self.results",
"def stats(self) -> Dict:\n return self._stats",
"def stats(self):\n pass",
"def stats(self):\n url = client.build_url('stats')\n _, res_json = client.get(url, headers=self.headers)\n\n return res_json",
"def stats(self):",
"def get_stats(self):\n return scales.getStats()[self.stats_name]",
"def stats(self):\r\n return {}",
"def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)",
"def stats(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/stats\" %\n (self.sessionid, self.name))\n return self.server.json_body(resp)",
"def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data",
"def IterBuildStats(\n self) -> Generator[Tuple[str, str, BaseBuildStats], None, None]:\n return self.IterToValueType(BuildStats)",
"def get_statistics(self):\n\t\treturn Job(SDK.PrlVm_GetStatistics(self.handle)[0])",
"def get_stats():\n datasets = [\n (\"../data_processing/data/page2answer_single_abstractive_summ.json\", \"p2a-single-abs\"),\n (\"../data_processing/data/page2answer_single_extractive_summ.json\", \"p2a-single-ext\"),\n (\"../data_processing/data/section2answer_multi_abstractive_summ.json\", \"s2a-multi-abs\"),\n (\"../data_processing/data/page2answer_multi_extractive_summ.json\", \"p2a-multi-ext\"),\n (\"../data_processing/data/section2answer_single_abstractive_summ.json\", \"s2a-single-abs\"),\n (\"../data_processing/data/section2answer_single_extractive_summ.json\", \"s2a-single-ext\"),\n (\"../data_processing/data/section2answer_multi_extractive_summ.json\", \"s2a-multi-ext\"),\n (\"../data_processing/data/question_driven_answer_summarization_primary_dataset.json\", \"complete_dataset\"),\n ]\n\n stats = SummarizationDataStats()\n for dataset in datasets:\n print(dataset[1])\n stats.load_data(dataset[0], dataset[1])\n stats.iterate_data()",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def getStats(self):\n if self.type != \"CREATURE\" and self.type != None:\n return\n self.stats = _xmlUrlToDict(serverString + \"/rest/creature/\" + self.id, float)",
"def get_stats(self):\n stats = \"\\n\\nBOT STATS: This bot currently knowns \"\n if self.intents:\n categoryqty = 0\n patternqty = 0\n responseqty = 0\n\n for intent in self.intents['intents']:\n categoryqty += 1\n patternqty += len(intent['patterns'])\n responseqty += len(intent['responses'])\n \n stats += str(categoryqty)\n stats += \" Categories with in total \"\n stats += str(patternqty)\n stats += \" Input Patterns and \"\n stats += str(responseqty)\n stats += \" possible Responses\"\n stats += \"\\n\\n\"\n\n return stats",
"def get_batch_stats(self, batch):\n\t\t\n\t\treturn self.batch_stats[batch]",
"def getAllMetrics(self):\n result = self.getReportMetrics()\n result.update(self.getOptimizationMetrics())\n return result",
"def statistics(self):\n return StatisticsCollection(self._statistics)",
"def stats():\n return jsonify(shorten.get_stats(get_db(), app.config['MINI_URL_BASE']))"
] | [
"0.7178298",
"0.6534593",
"0.64914143",
"0.6488856",
"0.63973767",
"0.63478684",
"0.6330162",
"0.62668484",
"0.6216657",
"0.61736965",
"0.615647",
"0.6031543",
"0.6031311",
"0.6029025",
"0.5993337",
"0.59911054",
"0.5989218",
"0.5979158",
"0.59560424",
"0.5946391",
"0.59271216",
"0.5918512",
"0.5908496",
"0.58747345",
"0.58656037",
"0.57746667",
"0.5773947",
"0.5717638",
"0.56982625",
"0.5668147"
] | 0.7503165 | 0 |
Create metric descriptors on Stackdriver. Recreating these with every call is fine. | def gcp_create_metric_descriptor(project_id: str):
client = monitoring_v3.MetricServiceClient()
project_name = client.project_path(project_id)
for desc_type, desc_desc in [
["buildbots_percent_failed", "Percentage of failed builds"],
["buildbots_builds_successful", "Number of successful builds in the last 24h."],
["buildbots_builds_failed", "Number of failed builds in the last 24h."],
["buildbots_builds_total", "Total number of builds in the last 24h."],
]:
descriptor = monitoring_v3.types.MetricDescriptor()
descriptor.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)
descriptor.metric_kind = (
monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
descriptor.value_type = (
monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)
descriptor.description = desc_desc
descriptor = client.create_metric_descriptor(project_name, descriptor)
print('Created {}.'.format(descriptor.name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recreate_metrics():\n all = monitor_client.list_metric_descriptors(\n project_path, filter_='metric.type=starts_with(\"custom.\")'\n )\n for a in all:\n if \"accumulator\" in str(a) or \"biquery\" in str(a):\n metric_name = monitor_client.metric_descriptor_path(\n settings.PROJECT_ID, a.type\n )\n\n try:\n monitor_client.delete_metric_descriptor(metric_name)\n except Exception as e:\n print(e)\n\n metric_descriptor = {\n \"type\": f\"custom.googleapis.com/{Monitoring.PING}\",\n \"labels\": [\n {\n \"key\": \"operation\",\n \"valueType\": \"STRING\",\n # \"description\": \"Performed operation name\"\n }\n ],\n \"metricKind\": \"GAUGE\",\n \"valueType\": \"DOUBLE\",\n \"unit\": \"items\",\n \"description\": \"Function performed in a loop with hard limit\",\n \"displayName\": \"Repeated Function Execution\",\n }\n\n return monitor_client.create_metric_descriptor(\n settings.PROJECT_ID, metric_descriptor\n )",
"def create_system_metrics(system):\n pass",
"def create(self,\n metric_type,\n metric_kind='GAUGE',\n value_type='DOUBLE',\n description='N/A'):\n descriptor = ga_metric.MetricDescriptor()\n if metric_type.startswith('custom.googleapis.com/'):\n descriptor.type = metric_type\n else:\n descriptor.type = 'custom.googleapis.com/%s' % metric_type\n descriptor.metric_kind = (getattr(ga_metric.MetricDescriptor.MetricKind,\n metric_kind))\n descriptor.value_type = (getattr(ga_metric.MetricDescriptor.ValueType,\n value_type))\n descriptor.description = description\n LOGGER.info(f'Creating metric descriptor \"{descriptor.type}\" ...')\n return self.client.create_metric_descriptor(\n name=self.project, metric_descriptor=descriptor)",
"def create_metric(self) -> EvalMetric:\n pass",
"def test_create_derived_metric(self):\n pass",
"def CreateNodeMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def test_create_metrics_dict(self):\n # binary tasks have 1 class at class definition.\n num_classes = 1\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' not in metrics_dict.keys()\n\n num_classes = 3\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' in metrics_dict.keys()\n assert 'iou_3' not in metrics_dict.keys()\n del metrics_dict",
"def metrics_group():",
"def init_metric_definitions():\n metric_definitions = []\n\n # add info to list in memory, one by one, following signature values\n metric_def_ID = 1\n metric_def_name = \"Recovery Time\"\n metric_def_info = \"Measures time taken by ONAP to restore a VNF\"\n metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n metric_def_ID = 2\n metric_def_name = \"Uptime Percentage\"\n metric_def_info = \"Measures ratio of uptime to reference time, not counting planned downtime\"\n metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n\n # write list to binary file\n write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)\n\n return metric_definitions",
"def get_descriptor(self, name, record, kind_map, default_kind):\n custom_type = self.name_to_type(name)\n descriptor = self.__custom_descriptors.get(custom_type, None)\n if descriptor is not None:\n return descriptor\n\n label_list = [{'key': 'MicroserviceSrc', 'valueType': 'STRING'},\n {'key': 'InstanceSrc', 'valueType': 'STRING'}],\n label_list.add_all([{'key': tag['key'], 'valueType': 'STRING'}\n for tag in record['values'][0]['tags']])\n if name == 'controller.invocations':\n self.hack_maybe_add_label('account', label_list)\n\n custom = {\n 'name': name,\n 'type': custom_type,\n 'labels': label_list,\n 'metricKind': kind_map.get(record['kind'], default_kind),\n 'valueType': 'DOUBLE',\n }\n\n self.logger.info('Creating %s', name)\n try:\n descriptor = self.__stackdriver.projects().metricDescriptors().create(\n name=self.__project_name, body=custom).execute()\n self.logger.info('Added %s', name)\n except HttpError as err:\n self.logger.error('CAUGHT: %s', err)\n descriptor = None\n\n self.__custom_descriptors['type'] = descriptor\n return descriptor",
"def set_metrics(self):",
"def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()",
"def CreatePodMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def _SnapMetrics(deadline):\n next_deadline = deadline + frequency_seconds\n callback = partial(_SnapMetrics, next_deadline)\n cls._timeouts[group_key] = IOLoop.current().add_timeout(next_deadline, callback)\n\n sample = meter.sample()\n sample_json = json.dumps(sample)\n new_metric = Metric.Create(group_key, machine_id, deadline, sample_json)\n with util.Barrier(_UploadSuccess, _UploadError) as b:\n retry.CallWithRetryAsync(retry_policy, new_metric.Update, client=client, callback=b.Callback())",
"def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []",
"def build_metrics_dict(node):\n\n # Initialize tensors\n n = 0\n n = _recv(n,node)\n keys = [[0 for j in range(8)] for i in range(n)] # max_seq_len for metric name is 8\n values = [0.0 for i in range(n)]\n higher_is_better = [0 for i in range(n)]\n\n # Read data\n keys = _recv(keys,node)\n values = _recv(values,node)\n higher_is_better = _recv(higher_is_better,node)\n\n # Reorganize output + decode dict keys\n orig_keys = [encode_string(key, string_to_int=False) for key in keys]\n values_dict = [{'value': float(v), 'higher_is_better': bool(higher_is_better[i])} for i, v in enumerate(values)]\n metrics = dict(zip(orig_keys,values_dict))\n num_instances = int(metrics.pop('num')['value'])\n\n result = None, metrics, num_instances\n \n return result",
"def GenerateForecastMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def defineMetricSpecs(self):\n metricSpecs = (\n MetricSpec(field=self.fieldToPredict, metric='multiStep',\n inferenceElement='multiStepBestPredictions',\n params={'errorMetric': 'aae', 'window': 1000, 'steps': self.steps}),\n MetricSpec(field=self.fieldToPredict, metric='trivial',\n inferenceElement='prediction',\n params={'errorMetric': 'aae', 'window': 1000, 'steps': self.steps}),\n MetricSpec(field=self.fieldToPredict, metric='multiStep',\n inferenceElement='multiStepBestPredictions',\n params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': self.steps}),\n MetricSpec(field=self.fieldToPredict, metric='trivial',\n inferenceElement='prediction',\n params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': self.steps})\n )\n return metricSpecs",
"def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }",
"def foreach_descriptor(self, func, **args):\n request = self.__stackdriver.projects().metricDescriptors().list(**args)\n count = 0\n while request:\n self.logger.info('Fetching metricDescriptors')\n response = request.execute()\n for elem in response.get('metricDescriptors', []):\n count += 1\n func(elem)\n request = self.__stackdriver.projects().metricDescriptors().list_next(\n request, response)\n return count",
"def add_many_descriptors(self, descriptors):",
"def compute_metrics(self):\n pass",
"def test_create_api_metrics(mocker, response, result):\n mocker.patch.object(demisto, 'results')\n mocker.patch('CommonServerPython.is_demisto_version_ge', return_value=True)\n mocker.patch('MicrosoftApiModule.is_demisto_version_ge', return_value=True)\n mocker.patch.object(demisto, 'callingContext', {'context': {'ExecutedCommands': [{'moduleBrand': 'msgraph'}]}})\n client = retry_on_rate_limit_client(True)\n client.create_api_metrics(response)\n\n metric_results = demisto.results.call_args_list[0][0][0]\n assert metric_results.get('Contents') == 'Metrics reported successfully.'\n assert metric_results.get('APIExecutionMetrics') == result",
"def Create(cls, group_key, machine_id, timestamp, payload):\n sort_key = util.CreateSortKeyPrefix(timestamp, randomness=False) + machine_id\n metric = Metric(group_key, sort_key)\n metric.machine_id = machine_id\n metric.timestamp = timestamp\n metric.payload = payload\n return metric",
"def build_metrics_counter_data(count_metrics):\n return [{'name': name, 'delta': delta} for name, delta in iteritems(count_metrics)]",
"def __init__(self, metrics, gt, pred):\n self.dict_metrics = self.compute_metrics(metrics, gt, pred)",
"def metric_descriptor(self, type_,\n metric_kind=MetricKind.METRIC_KIND_UNSPECIFIED,\n value_type=ValueType.VALUE_TYPE_UNSPECIFIED,\n labels=(), unit='', description='', display_name=''):\n return MetricDescriptor(\n self, type_,\n metric_kind=metric_kind,\n value_type=value_type,\n labels=labels,\n unit=unit,\n description=description,\n display_name=display_name,\n )",
"def _init_metrics(self, mp_type, namespace, unreachable=False):\n kind = ResourceKind.METRIC if namespace else ResourceKind.GLOBALMETRIC\n metrics_info = (\n self._METRICS_PROVIDER_INFO.get(mp_type, {})\n .get(namespace, {})\n .get(\"metrics\", [])\n )\n\n # We will collect metrics from all metrics providers of the correct type\n # and in the correct namespace in the metrics list.\n metrics = []\n\n # all metrics providers of the correct type in the correct namespace\n if unreachable:\n mps_list = self.unreachable_metrics_providers\n else:\n mps_list = self.metrics_providers\n mps = mps_list.get(mp_type, {}).get(namespace, {})\n\n # Mapping from metrics provider resource definition to its metrics.\n # Initially empty.\n metrics_for_mp = dict.fromkeys([mp for mp in mps], [])\n for metric_info in metrics_info:\n # check if metric has the correct reachability. Skip if not.\n mp_name = metric_info[\"mp_name\"]\n reachability_matches = True if mp_name in [mp.name for mp in mps] else False\n if reachability_matches:\n # Create and collect the metric\n metric_name = metric_info[\"name\"]\n mp_metric_name = metric_info.get(\"mp_metric_name\", None)\n metric = BaseMetricDefinition(\n metric_name,\n kind,\n namespace,\n metric_info[\"allowed_values\"],\n metric_info[\"min\"],\n metric_info[\"max\"],\n mp_name,\n mp_metric_name=mp_metric_name,\n )\n metrics.append(metric)\n\n # remember its metrics provider for performance reasons\n mps_w_correct_name = [mp for mp in mps if mp.name == mp_name]\n if len(mps_w_correct_name) != 1:\n msg = (\n f\"Expected 1 metrics provider with the name {mp_name}. \"\n f\"Found {len(mps_w_correct_name)}.\"\n )\n raise ValueError(msg)\n mp = mps_w_correct_name[0]\n self._metric_to_metrics_provider[metric] = mp\n\n # save this metric to the metrics provider so it can be added later.\n metrics_for_mp[mp].append(metric)\n\n # The metrics providers need their metrics, so we add them here - also for\n # non-static metrics providers, since information about the metrics they\n # provide is needed in the tests.\n sanity_check_number_of_metrics = 0\n for mp, mp_metrics in metrics_for_mp.items():\n sanity_check_number_of_metrics += len(mp_metrics)\n self._add_metrics_to_metrics_provider(mp, mp_metrics)\n if len(metrics) != sanity_check_number_of_metrics:\n msg = (\n f\"Expected {len(metrics)} and {sanity_check_number_of_metrics} \"\n f\"to be equal.\"\n )\n raise ValueError(msg)\n\n return metrics"
] | [
"0.70970815",
"0.6102869",
"0.5952238",
"0.5923666",
"0.58200157",
"0.5791009",
"0.5774546",
"0.575687",
"0.57353896",
"0.5720316",
"0.5650112",
"0.5590122",
"0.5529896",
"0.54889554",
"0.54316115",
"0.54202217",
"0.5382546",
"0.5348047",
"0.53211987",
"0.5313823",
"0.530327",
"0.5297029",
"0.52894604",
"0.5250983",
"0.52482057",
"0.52412903",
"0.5240154",
"0.5230253",
"0.52058256",
"0.5204344"
] | 0.66897804 | 1 |
initialize a receptor library by setting the number of receptors, the number of substrates it can respond to, and optional additional parameters in the parameter dictionary | def __init__(self, num_substrates, num_receptors, parameters=None):
# the call to the inherited method also sets the default parameters from
# this class
super(LibraryBinaryNumeric, self).__init__(num_substrates,
num_receptors, parameters)
# prevent integer overflow in collecting activity patterns
assert num_receptors <= self.parameters['max_num_receptors'] <= 63
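        # (with at most 63 receptors the binary activity pattern fits into a signed 64-bit integer)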
# check fixed_mixture_size parameter
fixed_mixture_size = self.parameters['fixed_mixture_size']
if fixed_mixture_size is False:
# special case where we accept False and silently convert to None
self.parameters['fixed_mixture_size'] = None
elif fixed_mixture_size is not None:
            # if the value is not None, it must be an integer
try:
fixed_mixture_size = int(fixed_mixture_size)
if 0 <= fixed_mixture_size <= self.Ns:
self.parameters['fixed_mixture_size'] = fixed_mixture_size
else:
raise ValueError
except (TypeError, ValueError):
raise ValueError('`fixed_mixture_size` must either be None or '
'an integer between 0 and Ns.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, num_params):\r\n self.num_params = num_params",
"def __init__(self, num_params):\r\n self.num_params = num_params",
"def __init__(self, *args, **kwargs):\n self.specGenerator = WMSpecGenerator()\n self.count = 0\n self.maxWmSpec = kwargs.setdefault('numOfSpecs', 1)\n self.type = kwargs.setdefault(\"type\", 'ReReco')\n if self.type != 'ReReco':\n raise TypeError('unknown request type %s' % self.type)\n self.splitter = kwargs.setdefault('splitter', 'DatasetBlock')\n self.inputDataset = kwargs.setdefault('inputDataset', None)\n self.dbsUrl = kwargs.setdefault('dbsUrl', None)\n self.status = {}\n self.progress = {}\n self.msg = {}\n self.names = []\n self.openRunningTimeout = kwargs.setdefault('openRunningTimeout', 0)\n import logging\n self['logger'] = logging",
"def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()",
"def __init__(self, context_dim, num_actions, buffer_s=-1, intercept=False):\n\n self._context_dim = context_dim\n self._num_actions = num_actions\n self._contexts = None\n self._rewards = None\n self.actions = []\n self.buffer_s = buffer_s\n self.intercept = intercept",
"def __init__(self, corr_cls, trajectory, norigins=-1, *args, **kwargs):\n # Instantiate correlation objects\n # with args passed upon construction\n self.trajectory = trajectory\n self.nbodies = corr_cls.nbodies\n self.phasespace = [] # nothing to dump\n self._corr_cls = corr_cls\n self._args = args\n self._kwargs = kwargs\n self._kwargs['norigins'] = '1'\n self.skip = adjust_skip(self.trajectory, norigins)",
"def __init__(self,n_terms=3):\r\n self.n_terms = n_terms\r\n self.num_parameters = 3 * self.n_terms + 1",
"def __init__(self, **kwargs):\n\n args = {\n 'nobs': None, # Number of observations\n 'npred': None, # Number of predictors\n 'nrelpred': None, # Number of relevant predictors\n 'relpos': None, # Position of relevant predictor components\n 'gamma': None, # Decay factor of eigenvalue of predictor\n 'rsq': None, # Coefficient of determination\n 'sim_type': None, # Type of simulation: univariate, bivariate, multivariate\n }\n for key, value in args.items():\n setattr(self, key, value)\n\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def __init__(self,n_terms=3):\r\n self.n_terms = n_terms\r\n self.num_parameters = 3 * self.n_terms",
"def init(self, parameters, agent_parameters):\n pass",
"def __init__(self, num_actions, observation_shape, params={}, verbose=False):\n\n self.verbose = verbose\n self.num_actions = num_actions\n\n # observation shape will be a tuple\n self.observation_shape = observation_shape[0]\n logging.info('Initialized with params: {}'.format(params))\n\n self.lr = params['lr']\n self.reg = params['reg']\n self.num_hidden = params['num_hidden']\n self.hidden_size = params['hidden_size']\n\n self.session = self.create_model()",
"def __init__(self, params):\n defaults = {}\n super(Regralizer, self).__init__(params, defaults)",
"def __init__(self, limit, action_shape, observation_shape):\r\n self.limit = limit\r\n\r\n self.observations0 = RingBuffer(limit, shape=observation_shape)\r\n self.actions = RingBuffer(limit, shape=action_shape)\r\n self.rewards = RingBuffer(limit, shape=(1,))\r\n self.terminals1 = RingBuffer(limit, shape=(1,))\r\n self.observations1 = RingBuffer(limit, shape=observation_shape)",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(ModifyParametersRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.major_ax is None:\n self.major_ax = 0.\n if self.minor_ax is None:\n self.minor_ax = 0.\n if self.coup_strength is None:\n self.coup_strength = 0.\n if self.limit_cycle is None:\n self.limit_cycle = 0.\n if self.forward_velocity is None:\n self.forward_velocity = 0.\n if self.curvature is None:\n self.curvature = 0.\n if self.x_offset is None:\n self.x_offset = []\n if self.y_offset is None:\n self.y_offset = []\n if self.coupling_1 is None:\n self.coupling_1 = []\n if self.coupling_2 is None:\n self.coupling_2 = []\n if self.coupling_3 is None:\n self.coupling_3 = []\n if self.coupling_4 is None:\n self.coupling_4 = []\n if self.coupling_5 is None:\n self.coupling_5 = []\n if self.coupling_6 is None:\n self.coupling_6 = []\n else:\n self.major_ax = 0.\n self.minor_ax = 0.\n self.coup_strength = 0.\n self.limit_cycle = 0.\n self.forward_velocity = 0.\n self.curvature = 0.\n self.x_offset = []\n self.y_offset = []\n self.coupling_1 = []\n self.coupling_2 = []\n self.coupling_3 = []\n self.coupling_4 = []\n self.coupling_5 = []\n self.coupling_6 = []",
"def __init__(self, input_shapes, n_actions, n_agents, output_shapes={}, layer_args={}, args=None):\n\n super(MACKRLCritic, self).__init__()\n self.args = args\n self.n_agents = n_agents\n self.n_actions = n_actions\n\n # Set up input regions automatically if required (if sensible)\n self.input_shapes = {}\n self.input_shapes[\"avail_actions\"] = self.n_actions\n self.input_shapes.update(input_shapes)\n\n # Set up output_shapes automatically if required\n self.output_shapes = {}\n self.output_shapes[\"advantage\"] = 1\n self.output_shapes[\"vvalue\"] = 1\n self.output_shapes.update(output_shapes)\n\n # Set up layer_args automatically if required\n self.layer_args = {}\n self.layer_args[\"vfunction\"] = {}\n self.layer_args.update(layer_args)\n\n self.MACKRLVFunction = MACKRLVFunction(input_shapes={\"main\":self.input_shapes[\"vfunction\"]},\n output_shapes={},\n layer_args={\"main\":self.layer_args[\"vfunction\"]},\n n_agents = self.n_agents,\n n_actions = self.n_actions,\n args=self.args)\n\n # self.MACKRLAdvantage = MACKRLAdvantage(input_shapes={\"avail_actions\":self.input_shapes[\"avail_actions\"],\n # \"qvalues\":self.MACKRLQFunction.output_shapes[\"qvalues\"],\n # \"agent_action\":self.input_shapes[\"agent_action\"],\n # \"agent_policy\":self.input_shapes[\"agent_policy\"]},\n # output_shapes={},\n # n_actions=self.n_actions,\n # args=self.args)\n\n pass",
"def __init__(self, input_size, neurons):\n super().__init__()\n self.input_size = input_size\n self.neurons = neurons\n self.params[\"w\"] = np.random.randn(input_size, neurons)\n self.params[\"b\"] = np.random.randn(1, neurons)\n self.grads = {}",
"def __init__(self):\n # Manage command line args\n args = ut_generic.getParserArgsRobot().parse_args()\n self.gzclient = args.gzclient\n self.realSpeed = args.realSpeed\n # self.realSpeed = True\n self.debug = args.debug\n self.multiInstance = args.multiInstance\n self.port = args.port\n # Set the path of the corresponding URDF file\n if self.realSpeed:\n urdf = \"biped.urdf\"\n self.urdfPath = get_prefix_path(\n \"lobot_description\") + \"/share/lobot_description/robots/\" + urdf\n else:\n print(\"Non real speed not yet supported. Use real speed instead. \")\n\n # TODO: Include launch logic here, refer to code from the .launch.py files\n # Note that after including the launch logic the code will no longer be debuggable due to multi process stuff\n\n # Create the node after the new ROS_DOMAIN_ID is set in generate_launch_description()\n rclpy.init()\n self.node = rclpy.create_node(self.__class__.__name__)\n\n # class variables\n self._observation_msg = None\n self.max_episode_steps = 1024 # default value, can be updated from baselines\n self.iterator = 0\n self.reset_jnts = True\n self._collision_msg = None\n\n #############################\n # Environment hyperparams\n #############################\n EE_POINTS = np.asmatrix([[0, 0, 0]])\n EE_VELOCITIES = np.asmatrix([[0, 0, 0]])\n\n # # Topics for the robot publisher and subscriber.\n JOINT_PUBLISHER = '/lobot_arm/control'\n # Get Joint names from the parameter server\n get_joints_client = self.node.create_client(GetAllJoints, \"/GetAllControlJoints\",\n qos_profile=qos_profile_services_default)\n req = GetAllJoints.Request()\n req.robot = \"lobot_arm\"\n while not get_joints_client.wait_for_service(timeout_sec=3.0):\n self.node.get_logger().info('service not available, waiting again...')\n\n future = get_joints_client.call_async(req)\n rclpy.spin_until_future_complete(self.node, future)\n if future.result() is not None:\n joint_names = future.result().joints\n self.node.get_logger().info(\n 'Number of joints: %d' %\n (len(joint_names)))\n else:\n self.node.get_logger().info('Service call failed %r' % (future.exception(),))\n JOINT_ORDER = joint_names\n INITIAL_JOINTS = np.full((len(joint_names)), 0.0).tolist()\n reset_condition = {\n 'initial_positions': INITIAL_JOINTS,\n 'initial_velocities': []\n }\n #############################\n\n m_jointOrder = copy.deepcopy(JOINT_ORDER)\n\n # Initialize target end effector position\n self.environment = {\n 'jointOrder': m_jointOrder,\n 'reset_conditions': reset_condition,\n 'tree_path': self.urdfPath,\n 'end_effector_points': EE_POINTS,\n }\n\n # Subscribe to the appropriate topics, taking into account the particular robot\n self._pub = self.node.create_publisher(JointControl, JOINT_PUBLISHER, qos_profile=qos_profile_sensor_data)\n self._sub = self.node.create_subscription(JointState, \"/joint_states\", self.observation_callback,\n qos_profile_sensor_data)\n\n # TODO: Make the clock node run on a separate thread so weird issues like outdated clock can stop happening\n self.lock = threading.Lock()\n self.clock_node = rclpy.create_node(self.__class__.__name__ + \"_clock\")\n self._sub_clock = self.clock_node.create_subscription(RosClock, '/clock', self.clock_callback,\n qos_profile=qos_profile_sensor_data)\n self.exec = rclpy.executors.MultiThreadedExecutor()\n self.exec.add_node(self.clock_node)\n t1 = threading.Thread(target=self.spinClockNode, daemon=True)\n t1.start()\n # self._imu_sub = self.node.create_subscription(JointState, \"/lobot_IMU_controller/out\", self.imu_callback, 
qos_profile_sensor_data)\n # self._sub = self.node.create_subscription(JointTrajectoryControllerState, JOINT_SUBSCRIBER, self.observation_callback, qos_profile=qos_profile_sensor_data)\n self._reset_sim = self.node.create_client(Empty, '/reset_simulation')\n self._physics_pauser = self.node.create_client(Empty, '/pause_physics')\n self._robot_resetter = self.node.create_client(Empty, '/lobot_arm/reset')\n self._physics_unpauser = self.node.create_client(Empty, '/unpause_physics')\n self.delete_entity = self.node.create_client(DeleteEntity, '/delete_entity')\n self.numJoints = len(JOINT_ORDER)\n # Initialize a KDL Jacobian solver from the chain.\n # self.jacSolver = ChainJntToJacSolver(self.mara_chain)\n\n # Observable dimensions, each joint has 2 (joint position + joint velocity), the IMU gives 6\n self.obs_dim = self.numJoints * 2 + 6\n\n # # Here idially we should find the control range of the robot. Unfortunatelly in ROS/KDL there is nothing like this.\n # # I have tested this with the mujoco enviroment and the output is always same low[-1.,-1.], high[1.,1.]\n\n low = -np.pi * np.ones(self.numJoints) * 0.4\n high = np.pi * np.ones(self.numJoints) * 0.4\n\n self.action_space = spaces.Box(low, high)\n\n high = np.inf * np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n self.buffer_dist_rewards = []\n self.buffer_tot_rewards = []\n self.collided = 0\n\n # Set the time source\n self._sim_time = 0\n self._sim_time_msg = builtin_interfaces.msg.Time()",
"def init(self, parameters):\n pass",
"def __init__(\n self, illusion_strength=0, difference=0, size_min=0.5\n ):\n self.parameters = _rodframe_parameters(\n illusion_strength=illusion_strength,\n difference=difference\n )",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(Rscanpose, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.Rscanpose is None:\n self.Rscanpose = [0.] * 645\n else:\n self.Rscanpose = [0.] * 645",
"def __init__(**params):",
"def __init__(self, DorR, cat_corr, **kwargs):\n if 'spec' not in cat_corr.keys(): \n # default spectrum parameters\n cat_corr['spec'] = {\n 'P0': 20000, #P0 \n 'Lbox': 3600.0, \n 'Ngrid':360, \n 'quad': False\n }\n\n self.cat_corr = cat_corr.copy()\n self.kwargs = kwargs\n self.type = DorR\n\n self.file_name = self.file()",
"def __init__(\n self, trainable=False, reverb_length=48000, add_dry=True, name=\"reverb\"\n ):\n super(Reverb, self).__init__(name=name, trainable=trainable)\n self._reverb_length = reverb_length\n self._add_dry = add_dry\n if self.trainable:\n self._ir = th.nn.Parameter(\n th.empty(self._reverb_length).normal_(0, 1e-6)\n )",
"def __init__(self, xray_structure=None, pair_sym_table=None, proxies=None,\n i_seqs=None, sigma_12=0.004, sigma_13=None,\n buffer_thickness=3.5, connectivity=None):\n assert [xray_structure, pair_sym_table].count(None) == 1\n if i_seqs is not None and len(i_seqs) == 0: i_seqs = None\n if sigma_13 is None: sigma_13 = sigma_12\n if proxies is None:\n proxies = adp_restraints.shared_rigu_proxy()\n\n build_proxies(proxies, adp_restraints.rigu_proxy, sigma_12, sigma_13,\n xray_structure=xray_structure, pair_sym_table=pair_sym_table,\n i_seqs=i_seqs, buffer_thickness=buffer_thickness, connectivity=connectivity)\n\n self.proxies = proxies",
"def __init__(self, alterRegressor, egoRegressor, windowSize):\n Parameter.checkClass(alterRegressor, AbstractPredictor)\n Parameter.checkClass(egoRegressor, AbstractPredictor)\n \n self.alterRegressor = alterRegressor\n self.egoRegressor = egoRegressor\n self.windowSize = windowSize",
"def __init__(self, limit, noise, random_seed, n_state=33, n_action=4):\n # Actor Model: Local, Target, & Optimizer\n self.Actor, self.ActorTarget, self.actorOpt = [], [], []\n for i in range(limit):\n self.Actor.append(Actor(n_state, n_action, random_seed).to(device)) \n self.actorOpt.append(optim.Adam(self.Actor[i].parameters(), lr=LR_ACTOR))\n self.ActorTarget = Actor(n_state, n_action, random_seed).to(device) \n # Critic Model: Local, Target, & Optimizer\n self.Critic = Critic(n_state, n_action, random_seed).to(device)\n self.CriticTarget = Critic(n_state, n_action, random_seed).to(device)\n self.criticOpt = optim.Adam(self.Critic.parameters(), lr=LR_CRITIC)\n # Replay Buffer\n self.Memory = ReplayBuffer2(n_action, BUFFER_SIZE, BATCH_SIZE, device, random_seed)\n self.step = 0\n # OUNoise Process\n self.noise = noise\n # Display\n print('\\nBUFFER_SIZE', BUFFER_SIZE,\n '\\nBATCH_SIZE', BATCH_SIZE,\n '\\nGAMMA', GAMMA,\n '\\nTAU', TAU,\n '\\nLR_ACTOR', LR_ACTOR,\n '\\nLR_CRITIC', LR_CRITIC)\n # Display Actor & Critic\n print('\\nACTOR[i]:\\n', self.Actor[0])\n print('CRITIC:\\n', self.Critic)",
"def __init__(self, **kwargs):\n super(RidgeRegressionComb, self).__init__(**kwargs)\n self.time_window = None\n self.alphas = None\n self.lst_features = None\n self.target_var = None\n self.n_outputs = None\n self.history_buffer = None\n self.feature_aggregator = None\n self.target_aggregator = None\n self.model = None\n self.is_adaptive = None\n #self.pub_feature_rel = None\n self.pub_r2 = None\n self.pub_std = None\n # Feature space scaling parameters\n self.scaler = None\n self.r2 = None\n self.pub_mean = None\n self.mean = None\n self.std = None\n self.cache_file = []",
"def __init__(self, model_info, alg_config, **kwargs):\n import_config(globals(), alg_config)\n super().__init__(\n alg_name=kwargs.get(\"name\") or \"muzero\",\n model_info=model_info[\"actor\"],\n alg_config=alg_config,\n )\n # self.buff = ReplayBuffer(BUFFER_SIZE)\n self.buff = PrioritizedReplayBuffer(BUFFER_SIZE, alpha=1)\n self.discount = GAMMA\n self.unroll_step = UNROLL_STEP\n self.td_step = TD_STEP\n self.async_flag = False",
"def __init__( self, parameters={} ):\n self.params = {}\n self.reset(parameters)",
"def __init__(self, num_synapses=0, weights=0.0, delays=1,\n connection_array=None):\n self._num_synapses = num_synapses\n self._weights = weights\n self._delays = delays\n self._connection_array = connection_array"
] | [
"0.624694",
"0.624694",
"0.59270716",
"0.58812773",
"0.5859252",
"0.5857603",
"0.5856646",
"0.5854381",
"0.5844939",
"0.58047056",
"0.5773737",
"0.57722926",
"0.57650805",
"0.57243747",
"0.5716238",
"0.56949776",
"0.5688408",
"0.56387156",
"0.5638388",
"0.5597038",
"0.55425453",
"0.55389273",
"0.5497699",
"0.5492458",
"0.5476631",
"0.54632336",
"0.54586446",
"0.5452642",
"0.5445658",
"0.5444325"
] | 0.7083537 | 0 |
calculate the number of steps to perform for the given `scheme` | def get_steps(self, scheme):
if scheme == 'monte_carlo':
# calculate the number of steps for a monte-carlo scheme
if self.parameters['monte_carlo_steps'] == 'auto':
steps_min = self.parameters['monte_carlo_steps_min']
steps_max = self.parameters['monte_carlo_steps_max']
steps = np.clip(10 * 2**self.Nr, steps_min, steps_max)
# Here, the factor 10 is an arbitrary scaling factor
else:
steps = self.parameters['monte_carlo_steps']
elif scheme == 'metropolis':
# calculate the number of steps for a metropolis scheme
if self.parameters['metropolis_steps'] == 'auto':
steps_min = self.parameters['metropolis_steps_min']
steps_max = self.parameters['metropolis_steps_max']
steps = np.clip(10 * 2**self.Nr, steps_min, steps_max)
# Here, the factor 10 is an arbitrary scaling factor
else:
steps = self.parameters['metropolis_steps']
else:
raise ValueError('Unknown stepping scheme `%s`' % scheme)
return int(steps) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_steps_num():\n return 0",
"def decode_step_count(self, board=None):\n # TODO decide which one is better.. not crucial\n # steps = 0\n # for key_pow, val_coor in self.read_bits.items():\n # steps += (self.matrix_board[val_coor] * 2) ** key_pow\n # return steps\n if board is None:\n board = self.matrix_board\n bit3 = int(board[self.read_bits[3]])\n bit2 = int(board[self.read_bits[2]])\n bit1 = int(board[self.read_bits[1]])\n bit0 = int(board[self.read_bits[0]])\n return int(f'0b{bit3}{bit2}{bit1}{bit0}', 2)",
"def total_steps(self) -> global___Expression:",
"def num_steps(self):\n return self.torsoStepCount() + 1",
"def number_of_iterations(self) -> int:\n pass",
"def number_of_steps(self) -> int:\n return len(self.step_points)",
"def numberOfSteps(num):\n steps = 0\n \n while num != 0:\n if num % 2 == 0:\n num /= 2\n steps += 1\n else:\n num -= 1\n steps += 1\n return steps",
"def num_steps(self) -> int:\n return self._num_steps",
"def number_of_iterations(self) -> int:\n return self._solution.info.iter",
"def number_of_steps(molecule):\n # Thanks https://www.reddit.com/r/adventofcode/comments/3xflz8/day_19_solutions/cy4etju\n elements = [el.group() for el in re.finditer(r'[A-Z][a-z]?', molecule)]\n rn_or_ar = [el for el in elements if el == 'Rn' or el == 'Ar']\n y_elements = [el for el in elements if el == 'Y']\n\n steps = len(elements) - len(rn_or_ar) - 2*len(y_elements) - 1\n\n return steps",
"def count(steps: List[int]):\n # this needs two passes but does them with a builtin\n # the factor 2x should be much smaller than the Python vs Builtin factor\n return steps.count(1), steps.count(3)",
"def n_steps(self) -> int:\n return len(self) - 1 # subtract the base metric",
"def test_step_count(self):\n inp = [(0, 0), (1, 1), (1, 2)]\n expected = 2\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)",
"def number_of_iterations(self):\n return self._solution.nit",
"def count_change(amount):\n \"*** YOUR CODE HERE ***\"\n\n def findm(pig):\n i = 0\n a = 1\n while 2**i < pig:\n i += 1\n a = 2**(i-1)\n return a\n\n def count_partitions(n, m):\n \"\"\"Count the ways to partition n using parts up to m.\"\"\"\n # print(n, m)\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n return count_partitions(n-m, m) + count_partitions(n, m//2)\n\n \n \n c = findm(amount)\n b = count_partitions(amount, c)\n # print(b)\n return b\n # return count_partitions(amount, b)",
"def number_of_connectives(formula):\n pass\n # ======== YOUR CODE HERE ========",
"def overall_reduction(self):\n return 84",
"def part_2() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n step_num = 0\n\n while True:\n flashed = list()\n step_glow_count = 0\n\n step_num += 1\n\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n step_glow_count += glow_count\n\n if step_glow_count == 100:\n break\n\n return step_num",
"def number_of_iterations(self):\n return self._solution[\"iterations\"]",
"def get_steps(steps):\n cexc.step_exceptions(steps)\n steps_int = int(steps)\n if steps_int > MAX_STEPS:\n steps_int = MAX_STEPS\n return steps_int",
"def get_number_of_parts(score): \n number_of_parts = 0\n for e in score.recurse().parts:\n number_of_parts = number_of_parts + 1\n\n return( number_of_parts ) # get_number_of_parts ",
"def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1",
"def calc_stepsize(self):\n # Calculate step size\n step = 1.0/((self.n+self.d)*np.max(np.sum(self.p, axis=0)))\n return step",
"def __count_recursion_depth(link_size, recursion_depth, prev_link_size, first_run):\n if not first_run:\n if link_size == prev_link_size + 1:\n recursion_depth += 1\n prev_link_size = link_size\n for i in range(1, 20):\n if link_size == prev_link_size - i:\n recursion_depth -= i\n prev_link_size = link_size\n else:\n prev_link_size = link_size\n first_run = False\n\n return [recursion_depth, prev_link_size, first_run]",
"def complexity(self, mode='#nodes'):\n if mode == '#nodes':\n return len(self.nodes)",
"def n_timesteps(self) -> int:\n return len(self.time)",
"def number_of_atoms(formula):\n pass\n # ======== YOUR CODE HERE ========",
"def getSteps():",
"def getNumSteps(self, *args):\n return _CompuCell.Simulator_getNumSteps(self, *args)",
"def main():\n\n rules = parse_input(get_input())\n for part in [5, 18]:\n image = np.array(START_PATTERN).astype(bool)\n for i in range(part):\n image = enlarge(image, rules)\n count = sum(sum(ch for ch in row) for row in image)\n\n print(\"Number of # in the final matrix after {} iterations is {}.\".format(part, count))\n return"
] | [
"0.65601104",
"0.65067685",
"0.6441884",
"0.64148223",
"0.6364415",
"0.62706876",
"0.6186873",
"0.61510324",
"0.6133444",
"0.60866106",
"0.60067546",
"0.60036564",
"0.5896404",
"0.58475363",
"0.58371323",
"0.5806053",
"0.5803067",
"0.58011645",
"0.57761735",
"0.5773252",
"0.57671833",
"0.57575405",
"0.5746445",
"0.57350373",
"0.571114",
"0.5659849",
"0.56521684",
"0.56475204",
"0.5627029",
"0.56231934"
] | 0.7198185 | 0 |
return the sorted `sensitivity_matrix` or sort the internal sensitivity_matrix in place. This function rearranges receptors such that receptors reacting to an equal number of substrates and to similar substrates are close together. | def sort_sensitivity_matrix(self, sensitivity_matrix=None):
if sensitivity_matrix is None:
sens_mat = self.sens_mat
else:
sens_mat = sensitivity_matrix
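        # sort receptor rows first by the number of substrates they bind and
        # then by the binding pattern itself, so similar receptors end up adjacent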
data = [(sum(item), list(item)) for item in sens_mat]
sens_mat = np.array([item[1] for item in sorted(data)])
if sensitivity_matrix is None:
self.sens_mat = sens_mat
else:
return sens_mat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SortAndFilterSuspects(self, suspects):\n if not suspects or len(suspects) == 1:\n return suspects\n\n suspects.sort(key=lambda suspect: -suspect.confidence)\n max_score = suspects[0].confidence\n min_score = max(suspects[-1].confidence, 0.0)\n if max_score == min_score:\n return []\n\n filtered_suspects = []\n for suspect in suspects: # pragma: no cover\n # The ratio of the probabilities of 2 suspects equal to\n # exp(suspect1.confidence)/exp(suspect2.confidence), so\n # suspect1.confidence - suspect2.confidence <= log(0.5) means the\n # suspect1 is half likely than suspect2.\n if (suspect.confidence <= min_score or\n suspect.confidence - max_score <= _THRESHOLD_RATIO):\n break\n\n filtered_suspects.append(suspect)\n\n return filtered_suspects",
"def sort_similarities():\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n sims_ranked = list()\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n sims_ranked.append(float(sim))\n\n sims_ranked = sorted(sims_ranked, reverse=True)\n writer = open(\"sorted_similarities.txt\", \"w\")\n for sim in sims_ranked:\n writer.write(str(sim) + \"\\n\")",
"def choose_sensitivity_matrix(self, density=0, avoid_correlations=False):\n shape = (self.Nr, self.Ns)\n \n if density == 'auto':\n # determine optimal parameters for the interaction matrix\n from .lib_bin_theory import LibraryBinaryUniform\n theory = LibraryBinaryUniform.from_other(self)\n density = theory.get_optimal_library()['density']\n \n if density == 0:\n # simple case of empty matrix\n self.sens_mat = np.zeros(shape, np.uint8)\n \n elif density >= 1:\n # simple case of full matrix\n self.sens_mat = np.ones(shape, np.uint8)\n \n elif avoid_correlations:\n # choose receptor substrate interaction randomly but try to avoid\n # correlations between the receptors\n self.sens_mat = np.zeros(shape, np.uint8)\n num_entries = int(round(density * self.Nr * self.Ns))\n \n empty_sens_mat = True\n while num_entries > 0:\n # specify the substrates that we want to detect\n if num_entries >= self.Ns:\n i_ids = np.arange(self.Ns)\n num_entries -= self.Ns\n else:\n i_ids = np.random.choice(np.arange(self.Ns), num_entries,\n replace=False)\n num_entries = 0\n \n if empty_sens_mat:\n # set the receptors for the substrates\n a_ids = np.random.randint(0, self.Nr, len(i_ids))\n for i, a in zip(i_ids, a_ids):\n self.sens_mat[a, i] = 1\n empty_sens_mat = False\n \n else:\n # choose receptors for each substrate from the ones that\n # are not activated, yet\n for i in i_ids:\n a_ids = np.flatnonzero(self.sens_mat[:, i] == 0)\n self.sens_mat[random.choice(a_ids), i] = 1\n \n else: # not avoid_correlations:\n # choose receptor substrate interaction randomly and don't worry\n # about correlations\n self.sens_mat = (np.random.random(shape) < density).astype(np.uint8)\n \n # save the parameters determining this matrix\n self.parameters['sensitivity_matrix_params'] = {\n 'density': density,\n 'avoid_correlations': avoid_correlations\n }",
"def queen_corners(self, sensitivity):\n tessellation = self.tessellation.copy()\n changes = {}\n qid = 0\n\n for ix, row in tqdm(tessellation.iterrows(), total=tessellation.shape[0]):\n corners = []\n change = []\n\n cell = row.geometry\n coords = cell.exterior.coords\n for i in coords:\n point = Point(i)\n possible_matches_index = list(self.sindex.intersection(point.bounds))\n possible_matches = tessellation.iloc[possible_matches_index]\n precise_matches = sum(possible_matches.intersects(point))\n if precise_matches > 2:\n corners.append(point)\n\n if len(corners) > 2:\n for c, it in enumerate(corners):\n next_c = c + 1\n if c == (len(corners) - 1):\n next_c = 0\n if corners[c].distance(corners[next_c]) < sensitivity:\n change.append([corners[c], corners[next_c]])\n elif len(corners) == 2:\n if corners[0].distance(corners[1]) > 0:\n if corners[0].distance(corners[1]) < sensitivity:\n change.append([corners[0], corners[1]])\n\n if change:\n for points in change:\n x_new = np.mean([points[0].x, points[1].x])\n y_new = np.mean([points[0].y, points[1].y])\n new = [(x_new, y_new), id]\n changes[(points[0].x, points[0].y)] = new\n changes[(points[1].x, points[1].y)] = new\n qid = qid + 1\n\n for ix, row in tqdm(tessellation.iterrows(), total=tessellation.shape[0]):\n cell = row.geometry\n coords = list(cell.exterior.coords)\n\n moves = {}\n for x in coords:\n if x in changes.keys():\n moves[coords.index(x)] = changes[x]\n keys = list(moves.keys())\n delete_points = []\n for move, k in enumerate(keys):\n if move < len(keys) - 1:\n if (\n moves[keys[move]][1] == moves[keys[move + 1]][1]\n and keys[move + 1] - keys[move] < 5\n ):\n delete_points = delete_points + (\n coords[keys[move] : keys[move + 1]]\n )\n # change the code above to have if based on distance not number\n\n newcoords = [changes[x][0] if x in changes.keys() else x for x in coords]\n for coord in newcoords:\n if coord in delete_points:\n newcoords.remove(coord)\n if coords != newcoords:\n if not cell.interiors:\n # newgeom = Polygon(newcoords).buffer(0)\n be = Polygon(newcoords).exterior\n mls = be.intersection(be)\n if len(list(shapely.ops.polygonize(mls))) > 1:\n newgeom = MultiPolygon(shapely.ops.polygonize(mls))\n geoms = []\n for g, n in enumerate(newgeom):\n geoms.append(newgeom[g].area)\n newgeom = newgeom[geoms.index(max(geoms))]\n else:\n newgeom = list(shapely.ops.polygonize(mls))[0]\n else:\n newgeom = Polygon(newcoords, holes=cell.interiors)\n tessellation.loc[ix, \"geometry\"] = newgeom\n return tessellation",
"def get_switchy_score_order(x):\n switchy_scores = np.apply_along_axis(switchy_score, axis=0, arr=x)\n return np.argsort(switchy_scores)",
"def compute_snr_and_detection_grids(sensitivity=\"O1\", snr_threshold=8.0, Mc_max=300.0, Mc_step=0.1,\n eta_max=0.25, eta_step=0.01, snr_max=1000.0, snr_step=0.1):\n # get interpolator given sensitivity\n interpolator = selection_effects.SNRinterpolator(sensitivity)\n\n # create chirp mass and eta arrays\n Mc_array = np.arange(Mc_step, Mc_max + Mc_step, Mc_step)\n eta_array = np.arange(eta_step, eta_max + eta_step, eta_step)\n\n # convert to total, primary and secondary mass arrays\n Mt_array = Mc_array / eta_array[:,np.newaxis]**0.6\n M1_array = Mt_array * 0.5 * (1. + np.sqrt(1. - 4 * eta_array[:,np.newaxis]))\n M2_array = Mt_array - M1_array\n\n # interpolate to get snr values if binary was at 1Mpc\n snr_grid_at_1Mpc = interpolator(M1_array, M2_array)\n\n # precompute a grid of detection probabilities as a function of snr\n snr_array = np.arange(snr_step, snr_max + snr_step, snr_step)\n detection_probability_from_snr = selection_effects.detection_probability_from_snr(snr_array, snr_threshold)\n\n return snr_grid_at_1Mpc, detection_probability_from_snr",
"def finish_sensitivity(self):\n # do at most 1000 features\n idx = torch.randperm(self._features.shape[1])[:100]\n self._features = self._features[:, idx]\n\n weight = self.module.weight.data\n num_features_in = weight.shape[1]\n selected_in = torch.zeros(num_features_in).bool()\n\n # greedy approach to rank in features\n for rank in reversed(range(num_features_in)):\n error_best = torch.Tensor([np.Inf])\n best = None\n\n # loop through remaining features to see which to add next\n for idx_in in range(num_features_in):\n # it's already in the set, no need trying to add it...\n if selected_in[idx_in]:\n continue\n\n # try adding in feature j and compute error\n selected_in[idx_in] = 1\n error_with_j = (\n self._features[selected_in].sum(dim=0) ** 2\n ).sum()\n\n # see if it's better than previous best\n if error_with_j < error_best:\n error_best = error_with_j\n best = idx_in\n\n # remove j from selectedIn for now\n selected_in[idx_in] = 0\n\n # add best one from this round to selectedIn\n selected_in[best] = 1\n\n # also note the rank of best in the sensitivities\n self.sensitivity_in[best] = rank",
"def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops",
"def order_violations(s, im):\n return np.power(np.linalg.norm(np.maximum(0, s - im)),2)",
"def rankPairs (self):\n def key (matrix, pair):\n # majority is positive, we want larger ones first\n major = matrix[pair[0]][pair[1]]\n # minority is negative because we want the smaller ones first\n minor = -1*matrix[pair[1]][pair[0]]\n return (major,minor)\n\n self.pairs = [(x,y) for x in self.poller.candidates for y in self.poller.candidates if x != y]\n matrix = self.poller.voteMatrix()\n # reverse=true to indicate descending sort\n self.pairs.sort(key=lambda pair: key(matrix,pair), reverse=True)\n self.weights = { pair : key(matrix,pair) for pair in self.pairs }\n self.pairs = [pair for pair in self.pairs if self.weights[pair][0] > -1*self.weights[pair][1]]",
"def order_sim(im, s):\n YmX = (s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1))\n - im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1)))\n score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t()\n return score",
"def sort_col_by_sim(col):\n sims = sorted(col[1], key=lambda pair: (pair[0], pair[1]), reverse=True)\n return (col[0], sims[0:k])",
"def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)",
"def sort_eyes(self):\n x1 = self.eyes[0][0]\n x2 = self.eyes[1][0]\n\n if x1 > x2:\n self.eyes.reverse()",
"def sorting_by_criteria(self, result):\r\n\t\tresult = sorted(result, key=lambda r: r[0])\r\n\t\tflag = False\r\n\t\tm = result[0][0]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][0] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" in prewin status, compare useful_amount only \"\"\"\r\n\t\tif (result[0][0] == 0):\r\n\t\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\t\t\ttest = \"\"\r\n\t\t\tfor r in result:\r\n\t\t\t\ttest += \"[{0}, {1}, {2}, {3}], \".format(r[0], r[1], r[2], r[3])\r\n#\t\t\tprint \"prewin status: {0}\".format(test)\r\n\t\t\tself.current_best_state = [result[0][0], result[0][1], result[0][2]]\r\n\t\t\treturn result[0][3]\r\n\r\n\t\t\"\"\" sort by score (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[2], reverse=True)\r\n\t\tflag = False\r\n\t\tm = result[0][2]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][2] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" sort by useful card amount (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\r\n\t\t\"\"\" choose one to discard \"\"\"\r\n\t\tdcard = result[0][3]\r\n\t\tm = result[0][1]\r\n\t\tbest = result[0]\r\n\t\tfor r in result:\r\n\t\t\tif (r[1] != m): break\r\n\t\t\tctype = GameBoard.CardType(r[3])\r\n\t\t\tif (ctype == 4) and (self.word_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\t\tif (ctype == 5) and (self.wind_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\tself.current_best_state = [r[0], r[1], r[2]]\r\n\t\treturn dcard",
"def sort_solutions(self, solutions):\r\n if self.breeding_rules.sorting_order is ScoresSortingOrder.ASCENDING:\r\n reverse = False\r\n else:\r\n reverse = True\r\n return sorted(solutions, reverse=reverse, key=lambda solution: solution.score)",
"def findSimilarInSurvey(self, key, threshold=10):\n \n key = key.strip()\n description = CodeBook.getCodeDescription(key)\n \n r = re.compile('([a-z]+[0-9]+[a-z])')\n m = r.match(key)\n \n if (not m) or description.startswith('SUSPECT'):\n print description\n return None, None\n \n group_key = m.groups()[0]\n print 'Matching for: {}'.format(group_key)\n print 'Description: {}'.format(description)\n \n candidates = self.master_list[map(lambda x: x.startswith(group_key), self.master_list)]\n \n def getScore(x,Y):\n score_list = []\n for y in Y:\n d = CodeBook.getCodeDescription(y)\n score = edit_distance(x,d)\n \n if (score < threshold) and (d != x):\n score_list.append((y,score,d))\n \n score_list = sorted(score_list, key=lambda x: x[1])\n \n return score_list\n \n scores = getScore(description,candidates)\n \n if len(scores) > 0:\n print 'Matched Column: '\n print '\\n'.join(['{} {} {}'.format(*x) for x in scores])\n return [x[0] for x in scores], scores\n \n return None, None",
"def _sort_rows(matrix, num_rows):\n tmatrix = array_ops.transpose(matrix, [1, 0])\n sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]\n return array_ops.transpose(sorted_tmatrix, [1, 0])",
"def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):\n\n def quality_method(p):\n return p.get_quality(self.quality, self.quality_aggregation)\n\n per_entity_prediction_filtered = defaultdict(list)\n for sub, per_obj_predictions in per_entity_prediction.items():\n # print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])\n merged_predictions = list(\n filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))\n\n merged_predictions.sort(key=quality_method, reverse=True)\n\n include = topk if topk > 0 else len(merged_predictions)\n per_entity_prediction_filtered[sub] = merged_predictions[:include]\n\n return per_entity_prediction_filtered",
"def _get_top_k_movies(self, similarity, movie_id, k):\n return [\n self._get_movies()[str(x+1)]\n for x in np.argsort(similarity[movie_id-1,:])[:-k-1:-1]\n ]",
"def get_best_sensitivity_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n sensitivity_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n tn, fp, fn, tp = confusion_matrix(self.y_true, classes).ravel()\n sensitivity = tp / (tp + fn)\n sensitivity_scores.append(sensitivity)\n best_sensitivity_score, best_sensitivity_threshold = self._get_best_metrics(\n metric_type='sensitivity_score',\n scores=sensitivity_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_sensitivity_score, best_sensitivity_threshold",
"def sort_filtered_contours(self):\r\n\r\n # Get the contours again\r\n invert = 255 - self.thresh_invert\r\n real_contours = cv2.findContours(invert, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n real_contours = real_contours[0] if len(real_contours) == 2 else real_contours[1]\r\n\r\n # Make sure that they're within the correct range for size\r\n # If too small, it is probably noise; if too large, then should be things around the grid\r\n for i, c in enumerate(real_contours, 1):\r\n contour_area = cv2.contourArea(c)\r\n if self.min_cell_size < contour_area < self.max_cell_size:\r\n self.good_contours.append(c)\r\n\r\n # We assume a square board, so the number of rows/cols should be the square root of total contours/cells\r\n self.board_dimension = int(math.sqrt(len(self.good_contours)))\r\n\r\n # Sort the contours from top to bottom\r\n (half_sorted_contours, _) = contours.sort_contours(self.good_contours, method=\"top-to-bottom\")\r\n\r\n # We then sort each row from left to right\r\n row = []\r\n for i, c in enumerate(half_sorted_contours, 1):\r\n row.append(c)\r\n if i % self.board_dimension == 0:\r\n (full_sorted_contours, _) = contours.sort_contours(row, method=\"left-to-right\")\r\n self.game_board_contours.append(full_sorted_contours)\r\n row = []",
"def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))",
"def _sort_modes(self):\n sort_idx = np.lexsort((self.modes[:, 1], self.modes[:, 0], self.modes[:, 2]))\n self._modes = self.modes[sort_idx]",
"def standard_sorting(cls, zmat):\n if zmat is None:\n return None\n nats = len(zmat)\n ncoords = 3*nats - 6\n if nats < 4:\n return None\n else:\n r_coords = [0, 1, 3]\n a_coords = [2, 4]\n t_coords = [5]\n if nats > 4:\n extra = np.arange(6, ncoords+1)\n r_coords += extra[::4].tolist()\n a_coords += extra[1::4].tolist()\n t_coords += extra[2::4].tolist()\n return np.argsort(np.concatenate([r_coords, a_coords, t_coords]))",
"def reorder( self ):\n self.sorted.sort(self.compareFunction)",
"def reorder_examples(self):\n self.example_wise_shrink(Ordering, key=sort_key)",
"def get_most_similar(\n claims: torch.Tensor,\n premises: torch.Tensor,\n k: int,\n similarity: Similarity\n) -> Tuple[torch.Tensor, torch.Tensor]:\n sim_values, indices = similarity.sim(left=claims, right=premises).topk(k=k, largest=True, sorted=True)\n\n return premises[indices], indices",
"def _order_observations(self):\n\n list_observations_y = zip(self.list_observations, self.list_y)\n list_observations_y = sorted(\n list_observations_y,\n key=lambda obs_y: np.linalg.norm(np.array(obs_y[0]))\n )\n self.list_observations = [obs for obs, y in list_observations_y]\n self.list_y = [y for obs, y in list_observations_y]",
"def sort_suggestions(\n suggestions: List[Tuple[Set[str], float]]\n) -> List[Tuple[Set[str], float]]:\n confidence_list = [suggestion[1] for suggestion in suggestions]\n sort_index = sorted(range(len(confidence_list)), key=lambda k: confidence_list[k])\n # Inverse the sort\n sort_index = sort_index[::-1]\n return [suggestions[i] for i in sort_index]"
] | [
"0.54372156",
"0.5328664",
"0.5222484",
"0.4775966",
"0.47067013",
"0.46552995",
"0.46477485",
"0.46280968",
"0.4625397",
"0.46188542",
"0.46107998",
"0.45985577",
"0.45515847",
"0.45426014",
"0.45174512",
"0.45159692",
"0.4500111",
"0.44911516",
"0.44894326",
"0.44743133",
"0.4471201",
"0.4459995",
"0.44173896",
"0.43983883",
"0.4393735",
"0.43753716",
"0.43741328",
"0.43658572",
"0.43640393",
"0.4362932"
] | 0.73758173 | 0 |
iterate over all mixtures and yield each mixture together with its unnormalized probability weight | def _iterate_mixtures(self):
if self._iterate_steps > self.parameters['max_steps']:
raise RuntimeError('The iteration would take more than %g steps'
% self.parameters['max_steps'])
hi = self.commonness
Jij = self.correlations
mixture_size = self.parameters['fixed_mixture_size']
if mixture_size is None:
# iterate over all mixtures
for c in itertools.product((0, 1), repeat=self.Ns):
c = np.array(c, np.uint8)
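                # unnormalized weight exp(c^T J c + h^T c) of this mixture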
weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))
yield c, weight_c
elif mixture_size == 0:
# special case which is not covered by the iteration below
yield np.zeros(self.Ns, np.uint8), 1
elif mixture_size == self.Ns:
# special case which is not covered by the iteration below
yield np.ones(self.Ns, np.uint8), 1
else:
# iterate over all mixtures with constant number of substrates
c = np.zeros(self.Ns, np.uint8)
for nz in itertools.combinations(range(self.Ns), mixture_size):
c[:] = 0
c[np.array(nz)] = 1
weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))
yield c, weight_c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sample_binary_mixtures(model, steps, dtype=np.uint):\n mixture_size = model.parameters['fixed_mixture_size']\n \n if not model.is_correlated_mixture and mixture_size is None:\n # use simple monte carlo algorithm\n prob_s = model.substrate_probabilities\n \n for _ in range(int(steps)):\n # choose a mixture vector according to substrate probabilities\n yield (np.random.random(model.Ns) < prob_s).astype(dtype)\n\n elif mixture_size is None:\n # go through all mixtures and don't keep the size constant\n\n # use metropolis algorithm\n hi = model.commonness\n Jij = model.correlations\n \n # start with a random concentration vector \n c = np.random.randint(0, 2, model.Ns).astype(dtype)\n E_last = -np.dot(np.dot(Jij, c) + hi, c)\n \n for _ in range(int(steps)):\n i = random.randrange(model.Ns)\n c[i] = 1 - c[i] #< switch the entry\n Ei = -np.dot(np.dot(Jij, c) + hi, c)\n if Ei < E_last or random.random() < np.exp(E_last - Ei):\n # accept the new state\n E_last = Ei\n else:\n # reject the new state and revert to the last one\n c[i] = 1 - c[i]\n \n yield c\n \n elif mixture_size == 0:\n # special case which is not covered by the iteration below\n c_zero = np.zeros(model.Ns, dtype)\n for _ in range(model._sample_steps):\n yield c_zero\n\n elif mixture_size == model.Ns:\n # special case which is not covered by the iteration below\n c_ones = np.ones(model.Ns, dtype)\n for _ in range(steps):\n yield c_ones\n \n else:\n # go through mixtures with keeping their size constant\n\n # use metropolis algorithm\n hi = model.commonness\n Jij = model.correlations\n\n # create random concentration vector with fixed substrate count\n c = np.r_[np.ones(mixture_size, dtype),\n np.zeros(model.Ns - mixture_size, dtype)]\n np.random.shuffle(c)\n E_last = -np.dot(np.dot(Jij, c) + hi, c)\n \n for _ in range(int(steps)):\n # find the next mixture by swapping two items\n i0 = random.choice(np.flatnonzero(c == 0)) #< find 0\n i1 = random.choice(np.flatnonzero(c)) #< find 1\n c[i0], c[i1] = 1, 0 #< swap entries\n Ei = -np.dot(np.dot(Jij, c) + hi, c)\n if Ei < E_last or random.random() < np.exp(E_last - Ei):\n # accept the new state\n E_last = Ei\n else:\n # reject the new state and revert to the last one\n c[i0], c[i1] = 0, 1\n \n yield c",
"def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))",
"def mixture_welfare(game, mixtures, num_resamples, *, percentiles=None, processes=None):\n return profile_function(\n game,\n regret.mixed_social_welfare,\n mixtures,\n num_resamples,\n percentiles=percentiles,\n processes=processes,\n )",
"def probability(self, samples):\n pass",
"def _gen_pert(self, count, **kwargs):\n self._check_pert(**kwargs)\n pert = FairBetaPert(**kwargs)\n rvs = pert.random_variates(count)\n return rvs",
"def monte_carlo_sample(self):\n\t\tresult = dict()\n\t\tfor n in self.topological_sort():\n\t\t\tpvals = tuple(result[p] for p in n.parents)\n\t\t\tresult[n.name] = n.cpt.rand_result(pvals)\n\t\treturn result",
"def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * weight_c\n hist2d += np.outer(c, c) * weight_c\n \n # calculate the frequency and the correlations \n ci_mean = hist1d / Z\n cij = hist2d / Z\n cij_corr = cij - np.outer(ci_mean, ci_mean)\n \n ci_var = np.diag(cij_corr)\n return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,\n 'cov': cij_corr}",
"def _sample_mixtures(self, steps=None, dtype=np.uint):\n if steps is None:\n steps = self._sample_steps\n \n return _sample_binary_mixtures(self, steps, dtype)",
"def reproduce(population:list):\n new_gen = []\n probs = []\n for p in population:\n probs.append(p[3])\n while len(new_gen) != len(probs):\n parents = selection(probs)\n son,eval_son,daughter,eval_daughter = xo(population[parents[0]][0],population[parents[0]][1], population[parents[1]][0],population[parents[1]][1],2)\n new_gen.append([son,eval_son])\n new_gen.append([daughter,eval_daughter])\n # mutation\n # lets say 5% of the population gets mutated\n how_many_to_mutate = int(NUM_OF_CHROMOZOMS * (1/100))\n t = [i for i in range(NUM_OF_CHROMOZOMS)]\n # choose percent of the population randomly, uniformly\n indices_to_mutate = choice(t, how_many_to_mutate, replace=False)\n for i in range(len(indices_to_mutate)):\n mutate(new_gen[indices_to_mutate[i]])\n\n evaluateAll(new_gen)\n return new_gen",
"def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])",
"def mixture_regret(game, mixtures, num_resamples, *, percentiles=None, processes=None):\n return profile_function(\n game,\n regret.mixture_regret,\n mixtures,\n num_resamples,\n percentiles=percentiles,\n processes=processes,\n )",
"def random_times(p):\n while True:\n if sum(p.values()) != 1:\n raise ValueError('Probabilities must sum to unity')\n r = random.random()\n remaining = 1\n for category, probability in p.items():\n remaining -= probability\n if remaining <= r:\n yield category\n break",
"def probabilities(self):\n raise NotImplementedError",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def __iter__(self):\n for sample in self.samples:\n yield sample",
"def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)",
"def probability(series, params):\n\n prob = 1\n\n for result in series:\n\n prob *= params[result]\n\n return prob * params[\"die\"]",
"def __GenerateVariantsDistribution(self):\n np.random.seed(self.random_seed_parametr)\n try:\n Students = pd.read_excel(self.student_path)\n print('Load {}'.format(self.student_path))\n students_number = len(Students)\n\n self.__create_ALL_LR()\n Course_structure, variants_numbers = self.__generate_stracture()\n print('Generate stracture')\n Number_of_weaks = len(Course_structure)\n\n number_of_distribution = 0\n for WeakNumber in range(Number_of_weaks):\n for TaskNumber in range(Course_structure[WeakNumber]):\n Students['Week {0} Task {1}'.format(WeakNumber + 1, TaskNumber + 1)] = np.random.randint(\n variants_numbers[number_of_distribution], size=students_number)\n number_of_distribution += 1\n\n writer = pd.ExcelWriter(self.students_with_variants_path)\n print('Save {}'.format(self.students_with_variants_path))\n Students.to_excel(writer)\n writer.save()\n except:\n print('File with students doesnot exist')",
"def bootstrap(items, choices, repeats):\n for i in range(repeats):\n yield sample(items, choices, replace=True)",
"def _iterate_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n return 2 ** self.Ns\n else:\n return scipy.special.comb(self.Ns, mixture_size, exact=True)",
"def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01",
"def permutate_genome_percent(human, phix, bacteria):\n \n per = list(itertools.product(human, phix, bacteria))\n sum_per = [sum(i) for i in zip(*per)]\n \n #check percentage sum < 1\n if all(i > 1 for i in sum_per):\n print \"Some combinations of human, phix and bacteria greater than 1\"\n sys.exit(0)\n \n return per",
"def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]",
"def generate_samples(self):\n self.analytic_probability()",
"def normalize(probabilities):\n for person in probabilities:\n\n # normalize the \"gene\"\n geneSum = probabilities[person][\"gene\"][0] + probabilities[person][\"gene\"][1] + probabilities[person][\"gene\"][2]\n for i in range(3):\n probabilities[person][\"gene\"][i] /= geneSum\n\n # normalize the \"trait\"\n traitSum = probabilities[person][\"trait\"][True] + probabilities[person][\"trait\"][False]\n probabilities[person][\"trait\"][True] /= traitSum\n probabilities[person][\"trait\"][False] /= traitSum",
"def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))",
"def _ratios() -> Iterable[float]:\n index = 0\n primes = 0\n\n while True:\n primes += 1 if is_prime(_first_spiral_arm(index)) else 0\n primes += 1 if is_prime(_second_spiral_arm(index)) else 0\n primes += 1 if is_prime(_third_spiral_arm(index)) else 0\n primes += 1 if is_prime(_fourth_spiral_arm(index)) else 0\n\n yield primes / (index * 4 + 1)\n\n index += 1",
"def test_generate_paulis(generators, num_qubits, result):\n pauli_ops = qml.paulix_ops(generators, num_qubits)\n for p1, p2 in zip(pauli_ops, result):\n assert p1.compare(p2)",
"def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1",
"def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = [email protected]()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)"
] | [
"0.6160292",
"0.6110729",
"0.59938663",
"0.59472424",
"0.58536416",
"0.58510166",
"0.58116955",
"0.5767724",
"0.57352465",
"0.5675324",
"0.5663256",
"0.5660493",
"0.56528705",
"0.55985093",
"0.5573837",
"0.55732846",
"0.5572714",
"0.55721015",
"0.55552113",
"0.55295265",
"0.5524025",
"0.5510295",
"0.5486286",
"0.54838157",
"0.5471655",
"0.5467816",
"0.5438942",
"0.5424649",
"0.5417386",
"0.54014355"
] | 0.73259944 | 0 |
calculates mixture statistics using a brute force algorithm | def mixture_statistics_brute_force(self):
Z = 0
hist1d = np.zeros(self.Ns)
hist2d = np.zeros((self.Ns, self.Ns))
# iterate over all mixtures
for c, weight_c in self._iterate_mixtures():
Z += weight_c
hist1d += c * weight_c
hist2d += np.outer(c, c) * weight_c
# calculate the frequency and the correlations
ci_mean = hist1d / Z
cij = hist2d / Z
cij_corr = cij - np.outer(ci_mean, ci_mean)
ci_var = np.diag(cij_corr)
return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,
'cov': cij_corr} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))",
"def calculate_mixture_features(data_type):\n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n \n fs = config.sample_rate\n \n if data_type == 'train':\n snr = config.Tr_SNR\n elif data_type == 'test':\n snr = config.Te_SNR \n else:\n raise Exception(\"data_type must be train | test!\")\n \n \n # Open mixture csv. \n mixture_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n with open(mixture_csv_path, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n lis = list(reader)\n \n t1 = time.time()\n cnt = 0\n for i1 in range(1, len(lis)):\n [speech_na, noise_na, noise_onset, noise_offset] = lis[i1]\n noise_onset = int(noise_onset)\n noise_offset = int(noise_offset)\n \n # Read speech audio. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path, target_fs=fs)\n \n # Read noise audio. \n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path, target_fs=fs)\n \n # Repeat noise to the same length as speech. \n if len(noise_audio) < len(speech_audio):\n n_repeat = int(np.ceil(float(len(speech_audio)) / float(len(noise_audio))))\n noise_audio_ex = np.tile(noise_audio, n_repeat)\n noise_audio = noise_audio_ex[0 : len(speech_audio)]\n # Truncate noise to the same length as speech. \n else:\n noise_audio = noise_audio[noise_onset : noise_offset]\n \n # Scale speech to given snr. \n scaler = get_amplitude_scaling_factor(speech_audio, noise_audio, snr=snr)\n speech_audio *= scaler\n \n # Get normalized mixture, speech, noise. \n (mixed_audio, speech_audio, noise_audio, alpha) = additive_mixing(speech_audio, noise_audio)\n\n # Write out mixed audio. \n out_bare_na = os.path.join(\"%s.%s\" % \n (os.path.splitext(speech_na)[0], os.path.splitext(noise_na)[0]))\n out_audio_path = os.path.join(workspace, \"mixed_audios\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.wav\" % out_bare_na)\n create_folder(os.path.dirname(out_audio_path))\n write_audio(out_audio_path, mixed_audio, fs)\n\n # Extract spectrogram. \n mixed_complx_x = calc_sp(mixed_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n noise_x = calc_sp(noise_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.p\" % out_bare_na)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, noise_x, alpha, out_bare_na]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def mixture_of_gauss(X,Y):\n \n # Split training/testing\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n \n # Fit and transform with LDA\n lda = LDA().fit(X_train, Y_train)\n X_train = lda.transform(X_train)\n X_test = lda.transform(X_test)\n \n # Initialize GMM\n clf = mixture.GMM(n_components=4)\n \n # \"Fit\" to Y. Specify the component means for each cluster. Component labels are not necesarily the same as Y however.\n clf.means_ = np.array([X_train[Y_train == i].mean(axis=0) for i in range(4)])\n # Fit X\n clf.fit(X_train)\n \n # Break up X into 4 based on the Y label\n x_0t = [ x for i,x in enumerate(X_train) if Y_train[i] == 0]\n x_90t = [ x for i,x in enumerate(X_train) if Y_train[i] == 1]\n x_180t = [ x for i,x in enumerate(X_train) if Y_train[i] == 2]\n x_270t = [ x for i,x in enumerate(X_train) if Y_train[i] == 3]\n \n # Matrix of known Y vs. prediction on the train set.\n mat = [ [ sum(clf.predict(x)==i) for i in [0,1,2,3] ] for x in [x_0t, x_90t, x_180t, x_270t] ]\n\n # Pick the max of each row. If clusters are good then there will be no collisions\n map0 = mat[0].index(max(mat[0]))\n map1 = mat[1].index(max(mat[1]))\n map2 = mat[2].index(max(mat[2]))\n map3 = mat[3].index(max(mat[3]))\n \n #Heavy handed way to make sure that mapping is collision free. If this assertion is false, try again, you probably just got unlucky. \n num_unique = len(set([map0, map1, map2, map3]))\n assert num_unique == 4, str(map0) + str(map1) + str(map2) + str(map3) + str(mat)\n \n # Transforms clf cluster prediction to expected Y label.\n def map_predict(X):\n # Make a dictionary\n d = { map0:0, map1:1, map2:2, map3:3 }\n \n # For each prediction, consult dictionary.\n return map(lambda z: d[z], clf.predict(X))\n \n \n # Use our mapped predictions instead of clf.predict\n test = map_predict(X_test) == Y_test\n train = map_predict(X_train) == Y_train\n \n # Little accuracy function. Should have done this sooner.\n accuracy = lambda X: 1.*sum(X)/len(X)\n \n # Print training and testing accuracy\n print \"train:\", accuracy(train), \"test:\", accuracy(test)\n \n # Return everything needed to run on a new testing set.\n return test, train, clf, lda, map_predict",
"def mixture_statistics(self, method='auto'):\n\n if method == 'auto':\n fixed_mixture_size = self.parameters['fixed_mixture_size']\n \n if self.is_correlated_mixture or fixed_mixture_size is not None:\n # mixture has correlations => we do Metropolis sampling\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute-force'\n else:\n method = 'monte-carlo'\n else:\n # the estimate is exact for mixtures without correlations\n method = 'estimate'\n\n if method == 'brute-force' or method == 'brute_force':\n return self.mixture_statistics_brute_force()\n elif method == 'monte-carlo' or method == 'monte_carlo':\n return self.mixture_statistics_monte_carlo()\n elif method == 'estimate':\n return self.mixture_statistics_estimate()\n else:\n raise ValueError('Unknown method `%s` for mixture statistics'\n % method)",
"def get_mixture(data, components):\n from jcvi.apps.base import popen\n\n probs, mus, sigmas = [], [], []\n fw = must_open(\"tmp\", \"w\")\n log_data = [log(x) for x in data if x > .05]\n data = \"\\n\".join([\"%.4f\" % x for x in log_data]).replace(\"inf\\n\", \"\")\n fw.write(data)\n fw.close()\n\n cmd = \"gmm-bic {0} {1} {2}\".format(components, len(log_data), fw.name)\n pipe = popen(cmd)\n\n for row in pipe:\n if row[0] != '#':\n continue\n\n atoms = row.split(\",\")\n a, b, c = atoms[1:4]\n a = float(a)\n b = float(b)\n c = float(c)\n\n mus.append(a)\n sigmas.append(b)\n probs.append(c)\n\n os.remove(fw.name)\n return probs, mus, sigmas",
"def compute_mixing_coefficients_bot(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n v_upts = TTTW_func.v2u(self.v)\n\n self.sigma_bot = []\n self.Kv0 = np.zeros([Ly,N+1])\n self.Kt0 = np.zeros([Ly,N+1])\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy effects neglected\n ustar2 = self.r_D[j] * np.sqrt(self.u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n \n for k in range(1,N):\n k_w = k\n k_r = k - 1\n\n if k_w < self.kbl[j]: # NEED Zob\n sigma = np.min( [ ((z_u_w[j,k_w] - z_u_w[j,0] + self.Zob) / (self.hbbl[j] + self.Zob)),1.])\n if j ==1:\n self.sigma_bot.append(sigma)\n a1 = sigma - 2.\n a2 = 3. - 2.*sigma\n a3 = sigma - 1.\n\n self.Kv0[j,k_w] = wm * self.hbbl[j] * ( sigma * (1. + sigma * ( a1 + a2*self.Gm1_bot[j]+a3*self.dGm1_dS_bot[j]))) \n self.Kt0[j,k_w] = ws * self.hbbl[j] * ( sigma * (1. + sigma * ( a1 + a2*self.Gt1_bot[j]+a3*self.dGt1_dS_bot[j])))",
"def estimate(self, bases, freqs, **kwargs):\n \n # Make sure that frequencies are provided for every basis measured\n if len(bases) != len(freqs):\n print(\"Error, must provide frequency data for all bases measured.\")\n return\n\n # Go through the keyword arguments and set mu and eps if required.\n mu = 1e-4\n eps = 0.1 \n\n if \"mu\" in kwargs:\n mu = kwargs[\"mu\"]\n if \"eps\" in kwargs:\n eps = kwargs[\"eps\"]\n\n # Separate the bases out into measured and unmeasured\n meas_bs_idx = []\n unmeas_bs_idx = []\n\n for x in range(self.dim):\n if x in bases:\n meas_bs_idx.append(x)\n else:\n unmeas_bs_idx.append(x)\n\n # Handle the vertical slope separately\n if -1 in bases:\n meas_bs_idx.append(-1)\n else:\n unmeas_bs_idx.append(-1)\n\n # Begin with the initial state, the maximally mixed state\n rho_0 = (1.0 / self.dim) * np.eye(self.dim)\n rho_n = rho_0\n\n \"\"\"print(\"Measured bases are \", end = \"\")\n print(meas_bs_idx)\n print(\"Unmeasured bases are \", end = \"\")\n print(unmeas_bs_idx)\"\"\"\n \n n = 1\n\n # Iterate\n while (n):\n ########################################################\n # Compute W(rho)\n # I might eventually put this in a separate method, but\n # for now I'm going to leave it here to avoid having to\n # repeatedly pass the same (large chunk of) information \n # to some helper function.\n ########################################################\n term_1 = np.zeros((self.dim, self.dim))\n term_2 = np.zeros((self.dim, self.dim))\n\n # Compute the first sum, which contains the measurement \n # frequencies and the measured bases. Note that in theory\n # the bases may not be in ascending order, however the \n # frequencies will be generated in the same order as the \n # bases are placed in the list. So create a separate counter\n # for frequencies to just iterate through them one at a time\n # as we go through the bases by their slope index.\n freq_idx = 0 \n for basis_idx in meas_bs_idx:\n for proj_idx in range(self.dim):\n this_projector = self.projectors[basis_idx][proj_idx]\n\n p_num = freqs[freq_idx][proj_idx] \n p_denom = np.trace(np.dot(rho_n, this_projector))\n prefactor = p_num / p_denom\n\n term_1 = term_1 + (prefactor * this_projector)\n freq_idx += 1\n\n # If there are no unmeasured basis, do nothing\n if len(unmeas_bs_idx) != 0:\n # Compute the second sum, which is over all the unmeasured bases.\n for basis_idx in unmeas_bs_idx:\n for proj_idx in range(self.dim):\n this_projector = self.projectors[basis_idx][proj_idx]\n\n prefactor = log(np.trace(np.dot(rho_n, this_projector)))\n\n term_2 = term_2 + (prefactor * this_projector)\n \n \n # Finally, compute W(rho)\n W_rho_n = term_1 - mu * term_2\n ########################################################\n\n #print(\"n = \" + str(n))\n #print(rho_n) \n\n # Check if we've got a good estimate. If the desired accuracy \n # is satisfied by the most recent rho_n, then we're done. \n # Return the estimator and the number of steps.\n # If not, increment n and keep going.\n if self.check_accuracy(W_rho_n, rho_n):\n return rho_n, n \n else:\n n += 1\n\n # Compute the next term in the series. It's a big ugly expression,\n # so I've separated out a term 'clump', and also the num/denom\n clump = W_rho_n - np.trace(np.dot(W_rho_n, rho_n)) * np.eye(self.dim)\n \n numerator = np.dot(np.eye(self.dim) + eps * clump, \\\n np.dot(rho_n, np.eye(self.dim) + eps * clump))\n denominator = 1 + (eps ** 2) * np.trace(np.dot(np.dot(clump, clump), rho_n))\n\n rho_np1 = numerator / denominator\n rho_n = rho_np1",
"def compute_stats(self, dataset, portion):\n with torch.no_grad():\n specgrams = []\n samples = 5000\n for i_batch, (mix, _, _) in enumerate(dataset):\n mix = mix[portion]\n spec = self.calculate_mag(mix, db_conversion=True)\n specgrams.append(spec)\n if (i_batch + 1) * mix.shape[0] > samples:\n break\n specgrams = torch.cat(specgrams, 0)\n self.mean.data = specgrams.mean(dim=(0, 2), keepdim=True)\n self.std.data = specgrams.std(dim=(0, 2), keepdim=True)\n None",
"def mixture_entropy_brute_force(self):\n Z, sum_wlogw = 0, 0\n\n # Naive implementation of measuring the entropy is\n # p(c) = w(c) / Z with Z = sum_c w(c)\n # H_c = -sum_c p(c) * log2(p(c))\n # This can be transformed to a more stable implementation:\n # H_c = log2(Z) - 1/Z * sum_c w(c) * log2(w(c))\n \n for _, weight_c in self._iterate_mixtures():\n if weight_c > 0:\n Z += weight_c\n sum_wlogw += weight_c * np.log2(weight_c)\n \n if Z == 0:\n return 0\n else:\n return np.log2(Z) - sum_wlogw / Z",
"def mixed_prob( means,stds,weights,validt):",
"def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances,\n mutProb, numTrials):\n \n #create viruses list\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n \n #create test patient P1\n results = np.zeros(numTrials*300).reshape(300,numTrials)\n resultsPopResist = np.zeros(numTrials*300).reshape(300,numTrials)\n \n #runs numTrials of 300 steps, putting results in an array of 300 lines, \n # numTrials columns\n for t in range(numTrials) :\n P1 = TreatedPatient(viruses, maxPop)\n for s in range(150):\n P1.update()\n results[s][numTrials-1] += P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n P1.addPrescription('guttagonol')\n for s in range(150,300):\n P1.update()\n results[s][numTrials-1]+=P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n \n #calculating average of virus population size at each step \n yValues1 = []\n for i in range(300):\n a = sum(results[i].tolist())/len(results[i])\n yValues1.append(a)\n \n yValues2 = []\n for i in range(300):\n a = sum(resultsPopResist[i].tolist())/len(resultsPopResist[i])\n yValues2.append(a)\n\n pylab.plot(yValues1,label='pop average')\n pylab.plot(yValues2,'r--',label = 'resistant virus population')\n pylab.title('virus pop average at each step')\n pylab.legend()\n pylab.xlabel('Time Steps')\n pylab.ylabel('pop #')\n pylab.show()",
"def simulationTwoDrugsVirusPopulations():\n #TODO",
"def process_0(self):\n raw_data = self.pull_data(self.sub_folder)\n\n prepped_data = self._prep_data(raw_data)\n\n print(len(prepped_data))\n\n\n gmm = GaussianMixture(5)\n\n gmm.fit(prepped_data)\n\n return gmm.means_",
"def calcium_titanate():\n\n positions = [[0.991521, 0.044799, 0.750000],\n [0.491521, 0.455201, 0.250000],\n [0.508479, 0.544799, 0.750000],\n [0.008479, 0.955201, 0.250000],\n [0.500000, 0.000000, 0.500000],\n [0.000000, 0.500000, 0.500000],\n [0.000000, 0.500000, 0.000000],\n [0.500000, 0.000000, 0.000000],\n [0.921935, 0.520580, 0.250000],\n [0.421935, 0.979420, 0.750000],\n [0.578065, 0.020580, 0.250000],\n [0.078065, 0.479420, 0.750000],\n [0.707456, 0.291917, 0.959281],\n [0.207456, 0.208083, 0.040719],\n [0.792544, 0.791917, 0.540719],\n [0.292544, 0.708083, 0.459281],\n [0.707456, 0.291917, 0.540719],\n [0.207456, 0.208083, 0.459281],\n [0.292544, 0.708083, 0.040719],\n [0.792544, 0.791917, 0.959281]]\n\n species = ['Ca','Ca','Ca','Ca','Ti','Ti','Ti','Ti',\n 'O ','O ','O ','O ','O ','O ','O ','O ','O ','O ','O ','O ']\n\n bravais = 'orthorhombic'\n\n space_group = 62\n lattice_parameters = {'a': Set(5.40444906, 'angstrom'),\n 'b': Set(5.51303112, 'angstrom'),\n 'c': Set(7.69713264, 'angstrom')}\n data = {'fractional': positions,\n 'species': species,\n 'lattice_parameters': lattice_parameters,\n 'space_group': ('', space_group),\n 'n_atoms': len(species)}\n\n return data",
"def main(fname, N, n, params):\n\n gmm = GaussianMixtureModel.from_file( fname )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n\n X = gmm.sample( N, n )\n\n # Set seed for the algorithm\n sc.random.seed( int( params.seed ) )\n\n algo = GaussianMixtureEM( k, d )\n\n O = M, S, w\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n M_ = closest_permuted_matrix( M.T, M_.T ).T\n\n # Table\n print column_aerr( M, M_ ), column_rerr( M, M_ )",
"def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels",
"def main( prefix, N, n, delta, params ):\n gmm = GaussianMixtureModel.from_file( prefix )\n k, d, M, w = gmm.k, gmm.d, gmm.means, gmm.weights\n logger.add( \"M\", M )\n logger.add_consts( \"M\", M, k, 2 )\n logger.add( \"w_min\", w.min() )\n logger.add( \"w_max\", w.max() )\n\n X = gmm.sample( N, n )\n logger.add( \"k\", k )\n logger.add( \"d\", d )\n logger.add( \"n\", n )\n\n # Set seed for the algorithm\n sc.random.seed( int( params.seed ) )\n logger.add( \"seed\", int( params.seed ) )\n\n P, T = sample_moments( X, k )\n Pe, Te = exact_moments( M, w )\n\n start = time.time()\n M_ = recover_components( k, P, T, Pe, Te, delta = delta )\n stop = time.time()\n logger.add( \"time\", stop - start )\n\n M_ = closest_permuted_matrix( M.T, M_.T ).T\n logger.add( \"M_\", M )\n\n # Error data\n logger.add_err( \"M\", M, M_ )\n logger.add_err( \"M\", M, M_, 'col' )\n\n print column_aerr(M, M_), column_rerr(M, M_)",
"def mixture_statistics_estimate(self):\n ci_mean = self.substrate_probabilities\n \n if self.is_correlated_mixture:\n J_ij = self.correlations\n pi_s = ci_mean\n bar_pi_s = 1 - pi_s\n \n ci_mean = pi_s * (1 + 2*bar_pi_s*np.dot(J_ij, pi_s))\n ci_var = ci_mean * (1 - ci_mean)\n cij_cov = (\n np.diag(ci_var)\n + 2*np.einsum('ij,i,j->ij', J_ij, ci_var, ci_var)\n )\n\n else:\n # uncorrelated mixtures\n ci_var = ci_mean * (1 - ci_mean)\n cij_cov = np.diag(ci_var)\n\n return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,\n 'cov': cij_cov}",
"def run_metropolis(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n else:\n break\n\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step(positions)\n positions = new_positions\n self.sam.sample_values(positions)\n\n self.sam.average_values(self.mc_cycles)\n energy = self.sam.local_energy\n d_El = self.sam.derivative_energy\n var = self.sam.variance\n self.print_averages()\n return d_El, energy, var",
"def test_gaussian_em():\n fname = \"gmm-3-10-0.7.npz\"\n gmm = GaussianMixtureModel.generate( fname, 3, 3 )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n N, n = 1e6, 1e5\n\n\n X = gmm.sample( N, n )\n\n algo = GaussianMixtureEM(k, d)\n\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n\n M_ = closest_permuted_matrix( M, M_ )\n w_ = closest_permuted_vector( w, w_ )\n\n print w, w_\n\n print norm( M - M_ )/norm(M)\n print abs(S - S_).max()\n print norm( w - w_ ) \n\n assert( norm( M - M_ )/norm(M) < 1e-1 )\n assert (abs(S - S_) < 1 ).all()\n assert( norm( w - w_ ) < 1e-2 )",
"def _iterate_mixtures(self):\n \n if self._iterate_steps > self.parameters['max_steps']:\n raise RuntimeError('The iteration would take more than %g steps'\n % self.parameters['max_steps'])\n \n hi = self.commonness\n Jij = self.correlations\n\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n # iterate over all mixtures\n for c in itertools.product((0, 1), repeat=self.Ns):\n c = np.array(c, np.uint8)\n weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))\n yield c, weight_c\n \n elif mixture_size == 0:\n # special case which is not covered by the iteration below\n yield np.zeros(self.Ns, np.uint8), 1\n \n elif mixture_size == self.Ns:\n # special case which is not covered by the iteration below\n yield np.ones(self.Ns, np.uint8), 1\n \n else:\n # iterate over all mixtures with constant number of substrates\n c = np.zeros(self.Ns, np.uint8)\n for nz in itertools.combinations(range(self.Ns), mixture_size):\n c[:] = 0\n c[np.array(nz)] = 1\n weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))\n yield c, weight_c",
"def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = [email protected]()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)",
"def make_mixture_parameters(param_dict):\n compound1 = param_dict['compound1_name']\n compound2 = param_dict['compound2_name']\n compound1_mw = param_dict[compound1]['mw']\n compound2_mw = param_dict[compound2]['mw']\n n_fractions = param_dict['n_fractions']\n compound1_frac_range = np.linspace(0,1,n_fractions)\n total_mass = param_dict['total_mass'] #grams\n output_mass = {}\n output_mass[compound1] = np.zeros(n_fractions)\n output_mass[compound2] = np.zeros(n_fractions) \n compound_mw_array = np.array([compound1_mw, compound2_mw])\n for i, frac in enumerate(compound1_frac_range):\n fractions = np.linalg.solve([compound_mw_array,[1.0-frac, -1.0*frac]],[10, 0])\n output_mass[compound1][i] = fractions[0]*compound1_mw\n output_mass[compound2][i] = fractions[1]*compound2_mw\n return output_mass",
"def compute(N, T):\n print(N, T)\n fraction_of_sick_people = np.zeros(len(betas))\n for k, beta in enumerate(betas):\n for run in range(runs):\n # initialize random array with its state sick/healthy\n # 0 for healthy\n # 1 for infected/sick\n current_state = np.random.randint(0, 2, N)\n for j in range(T):\n next_state = np.zeros(N, dtype=int)\n for i in range(N):\n if current_state[i] == 0: # is healthy and cannot infect anyone\n continue\n if random.random() < beta: # infect left neighbour\n next_state[i - 1] = 1\n if random.random() < beta: # infect right neighbour\n next_state[(i + 1) % N] = 1\n # next_state[i] == 0 means current person is not yet infected by left neighbour\n # because of if current_state[i] == 0 we know that the current person is sick\n # with random.random() <= beta we \"roll a dice\" if it gets healthy\n # if not it is going to be sick\n # Note: In the round of i+1 it can still be infected!\n if next_state[i] == 0 and random.random() <= gamma:\n next_state[i] = 0\n else:\n next_state[i] = 1\n current_state = next_state.copy()\n fraction_of_sick_people[k] += sum(current_state) / N\n fraction_of_sick_people[k] = fraction_of_sick_people[k] / runs\n return fraction_of_sick_people",
"def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,\n datadir='/Users/smn2/EUCLID/CTItesting/uniform/',\n thibautCDM03=False, beta=False, serial=1, parallel=1):\n files = g.glob(datadir + '*.fits')\n #pick randomly\n files = np.random.choice(files, galaxies, replace=False)\n\n #trap parameters: parallel\n if thibautCDM03:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'\n params = ThibautsCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n else:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'\n params = MSSLCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n if beta:\n params.update(dict(beta_p=0.6, beta_s=0.6))\n\n print f1, f2\n\n #store shapes\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n xclean = []\n yclean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n xCTI = []\n yCTI = []\n eCTIfixed = []\n e1CTIfixed = []\n e2CTIfixed = []\n R2CTIfixed = []\n xCTIfixed = []\n yCTIfixed = []\n\n fh = open(output.replace('.pk', '.csv'), 'w')\n fh.write('#files: %s and %s\\n' % (f1, f2))\n for key in params:\n print key, params[key]\n fh.write('# %s = %s\\n' % (key, str(params[key])))\n fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\\n')\n for f in files:\n print 'Processing: ', f\n\n #load data\n nocti = pf.getdata(f)\n\n #scale to SNR about 10 (average galaxy, a single exposure)\n nocti /= np.sum(nocti)\n nocti *= 1500.\n\n #place it on canvas\n tmp = np.zeros((2066, 2048))\n ysize, xsize = nocti.shape\n ysize /= 2\n xsize /= 2\n tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = nocti.copy()\n\n #add background\n tmp += bcgr\n\n #run CDM03\n c = CTI.CDM03bidir(params, [])\n tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()\n\n #remove background and make a cutout\n CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]\n CTIdata -= bcgr\n CTIdata[CTIdata < 0.] 
= 0.\n\n #write files\n #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)\n #fileIO.writeFITS(CTI, f.replace('.fits', 'CTI.fits'), int=False)\n\n #reset settings\n settings = dict(sigma=sigma, iterations=iterations)\n\n #calculate shapes\n sh = shape.shapeMeasurement(nocti.copy(), log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n xclean.append(results['centreX'])\n yclean.append(results['centreY'])\n\n #CTI, fitted centroid\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results2 = sh.measureRefinedEllipticity()\n\n eCTI.append(results2['ellipticity'])\n e1CTI.append(results2['e1'])\n e2CTI.append(results2['e2'])\n R2CTI.append(results2['R2'])\n xCTI.append(results2['centreX'])\n yCTI.append(results2['centreY'])\n\n #fixed centroid\n settings['fixedPosition'] = True\n settings['fixedX'] = results['centreX']\n settings['fixedY'] = results['centreY']\n settings['iterations'] = 1\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results3 = sh.measureRefinedEllipticity()\n\n eCTIfixed.append(results3['ellipticity'])\n e1CTIfixed.append(results3['e1'])\n e2CTIfixed.append(results3['e2'])\n R2CTIfixed.append(results3['R2'])\n xCTIfixed.append(results3['centreX'])\n yCTIfixed.append(results3['centreY'])\n\n text = '%s,%e,%e,%e,%e,%e,%e\\n' % (f, results['ellipticity'] - results2['ellipticity'],\n results['e1'] - results2['e1'], results['e2'] - results2['e2'],\n results['R2'] - results2['R2'],\n results['centreX'] - results2['centreX'],\n results['centreY'] - results2['centreY'])\n fh.write(text)\n print text\n\n fh.close()\n\n results = {'eclean': np.asarray(eclean),\n 'e1clean': np.asarray(e1clean),\n 'e2clean': np.asarray(e2clean),\n 'R2clean': np.asarray(R2clean),\n 'xclean': np.asarray(xclean),\n 'yclean': np.asarray(yclean),\n 'eCTI': np.asarray(eCTI),\n 'e1CTI': np.asarray(e1CTI),\n 'e2CTI': np.asarray(e2CTI),\n 'R2CTI': np.asarray(R2CTI),\n 'xCTI': np.asarray(xCTI),\n 'yCTI': np.asarray(yCTI),\n 'eCTIfixed': np.asarray(eCTIfixed),\n 'e1CTIfixed': np.asarray(e1CTIfixed),\n 'e2CTIfixed': np.asarray(e2CTIfixed),\n 'R2CTIfixed': np.asarray(R2CTIfixed),\n 'xCTIfixed': np.asarray(xCTIfixed),\n 'yCTIfixed': np.asarray(yCTIfixed)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, output)\n\n return results",
"def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()",
"def run_metropolis_PBC(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances_PBC(positions)\n # check if the wave function is zero\n while True:\n test_wavefunction = self.w.wavefunction(positions)\n if test_wavefunction**2 <= 1e-14:\n # print ('obs')\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances_PBC(positions)\n else:\n break\n # Initialize sampler method for each new Monte Carlo run\n self.sam.initialize()\n\n for i in range(self.mc_cycles):\n new_positions = self.metropolis_step_PBC(positions)\n positions = new_positions\n self.sam.sample_values(positions)\n\n self.sam.average_values(self.mc_cycles)\n energy = self.sam.local_energy\n d_El = self.sam.derivative_energy\n var = self.sam.variance\n print ('w = ', self.w.wavefunction(positions))\n self.print_averages()\n return d_El, energy, var",
"def get_effect_size(self, summ, b, nmc=5000):\n m0b, v0b = self.DModel.models[0].predict(np.array([b])) \n m1b, v1b = self.DModel.models[1].predict(np.array([b]))\n \n d_mean_D = np.squeeze(m1b - m0b) # TODO: why was this swapped around?\n d_var_D = np.squeeze(v0b + v1b)\n d_std_D = np.sqrt(d_var_D)\n \n if d_mean_D < 0:\n pval = 1 - stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n else:\n pval = stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n \n xmin, xmax = (np.min([d_mean_D - 4*d_std_D, -0.1*d_std_D]), \n np.max([d_mean_D + 4*d_std_D, 0.1*d_std_D]))\n \n n = 300\n xrange = np.linspace(xmin, xmax, n)\n y = stats.norm.pdf(xrange, d_mean_D, d_std_D) \n \n samples = np.zeros((nmc))\n nspike = int(np.round(summ['pmp']['pmc']*nmc))\n samples[nspike:] = np.random.normal(loc=d_mean_D, \n scale=np.sqrt(d_var_D), \n size=(nmc-nspike))\n \n if not np.isscalar(b):\n d_bma = None\n else:\n \n if nspike==nmc:\n # BMA dominated by continuous model\n # Put all mass at xrange closest to b\n d_bma = np.zeros((n))\n xdelta = xrange[1] - xrange[0]\n ix = np.argmin((xrange-b)**2)\n d_bma[ix] = 1.0 / xdelta\n elif nspike==0:\n # BMA dominated by discontinuous model\n d_bma = y\n else:\n # BMA is a mixture\n kde_fit = stats.gaussian_kde(samples, \n bw_method='silverman')\n d_bma = kde_fit(xrange)\n \n return {'es_BMA': d_bma,\n 'es_Disc': y,\n 'es_disc_stats': (d_mean_D, d_std_D),\n 'pval': pval,\n 'es_range': xrange,\n 'f(b)': (m0b, m1b),\n 'es_transform': lambda z: z*d_std_D + d_mean_D}",
"def _gibbs_sampling_iteration(self):\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n k = self.z_mn[m, n]\n self.n_mk[m, k] -= 1\n self.n_m[m] -= 1\n self.n_kt[k, w_mn] -= 1\n self.n_k[k] -= 1\n k = self._conditional_z(\n self.n_components, self.alpha, self.beta,\n self.n_mk, self.n_kt, m, w_mn, self.beta_sum, self.n_k)\n self.z_mn[m, n] = k\n self.n_mk[m, k] += 1\n self.n_m[m] += 1\n self.n_kt[k, w_mn] += 1\n self.n_k[k] += 1"
] | [
"0.603266",
"0.6025553",
"0.5984378",
"0.59416246",
"0.58981115",
"0.5829733",
"0.5794666",
"0.5727615",
"0.57198894",
"0.5643767",
"0.5639376",
"0.56325966",
"0.5592785",
"0.55927706",
"0.55848724",
"0.5584382",
"0.5581739",
"0.55519193",
"0.5520184",
"0.55121636",
"0.5510202",
"0.5494665",
"0.5461173",
"0.54520506",
"0.54485965",
"0.5441632",
"0.54339457",
"0.54207635",
"0.5413277",
"0.5410207"
] | 0.70597595 | 0 |
estimates the mixture statistics | def mixture_statistics_estimate(self):
ci_mean = self.substrate_probabilities
if self.is_correlated_mixture:
J_ij = self.correlations
pi_s = ci_mean
bar_pi_s = 1 - pi_s
ci_mean = pi_s * (1 + 2*bar_pi_s*np.dot(J_ij, pi_s))
ci_var = ci_mean * (1 - ci_mean)
cij_cov = (
np.diag(ci_var)
+ 2*np.einsum('ij,i,j->ij', J_ij, ci_var, ci_var)
)
else:
# uncorrelated mixtures
ci_var = ci_mean * (1 - ci_mean)
cij_cov = np.diag(ci_var)
return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,
'cov': cij_cov} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * weight_c\n hist2d += np.outer(c, c) * weight_c\n \n # calculate the frequency and the correlations \n ci_mean = hist1d / Z\n cij = hist2d / Z\n cij_corr = cij - np.outer(ci_mean, ci_mean)\n \n ci_var = np.diag(cij_corr)\n return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,\n 'cov': cij_corr}",
"def compute_stats(self, dataset, portion):\n with torch.no_grad():\n specgrams = []\n samples = 5000\n for i_batch, (mix, _, _) in enumerate(dataset):\n mix = mix[portion]\n spec = self.calculate_mag(mix, db_conversion=True)\n specgrams.append(spec)\n if (i_batch + 1) * mix.shape[0] > samples:\n break\n specgrams = torch.cat(specgrams, 0)\n self.mean.data = specgrams.mean(dim=(0, 2), keepdim=True)\n self.std.data = specgrams.std(dim=(0, 2), keepdim=True)\n None",
"def process_0(self):\n raw_data = self.pull_data(self.sub_folder)\n\n prepped_data = self._prep_data(raw_data)\n\n print(len(prepped_data))\n\n\n gmm = GaussianMixture(5)\n\n gmm.fit(prepped_data)\n\n return gmm.means_",
"def mixture_statistics(self, method='auto'):\n\n if method == 'auto':\n fixed_mixture_size = self.parameters['fixed_mixture_size']\n \n if self.is_correlated_mixture or fixed_mixture_size is not None:\n # mixture has correlations => we do Metropolis sampling\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute-force'\n else:\n method = 'monte-carlo'\n else:\n # the estimate is exact for mixtures without correlations\n method = 'estimate'\n\n if method == 'brute-force' or method == 'brute_force':\n return self.mixture_statistics_brute_force()\n elif method == 'monte-carlo' or method == 'monte_carlo':\n return self.mixture_statistics_monte_carlo()\n elif method == 'estimate':\n return self.mixture_statistics_estimate()\n else:\n raise ValueError('Unknown method `%s` for mixture statistics'\n % method)",
"def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))",
"def calculate_mixture_features(data_type):\n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n \n fs = config.sample_rate\n \n if data_type == 'train':\n snr = config.Tr_SNR\n elif data_type == 'test':\n snr = config.Te_SNR \n else:\n raise Exception(\"data_type must be train | test!\")\n \n \n # Open mixture csv. \n mixture_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n with open(mixture_csv_path, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n lis = list(reader)\n \n t1 = time.time()\n cnt = 0\n for i1 in range(1, len(lis)):\n [speech_na, noise_na, noise_onset, noise_offset] = lis[i1]\n noise_onset = int(noise_onset)\n noise_offset = int(noise_offset)\n \n # Read speech audio. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path, target_fs=fs)\n \n # Read noise audio. \n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path, target_fs=fs)\n \n # Repeat noise to the same length as speech. \n if len(noise_audio) < len(speech_audio):\n n_repeat = int(np.ceil(float(len(speech_audio)) / float(len(noise_audio))))\n noise_audio_ex = np.tile(noise_audio, n_repeat)\n noise_audio = noise_audio_ex[0 : len(speech_audio)]\n # Truncate noise to the same length as speech. \n else:\n noise_audio = noise_audio[noise_onset : noise_offset]\n \n # Scale speech to given snr. \n scaler = get_amplitude_scaling_factor(speech_audio, noise_audio, snr=snr)\n speech_audio *= scaler\n \n # Get normalized mixture, speech, noise. \n (mixed_audio, speech_audio, noise_audio, alpha) = additive_mixing(speech_audio, noise_audio)\n\n # Write out mixed audio. \n out_bare_na = os.path.join(\"%s.%s\" % \n (os.path.splitext(speech_na)[0], os.path.splitext(noise_na)[0]))\n out_audio_path = os.path.join(workspace, \"mixed_audios\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.wav\" % out_bare_na)\n create_folder(os.path.dirname(out_audio_path))\n write_audio(out_audio_path, mixed_audio, fs)\n\n # Extract spectrogram. \n mixed_complx_x = calc_sp(mixed_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n noise_x = calc_sp(noise_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.p\" % out_bare_na)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, noise_x, alpha, out_bare_na]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def mixed_prob( means,stds,weights,validt):",
"def mixture_of_gauss(X,Y):\n \n # Split training/testing\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n \n # Fit and transform with LDA\n lda = LDA().fit(X_train, Y_train)\n X_train = lda.transform(X_train)\n X_test = lda.transform(X_test)\n \n # Initialize GMM\n clf = mixture.GMM(n_components=4)\n \n # \"Fit\" to Y. Specify the component means for each cluster. Component labels are not necesarily the same as Y however.\n clf.means_ = np.array([X_train[Y_train == i].mean(axis=0) for i in range(4)])\n # Fit X\n clf.fit(X_train)\n \n # Break up X into 4 based on the Y label\n x_0t = [ x for i,x in enumerate(X_train) if Y_train[i] == 0]\n x_90t = [ x for i,x in enumerate(X_train) if Y_train[i] == 1]\n x_180t = [ x for i,x in enumerate(X_train) if Y_train[i] == 2]\n x_270t = [ x for i,x in enumerate(X_train) if Y_train[i] == 3]\n \n # Matrix of known Y vs. prediction on the train set.\n mat = [ [ sum(clf.predict(x)==i) for i in [0,1,2,3] ] for x in [x_0t, x_90t, x_180t, x_270t] ]\n\n # Pick the max of each row. If clusters are good then there will be no collisions\n map0 = mat[0].index(max(mat[0]))\n map1 = mat[1].index(max(mat[1]))\n map2 = mat[2].index(max(mat[2]))\n map3 = mat[3].index(max(mat[3]))\n \n #Heavy handed way to make sure that mapping is collision free. If this assertion is false, try again, you probably just got unlucky. \n num_unique = len(set([map0, map1, map2, map3]))\n assert num_unique == 4, str(map0) + str(map1) + str(map2) + str(map3) + str(mat)\n \n # Transforms clf cluster prediction to expected Y label.\n def map_predict(X):\n # Make a dictionary\n d = { map0:0, map1:1, map2:2, map3:3 }\n \n # For each prediction, consult dictionary.\n return map(lambda z: d[z], clf.predict(X))\n \n \n # Use our mapped predictions instead of clf.predict\n test = map_predict(X_test) == Y_test\n train = map_predict(X_train) == Y_train\n \n # Little accuracy function. Should have done this sooner.\n accuracy = lambda X: 1.*sum(X)/len(X)\n \n # Print training and testing accuracy\n print \"train:\", accuracy(train), \"test:\", accuracy(test)\n \n # Return everything needed to run on a new testing set.\n return test, train, clf, lda, map_predict",
"def test_gaussian_mixture_num_components(n_mixture_components):\n # Set random seed\n set_random_seed_from_args(\n \"test_gaussian_mixture_num_components\",\n n_mixture_components,\n )\n # Initialise input arguments\n output_dim = 4\n n_train = np.random.randint(10, 20)\n n_test = np.random.randint(10, 20)\n input_dim = np.random.randint(2, 5)\n # Initialise data set\n classification_data = data.MixtureOfGaussians(\n input_dim=input_dim,\n output_dim=output_dim,\n n_train=n_train,\n n_test=n_test,\n n_mixture_components=n_mixture_components,\n )\n assert classification_data.train.x.shape == (input_dim, n_train)\n assert classification_data.test.x.shape == (input_dim, n_test)\n assert classification_data.train.labels.shape == (n_train, )\n assert classification_data.test.labels.shape == (n_test, )\n assert classification_data.train.y.shape == (output_dim, n_train)\n assert classification_data.test.y.shape == (output_dim, n_test)",
"def mixture_statistics_monte_carlo(self):\n return self.concentration_statistics_monte_carlo()",
"def getMixtureParams(self, y):\n if len(y.shape) == 1:\n # avoid underrun\n alpha = np.maximum(y[0:self.M], np.finfo(float).eps)\n sigma = y[self.M:2*self.M]\n mu = np.reshape(y[2*self.M:], [self.c, self.M]).T\n else:\n # avoid underrun\n alpha = np.maximum(y.T[0:self.M], np.finfo(float).eps)\n sigma = y.T[self.M:2*self.M]\n mu = np.reshape(y[:, 2*self.M:], [y.shape[0], self.c, self.M]).T\n return alpha, sigma, mu",
"def get_effect_size(self, summ, b, nmc=5000):\n m0b, v0b = self.DModel.models[0].predict(np.array([b])) \n m1b, v1b = self.DModel.models[1].predict(np.array([b]))\n \n d_mean_D = np.squeeze(m1b - m0b) # TODO: why was this swapped around?\n d_var_D = np.squeeze(v0b + v1b)\n d_std_D = np.sqrt(d_var_D)\n \n if d_mean_D < 0:\n pval = 1 - stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n else:\n pval = stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n \n xmin, xmax = (np.min([d_mean_D - 4*d_std_D, -0.1*d_std_D]), \n np.max([d_mean_D + 4*d_std_D, 0.1*d_std_D]))\n \n n = 300\n xrange = np.linspace(xmin, xmax, n)\n y = stats.norm.pdf(xrange, d_mean_D, d_std_D) \n \n samples = np.zeros((nmc))\n nspike = int(np.round(summ['pmp']['pmc']*nmc))\n samples[nspike:] = np.random.normal(loc=d_mean_D, \n scale=np.sqrt(d_var_D), \n size=(nmc-nspike))\n \n if not np.isscalar(b):\n d_bma = None\n else:\n \n if nspike==nmc:\n # BMA dominated by continuous model\n # Put all mass at xrange closest to b\n d_bma = np.zeros((n))\n xdelta = xrange[1] - xrange[0]\n ix = np.argmin((xrange-b)**2)\n d_bma[ix] = 1.0 / xdelta\n elif nspike==0:\n # BMA dominated by discontinuous model\n d_bma = y\n else:\n # BMA is a mixture\n kde_fit = stats.gaussian_kde(samples, \n bw_method='silverman')\n d_bma = kde_fit(xrange)\n \n return {'es_BMA': d_bma,\n 'es_Disc': y,\n 'es_disc_stats': (d_mean_D, d_std_D),\n 'pval': pval,\n 'es_range': xrange,\n 'f(b)': (m0b, m1b),\n 'es_transform': lambda z: z*d_std_D + d_mean_D}",
"def main(DATASET='campbell', N_AGE_MIX=1):\n files = glob(f'resources/SN*_{DATASET}_chain.tsv')\n N_SNE = len(files)\n # end = -11 - len(DATASET)\n # get the numbers after the SN.\n snids = map(lambda x: re.search('(?<=SN)\\d*', x).group(0), files)\n snids = list(map(int, snids))\n\n\n model = GaussianMixture(N_AGE_MIX)\n amplitudes = np.zeros((N_SNE, N_AGE_MIX))\n means = np.zeros((N_SNE, N_AGE_MIX))\n stds = np.zeros((N_SNE, N_AGE_MIX))\n\n print(f'Fitting ages to {N_AGE_MIX} Gaussians')\n pdf = PdfPages(f'resources/age_{DATASET}_{N_AGE_MIX}gaus_representation_preview.pdf')\n\n for i, f in enumerate(files):\n data = np.genfromtxt(f, delimiter='\\t')\n data = data[:, 7]\n\n model.fit(np.expand_dims(data, 1))\n\n amplitudes[i] = model.weights_.reshape(N_AGE_MIX)\n means[i] = model.means_.reshape(N_AGE_MIX)\n stds[i] = np.sqrt(model.covariances_).reshape(N_AGE_MIX)\n\n plt.figure()\n plt.hist(data, bins=np.linspace(-5, 20, 200))\n plt.hist(model.sample(1020000)[0], alpha=0.5, bins=np.linspace(-5, 20, 200))\n plt.title(f)\n \n pdf.savefig()\n plt.close()\n\n if (i+1)%10 == 0:\n print(f'Finished with the {i+1}th age fit')\n\n pdf.close()\n\n # if DATASET != 'both':\n ages = np.column_stack((snids, amplitudes, means, stds))\n # todo update the header to match the number of Gaussians used.\n np.savetxt(f'resources/age_{DATASET}_{N_AGE_MIX}gaus_representation.csv', ages, delimiter=',',\n header='sn id, amp_1, amp_2, amp_3, mean_1, mean_2, mean_2, std_1, std_2, std_3')\n \n print(f'Done with {N_AGE_MIX} Gaussian mixture for {DATASET}.')",
"def test_gaussian_em():\n fname = \"gmm-3-10-0.7.npz\"\n gmm = GaussianMixtureModel.generate( fname, 3, 3 )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n N, n = 1e6, 1e5\n\n\n X = gmm.sample( N, n )\n\n algo = GaussianMixtureEM(k, d)\n\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n\n M_ = closest_permuted_matrix( M, M_ )\n w_ = closest_permuted_vector( w, w_ )\n\n print w, w_\n\n print norm( M - M_ )/norm(M)\n print abs(S - S_).max()\n print norm( w - w_ ) \n\n assert( norm( M - M_ )/norm(M) < 1e-1 )\n assert (abs(S - S_) < 1 ).all()\n assert( norm( w - w_ ) < 1e-2 )",
"def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = [email protected]()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)",
"def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def mixture_vMF_density(x, mu_list, k_list):\n return_value = 0\n \n nr_mixtures = len(mu_list)\n \n for mu, k in zip(mu_list,k_list):\n \n Z = 2 * np.pi * ( np.exp(k) - np.exp(- k) ) / k\n \n return_value += 1 / Z * np.exp( k * np.dot(x, mu) )\n \n return return_value / nr_mixtures",
"def calc_stats(act):\n act = act.view(act.shape[0], -1).cpu().numpy()\n mu = np.mean(act, axis=0)\n sigma = np.cov(act, rowvar=False)\n return mu, sigma",
"def h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U=True):\r\n Kxxy = torch.cat((Kx,Kxy),1)\r\n Kyxy = torch.cat((Kxy.transpose(0,1),Ky),1)\r\n Kxyxy = torch.cat((Kxxy,Kyxy),0)\r\n nx = Kx.shape[0]\r\n ny = Ky.shape[0]\r\n is_unbiased = True\r\n if is_unbiased:\r\n xx = torch.div((torch.sum(Kx) - torch.sum(torch.diag(Kx))), (nx * (nx - 1)))\r\n yy = torch.div((torch.sum(Ky) - torch.sum(torch.diag(Ky))), (ny * (ny - 1)))\r\n # one-sample U-statistic.\r\n if use_1sample_U:\r\n xy = torch.div((torch.sum(Kxy) - torch.sum(torch.diag(Kxy))), (nx * (ny - 1)))\r\n else:\r\n xy = torch.div(torch.sum(Kxy), (nx * ny))\r\n mmd2 = xx - 2 * xy + yy\r\n else:\r\n xx = torch.div((torch.sum(Kx)), (nx * nx))\r\n yy = torch.div((torch.sum(Ky)), (ny * ny))\r\n # one-sample U-statistic.\r\n if use_1sample_U:\r\n xy = torch.div((torch.sum(Kxy)), (nx * ny))\r\n else:\r\n xy = torch.div(torch.sum(Kxy), (nx * ny))\r\n mmd2 = xx - 2 * xy + yy\r\n if not is_var_computed:\r\n return mmd2, None, Kxyxy\r\n hh = Kx+Ky-Kxy-Kxy.transpose(0,1)\r\n V1 = torch.dot(hh.sum(1)/ny,hh.sum(1)/ny) / ny\r\n V2 = (hh).sum() / (nx) / nx\r\n varEst = 4*(V1 - V2**2)\r\n if varEst == 0.0:\r\n print('error_var!!'+str(V1))\r\n return mmd2, varEst, Kxyxy",
"def __init__(self, quantity, dist_weights, gauss_params, upper_bound, lower_bound):\n self.dist_weights = dist_weights\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n if len(self.dist_weights) != len(gauss_params):\n print(\n \"Number of distribution weights do not match number of distributions!\"\n )\n diff = len(gauss_params) - len(dist_weights)\n if diff < 0:\n print(\"Ignoring trailing distribution weights\")\n self.dist_weights = self.dist_weights[: len(dist_weights) + diff]\n else:\n print(\"Assuming default weights of 1\")\n self.dist_weights.extend([1] * diff)\n # normalize weights\n self.dist_weights = np.array(\n [float(i) / sum(self.dist_weights) for i in self.dist_weights]\n )\n # create samples\n self.samples = []\n self.gauss_params = gauss_params\n sample_size = quantity\n self.sample_min, self.sample_max = [float(\"inf\"), -float(\"inf\")]\n while True:\n # determine the gaussian to sample from for each sample\n mixture_idx = np.random.choice(\n len(self.dist_weights),\n size=sample_size,\n replace=True,\n p=self.dist_weights,\n )\n # create the samples from the respective gaussian\n temp = np.fromiter(\n (ss.norm.rvs(*(gauss_params[i])) for i in mixture_idx), dtype=np.float64\n )\n # remember mixed sampled extremas for plotting\n self.sample_min = min(self.sample_min, temp.min())\n self.sample_max = max(self.sample_max, temp.max())\n # add those samples that are within the bounds\n self.samples = np.concatenate(\n [\n self.samples,\n np.fromiter(\n [x for x in temp if x <= upper_bound and x >= lower_bound],\n dtype=np.float64,\n ),\n ]\n )\n sample_size = quantity - len(self.samples)\n if sample_size == 0:\n break",
"def test_5_scalar_variance_1step(self):\n print(\"test 5 comparing variances\")\n\n means, vars, cl_probs = EM_step(\n self.X_h, self.means_h, self.dispersions_h, self.cluster_probabilities_h\n )\n\n self.assertEqual(means.shape[0], 2)\n\n print(vars[0], vars[1])",
"def train_and_score_model(\n dataset: np.ndarray\n ) -> Tuple[sklearn.mixture.GaussianMixture, float]:\n model = create_model()\n\n ### Fit model ###\n logger.info(\"Fitting GaussianMixture\")\n model.fit(dataset)\n\n ### Score model ###\n score = model.score(dataset)\n logger.info(f\"GaussianMixture trained. Score: {score}\")\n\n return model, score",
"def get_mixture(data, components):\n from jcvi.apps.base import popen\n\n probs, mus, sigmas = [], [], []\n fw = must_open(\"tmp\", \"w\")\n log_data = [log(x) for x in data if x > .05]\n data = \"\\n\".join([\"%.4f\" % x for x in log_data]).replace(\"inf\\n\", \"\")\n fw.write(data)\n fw.close()\n\n cmd = \"gmm-bic {0} {1} {2}\".format(components, len(log_data), fw.name)\n pipe = popen(cmd)\n\n for row in pipe:\n if row[0] != '#':\n continue\n\n atoms = row.split(\",\")\n a, b, c = atoms[1:4]\n a = float(a)\n b = float(b)\n c = float(c)\n\n mus.append(a)\n sigmas.append(b)\n probs.append(c)\n\n os.remove(fw.name)\n return probs, mus, sigmas",
"def mixing_ratio(ds, var):\n ds[var['mix_ratio']] = 0.5 * (1 - np.sqrt(1 - 4 * ds[var['spec_h']]))\n return ds",
"def main(fname, N, n, params):\n\n gmm = GaussianMixtureModel.from_file( fname )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n\n X = gmm.sample( N, n )\n\n # Set seed for the algorithm\n sc.random.seed( int( params.seed ) )\n\n algo = GaussianMixtureEM( k, d )\n\n O = M, S, w\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n M_ = closest_permuted_matrix( M.T, M_.T ).T\n\n # Table\n print column_aerr( M, M_ ), column_rerr( M, M_ )",
"def test_Gaussian_NB_estimators():",
"def __init__(self, n_components, covariance_type=\"full\", eps=1.e-6, init_params=\"kmeans\", mu_init=None,\n var_init=None, n_init=2, max_iter=1000, tol=1e-3, random_state=None):\n super(GaussianMixture, self).__init__()\n\n self.n_init = n_init\n self.max_iter = max_iter\n self.tol = tol\n self.random_state = random_state\n\n self.n_components = n_components\n\n self.mu_init = mu_init\n self.var_init = var_init\n self.eps = eps\n\n self.log_likelihood = -np.inf\n\n self.covariance_type = covariance_type\n self.init_params = init_params\n\n assert self.covariance_type in [\"full\", \"diag\"]\n assert self.init_params in [\"kmeans\", \"random\"]",
"def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var",
"def main( prefix, N, n, delta, params ):\n gmm = GaussianMixtureModel.from_file( prefix )\n k, d, M, w = gmm.k, gmm.d, gmm.means, gmm.weights\n logger.add( \"M\", M )\n logger.add_consts( \"M\", M, k, 2 )\n logger.add( \"w_min\", w.min() )\n logger.add( \"w_max\", w.max() )\n\n X = gmm.sample( N, n )\n logger.add( \"k\", k )\n logger.add( \"d\", d )\n logger.add( \"n\", n )\n\n # Set seed for the algorithm\n sc.random.seed( int( params.seed ) )\n logger.add( \"seed\", int( params.seed ) )\n\n P, T = sample_moments( X, k )\n Pe, Te = exact_moments( M, w )\n\n start = time.time()\n M_ = recover_components( k, P, T, Pe, Te, delta = delta )\n stop = time.time()\n logger.add( \"time\", stop - start )\n\n M_ = closest_permuted_matrix( M.T, M_.T ).T\n logger.add( \"M_\", M )\n\n # Error data\n logger.add_err( \"M\", M, M_ )\n logger.add_err( \"M\", M, M_, 'col' )\n\n print column_aerr(M, M_), column_rerr(M, M_)",
"def gamma_mixture_distillation_profile(crude1, crude2, vol1, vol2):\n assert vol1 >= 0 and vol2 >= 0, \\\n \"Specified volumes 'vol1' and 'vol2' must be positive.\"\n\n # fit gamma CDF to each crude\n fig, ax = plt.subplots(1, 3, figsize=(20, 6))\n try:\n fit_params1, cov_matrix1, fit_vals1, ax1 = gamma_fit(crude1, ax[0])\n except TypeError:\n return\n ax[0].set_title(f\"Crude 1 : {crude1} (Vol={vol1})\", size=16)\n\n try:\n fit_params2, cov_matrix2, fit_vals2, ax2 = gamma_fit(crude2, ax[1])\n except TypeError:\n return\n ax[1].set_title(f\"Crude 2 : {crude2} (Vol={vol2})\", size=16)\n\n # create gamma mixture model using volumes and CDFs\n total_vol = vol1 + vol2\n temps = np.linspace(0, 1500, 1500)\n mixture_model = ((vol1 / total_vol)*scipy.stats.gamma(a=fit_params1[0],\n scale=fit_params1[1])\n .cdf(temps)) + \\\n ((vol2 / total_vol)*scipy.stats.gamma(a=fit_params2[0],\n scale=fit_params2[1])\n .cdf(temps))\n\n # plot results\n ax[2].plot(temps, mixture_model, color=\"red\")\n ax[2].set_xlabel(\"Temperature (oC)\", size=16)\n ax[2].set_ylabel(\"Mass % Recovered\", size=16)\n ax[2].tick_params(labelsize=16)\n ax[2].annotate(f\"Gamma mixture model: \\n\" +\n \"----------------------------------\\n\" +\n f\"{vol1 / total_vol :.2f} x F({crude1}) + \" +\n f\"\\n{vol2 / total_vol :.2f} x F({crude2})\",\n xy=(500, 0),\n size=16,\n horizontalalignment = \"left\")\n ax[2].set_title(\"Crude Mixture Distillation Profile\", size=16)\n ax[2].set_ylim(-0.05, 1)\n\n # determine temperature values at relevant remaining mass percentages\n distilliation_percentages = [0.05] + \\\n [0.1*x for x in range(1, 10)] + \\\n [0.95, 0.99]\n temps = [np.absolute(mixture_model - x).argmin()-1\n for x in distilliation_percentages]\n\n mix_dist_df = pd.DataFrame({\"Mass % Recovered\": distilliation_percentages,\n \"Temperature (oC)\": temps})\n\n return mix_dist_df, fig"
] | [
"0.7492281",
"0.6477891",
"0.639532",
"0.63856214",
"0.62459314",
"0.6159641",
"0.6154703",
"0.6007116",
"0.5989447",
"0.5913637",
"0.58870196",
"0.5853389",
"0.58513975",
"0.5809787",
"0.57763517",
"0.5753387",
"0.57354677",
"0.5716687",
"0.570656",
"0.56891644",
"0.56604356",
"0.5655337",
"0.56415045",
"0.5624899",
"0.5610214",
"0.56095225",
"0.5599827",
"0.5582275",
"0.5580228",
"0.55605316"
] | 0.6841529 | 1 |
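Several of the negatives in the record above revolve around sampling from and fitting Gaussian mixture models. As an illustrative aside, not drawn from any of the records, here is a minimal scikit-learn sketch of the same idea; the component weights (0.3/0.7) and means (±5) are made-up values.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
n = 10_000
# hypothetical two-component 1-D mixture: weights 0.3/0.7, means -5/+5, unit variance
comp = rng.random(n) < 0.3
x = np.where(comp, rng.normal(-5.0, 1.0, n), rng.normal(5.0, 1.0, n)).reshape(-1, 1)

gmm = GaussianMixture(n_components=2, random_state=0).fit(x)
print(gmm.weights_)        # close to [0.3, 0.7] (component order may differ)
print(gmm.means_.ravel())  # close to [-5, 5]
```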
gets the entropy in the mixture distribution using brute force | def mixture_entropy_brute_force(self):
Z, sum_wlogw = 0, 0
# Naive implementation of measuring the entropy is
# p(c) = w(c) / Z with Z = sum_c w(c)
# H_c = -sum_c p(c) * log2(p(c))
# This can be transformed to a more stable implementation:
# H_c = log2(Z) - 1/Z * sum_c w(c) * log2(w(c))
for _, weight_c in self._iterate_mixtures():
if weight_c > 0:
Z += weight_c
sum_wlogw += weight_c * np.log2(weight_c)
if Z == 0:
return 0
else:
return np.log2(Z) - sum_wlogw / Z | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s",
"def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party == \"R\":\r\n republicans+=1\r\n elif party == \"D\":\r\n democrats+=1\r\n total+=1\r\n\r\n if total == 0: return 0\r\n prob_dem = democrats/total\r\n prob_rep = republicans/total\r\n if prob_dem == 0: return -(prob_rep * math.log(prob_rep, 2))\r\n if prob_rep == 0: return -(prob_dem * math.log(prob_dem, 2))\r\n\r\n entropy = (-prob_dem * math.log(prob_dem, 2)) -(prob_rep * math.log(prob_rep, 2))\r\n return entropy",
"def entropy(self):\n raise NotImplementedError",
"def mixture_entropy(self):\n \n mixture_size = self.parameters['fixed_mixture_size']\n \n if self.is_correlated_mixture or mixture_size is not None:\n # complicated case => run brute force or monte carlo\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n return self.mixture_entropy_brute_force()\n else:\n return self.mixture_entropy_monte_carlo()\n \n else:\n # simple case => calculate explicitly\n return super(LibraryBinaryNumeric, self).mixture_entropy()",
"def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H",
"def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count",
"def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro",
"def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))",
"def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e",
"def mixture_entropy_monte_carlo(self):\n if self.Ns > 63:\n raise ValueError('Mixture entropy estimation only works for fewer '\n 'than 64 substrates.')\n \n # sample mixtures\n base = 2 ** np.arange(0, self.Ns)\n observations = collections.Counter()\n for c in self._sample_mixtures():\n observations[np.dot(c, base)] += 1\n \n # estimate entropy from the histogram\n counts = np.fromiter(observations.values(), np.double,\n len(observations))\n \n # Naive implementation of measuring the entropy is\n # ps = counts / self._sample_steps\n # H = -np.sum(ps * np.log2(ps))\n # This can be transformed to a more stable implementation:\n log_steps = np.log2(self._sample_steps)\n return -np.sum(counts*(np.log2(counts) - log_steps))/self._sample_steps",
"def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res",
"def entropy(strength=256, wordlist=wordlist):\n return os.urandom(strength // 8)",
"def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())",
"def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N",
"def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)",
"def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy",
"def calculate_entropy():\n\tstat = {} # dictionary - chars and number of repetitions\n\tallchar = 0.0 # total number of characters\n\tentropy = 0.0 # initial entropy\n\n\tfor line in sys.stdin.readlines():\n\t\tline = re.sub(r'\\s', '', line)\n\t\tfor znak in line:\n\t\t\tif znak in stat:\n\t\t\t\tstat[znak] += 1\n\t\t\telse:\n\t\t\t\tstat[znak] = 1\n\t\t\tallchar += 1\n\n\tfor znak in stat:\n\t\tstat[znak] = stat[znak]/allchar\n\t\tentropy += stat[znak] * log(stat[znak], 2)\n\n\tentropy *= -1\n\treturn entropy",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())",
"def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))",
"def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])",
"def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent",
"def GetEntropy(flanks):\n countA = 0\n countT = 0\n countG = 0\n countC = 0\n for i in flanks:\n if i == \"A\":\n countA += 1\n elif i == \"T\":\n countT += 1\n elif i == \"C\":\n countC += 1\n elif i == \"G\":\n countG += 1\n else: pass\n total = countA+countT+countG+countC\n fractions = [item*1.0/total for item in [countA,countT,countG,countC]]\n entropy = sum([-1.0*item*math.log(item,2) for item in fractions if item != 0])\n return entropy",
"def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)",
"def entropyRandom(stream):\n prob = 1.0 / len(stream)\n return -(prob * log(prob, 2)) * len(stream)",
"def entropy(self, text):\n\n# text = self.myReplacer.replace(text)\n# text = self.tokenizer.tokenize(text)\n new_text = []\n for word in text:\n if word.count('\\'') > 0:\n words = word.split('\\'')\n for w in words:\n new_text.append(w)\n else:\n new_text.append(word)\n text = new_text\n \n e = 0.0\n lenth = len(text)\n if lenth == 0:\n return 0\n elif lenth < self._n:\n current_n = lenth\n else:\n current_n = self._n\n \n for i in range(current_n - 1, len(text)):\n context = tuple(text[(i - current_n + 1) : i])\n token = text[i]\n e += self.logprob(token, context)\n return e",
"def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def entropy(n_bits):\n return n_bits and random.getrandbits(n_bits)",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))"
] | [
"0.69703406",
"0.6953341",
"0.69284886",
"0.69082105",
"0.687003",
"0.6861443",
"0.6851223",
"0.6819869",
"0.6818582",
"0.68088657",
"0.677869",
"0.67508334",
"0.66926396",
"0.66545224",
"0.6642097",
"0.66395456",
"0.6627271",
"0.6620591",
"0.6618552",
"0.66109055",
"0.6587405",
"0.6585056",
"0.6547002",
"0.6542134",
"0.6509462",
"0.648532",
"0.648438",
"0.6470614",
"0.6468062",
"0.64449763"
] | 0.7468018 | 0 |
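The `mixture_entropy_brute_force` record above relies on the identity H = log2(Z) − (1/Z)·Σ_c w_c·log2(w_c), a numerically stabler form of the naive −Σ_c p_c·log2(p_c) with p_c = w_c/Z. A quick self-contained check of that identity; the weights are made-up illustration values.

```python
import numpy as np

w = np.array([0.5, 0.25, 0.125, 0.125])          # unnormalised mixture weights (made up)
Z = w.sum()
p = w / Z

naive = -(p * np.log2(p)).sum()                  # -sum_c p_c * log2(p_c)
stable = np.log2(Z) - (w * np.log2(w)).sum() / Z # transformed, stable form
assert np.isclose(naive, stable)                 # both give the entropy in bits
print(naive)
```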
estimates the average activity of the receptor as a response to single ligands. `ret_receptor_activity` determines whether the mean receptor activity will also be returned. `approx_prob` determines whether the probabilities of encountering ligands in mixtures are calculated exactly or only approximately, which should work for small probabilities. `clip` determines whether the estimates will be forced to be in [0, 1]. `ignore_correlations` determines whether correlations in the mixtures will be ignored or not. | def receptor_crosstalk_estimate(self, ret_receptor_activity=False,
approx_prob=False, clip=False,
ignore_correlations=False):
if not ignore_correlations and self.is_correlated_mixture:
r_n, r_nm = self.receptor_activity_estimate(ret_correlations=True,
approx_prob=approx_prob,
clip=clip)
q_nm = r_nm - np.outer(r_n, r_n)
if clip:
np.clip(q_nm, 0, 1, q_nm)
if ret_receptor_activity:
return r_n, q_nm
else:
return q_nm
raise NotImplementedError('Not implemented for correlated mixtures')
S_ni = self.sens_mat
p_i = self.substrate_probabilities
if approx_prob:
# approximate calculation for small p_i
q_nm = np.einsum('ni,mi,i->nm', S_ni, S_ni, p_i)
if clip:
np.clip(q_nm, 0, 1, q_nm)
else:
# proper calculation of the probabilities
S_ni_mask = S_ni.astype(np.bool)
q_nm = np.zeros((self.Nr, self.Nr))
for n in range(self.Nr):
for m in range(self.Nr):
mask = S_ni_mask[n, :] * S_ni_mask[m, :]
q_nm[n, m] = 1 - np.product(1 - p_i[mask])
if ret_receptor_activity:
q_n = self.receptor_activity_estimate(approx_prob=approx_prob,
clip=clip)
return q_n, q_nm
else:
return q_nm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def receptor_activity_estimate(self, ret_correlations=False,\n approx_prob=False, clip=False):\n S_ni = self.sens_mat\n p_i = self.substrate_probabilities\n\n # calculate receptor activity assuming uncorrelated mixtures \n if approx_prob:\n # approximate calculation for small p_i\n r_n = np.dot(S_ni, p_i)\n if clip:\n np.clip(r_n, 0, 1, r_n)\n \n else:\n # proper calculation of the probabilities\n r_n = np.zeros(self.Nr)\n S_ni_mask = S_ni.astype(np.bool)\n for n in range(self.Nr):\n r_n[n] = 1 - np.product(1 - p_i[S_ni_mask[n, :]])\n \n if self.is_correlated_mixture:\n # add linear correction term for correlated mixtures\n J_ij = self.correlations\n p_ni = p_i[None, :] * (1 - S_ni)\n \n corr1 = 1 + np.einsum('ij,ni,nj->n', J_ij, p_ni, p_ni)\n corr2 = 1 + np.einsum('ij,i,j->', J_ij, p_i, p_i)\n \n barr_n_0 = 1 - r_n\n barr_n = barr_n_0 * (1 + corr1 - corr2)\n r_n = 1 - barr_n\n if clip:\n np.clip(r_n, 0, 1, r_n)\n \n if ret_correlations:\n # estimate the correlations from the estimated crosstalk\n q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob,\n ignore_correlations=True)\n \n if approx_prob:\n r_nm = np.outer(r_n, r_n) + q_nm\n else:\n r_nm = 1 - (1 - q_nm)*(1 - np.outer(r_n, r_n))\n \n if self.is_correlated_mixture:\n # add one correction term for correlated mixtures\n p_nmi = np.einsum('i,ni,mi->nmi', p_i, 1 - S_ni, 1 - S_ni)\n \n corr1 = 1 + np.einsum('ij,nmi,nmj->nm', J_ij, p_nmi, p_nmi)\n # corr2 = 1 + np.einsum('ij,i,j->', J_ij, p_i, p_i)\n # this term has already been calculated above and can be reused\n\n # convert r_nm_0 (here given as r_nm) into barr_nm_0\n barr_nm_0 = 1 - barr_n_0[:, None] - barr_n_0[None, :] + r_nm\n \n # correct barr_nm for the correlations J_ij\n barr_nm = barr_nm_0 * (1 + corr1 - corr2)\n\n # convert barr_nm into r_nm\n r_nm = 1 - barr_n[:, None] - barr_n[None, :] + barr_nm\n \n if clip:\n np.clip(r_nm, 0, 1, r_nm)\n \n return r_n, r_nm\n \n else:\n return r_n",
"def receptor_activity_brute_force(self, ret_correlations=False):\n S_ni = self.sens_mat\n Z = 0\n r_n = np.zeros(self.Nr)\n if ret_correlations:\n r_nm = np.zeros((self.Nr, self.Nr))\n \n # iterate over all mixtures\n for c, prob_c in self._iterate_mixtures():\n # get the activity vector associated with m\n a_n = (np.dot(S_ni, c) >= 1)\n Z += prob_c\n\n r_n[a_n] += prob_c\n if ret_correlations:\n r_nm[np.outer(a_n, a_n)] += prob_c\n \n # return the normalized output\n r_n /= Z\n if ret_correlations:\n r_nm /= Z\n return r_n, r_nm\n else:\n return r_n",
"def actor_loss(self, buffer: VpgBuffer):\n\n d = buffer.get_data()\n weights = d[\"traj_rewards\"]\n if self.method == \"togo\":\n weights = d[\"togo_rewards\"]\n if self.method == \"value baseline\":\n value_est = self.value_estimator.forward(d[\"observations\"])\n weights = d[\"togo_rewards\"] - value_est\n\n self.update_value_estimator(value_est, d[\"togo_rewards\"])\n\n # Create 1-hot mask in shape of actions (num steps, num actions)\n action_mask = F.one_hot(d[\"actions\"].long(), self.num_actions)\n\n # Use mask to find probabilities of actions taken\n masked_probs = torch.sum(action_mask.float() * d[\"log_probs\"], dim=1)\n\n return - torch.mean(weights * masked_probs)",
"def process_experience_for_neural_agents(\n experience: types.NestedTensor,\n accepts_per_arm_features: bool,\n training_data_spec: types.NestedTensorSpec,\n) -> Tuple[types.NestedTensor, types.Tensor, types.Tensor]:\n flattened_experience, _ = nest_utils.flatten_multi_batched_nested_tensors(\n experience, training_data_spec\n )\n\n observation = flattened_experience.observation\n action = flattened_experience.action\n reward = flattened_experience.reward\n\n if not accepts_per_arm_features:\n return observation, action, reward\n\n # The arm observation we train on needs to be copied from the respective\n # policy info field to the per arm observation field. Pretending there was\n # only one action, we fill the action field with zeros.\n chosen_arm_features = flattened_experience.policy_info.chosen_arm_features\n observation[bandit_spec_utils.PER_ARM_FEATURE_KEY] = tf.nest.map_structure(\n lambda t: tf.expand_dims(t, axis=1), chosen_arm_features\n )\n action = tf.zeros_like(action)\n if bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY in observation:\n # This change is not crucial but since in training there will be only one\n # action per sample, it's good to follow the convention that the feature\n # value for `num_actions` be less than or equal to the maximum available\n # number of actions.\n observation[bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY] = tf.ones_like(\n observation[bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY]\n )\n\n return observation, action, reward",
"def receptor_activity(self, method='auto', ret_correlations=False, **kwargs):\n if method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute_force'\n else:\n method = 'monte_carlo'\n \n if method == 'brute_force' or method == 'brute-force':\n return self.receptor_activity_brute_force(ret_correlations, **kwargs)\n elif method == 'monte_carlo' or method == 'monte-carlo':\n return self.receptor_activity_monte_carlo(ret_correlations, **kwargs)\n elif method == 'estimate':\n return self.receptor_activity_estimate(ret_correlations, **kwargs)\n else:\n raise ValueError('Unknown method `%s`.' % method)",
"def get_average_percision_(qres, ibs=None, gt_aids=None):\n recall_range_, p_interp_curve = get_interpolated_precision_vs_recall_(qres, ibs=ibs, gt_aids=gt_aids)\n\n if recall_range_ is None:\n ave_p = np.nan\n else:\n ave_p = p_interp_curve.sum() / p_interp_curve.size\n\n return ave_p",
"def mr_effect_estimate(phenotypes, outcome, exposure, n_iter=1000,\n y_g_test=\"linear\", x_g_test=\"linear\"):\n def _estimate_beta(phen):\n # Regress big_gamma = Y ~ G\n stats = regress(\"{} ~ grs\".format(outcome), y_g_test, phen)\n big_gamma = stats[\"beta\"]\n\n # Regress small_gamma = X ~ G\n stats = regress(\"{} ~ grs\".format(exposure), x_g_test, phen)\n small_gamma = stats[\"beta\"]\n\n # Ratio estimate is beta = big_gamma / small_gamma\n return big_gamma / small_gamma\n\n # Using the percentile method to compute a confidence interval.\n df = phenotypes._phenotypes\n beta = _estimate_beta(phenotypes)\n\n betas = np.empty(n_iter, dtype=float)\n n = phenotypes.get_nb_samples()\n for i in range(n_iter):\n idx = np.random.choice(n, size=n, replace=True)\n phenotypes._phenotypes = df.iloc[idx, :]\n betas[i] = _estimate_beta(phenotypes)\n\n # Find the critical values\n # 95% CI -> 2.5% and 97.5%\n low, high = np.percentile(betas, [2.5, 97.5])\n\n # p-value\n # This method to calculate the p-value is derived from:\n # An Introduction to the Bootstrap. 1993. doi:10.1007/978-1-4899-4541-9\n # Efron B., Tibshirani RJ.\n #\n # Section 15.4: Relationship of hypothesis tests to confidence intervals\n # and the bootstrap.\n # TODO verify...\n # p = np.sum(betas < 0) / n_iter\n\n return beta, low, high, None",
"def computeCorr(pred_act,responses):\n\n num_pres,num_neurons = np.shape(responses)\n corr=np.zeros(num_neurons)\n \n for i in xrange(0,num_neurons):\n if np.all(pred_act[:,i]==0) & np.all(responses[:,i]==0):\n corr[i]=1.\n elif not(np.all(pred_act[:,i]==0) | np.all(responses[:,i]==0)):\n # /!\\ To prevent errors due to very low values during computation of correlation\n if abs(pred_act[:,i]).max()<1:\n pred_act[:,i]=pred_act[:,i]/abs(pred_act[:,i]).max()\n if abs(responses[:,i]).max()<1:\n responses[:,i]=responses[:,i]/abs(responses[:,i]).max() \n corr[i]=pearsonr(np.array(responses)[:,i].flatten(),np.array(pred_act)[:,i].flatten())[0]\n \n return corr",
"def get_average_repro(self):\n return np.mean([agent.get_fledge_probability() for agent in self.agents])",
"def estimate_advantages(rewards, masks, values, gamma, tau, device):\n\n #rewards, masks, values = to_device(torch.device('cpu'), rewards, masks, values)\n tensor_type = type(rewards)\n deltas = tensor_type(rewards.size(0), 1)\n advantages = tensor_type(rewards.size(0), 1)\n\n prev_value = 0\n prev_advantage = 0\n for i in reversed(range(rewards.size(0))):\n deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i] # at the end of every episode m=0 so we're\n advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i] # computing from there backwards each time\n prev_value = values[i, 0]\n prev_advantage = advantages[i, 0]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / advantages.std()\n\n advantages, returns = to_device(device, advantages, returns)\n return advantages, returns",
"def vtrace_pg_loss(target_logits, baselines, rewards, trajectories,\n action_fields):\n # Remove last timestep from trajectories and baselines.\n debug = False\n\n print(\"action_fields\", action_fields) if debug else None\n\n trajectories = Trajectory(*tuple(item[:-1] for item in trajectories))\n print(\"trajectories.reward\", trajectories.reward) if debug else None\n\n rewards = rewards[:-1]\n values = baselines[:-1]\n\n # Filter for only the relevant actions/logits/masks.\n\n target_logits = filter_by(action_fields, target_logits)\n print(\"target_logits\", target_logits) if debug else None\n print(\"target_logits.shape\", target_logits.shape) if debug else None\n\n # shape: [batch_seq_size x action_size]\n split_target_logits = target_logits\n action_size = tuple(list(target_logits.shape[1:])) # from the 3rd dim, it is action dim, may be [S] or [C, S] or [H, W]\n\n # shape: [batch_size x seq_size x action_size]\n split_target_logits = split_target_logits.reshape(AHP.batch_size, AHP.sequence_length, *action_size)\n # shape: [seq_size x batch_size x action_size]\n split_target_logits = torch.transpose(split_target_logits, 0, 1)\n # shape: [new_seq_size x batch_size x action_size]\n split_target_logits = split_target_logits[:-1]\n # shape: [seq_batch_size x action_size]\n split_target_logits = split_target_logits.reshape(-1, *action_size)\n print(\"split_target_logits\", split_target_logits) if debug else None \n print(\"split_target_logits.shape\", split_target_logits.shape) if debug else None \n\n target_logits = split_target_logits\n print(\"target_logits\", target_logits) if debug else None\n print(\"target_logits.shape\", target_logits.shape) if debug else None\n\n behavior_logits = filter_by_for_lists(action_fields, trajectories.behavior_logits)\n print(\"behavior_logits\", behavior_logits) if debug else None\n print(\"behavior_logits.shape\", behavior_logits.shape) if debug else None\n\n actions = filter_by_for_lists(action_fields, trajectories.action)\n print(\"actions\", actions) if debug else None\n print(\"actions.shape\", actions.shape) if debug else None\n\n if action_fields == 'units' or action_fields == 'target_unit':\n seqbatch_unit_shape = target_logits.shape[0:2]\n target_logits = target_logits.reshape(-1, target_logits.shape[-1])\n behavior_logits = behavior_logits.reshape(-1, behavior_logits.shape[-1])\n actions = actions.reshape(-1, actions.shape[-1])\n\n if action_fields == 'target_location':\n target_logits = target_logits.reshape(target_logits.shape[0], -1)\n behavior_logits = behavior_logits.reshape(behavior_logits.shape[0], -1)\n\n actions_2 = torch.zeros(behavior_logits.shape[0], 1, dtype=torch.int64, device=device)\n print(\"actions_2.shape\", actions_2.shape) if debug else None\n\n for i, pos in enumerate(actions):\n # note: for pos, the first index is x, the seconde index is y\n # however, for the matrix, the first index is y (row), and the second index is x (col)\n x = pos[0]\n assert x >= 0\n assert x < SCHP.world_size\n y = pos[1]\n assert y >= 0\n assert y < SCHP.world_size\n index = SCHP.world_size * y + x\n actions_2[i][0] = index\n\n actions = actions_2\n print(\"actions_2.shape\", actions_2.shape) if debug else None\n\n # Compute and return the v-trace policy gradient loss for the relevant subset of logits.\n clipped_rhos = compute_importance_weights(behavior_logits, target_logits, actions)\n\n if action_fields == 'units' or action_fields == 'target_unit':\n clipped_rhos = clipped_rhos.reshape(seqbatch_unit_shape, -1)\n clipped_rhos = 
torch.mean(clipped_rhos, dim=-1)\n\n # To make the clipped_rhos shape to be [T-1, B]\n clipped_rhos = clipped_rhos.reshape(rewards.shape)\n print(\"clipped_rhos\", clipped_rhos) if debug else None\n print(\"clipped_rhos.shape\", clipped_rhos.shape) if debug else None\n\n discounts = ~np.array(trajectories.is_final)\n discounts = torch.tensor(discounts, dtype=torch.float32, device=device)\n\n # we implement the vtrace_advantages\n # vtrace_advantages(clipped_rhos, rewards, discounts, values, bootstrap_value):\n\n weighted_advantage = vtrace_advantages(clipped_rhos, rewards,\n discounts, values,\n baselines[-1])\n\n print(\"trajectories.masks\", trajectories.masks) if debug else None\n\n masks = filter_by_for_masks(action_fields, trajectories.masks)\n print(\"filtered masks\", masks) if debug else None\n\n # AlphaStar: weighted_advantage = [weighted_advantage] * len(target_logits)\n # mAS: the weighted_advantage is already been unfolded, so we don't need the line\n # we need the pg_advantages of the VTrace_returns, which is in ths index of 1\n weighted_advantage = weighted_advantage[1]\n print(\"weighted_advantage\", weighted_advantage) if debug else None\n\n # here we should reshape the target_logits and actions back to [T-1, B, C] size for computing policy gradient\n if action_fields == 'units':\n target_logits = target_logits.reshape(AHP.sequence_length - 1, AHP.batch_size * AHP.max_selected, -1)\n actions = actions.reshape(AHP.sequence_length - 1, AHP.batch_size * AHP.max_selected, -1)\n\n weighted_advantage = torch.cat([weighted_advantage] * AHP.max_selected, dim=1)\n masks = torch.cat([masks] * AHP.max_selected, dim=1)\n else:\n target_logits = target_logits.reshape(AHP.sequence_length - 1, AHP.batch_size, -1)\n actions = actions.reshape(AHP.sequence_length - 1, AHP.batch_size, -1)\n\n result = compute_over_actions(policy_gradient_loss, target_logits,\n actions, weighted_advantage, masks)\n\n if action_fields == 'units':\n result = result.reshape(-1, AHP.max_selected)\n result = torch.mean(result, dim=-1)\n\n print(\"result\", result) if debug else None\n print(\"result.shape\", result.shape) if debug else None\n\n # note: in mAS, we should make the result not beyond 0\n # return 0.5 * torch.mean(torch.square(result))\n\n debug = False\n\n # note: we change back to use only result \n return result",
"def test_rr_testeffect(results):\n test_t0 = results.test_effect()\n test_t1 = results.test_effect(0.)\n test_t2 = results.test_effect(5.2)\n assert test_t0 == pytest.approx(1.1920928955078125e-07)\n assert test_t1 == pytest.approx(1.1920928955078125e-07)\n assert test_t2 == 1.0",
"def getReward(self, active_corr, simulator, p, active_goal):\n i_r = self.correlations[active_corr].i_reward\n # if i_r is None:\n # reward = self.simulator.getReward()\n # elif self.correlations[i_r].getCertainty() > self.threshold:\n if i_r is None:\n reward = simulator\n elif self.correlations[i_r].getCertainty(p, active_goal) > self.threshold:\n reward = 1\n else:\n reward = 0\n return reward",
"def mask_all_but_correct_references(rec, balance_rep_count=False, include_incorrect=False, \n generate_evoked_mask=False, exclude_partial_ref=True):\n newrec = rec.copy()\n if 'mask' in newrec.signals.keys():\n log.debug('valid bins coming in: %d',np.sum(newrec['mask'].as_continuous()))\n\n newrec = normalize_epoch_lengths(newrec, resp_sig='resp', epoch_regex='^STIM_|^REF|^TAR',\n include_incorrect=include_incorrect)\n\n newrec['resp'] = newrec['resp'].rasterize()\n if 'stim' in newrec.signals.keys():\n newrec['stim'] = newrec['stim'].rasterize()\n resp = newrec['resp']\n\n if balance_rep_count:\n\n epoch_regex = \"^STIM_\"\n epochs_to_extract = ep.epoch_names_matching(resp.epochs, epoch_regex)\n p=resp.get_epoch_indices(\"PASSIVE_EXPERIMENT\")\n a=np.concatenate((resp.get_epoch_indices(\"HIT_TRIAL\"),\n resp.get_epoch_indices(\"CORRECT_REJECT_TRIAL\")), axis=0)\n\n epoch_list=[]\n for s in epochs_to_extract:\n e = resp.get_epoch_indices(s)\n pe = ep.epoch_intersection(e, p)\n ae = ep.epoch_intersection(e, a)\n if len(pe)>len(ae):\n epoch_list.extend(ae)\n subset=np.round(np.linspace(0,len(pe),len(ae)+1)).astype(int)\n for i in subset[:-1]:\n epoch_list.append(pe[i])\n else:\n subset=np.round(np.linspace(0,len(ae),len(pe)+1)).astype(int)\n for i in subset[:-1]:\n epoch_list.append(ae[i])\n epoch_list.extend(pe)\n\n newrec = newrec.create_mask(epoch_list)\n\n elif include_incorrect:\n log.info('INCLUDING ALL TRIALS (CORRECT AND INCORRECT)')\n newrec = newrec.and_mask(['REFERENCE'])\n\n else:\n newrec = newrec.and_mask(['PASSIVE_EXPERIMENT', 'HIT_TRIAL', 'CORRECT_REJECT_TRIAL', 'MISS_TRIAL'])\n newrec = newrec.and_mask(['REFERENCE'])\n\n if exclude_partial_ref:\n mask_data = newrec['mask'].extract_epoch('REFERENCE')\n pp = np.mean(mask_data, axis=2)[:,0]\n # if partial mask, remove completely\n mask_data[(pp>0) & (pp<1),:,:]=0\n tt = (pp>0) & (pp<1) \n if tt.sum() > 0:\n log.info('removing %d incomplete REFERENCES', tt.sum())\n newrec.signals['mask']=newrec['mask'].replace_epoch('REFERENCE', mask_data)\n\n # figure out if some actives should be masked out\n# t = ep.epoch_names_matching(resp.epochs, \"^TAR_\")\n# tm = [tt[:-2] for tt in t] # trim last digits\n# active_epochs = resp.get_epoch_indices(\"ACTIVE_EXPERIMENT\")\n# if len(set(tm)) > 1 and len(active_epochs) > 1:\n# print('Multiple targets: ', tm)\n# files = ep.epoch_names_matching(resp.epochs, \"^FILE_\")\n# keep_files = files\n# e = active_epochs[1]\n# for i,f in enumerate(files):\n# fi = resp.get_epoch_indices(f)\n# if any(ep.epoch_contains([e], fi, 'both')):\n# keep_files = files[:i]\n#\n# print('Print keeping files: ', keep_files)\n# newrec = newrec.and_mask(keep_files)\n\n if 'state' in newrec.signals:\n b_states = ['far', 'hit', 'lick',\n 'puretone_trials', 'easy_trials', 'hard_trials']\n trec = newrec.copy()\n trec = trec.and_mask(['ACTIVE_EXPERIMENT'])\n st = trec['state'].as_continuous().copy()\n str = trec['state_raw'].as_continuous().copy()\n mask = trec['mask'].as_continuous()[0, :]\n for s in trec['state'].chans:\n if s in b_states:\n i = trec['state'].chans.index(s)\n m = np.nanmean(st[i, mask])\n sd = np.nanstd(st[i, mask])\n # print(\"{} {}: m={}, std={}\".format(s, i, m, sd))\n # print(np.sum(mask))\n st[i, mask] -= m\n st[i, mask] /= sd\n str[i, mask] -= m\n str[i, mask] /= sd\n newrec['state'] = newrec['state']._modified_copy(st)\n newrec['state_raw'] = newrec['state_raw']._modified_copy(str)\n\n if generate_evoked_mask:\n mask = newrec['mask'].as_continuous().copy()\n padbins=int(np.round(newrec['resp'].fs * 
0.1))\n\n preidx = resp.get_epoch_indices('PreStimSilence', mask=newrec['mask'])\n posidx = resp.get_epoch_indices('PostStimSilence', mask=newrec['mask'])\n for i,p in enumerate(posidx):\n posidx[i]=(p[0]+padbins, p[1])\n\n post_mask = newrec['resp'].epoch_to_signal(indices=posidx)\n pre_mask = newrec['resp'].epoch_to_signal(indices=preidx)\n #mask[post_mask.as_continuous()] = False\n ev_mask = mask.copy()\n ev_mask[pre_mask.as_continuous()] = False\n ev_mask[post_mask.as_continuous()] = False\n newrec['sp_mask'] = newrec['mask']._modified_copy(data=mask)\n newrec['ev_mask'] = newrec['mask']._modified_copy(data=ev_mask)\n\n return newrec",
"def mean_log_prob_approx(self, y=None, name='mean_log_prob_approx'):\n with self._name_and_control_scope(name):\n return approx_expected_log_prob_sigmoid(\n self.loc, self.scale, y,\n MONAHAN_MIX_PROB[self.num_probit_terms_approx],\n MONAHAN_INVERSE_SCALE[self.num_probit_terms_approx])",
"def apphot(im, yx, rap, subsample=4, **kwargs):\n n, f = anphot(im, yx, rap, subsample=subsample, **kwargs)\n if np.size(rap) > 1:\n return n.cumsum(-1), f.cumsum(-1)\n else:\n return n, f",
"def partisan_att_reward(state, election_results, electoral_votes):\n evotes = int(electoral_votes[electoral_votes['state'] == state].evotes)\n dem_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'democrat')].votes)\n rep_votes = int(election_results[(election_results['state'] == state) & (\n election_results['party'] == 'republican')].votes)\n total_votes = dem_votes + rep_votes\n margin = (max(dem_votes, rep_votes) -\n min(dem_votes, rep_votes))/total_votes\n return evotes/(1+margin)",
"def _activity(\n run, baseline_activity=0., baseline_sigma=3.0,\n trace_type='deconvolved'):\n if trace_type != 'deconvolved':\n raise ValueError(\n 'Temporal classifier only implemented for deconvolved data.')\n\n if run.run_type == 'spontaneous' and 'sated' in run.tags:\n runs = run.parent.runs(run_types=['spontaneous'], tags=['sated'])\n spontaneous = True\n elif run.run_type == 'spontaneous' and 'hungry' in run.tags:\n runs = run.parent.runs(run_types=['spontaneous'], tags=['hungry'])\n spontaneous = True\n elif run.run_type == 'training':\n runs = run.parent.runs(run_types=['training'])\n spontaneous = False\n else:\n raise ValueError(\n 'Unknown run_type and tags, not sure how to calculate activity.')\n\n baseline, variance, outliers = None, None, None\n if spontaneous:\n popact, outliers = [], []\n for r in runs:\n t2p = r.trace2p()\n pact = t2p.trace('deconvolved')\n fmin = t2p.lastonset()\n mask = t2p.inactivity()\n mask[:fmin] = False\n\n if len(popact):\n popact = np.concatenate([popact, pact[:, mask]], axis=1)\n else:\n popact = pact[:, mask]\n\n trs = t2p.trace('deconvolved')[:, fmin:]\n cellact = np.nanmean(trs, axis=1)\n outs = cellact > np.nanmedian(cellact) + 2*np.std(cellact)\n\n if len(outliers) == 0:\n outliers = outs\n else:\n outliers = np.bitwise_or(outliers, outs)\n\n if len(popact):\n popact = np.nanmean(popact[np.invert(outliers), :], axis=0)\n\n baseline = np.median(popact)\n variance = np.std(popact)\n outliers = outliers\n else:\n popact = []\n for r in runs:\n t2p = r.trace2p()\n ncells = t2p.ncells\n pact = np.nanmean(t2p.trace('deconvolved'), axis=0)\n skipframes = int(t2p.framerate*4)\n\n for cs in ['plus*', 'neutral*', 'minus*', 'pavlovian*']:\n onsets = t2p.csonsets(cs)\n for ons in onsets:\n pact[ons:ons+skipframes] = np.nan\n popact = np.concatenate([popact, pact[np.isfinite(pact)]])\n\n if len(popact):\n # baseline = np.median(popact)\n\n # Exclude extremes\n percent = 2.0\n popact = np.sort(popact)\n trim = int(percent*popact.size/100.)\n popact = popact[trim:-trim]\n\n baseline = np.median(popact) # Moved to after extreme exclusion on 190326\n variance = np.std(popact)\n outliers = np.zeros(ncells, dtype=bool)\n\n if baseline is None:\n baseline, variance = 0.01, 0.08*baseline_sigma\n else:\n baseline *= baseline_activity\n variance *= baseline_sigma\n\n return baseline, variance, outliers",
"def calculate_reactivity_v1(self,\n sequence_slice: slice = slice(0, None),\n nt_a_c_only: bool = True):\n\n # Get the list of \"rates\" - \"modified\" and \"untreated\"\n # - Directly use `reactivity_profile` (= modified - untreated)\n # modified_rate = [item.modified_rate for item in self.profile_list]\n # untreated_rate = [item.untreated_rate for item in self.profile_list]\n reactivity_profile = [item.reactivity_profile for item in self.profile_list]\n\n # --------------\n # Adjusted profile rate\n # Rules\n # - negative value -> 0\n # - \"None\" -> INVALID\n # - If \"AC-only\", INVALID for \"GU\" nt.\n #\n # - negative value -> 0\n reactivity_profile_adjusted \\\n = [abs(rate) if rate is not None and rate < 0.0 else rate for rate in reactivity_profile]\n # - \"None\" -> INVALID\n reactivity_profile_adjusted \\\n = [rate if rate is not None else self.INVALID_VALUE for rate in reactivity_profile_adjusted]\n if nt_a_c_only:\n # INVALID for \"GU\" nt.\n self.sequence.calculate_length()\n reactivity_profile_adjusted \\\n = [rate if rate is not None and self.sequence.is_nt_ac(index + 1) else self.INVALID_VALUE for index, rate in enumerate(reactivity_profile_adjusted)]\n\n # Convert it to \"numpy array\"\n reactivity_profile_adjusted = numpy.array(reactivity_profile_adjusted, dtype=float)\n\n # Normalize the rates\n reactivity_profile_adjusted = reactivity_profile_adjusted[sequence_slice] # Apply the \"sequence slice\"\n max_rate = numpy.amax(reactivity_profile_adjusted)\n self.neo_reactivity_list = \\\n numpy.array([rate / max_rate if rate != self.INVALID_VALUE else rate for rate in reactivity_profile_adjusted])\n self.neo_reactivity_list = self.neo_reactivity_list.tolist()",
"def add_is_approximation(self):\n \n if (self.mf_steps == 0 or self.alpha ==0) and (not self.mixture)\\\n and (self.gibbs_steps ==0):\n print(\"Importance distribution is uniform\")\n weight_term = T.log(self.num_samples) + self.num_vars*T.log(0.5)\n \n elif (self.mf_steps > 0 and self.alpha > 0) and (not self.mixture)\\\n and (self.gibbs_steps ==0):\n print(\"Importance distribution is not uniform\")\n weight_term = T.log(self.num_samples)+\\\n self.get_importance_evals(T.transpose(self.x_tilda), \n T.transpose(self.sampler_theta))\n \n if self.resample:\n weight_term = T.reshape(weight_term, \n [self.batch_size, self.num_samples])\n \n elif self.mixture:\n \n if self.resample:\n n_iters = self.num_samples*self.batch_size \n else:\n n_iters = self.num_samples \n \n weight_term, _ =\\\n theano.scan(lambda j: \n self.get_mixture_evals(T.transpose(self.x_tilda[j,:])),\n sequences = [T.arange(n_iters)])\n \n if self.resample:\n weight_term = T.reshape(weight_term, \n [self.batch_size, self.num_samples]) \n \n weight_term = T.log(self.num_samples) + weight_term \n \n elif self.gibbs_steps > 0:\n print(\"Importance distribution is gibbs sampler\") \n weight_term = T.log(self.num_samples)+\\\n self.get_importance_evals(T.transpose(self.x_tilda), \n T.transpose(self.sampler_theta))\n \n if self.resample:\n weight_term = T.reshape(weight_term, \n [self.batch_size, self.num_samples]) \n \n \n if self.resample and self.num_hidden ==0:\n \n approx_Z = -self.compute_energy(self.x_tilda, \n self.num_samples*self.batch_size)\n \n elif (not self.resample) and self.num_hidden ==0:\n \n approx_Z = -self.compute_energy(self.x_tilda, \n self.num_samples)\n \n elif self.resample and self.num_hidden > 0:\n \n approx_Z = -self.compute_free_energy(self.x_tilda)\n \n elif (not self.resample) and self.num_hidden >0:\n \n approx_Z = -self.compute_free_energy(self.x_tilda)\n \n if self.resample:\n \n approx_Z = T.reshape(approx_Z, \n [self.batch_size, self.num_samples])\n \n approx_Z = approx_Z - weight_term\n \n return approx_Z",
"def measure(self, recommender):\n interactions = recommender.interactions\n if interactions.size == 0: # initially, there are no interactions\n self.observe(None)\n return\n histogram = self._generate_interaction_histogram(\n interactions, recommender.num_users, recommender.num_items\n )\n histogram[::-1].sort()\n if self._old_histogram is None:\n self._old_histogram = np.zeros(recommender.num_items)\n self.observe(np.trapz(self._old_histogram, dx=1) - np.trapz(histogram, dx=1), copy=False)\n self._old_histogram = np.copy(histogram)\n self.histogram = histogram",
"def log_batch_stats(observes, actions, advantages, disc_sum_rew, task_r, imitation_r, imitation_r_logs, logger, episode):\n logger.log({'_mean_obs': np.mean(observes),\n '_min_obs': np.min(observes),\n '_max_obs': np.max(observes),\n '_std_obs': np.mean(np.var(observes, axis=0)),\n '_mean_act': np.mean(actions),\n '_min_act': np.min(actions),\n '_max_act': np.max(actions),\n '_std_act': np.mean(np.var(actions, axis=0)),\n '_mean_adv': np.mean(advantages),\n '_min_adv': np.min(advantages),\n '_max_adv': np.max(advantages),\n '_std_adv': np.var(advantages),\n '_mean_discrew': np.mean(disc_sum_rew),\n '_min_discrew': np.min(disc_sum_rew),\n '_max_discrew': np.max(disc_sum_rew),\n '_std_discrew': np.var(disc_sum_rew),\n '_Episode': episode,\n '_mean_task_reward': np.mean(task_r),\n '_mean_imitation_reward': np.mean(imitation_r),\n '_mean_position_cost': np.mean(imitation_r_logs[0]),\n '_mean_velocity_cost': np.mean(imitation_r_logs[1]),\n '_mean_com_position_cost': np.mean(imitation_r_logs[2]),\n '_mean_com_velocity_cost': np.mean(imitation_r_logs[3])\n })",
"def receptor_crosstalk(self, method='auto', ret_receptor_activity=False,\n **kwargs):\n if method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute_force'\n else:\n method = 'monte_carlo'\n \n if method == 'estimate':\n # estimate receptor crosstalk directly\n q_nm = self.receptor_crosstalk_estimate(**kwargs)\n if ret_receptor_activity:\n q_n = self.receptor_activity_estimate(**kwargs)\n \n else:\n # calculate receptor crosstalk from the observed probabilities\n r_n, r_nm = self.receptor_activity(method, ret_correlations=True,\n **kwargs)\n q_n = r_n\n q_nm = r_nm - np.outer(r_n, r_n)\n if kwargs.get('clip', False):\n np.clip(q_nm, 0, 1, q_nm)\n \n if ret_receptor_activity:\n return q_n, q_nm\n else:\n return q_nm",
"def test_average_cont(mock_visibility_data_cont):\n uu, vv, weight, data_re, data_im = mock_visibility_data_cont\n\n averager = gridding.DataAverager.from_image_properties(\n cell_size=0.005,\n npix=800,\n uu=uu,\n vv=vv,\n weight=weight,\n data_re=data_re,\n data_im=data_im,\n )\n\n print(averager.uu.shape)\n print(averager.nchan)\n\n averager._grid_visibilities()",
"def set_measured_activity(self):\n\t\t\n\t\t# True receptor activity\n\t\tself.Yy = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps)\n\t\t\n\t\t# Learned background activity only utilizes average background signal \n\t\tself.Yy0 = receptor_activity(self.Ss0, self.Kk1, self.Kk2, self.eps)\n\t\t\n\t\t# Measured response above background\n\t\tself.dYy = self.Yy - self.Yy0\n\t\n\t\t# Add effects of divisive normalization if called.\n\t\tif self.divisive_normalization == True:\n\t\t\tself.Yy0 = inhibitory_normalization(self.Yy0, self.inh_C, \n\t\t\t\t\t\tself.inh_D, self.inh_eta, self.inh_R)\n\t\t\tself.Yy = inhibitory_normalization(self.Yy, self.inh_C, \n\t\t\t\t\t\tself.inh_D, self.inh_eta, self.inh_R)\n\t\t\tself.dYy = self.Yy - self.Yy0",
"def compute_advantage(self, ob_no, re_n, hidden, masks, tau=0.95):\n bsize = len(re_n)\n rewards = np.squeeze(re_n)\n masks = np.squeeze(masks)\n # print(\"ob_no.shape = \", ob_no.shape)\n # print(\"hidden.shape = \", hidden.shape)\n values = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden})[:,None]\n gamma = self.gamma\n\n assert rewards.shape == masks.shape == (bsize,)\n # print(\"values.shape = \", values.shape)\n assert values.shape == (bsize, 1)\n\n bsize = len(rewards)\n returns = np.empty((bsize,))\n deltas = np.empty((bsize,))\n advantages = np.empty((bsize,))\n\n prev_return = 0\n prev_value = 0\n prev_advantage = 0\n for i in reversed(range(bsize)):\n returns[i] = rewards[i] + gamma * prev_return * masks[i]\n deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]\n advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]\n\n prev_return = returns[i]\n prev_value = values[i]\n prev_advantage = advantages[i]\n\n advantages = (advantages - np.mean(advantages, axis=0)) / np.std(advantages, axis=0)\n return advantages, returns",
"def fit(num, similarity_name=None, reward_name='acc', res=0.05):\n\n # --\n # Get subject's data\n sdata = get_behave_data(num)\n sdata.update(get_similarity_data(num))\n \n responses = np.array(sdata['resp'])\n\n rewards = None\n if reward_name == 'acc':\n rewards = np.array(sdata['acc'],dtype=np.float32)\n elif reward_name == 'gl':\n rewards = np.array(sdata['gl'],dtype=np.float32)\n\n trials = np.array(fmri.catreward.roi.data.get_trials())\n conds = list(set(trials))\n ## conds are the uniqu entries in trials\n \n # Each cond has n states, \n # matching the number of \n # responses (approx 2: {1,6}).\n #\n # Wrong button presses \n # are included, however these\n # are never rewarded so stay at 0.\n params = None\n log_L = 0\n for cond in conds:\n print(cond)\n\n if cond == 0: continue\n ## Drop jitter.\n\n # Create states and their rewards.\n mask = trials == cond\n states_c = responses[mask]\n rewards_c = rewards[mask] ## _c for cond...\n\n # Get the RL alg we want to run.\n # based on similarity_name\n params_c = None\n log_L_c = None\n if similarity_name == None:\n # No similarity, so just fit:\n params_c, log_L_c = rl.fit.ml_delta(rewards_c, states_c, res)\n else:\n # Get the similarity data, filter it by mask, and fit.\n similarity_c = np.array(sdata[similarity_name])[mask]\n params_c, log_L_c = rl.fit.ml_delta_similarity(\n rewards_c, states_c, similarity_c, res)\n \n # Add cond log_L_c to the overall log_L score\n log_L += log_L_c\n\n # params is the average for all conds\n if params == None: \n params = deepcopy(params_c)\n params = (np.array(params_c) + np.array(params)) / 2.\n \n return tuple(params), log_L",
"def __call__(self, adv, annotation=None, unpack=True,\r\n abort_early=True, epsilons=10000):\r\n\r\n a = adv\r\n del adv\r\n del annotation\r\n del unpack\r\n\r\n image = a.original_image\r\n min_, max_ = a.bounds()\r\n axis = a.channel_axis(batch=False)\r\n hw = [image.shape[i] for i in range(image.ndim) if i != axis]\r\n h, w = hw\r\n size = max(h, w)\r\n\r\n if not isinstance(epsilons, Iterable):\r\n epsilons = np.linspace(0, 0.2, num=epsilons + 1)[1:]\r\n\r\n for epsilon in tqdm(epsilons):\r\n # epsilon = 1 will correspond to\r\n # sigma = size = max(width, height)\r\n sigmas = [epsilon * size] * 3\r\n sigmas[axis] = 0\r\n blurred = gaussian_filter(image, sigmas)\r\n blurred = np.clip(blurred, min_, max_)\r\n _, is_adversarial = a.predictions(blurred)\r\n if is_adversarial and abort_early:\r\n return",
"def apply_chromatic_adaptation(val_x, val_y, val_z, orig_illum, targ_illum,\r\n observer='2', adaptation='bradford'):\r\n\r\n # It's silly to have to do this, but some people may want to call this\r\n # function directly, so we'll protect them from messing up upper/lower case.\r\n orig_illum = orig_illum.lower()\r\n targ_illum = targ_illum.lower()\r\n adaptation = adaptation.lower()\r\n\r\n logger.debug(\" \\* Applying adaptation matrix: %s\", adaptation)\r\n # Retrieve the appropriate transformation matrix from the constants.\r\n transform_matrix = _get_adaptation_matrix(orig_illum, targ_illum,\r\n observer, adaptation)\r\n\r\n # Stuff the XYZ values into a NumPy matrix for conversion.\r\n XYZ_matrix = numpy.array((val_x, val_y, val_z))\r\n # Perform the adaptation via matrix multiplication.\r\n result_matrix = numpy.dot(XYZ_matrix, transform_matrix)\r\n\r\n # Return individual X, Y, and Z coordinates.\r\n return result_matrix[0], result_matrix[1], result_matrix[2]",
"def pose_bc_loss(pi, action_batch, mix_policy_ratio=0, huber=False):\n pred_act_pt = control_points_from_rot_and_trans(pi[: :, 3:], pi[: :, :3], device=\"cuda\")\n gt_act_pt = control_points_from_rot_and_trans( action_batch[: :, 3:], action_batch[: :, :3], device=\"cuda\")\n return torch.mean(torch.abs(pred_act_pt - gt_act_pt).sum(-1) )"
] | [
"0.64386016",
"0.49481305",
"0.48385146",
"0.4779224",
"0.46856108",
"0.46765503",
"0.4662905",
"0.46267968",
"0.45856437",
"0.45664948",
"0.43939775",
"0.43856367",
"0.43828163",
"0.436443",
"0.43603247",
"0.43355826",
"0.43281123",
"0.43260527",
"0.43019044",
"0.42894408",
"0.42662933",
"0.42642",
"0.4249237",
"0.42468354",
"0.42346826",
"0.42333823",
"0.42311087",
"0.42298523",
"0.4228851",
"0.42281866"
] | 0.5905821 | 1 |
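The `receptor_crosstalk_estimate` record above offers two routes for uncorrelated mixtures: the small-p_i approximation q_nm = Σ_i S_ni·S_mi·p_i (the einsum) and the exact q_nm = 1 − Π_i (1 − p_i) over substrates shared by receptors n and m. Below is a small sketch comparing the two on a random binary sensitivity matrix; the sizes and probabilities are made-up illustration values.

```python
import numpy as np

rng = np.random.default_rng(1)
Nr, Ns = 4, 8                                    # hypothetical receptor / substrate counts
S = rng.random((Nr, Ns)) < 0.3                   # binary sensitivity matrix S_ni
p = np.full(Ns, 0.05)                            # small substrate probabilities p_i

# small-p_i approximation
q_approx = np.einsum('ni,mi,i->nm', S.astype(float), S.astype(float), p)

# exact probability that at least one shared substrate is present
q_exact = np.zeros((Nr, Nr))
for n in range(Nr):
    for m in range(Nr):
        shared = S[n] & S[m]                     # substrates both receptors respond to
        q_exact[n, m] = 1 - np.prod(1 - p[shared])

print(np.abs(q_approx - q_exact).max())          # small because the p_i are small
```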
calculates the average activity of each receptor. `method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto']. If it is 'auto' then the method is chosen automatically based on the problem size. | def receptor_activity(self, method='auto', ret_correlations=False, **kwargs):
if method == 'auto':
if self.Ns <= self.parameters['brute_force_threshold_Ns']:
method = 'brute_force'
else:
method = 'monte_carlo'
if method == 'brute_force' or method == 'brute-force':
return self.receptor_activity_brute_force(ret_correlations, **kwargs)
elif method == 'monte_carlo' or method == 'monte-carlo':
return self.receptor_activity_monte_carlo(ret_correlations, **kwargs)
elif method == 'estimate':
return self.receptor_activity_estimate(ret_correlations, **kwargs)
else:
raise ValueError('Unknown method `%s`.' % method) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def receptor_score(self, method='auto', multiprocessing=False):\n init_arguments = self.init_arguments\n init_arguments['parameters']['initialize_state']['sensitivity'] = 'exact'\n init_arguments['parameters']['sensitivity_matrix'] = self.sens_mat\n joblist = [(copy.deepcopy(self.init_arguments), 'mutual_information',\n {'method': method})]\n \n # add one job for each receptor\n for n in range(self.Nr):\n init_arguments = self.init_arguments\n init_arguments['num_receptors'] -= 1\n \n # modify the current state and add it to the job list\n sens_mat = np.delete(self.sens_mat, n, axis=0)\n init_arguments['parameters']['sensitivity_matrix'] = sens_mat\n joblist.append((copy.deepcopy(init_arguments), 'mutual_information',\n {'method': method}))\n \n if multiprocessing:\n # calculate all results in parallel\n pool = mp.Pool(processes=self.get_number_of_cores())\n results = pool.map(_run_job, joblist)\n \n else:\n # create a generator over which we iterate later\n results = [_run_job(job) for job in joblist]\n \n # find the scores of all receptors\n scores = results[0] - np.array(results[1:])\n return scores",
"def score(self, method: str = \"\"):\n if not (method):\n if isinstance(self.steps[-1][1], Regressor):\n method = \"r2\"\n else:\n method = \"accuracy\"\n return self.steps[-1][1].score(method)",
"def _do_estimate(recommendation_type: str, pipfile: Pipfile) -> None:",
"def receptor_crosstalk(self, method='auto', ret_receptor_activity=False,\n **kwargs):\n if method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute_force'\n else:\n method = 'monte_carlo'\n \n if method == 'estimate':\n # estimate receptor crosstalk directly\n q_nm = self.receptor_crosstalk_estimate(**kwargs)\n if ret_receptor_activity:\n q_n = self.receptor_activity_estimate(**kwargs)\n \n else:\n # calculate receptor crosstalk from the observed probabilities\n r_n, r_nm = self.receptor_activity(method, ret_correlations=True,\n **kwargs)\n q_n = r_n\n q_nm = r_nm - np.outer(r_n, r_n)\n if kwargs.get('clip', False):\n np.clip(q_nm, 0, 1, q_nm)\n \n if ret_receptor_activity:\n return q_n, q_nm\n else:\n return q_nm",
"def runAnalyticalSim(self, sim_rounds = 10**7, factor=\"mu\"):\n # create simulation agents\n M = self.getPopulationSize()\n N = self.getSampleSize()\n\n available_strategies = self.getAvailableStrategies()\n sim_agents = [Agent(available_strategies) for i in range(M)]\n tot_count = [0 for strategy in available_strategies]\n\n # count strategies in current population\n strat_count = [0 for strategy in available_strategies]\n for sim_agent in sim_agents:\n strat_count[available_strategies.index(sim_agent.getStrategy())] += 1\n\n # repeat 10 million times\n for i in range(sim_rounds):\n\n # handle each agent\n for focal_player in sim_agents:\n\n # update frequencies for avg payoffs\n self.clearFrequencies()\n for i, strategy in enumerate(available_strategies):\n self.setFrequency(strategy, strat_count[i])\n\n # option 1: random switch strategy\n mu_proba = np.random.random()\n if mu_proba <= self.getExplorationRate():\n strat_count[available_strategies.index(focal_player.getStrategy())] -= 1\n focal_player.switchToOtherAvailableStrategy()\n strat_count[available_strategies.index(focal_player.getStrategy())] += 1\n\n # option 2: choose model to (maybe) imitate\n else:\n # select model player\n model_player_index = np.random.randint(0, M-1)\n while model_player_index == sim_agents.index(focal_player):\n model_player_index = np.random.randint(0, M-1)\n model_player = sim_agents[model_player_index]\n\n # define imitation outcome\n proba_copy = self.Fermi(self.getPayoff(model_player.getStrategy()), self.getPayoff(focal_player.getStrategy()))\n proba_event = np.random.random()\n if proba_event <= proba_copy:\n strat_count[available_strategies.index(focal_player.getStrategy())] -= 1\n focal_player.setStrategy(model_player.getStrategy())\n strat_count[available_strategies.index(focal_player.getStrategy())] += 1\n\n # remember population strategies\n for i in range(len(tot_count)):\n tot_count[i] += strat_count[i]\n\n # obtain final frequency\n for i in range(len(strat_count)):\n strat_count[i] = strat_count[i] / M\n\n # obtain total frequency\n for i, strategy in enumerate(available_strategies):\n tot_count[i] = tot_count[i] / (sim_rounds * M)\n\n # export to file: strat_count (enables comparison of both results)\n self.saveResults(tot_count, \"{}\".format(self.getCase()), factor)",
"def calculate_parameters_magnitudes(self, method=None):\n assert method is not None, \"No method was chosen to calculate the parameters' magnitudes.\"\n\n # Get the parameters for every key\n param_keys = {}\n parameters_magnitudes_dict = {}\n parameters_magnitudes = []\n\n for parameter in self.optimizable_parameters:\n if parameter.param_key in param_keys:\n param_keys[parameter.param_key].append(parameter.value)\n else:\n param_keys[parameter.param_key] = []\n param_keys[parameter.param_key].append(parameter.value)\n\n if method.lower() == \"geometric\":\n # Compute the geometric mean\n for param_key in param_keys:\n geometric_mean = 1.0\n n = 0.0\n for value in param_keys[param_key]:\n if abs(value) > 1e-8:\n # If value is not zero\n geometric_mean = geometric_mean * np.abs(value)\n n = n + 1\n if abs(geometric_mean) > 1e-8 and n > 0:\n geometric_mean = geometric_mean ** (1.0 / n)\n parameters_magnitudes_dict[param_key] = geometric_mean\n else:\n parameters_magnitudes_dict[param_key] = self.parameters_magnitudes[param_key]\n\n elif method.lower() == \"arithmetic\":\n # Arithmetic mean\n for param_key in param_keys:\n arithmetic_mean = 0.0\n n = 0.0\n for value in param_keys[param_key]:\n arithmetic_mean = arithmetic_mean + np.abs(value)\n n = n + 1\n\n if abs(arithmetic_mean) > 1e-8 and n > 0:\n arithmetic_mean = arithmetic_mean / n\n parameters_magnitudes_dict[param_key] = arithmetic_mean\n else:\n parameters_magnitudes_dict[param_key] = self.parameters_magnitudes[param_key]\n\n elif method.lower() == \"default\":\n for param_key in param_keys:\n parameters_magnitudes_dict[param_key] = self.parameters_magnitudes[param_key]\n else:\n raise NotImplementedError(\n \"\\t * Mean type {} not available to guess the prior widths.\".format(method))\n\n for parameter in self.optimizable_parameters:\n parameters_magnitudes.append(parameters_magnitudes_dict[parameter.param_key])\n\n # Convert to numpy array\n prior_widths = np.asarray(parameters_magnitudes)\n\n return parameters_magnitudes_dict, prior_widths",
"def _pool(array: np.ndarray, method: str) -> float:\n if method == \"fro\":\n return np.linalg.norm(array)\n if method == \"mean\":\n return np.mean(array)\n if method == \"median\":\n return np.median(array)\n return np.linalg.norm(array)",
"def apply_policy(self, policy, method):\n action, optimal_value, move = policy(self, method)\n return action, optimal_value, move",
"def mixture_statistics(self, method='auto'):\n\n if method == 'auto':\n fixed_mixture_size = self.parameters['fixed_mixture_size']\n \n if self.is_correlated_mixture or fixed_mixture_size is not None:\n # mixture has correlations => we do Metropolis sampling\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute-force'\n else:\n method = 'monte-carlo'\n else:\n # the estimate is exact for mixtures without correlations\n method = 'estimate'\n\n if method == 'brute-force' or method == 'brute_force':\n return self.mixture_statistics_brute_force()\n elif method == 'monte-carlo' or method == 'monte_carlo':\n return self.mixture_statistics_monte_carlo()\n elif method == 'estimate':\n return self.mixture_statistics_estimate()\n else:\n raise ValueError('Unknown method `%s` for mixture statistics'\n % method)",
"def _apply_method(self, X, method):\n n_epochs, n_channels, n_times = X.shape\n # trial as time samples\n X = np.transpose(X, [1, 0, 2])\n X = np.reshape(X, [n_channels, n_epochs * n_times]).T\n # apply method\n method = getattr(self.estimator, method)\n X = method(X)\n # put it back to n_epochs, n_dimensions\n X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2])\n return X",
"def _check_aggregation_method(self, method):\n if method not in {self._MEAN, self._MEDIAN, self._WEIGHTED_MEAN}:\n raise ValueError(\n f\"Invalid aggregation_method: {method}.\\n\"\n f\"Valid options are: {(self._MEAN, self._MEDIAN, self._WEIGHTED_MEAN)}\"\n )",
"def set_method(self, method: AbstractMethod):\n self.method = self.mover.move_module(method)\n assert isinstance(self.method, AbstractMethod)\n self.method_ema = ModelEMA.maybe_init(self.logger, self.method, self.ema_decay, self.ema_device)\n self.mover.empty_cache()\n self.optimizers, self.schedulers = self.method.configure_optimizers()\n self.method.logger = self.exp_logger\n\n self.loader_train = self.method.train_dataloader()\n self.loader_eval = self.method.val_dataloader()\n self.loader_test = self.method.test_dataloader()",
"def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = 
mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results",
"def computeClassAtt( y_true, y_pred, method=\"---\" ) :\n\n acc = round( accuracy_score( y_true, y_pred ), 2 )\n\n M = confusion_matrix( y_true, y_pred )\n print( \"Confusion Matrix for \", method, \":\\n\", M)\n\n GT_acc = round( M[1,1] / (M[1,0]+M[1,1]), 2 )\n nGT_acc = round( M[0,0] / (M[0,1]+M[0,0]),2 )\n\n GT_prec = round( precision_score( y_true, y_pred ),2 )\n nGT_prec = round( precision_score( y_true, y_pred, pos_label=0 ), 2 )\n\n return pd.DataFrame( {\"method\":method, \"overal_accuracy\":[acc], \"GT_accuracy\":[GT_acc], \"GT_precision\":[GT_prec],\n \"nonGT_accuracy\":[nGT_acc], \"nonGT_precision\":[nGT_prec] })",
"def avg_performance(env, policy):\n\n sum_reward = 0.\n episode = 100\n max_iteration = 6000\n for i in range(episode):\n done = False\n ob = env.reset()\n\n for j in range(max_iteration):\n a = policy[ob]\n ob, reward, done, _ = env.step(a)\n sum_reward += reward\n if done:\n break\n\n return sum_reward / i",
"def area_analysis(self, method='MEAN'):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_area_analysis(cube, method))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('area_analysis')\n return self.cubelist",
"def algorithm_avg_time(n, score, algorithm, *args, **kwargs):\r\n algorithms = []\r\n for num in range(n):\r\n algorithms.append(algorithm(*args, **kwargs))\r\n\r\n prof = cProfile.Profile()\r\n for num in range(n):\r\n prof.runctx('algorithms[num].run_to_score(score)', globals(), locals())\r\n stats = pstats.Stats()\r\n stats.add(prof)\r\n return(stats)",
"def age_from_radius(R, method=None, **kwargs):\n\n if method in ['lin', 'log']:\n age = lin_reg_pred(R, method=method)\n\n elif method in ['estimate', 'TM99-0', 'TM99-simple']:\n age = physics_age(R, model=method, **kwargs)\n\n elif method in ['pheno', 'phenomenological']:\n age = pheno_age(R, **kwargs)\n\n else:\n raise ValueError(\"method={} not currently supported.\".format(method))\n\n return age",
"def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005):\n self.initmethod = initmethod\n self.algtype = algtype\n self.alfaini = alfaini\n self.alfafinal = alfafinal\n self.neigh = neighborhoodmethod",
"def random_behavior(self, method=\"norm\"):\n if method==\"norm\":\n X = np.random.rand(self.N, self.Q, self.M)\n X = X / X.sum(axis=2).repeat(self.M).reshape(self.N, self.Q,\n self.M)\n elif method == \"diff\":\n X = np.random.rand(self.N, self.Q, self.M-1)\n X = np.concatenate((np.zeros((self.N, self.Q, 1)),\n np.sort(X, axis=-1),\n np.ones((self.N, self.Q, 1))), axis=-1)\n X = X[:, :, 1:] - X[:, :, :-1]\n return X",
"def area_analysis(self, method='MEAN'):\n self.cube = self.cube_area_analysis(self.cube, method)\n self.processes.append('area_analysis')\n return self.cube",
"def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2",
"def simulate(self, observation_matrix, method=\"smoother\"):\n if method == \"filter\":\n means = self.filtered_state_means\n covariances = self.filtered_state_covariances\n else:\n means = self.smoothed_state_means\n covariances = self.smoothed_state_covariances\n simulated_means = []\n simulated_variances = []\n for t, _ in enumerate(means):\n simulated_means.append(np.dot(observation_matrix, means[t]))\n var = np.diag(np.dot(observation_matrix,\n np.dot(covariances[t], observation_matrix.T)))\n # prevent variances to become less than 0\n simulated_variances.append(np.maximum(var, 0))\n return (simulated_means, simulated_variances)",
"def compute_cost(y, tx, w, method=\"mae\"):\n err = y - tx.dot(w)\n if method.lower() == \"mae\":\n cost_f = np.mean(np.abs(err))\n elif method.lower() == \"mse\":\n cost_f = np.mean(err**2)/2\n else:\n return NotImplementedError\n return cost_f",
"def run_several_iterations(iterations, means, horizon):\n\n # Initializing the results vector.\n results = [0]*horizon\n\n for iteration in range(iterations):\n\n # The current cumulative regret.\n results = np.add(results, run_sparring_algorithm(means[:, iteration], horizon))\n\n # Returning the average cumulative regret.\n return results/(iterations +.0)",
"def analyzeImage(path, res, method='cleantests', MoDirt='mo', \n Mask=0, autoMaskEdges=False, stdDir='standards/', verbose=False):\n img = fun.loadImg(path)\n MoDirt = fun.checkMoDirt(MoDirt)\n \n if Mask==0:\n mask = np.ones(img.shape)\n elif type(Mask)==np.ndarray and Mask.shape == img.shape:\n mask = Mask.copy()\n else:\n raise Exception\n # Uses my OLD maskEdges function to mask off the dark area around a foil if \n # specified. \n if autoMaskEdges:\n maskedImg, mask = fun.maskEdge(img)\n retData = {}\n \n # MOLYBDENUM ANALYSIS ======================================================\n if MoDirt == 'mo':\n # Method used by cleantests ––––––––––––––––––––––––––––––––––––\n if method.lower() in ['cleantests','smallfoils', 'cleantest']:\n (PtArea, \n FoilArea, \n MolyArea, \n MolyMass, \n threshed) = Monalysis(img, res,verbose=verbose)\n \n PercPt = 100*PtArea/FoilArea\n poster = fun.makePoster(img)\n \n # Method used by bigfoils –––––––––––––––––––––––––––––––––––––\n elif method.lower() in ['bigfoils','big','bigscans','no border']:\n stats, picts = ImgAnalysis(img, mask, res, MoDirt=MoDirt,returnSizes=False)\n (PtArea,\n FoilArea,\n PercPt) = stats\n MolyArea = FoilArea-PtArea\n MolyMass = MolyArea*.3*10.2 #moly mass in micrograms\n (threshed, poster) = picts\n \n # Method Used by Histogram Analysis (newmethod) ––––––––––––––––––––\n elif method.lower() in ['newmethod', 'histograms', 'histo', 'histogram']:\n stats, picts = analyzeByHisto (img, res, \n Mask=mask, verbose=verbose,\n MoDirt=MoDirt, returnPoster=True,\n returnData=False,returnSizes=False)\n (PtArea,\n PercPt,\n FoilArea) = stats\n \n MolyArea = FoilArea-PtArea\n MolyMass = MolyArea*.3*10.2 #moly mass in micrograms\n \n (threshed, poster) = picts\n \n # STANDARD ANALYSIS –––––––––––––––––––––––––––––––––––––––––––\n elif method.lower() in ['standards','standard','std','stds']:\n poster = fun.posterfy(img)\n imgName = os.path.splitext(os.path.split(path)[1])[0]\n PtMapPath = os.path.join(stdDir, 'all_plat/')+imgName+'.png'\n if os.path.exists(PtMapPath):\n threshed = fun.loadImg(PtMapPath)\n PtArea = meas.calcExposedPt(threshed, res, getAreaInSquaremm=True)\n PixFoil = np.sum(mask.astype(np.bool_))\n FoilArea = round(PixFoil*res*10**-6, 4)\n MolyArea = FoilArea-PtArea\n MolyMass = MolyArea*.3*10.2 #moly mass in micrograms\n if FoilArea == 0:\n PercPt = 0\n else:\n PercPt = round(float(PtArea)/float(FoilArea)*100,2)\n else:\n print \"Not a standard: \" + imgName\n print \" File path does not Exist: \" + PtMapPath\n retData = blankDataDict(MoDirt)\n threshed = blankImg(img.shape)\n return retData, (threshed, poster)\n \n # UNMATCHED METHOD ––––––––––––––––––––––––––––––––––––––––––––\n else:\n raise Exception(\"\"\"The specified method is not available: {0} \\n\n Method should be one of the following: \\n\n 'cleantests','bigfoils','histogram','standard'.\n \"\"\".format(str(method)))\n \n # Prepare Return Data Dictionary ---------------------------------------\n retData = {'Pt Area (mm^2)':round(PtArea,4),\n 'Foil Area (mm^2)':round(FoilArea,2),\n 'Moly Area (mm^2)':round(MolyArea,3),\n 'Mass Molybdenum (micrograms)':round(MolyMass,3),\n '% Exposed Pt':round(PercPt,3)}\n \n # DIRT ANALYSIS ============================================================\n elif MoDirt == 'dirt':\n \n # Method used by cleantests ––––––––––––––––––––––––––––––––––––\n if method.lower() in ['cleantests','smallfoils', 'cleantest','ct','foils']:\n (DirtNum,\n DirtArea,\n threshed,\n DirtSizes) = dirtnalysis (img, res, MaskEdges=True, retSizes=True)\n \n 
poster = fun.makePoster(img)\n \n # Method used by bigfoils –––––––––––––––––––––––––––––––––––––\n elif method.lower() in ['bigfoils','big','bigscans','no border']:\n stats, picts = ImgAnalysis(img, mask, res, \n MoDirt=MoDirt,returnSizes=True)\n (DirtNum,\n DirtArea,\n AreaFoil,\n Perc,\n DirtSizes) = stats\n \n (threshed, poster) = picts\n \n # Method Used by Histogram Analysis (newmethod) ––––––––––––––––––––\n elif method.lower() in ['newmethod', 'histograms', 'histo', 'histogram']:\n stats, picts = analyzeByHisto (img, res, \n Mask=mask, verbose=verbose,\n MoDirt=MoDirt, returnPoster=True,\n returnData=False,returnSizes=True)\n (DirtNum,\n DirtArea,\n DirtSizes,\n AreaFoil) = stats\n \n \n (threshed, poster) = picts\n \n # STANDARD ANALYSIS –––––––––––––––––––––––––––––––––––––––––––\n elif method.lower() in ['standards','standard','std','stds']:\n poster = fun.posterfy(img)\n imgName = os.path.splitext(os.path.split(path)[1])[0]\n DirtMapPath = os.path.join(stdDir, 'all_dirt/')+imgName+'.png'\n if os.path.exists(DirtMapPath):\n threshed = fun.loadImg(DirtMapPath)\n (DirtArea, \n DirtNum,\n DirtSizes,\n labeled) = meas.calcDirt(threshed,\n res, \n returnSizes=True,\n returnLabelled=True, \n getAreaInSquaremm=True) \n else:\n print \"Not a standard: \" + imgName\n print \" File path does not Exist: \" + DirtMapPath\n retData = blankDataDict(MoDirt)\n threshed = blankImg(img.shape)\n return retData, (threshed, poster)\n \n # UNMATCHED METHOD ––––––––––––––––––––––––––––––––––––––––––––\n else:\n raise Exception(\"\"\"The specified method is not available: {0} \\n\n Method should be one of the following: \\n\n 'cleantests','bigfoils','histogram','standard'.\n \"\"\".format(str(method)))\n \n # Prepare Return Data Dictionary ---------------------------------------\n (MeanSize, \n MaxSize, \n percOver100) = meas.getDirtSizeData(DirtSizes, res)\n \n retData = {'Dirt Count':DirtNum,\n 'Dirt Area (mm^2)':round(DirtArea, 5),\n 'Mean Particle Area (micron^2)':round(MeanSize,1),\n 'Max Particle Area (micron^2)':round(MaxSize,1),\n '% Dirt Particles over 100micron diameter':round(percOver100,3)}\n \n # Return results\n retPicts = (threshed,poster)\n \n return retData, retPicts",
"def sklearn(experiment, method, prediction_threshold=0.5, **kwargs):\n experiment['method'] = method\n experiment['prediction_threshold'] = prediction_threshold\n X_train = experiment['X_train']\n X_test = experiment['X_test']\n y_train = experiment['y_train']\n\n\n classifier = None\n if method == 0:\n # k-Nearest Neighbors\n classifier = KNeighborsClassifier(**kwargs)\n elif method == 1:\n # Logistic Regression\n classifier = LogisticRegression(**kwargs)\n elif method == 2:\n # Random Forest\n classifier = RandomForestClassifier(**kwargs)\n elif method == 3:\n # Support Vector Classifier\n classifier = SVC(kernel = 'rbf') # kernel = linear, poly, rbf, sigmoid\n elif method == 4:\n # Gaussian Naive Bayes\n classifier = GaussianNB(**kwargs)\n elif method == 5:\n # Decision Trees\n classifier = DecisionTreeClassifier(**kwargs)\n elif method == 6:\n # AdaBoost Classifier\n classifier = AdaBoostClassifier(**kwargs)\n elif method == 7:\n # Gradient Boosting Classifier\n classifier = GradientBoostingClassifier(**kwargs)\n elif method == 8:\n # Neural Network Classifier\n classifier = MLPClassifier(**kwargs)\n # classifier = MLPClassifier(hidden_layer_sizes=(10, 5))\n else:\n print('Invalid method!')\n\n classifier.fit(X_train, np.ravel(y_train))\n\n # output probability of prediction, use threshold to pick class\n y_train_probabilities = classifier.predict_proba(X_train)\n y_test_probabilities = classifier.predict_proba(X_test)\n\n\n y_test = experiment['y_test']\n\n FPR, TPR, prediction_threshold = roc_curve(y_test, y_test_probabilities[:, 1], pos_label=1)\n\n N_roc = np.shape(FPR)[0]\n best_d = 10\n best_i = 0\n d = np.ones((N_roc, 1))\n for i in range(N_roc):\n d[i] = np.sqrt((1 - TPR[i]) ** 2 + FPR[i] ** 2)\n if best_d > d[i]:\n best_d = d[i]\n best_i = i\n\n threshold = prediction_threshold[best_i]\n # auc2 = roc_auc_score(y_test, y_test_probabilities[:, 1])\n y_train_prediction = (y_train_probabilities[:, 1] >= threshold) * 1\n y_test_prediction = (y_test_probabilities[:, 1] >= threshold) * 1\n\n experiment['FPR'] = FPR\n experiment['TPR'] = TPR\n experiment['y_test_probabilities'] = y_test_probabilities\n experiment['y_train_probabilities'] = y_train_probabilities\n experiment['y_test_prediction'] = y_test_prediction\n experiment['y_train_prediction'] = y_train_prediction\n\n return experiment",
"def _action(self, method, profile, verbose=False):\n small_profile = self.profile()\n\n if verbose:\n print('Computing the kernel using the profile:')\n print(small_profile)\n\n algebra = self.base_ring()\n finite_algebra = algebra.__class__(algebra.prime(), profile=small_profile)\n\n fp_result = method(\n self.change_ring(finite_algebra),\n verbose=verbose)\n\n return fp_result.change_ring(self.base_ring())",
"def timecall(method, **kwargs):\r\n repeat = 1\r\n if 'repeat' in kwargs:\r\n repeat = kwargs['repeat']\r\n def wrapper(*args, **kwargs):\r\n durations = []\r\n for iteration in range(1, repeat + 1):\r\n start = time.time()\r\n result = method(*args, **kwargs)\r\n durations.append(time.time() - start)\r\n print(\"{:d}/{:d}: {:.2f}s\".format(iteration, repeat, durations[-1]))\r\n average = sum(durations) / len(durations)\r\n print(\"Average: {:.2f}s\\n\".format(average))\r\n duration = average\r\n return (duration, result)\r\n return wrapper",
"def __init__(self, \n method='mean',\n columns_to_impute='all',\n keep_dummies=True,\n impute_inf=True,\n rows_to_scan='all'):\n self.columns_to_impute = columns_to_impute\n self.rows_to_scan = rows_to_scan\n self.impute_inf = impute_inf\n self.keep_dummies = keep_dummies\n self.method = method"
] | [
"0.62602246",
"0.5744438",
"0.531374",
"0.5277215",
"0.50603354",
"0.50047123",
"0.49545625",
"0.491439",
"0.4823283",
"0.48020002",
"0.47525263",
"0.47253197",
"0.47229403",
"0.46382526",
"0.46355662",
"0.4616571",
"0.45886013",
"0.45580858",
"0.45328122",
"0.45287707",
"0.45149532",
"0.4506039",
"0.4500184",
"0.44843686",
"0.4482053",
"0.4479755",
"0.4453453",
"0.4443456",
"0.44398433",
"0.442208"
] | 0.6362825 | 0 |
estimates the average activity of each receptor. `ret_correlations` determines whether the correlations between receptors are returned in addition to the mean activations. `approx_prob` determines whether the probabilities of encountering substrates in mixtures are calculated exactly or only approximately, which should work for small probabilities. `clip` determines whether the estimates will be forced to be in [0, 1]. | def receptor_activity_estimate(self, ret_correlations=False,
approx_prob=False, clip=False):
S_ni = self.sens_mat
p_i = self.substrate_probabilities
# calculate receptor activity assuming uncorrelated mixtures
if approx_prob:
# approximate calculation for small p_i
r_n = np.dot(S_ni, p_i)
if clip:
np.clip(r_n, 0, 1, r_n)
else:
# proper calculation of the probabilities
r_n = np.zeros(self.Nr)
S_ni_mask = S_ni.astype(np.bool)
for n in range(self.Nr):
r_n[n] = 1 - np.product(1 - p_i[S_ni_mask[n, :]])
if self.is_correlated_mixture:
# add linear correction term for correlated mixtures
J_ij = self.correlations
p_ni = p_i[None, :] * (1 - S_ni)
corr1 = 1 + np.einsum('ij,ni,nj->n', J_ij, p_ni, p_ni)
corr2 = 1 + np.einsum('ij,i,j->', J_ij, p_i, p_i)
barr_n_0 = 1 - r_n
barr_n = barr_n_0 * (1 + corr1 - corr2)
r_n = 1 - barr_n
if clip:
np.clip(r_n, 0, 1, r_n)
if ret_correlations:
# estimate the correlations from the estimated crosstalk
q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob,
ignore_correlations=True)
if approx_prob:
r_nm = np.outer(r_n, r_n) + q_nm
else:
r_nm = 1 - (1 - q_nm)*(1 - np.outer(r_n, r_n))
if self.is_correlated_mixture:
# add one correction term for correlated mixtures
p_nmi = np.einsum('i,ni,mi->nmi', p_i, 1 - S_ni, 1 - S_ni)
corr1 = 1 + np.einsum('ij,nmi,nmj->nm', J_ij, p_nmi, p_nmi)
# corr2 = 1 + np.einsum('ij,i,j->', J_ij, p_i, p_i)
# this term has already been calculated above and can be reused
# convert r_nm_0 (here given as r_nm) into barr_nm_0
barr_nm_0 = 1 - barr_n_0[:, None] - barr_n_0[None, :] + r_nm
# correct barr_nm for the correlations J_ij
barr_nm = barr_nm_0 * (1 + corr1 - corr2)
# convert barr_nm into r_nm
r_nm = 1 - barr_n[:, None] - barr_n[None, :] + barr_nm
if clip:
np.clip(r_nm, 0, 1, r_nm)
return r_n, r_nm
else:
return r_n | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def receptor_crosstalk_estimate(self, ret_receptor_activity=False,\n approx_prob=False, clip=False,\n ignore_correlations=False):\n if not ignore_correlations and self.is_correlated_mixture:\n r_n, r_nm = self.receptor_activity_estimate(ret_correlations=True,\n approx_prob=approx_prob,\n clip=clip)\n q_nm = r_nm - np.outer(r_n, r_n)\n if clip:\n np.clip(q_nm, 0, 1, q_nm)\n if ret_receptor_activity:\n return r_n, q_nm\n else:\n return q_nm\n raise NotImplementedError('Not implemented for correlated mixtures')\n\n S_ni = self.sens_mat\n p_i = self.substrate_probabilities\n \n if approx_prob:\n # approximate calculation for small p_i\n q_nm = np.einsum('ni,mi,i->nm', S_ni, S_ni, p_i)\n if clip:\n np.clip(q_nm, 0, 1, q_nm)\n \n else:\n # proper calculation of the probabilities\n S_ni_mask = S_ni.astype(np.bool)\n q_nm = np.zeros((self.Nr, self.Nr))\n for n in range(self.Nr):\n for m in range(self.Nr):\n mask = S_ni_mask[n, :] * S_ni_mask[m, :]\n q_nm[n, m] = 1 - np.product(1 - p_i[mask])\n \n \n if ret_receptor_activity:\n q_n = self.receptor_activity_estimate(approx_prob=approx_prob,\n clip=clip)\n return q_n, q_nm\n else:\n return q_nm",
"def computeCorr(pred_act,responses):\n\n num_pres,num_neurons = np.shape(responses)\n corr=np.zeros(num_neurons)\n \n for i in xrange(0,num_neurons):\n if np.all(pred_act[:,i]==0) & np.all(responses[:,i]==0):\n corr[i]=1.\n elif not(np.all(pred_act[:,i]==0) | np.all(responses[:,i]==0)):\n # /!\\ To prevent errors due to very low values during computation of correlation\n if abs(pred_act[:,i]).max()<1:\n pred_act[:,i]=pred_act[:,i]/abs(pred_act[:,i]).max()\n if abs(responses[:,i]).max()<1:\n responses[:,i]=responses[:,i]/abs(responses[:,i]).max() \n corr[i]=pearsonr(np.array(responses)[:,i].flatten(),np.array(pred_act)[:,i].flatten())[0]\n \n return corr",
"def corr(actual: np.ndarray, predicted: np.ndarray):\n avg_m = np.mean(predicted)\n avg_o = np.mean(actual)\n\n diff_a = actual - avg_o\n diff_p = predicted - avg_m\n\n numerator = np.dot(diff_a, diff_p)\n denominator = np.sqrt(np.sum(np.square(diff_a)) * np.sum(np.square(diff_p)))\n\n return np.mean(np.divide(numerator, denominator))",
"def approx_interactions(index, shap_values, X):\n\n if X.shape[0] > 10000:\n a = np.arange(X.shape[0])\n np.random.shuffle(a)\n inds = a[:10000]\n else:\n inds = np.arange(X.shape[0])\n\n x = X[inds, index]\n srt = np.argsort(x)\n shap_ref = shap_values[inds, index]\n shap_ref = shap_ref[srt]\n inc = max(min(int(len(x) / 10.0), 50), 1)\n interactions = []\n for i in range(X.shape[1]):\n val_other = X[inds, i][srt].astype(np.float)\n v = 0.0\n if not (i == index or np.sum(np.abs(val_other)) < 1e-8):\n for j in range(0, len(x), inc):\n if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:\n v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])\n interactions.append(v)\n\n return np.argsort(-np.abs(interactions))",
"def get_average_percision_(qres, ibs=None, gt_aids=None):\n recall_range_, p_interp_curve = get_interpolated_precision_vs_recall_(qres, ibs=ibs, gt_aids=gt_aids)\n\n if recall_range_ is None:\n ave_p = np.nan\n else:\n ave_p = p_interp_curve.sum() / p_interp_curve.size\n\n return ave_p",
"def auto_correlation_batch_means(target_distribution, x0, xs, accepteds, batch_count=4):\n n = len(xs)\n batch_size = int(np.round(n / batch_count))\n samples = xs[0:(batch_count * batch_size)]\n batch_samples = np.reshape(samples, (batch_count, -1) + samples.shape[1:])\n var_batch_means = np.var(np.mean(batch_samples, axis=1), axis=0)\n var = np.var(samples, axis=0)\n acorr_times = batch_size * var_batch_means / var\n acorr_times[np.isclose(var, 0.0)] = batch_size\n return -np.mean(acorr_times)",
"def receptor_activity_brute_force(self, ret_correlations=False):\n S_ni = self.sens_mat\n Z = 0\n r_n = np.zeros(self.Nr)\n if ret_correlations:\n r_nm = np.zeros((self.Nr, self.Nr))\n \n # iterate over all mixtures\n for c, prob_c in self._iterate_mixtures():\n # get the activity vector associated with m\n a_n = (np.dot(S_ni, c) >= 1)\n Z += prob_c\n\n r_n[a_n] += prob_c\n if ret_correlations:\n r_nm[np.outer(a_n, a_n)] += prob_c\n \n # return the normalized output\n r_n /= Z\n if ret_correlations:\n r_nm /= Z\n return r_n, r_nm\n else:\n return r_n",
"def get_average_repro(self):\n return np.mean([agent.get_fledge_probability() for agent in self.agents])",
"def approx_interactions(X, shap_values, index):\n if X.shape[0] > 10000:\n a = np.arange(X.shape[0])\n np.random.shuffle(a)\n inds = a[:10000]\n else:\n inds = np.arange(X.shape[0])\n\n x = X[inds,index]\n srt = np.argsort(x)\n shap_ref = shap_values[inds,index]\n shap_ref = shap_ref[srt]\n inc = min(int(len(x)/10.0), 50)\n interactions = []\n for i in range(X.shape[1]):\n val_other = X[inds,i][srt]\n\n if i == index or np.sum(np.abs(val_other)) < 1e-8:\n v = 0\n else:\n v = np.sum(np.abs([np.corrcoef(shap_ref[i:i+inc],val_other[i:i+inc])[0,1] for i in range(0,len(x),inc)]))\n interactions.append(v)\n\n return np.argsort(-np.abs(interactions))",
"def reprojection_error_mean(*args, **kwargs):\n return np.mean(reprojection_error_vector(*args, **kwargs))",
"def test_corr_transform_performance(meta_cres, corr, signal_masks, simulatedata_cbma):\n _, (ground_truth_foci, _) = simulatedata_cbma\n mask = meta_cres.masker.mask_img\n ground_truth_foci_ijks = [tuple(mm2vox(focus, mask.affine)) for focus in ground_truth_foci]\n sig_idx, nonsig_idx = [\n meta_cres.masker.transform(img).astype(bool).squeeze() for img in signal_masks\n ]\n\n p_array = meta_cres.maps.get(\"p\")\n if p_array is None or corr.method == \"montecarlo\":\n p_array = 10 ** -meta_cres.maps.get(\"logp_level-voxel_corr-FWE_method-montecarlo\")\n\n n_iters = corr.parameters.get(\"n_iters\")\n\n # ALE with MKDA kernel with montecarlo correction\n # combination gives poor performance\n if (\n isinstance(meta_cres.estimator, ale.ALE)\n and isinstance(meta_cres.estimator.kernel_transformer, kernel.MKDAKernel)\n and meta_cres.estimator.get_params().get(\"null_method\") == \"approximate\"\n and corr.method != \"montecarlo\"\n ):\n good_sensitivity = True\n good_specificity = False\n elif (\n isinstance(meta_cres.estimator, ale.ALE)\n and isinstance(meta_cres.estimator.kernel_transformer, kernel.MKDAKernel)\n and \"montecarlo\" in meta_cres.estimator.get_params().get(\"null_method\")\n ):\n good_sensitivity = False\n good_specificity = True\n elif (\n isinstance(meta_cres.estimator, ale.ALE)\n and isinstance(meta_cres.estimator.kernel_transformer, kernel.MKDAKernel)\n and meta_cres.estimator.get_params().get(\"null_method\") == \"approximate\"\n and corr.method == \"montecarlo\"\n ):\n good_sensitivity = False\n good_specificity = True\n elif (\n isinstance(meta_cres.estimator, ale.ALE)\n and type(meta_cres.estimator.kernel_transformer) == kernel.KDAKernel\n and (\n \"montecarlo\" in meta_cres.estimator.get_params().get(\"null_method\")\n or (\n meta_cres.estimator.get_params().get(\"null_method\") == \"approximate\"\n and corr.method == \"montecarlo\"\n )\n )\n ):\n good_sensitivity = False\n good_specificity = True\n elif (\n isinstance(meta_cres.estimator, ale.ALE)\n and type(meta_cres.estimator.kernel_transformer) == kernel.KDAKernel\n and meta_cres.estimator.get_params().get(\"null_method\") == \"approximate\"\n ):\n good_sensitivity = True\n good_specificity = False\n elif (\n isinstance(meta_cres.estimator, mkda.MKDADensity)\n and isinstance(meta_cres.estimator.kernel_transformer, kernel.ALEKernel)\n and meta_cres.estimator.get_params().get(\"null_method\") != \"reduced_montecarlo\"\n and corr.method != \"montecarlo\"\n ):\n good_sensitivity = False\n good_specificity = True\n else:\n good_sensitivity = True\n good_specificity = True\n\n _check_p_values(\n p_array,\n meta_cres.masker,\n sig_idx,\n nonsig_idx,\n ALPHA,\n ground_truth_foci_ijks,\n n_iters=n_iters,\n good_sensitivity=good_sensitivity,\n good_specificity=good_specificity,\n )",
"def test_preds_average():\n pred_1 = np.array([[0.1, 0.3, 0.1, 0.5], [0.9, 0.05, 0.025, 0.025]])\n pred_2 = np.array([[0.6, 0.1, 0.2, 0.1], [0.8, 0.1, 0.05, 0.05]])\n av = preds_average([pred_1, pred_2], [0.9, 0.1])\n assert (av == np.array([3, 0])).all()",
"def anomaly_correlation(y_true, y_pred, mean=0., regularize_mean='mse', reverse=True):\n if regularize_mean is not None:\n assert regularize_mean in ['global', 'spatial', 'mse', 'mae']\n a = (K.mean(y_pred * y_true)\n / K.sqrt(K.mean(K.square(y_pred)) * K.mean(K.square(y_true))))\n if regularize_mean is not None:\n if regularize_mean == 'global':\n m = K.abs((K.mean(y_true) - K.mean(y_pred)) / K.mean(y_true))\n elif regularize_mean == 'spatial':\n m = K.mean(K.abs((K.mean(y_true, axis=[-2, -1]) - K.mean(y_pred, axis=[-2, -1]))\n / K.mean(y_true, axis=[-2, -1])))\n elif regularize_mean == 'mse':\n m = mean_squared_error(y_true, y_pred)\n elif regularize_mean == 'mae':\n m = mean_absolute_error(y_true, y_pred)\n if reverse:\n if regularize_mean is not None:\n return m - a\n else:\n return -a\n else:\n if regularize_mean:\n return a - m\n else:\n return a",
"def mch_approximation( samples, dlamda ):\n dE = calc_e(samples,dlamda)\n dE -= dE.min()\n ZFraction = 1. / np.mean(np.exp(-dE))\n predsisj = pair_corr( samples, weights=np.exp(-dE)/len(dE) )[1] * ZFraction \n assert not (np.any(predsisj<-1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj",
"def mse_and_corr(targets, preds, targets_len):\n mse_list = []\n corr_list = []\n for i in range(targets.shape[1]):\n len_i = targets_len[i]\n test_data_i = targets[:len_i,i,:]\n pred_i = preds[:len_i,i,:]\n mse_list.append(np.mean((test_data_i-pred_i)**2))\n corr_list.append(np.corrcoef(test_data_i.flatten(), pred_i.flatten())[0,1])\n tot_mse = np.mean(mse_list)\n tot_corr = np.mean(corr_list)\n \n return tot_mse, tot_corr",
"def mch_approximation(samples, dlamda):\n dE = calc_e(samples, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = pair_corr(samples, weights=np.exp(-dE)/len(dE), concat=True) * ZFraction \n assert not (np.any(predsisj < -1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj",
"def optimize_g_mean(self):\n g_means = []\n fpr, tpr, thresholds = metrics.roc_curve(self.target, self.prediction, pos_label=1)\n roc_auc = metrics.auc(fpr, tpr)\n for i in range(len(fpr)):\n g_means.append(sqrt(tpr[i] * (1 - fpr[i])))\n plt.figure()\n idx = argmax(g_means)\n lw = 2\n print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[idx], g_means[idx]))\n plt.plot(fpr, tpr, color='darkorange', lw=lw, label='Curva ROC (area ={0:.2f})'.format(roc_auc))\n plt.scatter(fpr[idx], tpr[idx], marker='o', color='black', label='Melhor Resultado')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Especificidade')\n plt.ylabel('Sensibilidade')\n plt.title('Curva ROC')\n plt.legend(loc=\"lower right\")\n plt.show()\n matplotlib.use(\"pgf\")\n matplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n })\n plt.savefig('ROC_Curve2.pgf')\n self.threshold = thresholds[idx]\n self.set_variables()\n self.eval()\n return self",
"def scorr(actual: np.ndarray, predicted: np.ndarray):\n scorr, p_val = sp.stats.spearmanr(actual, predicted)\n return scorr, p_val",
"def compute_ap(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold):\n # Get matches and overlaps\n gt_match, pred_match, overlaps = compute_matches(\n gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold)\n\n # Compute precision and recall at each prediction box step\n precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)\n recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)\n\n # Pad with start and end values to simplify the math\n precisions = np.concatenate([[0], precisions, [0]])\n recalls = np.concatenate([[0], recalls, [1]])\n\n # Ensure precision values decrease but don't increase. This way, the\n # precision value at each recall threshold is the maximum it can be\n # for all following recall thresholds, as specified by the VOC paper.\n for i in range(len(precisions) - 2, -1, -1):\n precisions[i] = np.maximum(precisions[i], precisions[i + 1])\n\n # Compute mean AP over recall range\n indices = np.where(recalls[:-1] != recalls[1:])[0] + 1\n mAP = np.sum((recalls[indices] - recalls[indices - 1]) *\n precisions[indices])\n\n return mAP, precisions, recalls, overlaps",
"def calculate_mean_average_precision(precisions, recalls):\n # Calculate the mean average precision given these recall levels.\n # DO NOT CHANGE. If you change this, the tests will not pass when we run \n # the final evaluation\n recall_levels = np.linspace(0, 1.0, 11)\n interpolated_precisions = np.zeros(shape=recall_levels.shape)\n\n # YOUR CODE HERE\n for i, recall_level in enumerate(recall_levels):\n valid_idxs = np.argwhere(recalls >= recall_level)\n filtered_precisions = precisions[valid_idxs[:,0]]\n\n if len(filtered_precisions) > 0:\n interpolated_precisions[i] = np.amax(filtered_precisions)\n \n mean_average_precision = np.mean(interpolated_precisions)\n\n return mean_average_precision",
"def base_corr(self, cutoff=0.3, show=0):\n\tn = self.data_points\n\tlast_points = int(cutoff*n)\n\tfor i in range(2):\n\t self.the_result.y[i] = self.the_result.y[i] - self.the_result.y[i][:-last_points].mean()\n\tif show == 1 :\n\t return self.the_result\n\treturn self",
"def autocorr(x, **kwargs):\n\t# do same computation as autocovariance,\n\t# but without subtracting the mean\n\tkwargs[ 'debias' ] = False\n\treturn autocov(x, **kwargs)",
"def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)",
"def normalize_estimates(est_np, mix_np):\n mix_max = np.max(np.abs(mix_np))\n return np.stack([est * mix_max / np.max(np.abs(est)) for est in est_np])",
"def test_average_cont(mock_visibility_data_cont):\n uu, vv, weight, data_re, data_im = mock_visibility_data_cont\n\n averager = gridding.DataAverager.from_image_properties(\n cell_size=0.005,\n npix=800,\n uu=uu,\n vv=vv,\n weight=weight,\n data_re=data_re,\n data_im=data_im,\n )\n\n print(averager.uu.shape)\n print(averager.nchan)\n\n averager._grid_visibilities()",
"def tcorr_main(log, subject, segments, tcorrsffx):\n os.chdir(os.path.join(os.environ['decor'], subject, '6mmblur_results'))\n for seg in segments:\n # This is for the AV correlations\n epi1 = '{}_AV.1_{}_splicy+orig'.format(seg, subject)\n epi2 = '{}_AV.2_{}_splicy+orig'.format(seg, subject)\n pref = '{}_AV_{}_{}'.format(seg, subject, tcorrsffx)\n afni_tcorr(log, pref, epi1, epi2)\n\n # This is to get low level visual and auditory correlations\n epi1 = '{}_V_{}_splicy+orig'.format(seg, subject)\n epi2 = '{}_A_{}_splicy+orig'.format(seg, subject)\n pref = '{}_lowlev_{}_{}'.format(seg, subject, tcorrsffx)\n afni_tcorr(log, pref, epi1, epi2)\n\n for m in ('V', 'A'):\n # These are for the V vs AV, A vs AV correlations\n for i in range(1, 3):\n epi1 = '{}_{}_{}_splicy+orig'.format(seg, m, subject)\n epi2 = '{}_AV.{}_{}_splicy+orig.'.format(seg, i, subject)\n pref = '{}_{}.{}_{}_{}'.format(seg, m, i, subject, tcorrsffx)\n afni_tcorr(log, pref, epi1, epi2)\n\n epis = []\n for i in range(1, 3):\n epis.append('{}_{}.{}_{}_{}+orig'.format(\n seg, m, i, subject, tcorrsffx))\n epi_list = ' '.join(epis)\n pref = '{}_{}_{}_{}+orig'.format(seg, m, subject, tcorrsffx)\n mean_res(log, pref, epi_list)\n\n \"\"\"Below revises prior flawed version.\n Had used same epis list as prior call, so\n was including erroneous segments together.\n \"\"\"\n episcond = []\n for m in ['AV', 'A', 'V', 'lowlev']:\n for seg in segments:\n episcond.append('{}_{}_{}_{}+orig'.format(\n seg, m, subject, tcorrsffx))\n epilist = ' '.join(episcond)\n pref = '{}_{}_{}_mean'.format(m, subject, tcorrsffx)\n mean_res(log, pref, epilist)",
"def autocorr(x, **kwargs):\r\n # do same computation as autocovariance,\r\n # but without subtracting the mean\r\n kwargs['debias'] = False\r\n return autocov(x, **kwargs)",
"def anomaly_correlation_loss(mean=None, regularize_mean='mse', reverse=True):\n if mean is not None:\n assert len(mean.shape) > 1\n assert mean.shape[0] == 1\n mean_tensor = K.variable(mean, name='anomaly_correlation_mean')\n\n if regularize_mean is not None:\n assert regularize_mean in ['global', 'spatial', 'mse', 'mae']\n reverse = True\n\n def acc_loss(y_true, y_pred):\n if mean is not None:\n a = (K.mean((y_pred - mean_tensor) * (y_true - mean_tensor))\n / K.sqrt(K.mean(K.square((y_pred - mean_tensor))) * K.mean(K.square((y_true - mean_tensor)))))\n else:\n a = (K.mean(y_pred * y_true)\n / K.sqrt(K.mean(K.square(y_pred)) * K.mean(K.square(y_true))))\n if regularize_mean is not None:\n if regularize_mean == 'global':\n m = K.abs((K.mean(y_true) - K.mean(y_pred)) / K.mean(y_true))\n elif regularize_mean == 'spatial':\n m = K.mean(K.abs((K.mean(y_true, axis=[-2, -1]) - K.mean(y_pred, axis=[-2, -1]))\n / K.mean(y_true, axis=[-2, -1])))\n elif regularize_mean == 'mse':\n m = mean_squared_error(y_true, y_pred)\n elif regularize_mean == 'mae':\n m = mean_absolute_error(y_true, y_pred)\n if reverse:\n if regularize_mean is not None:\n return m - a\n else:\n return -a\n else:\n if regularize_mean:\n return a - m\n else:\n return a\n\n return acc_loss",
"def estimate_advantages(rewards, masks, values, gamma, tau, device):\n\n #rewards, masks, values = to_device(torch.device('cpu'), rewards, masks, values)\n tensor_type = type(rewards)\n deltas = tensor_type(rewards.size(0), 1)\n advantages = tensor_type(rewards.size(0), 1)\n\n prev_value = 0\n prev_advantage = 0\n for i in reversed(range(rewards.size(0))):\n deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i] # at the end of every episode m=0 so we're\n advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i] # computing from there backwards each time\n prev_value = values[i, 0]\n prev_advantage = advantages[i, 0]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / advantages.std()\n\n advantages, returns = to_device(device, advantages, returns)\n return advantages, returns",
"def calculate_accuracy(targets, preds):\n intersection_foreground = targets * preds\n intersection_background = np.invert(targets) * np.invert(preds)\n\n acc_foreground = float(np.sum(intersection_foreground)) \\\n / (float(np.sum(targets)) + 1e-7)\n acc_background = float(np.sum(intersection_background)) \\\n / (float(np.sum(np.invert(targets))) + 1e-7)\n return (acc_foreground + acc_background) / 2"
] | [
"0.59837115",
"0.57311416",
"0.5284991",
"0.5281454",
"0.5232522",
"0.5206169",
"0.5147281",
"0.5128546",
"0.50398403",
"0.50333416",
"0.4890107",
"0.4862184",
"0.48607644",
"0.48548672",
"0.4842955",
"0.48234457",
"0.47857088",
"0.47831595",
"0.47778708",
"0.47652474",
"0.47633767",
"0.47545937",
"0.47458452",
"0.4720465",
"0.47039443",
"0.46941856",
"0.46862087",
"0.46497893",
"0.46441296",
"0.46305504"
] | 0.7158738 | 0 |
calculates the mutual information. `excitation_method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto']. If it is 'auto' then the excitation_method is chosen automatically based on the problem size. `ret_prob_activity` determines whether the probabilities of the different outputs are returned or not | def mutual_information(self, excitation_method='auto', **kwargs):
if excitation_method == 'auto':
if self.Ns <= self.parameters['brute_force_threshold_Ns']:
excitation_method = 'brute_force'
else:
excitation_method = 'monte_carlo'
if excitation_method == 'brute_force' or excitation_method == 'brute-force':
return self.mutual_information_brute_force(**kwargs)
elif excitation_method == 'monte_carlo' or excitation_method == 'monte-carlo':
return self.mutual_information_monte_carlo(**kwargs)
elif excitation_method == 'estimate':
return self.mutual_information_estimate(**kwargs)
else:
raise ValueError('Unknown excitation_method `%s`.' % excitation_method) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutual_information_brute_force(self, ret_prob_activity=False):\n base = 2 ** np.arange(0, self.Nr)\n\n # prob_a contains the probability of finding activity a as an output.\n prob_a = np.zeros(2**self.Nr)\n for c, prob_c in self._iterate_mixtures():\n # get the associated output ...\n a = np.dot(self.sens_mat, c).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n\n prob_a[a] += prob_c\n \n # normalize the output to make it a probability distribution\n prob_a /= prob_a.sum()\n \n # calculate the mutual information\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI",
"def mutual_information_monte_carlo_extrapolate(self, ret_prob_activity=False):\n if self.is_correlated_mixture:\n raise NotImplementedError('Not implemented for correlated mixtures')\n \n base = 2 ** np.arange(0, self.Nr)\n prob_s = self.substrate_probabilities\n\n max_steps = self._sample_steps\n steps, MIs = [], []\n\n # sample mixtures according to the probabilities of finding\n # substrates\n count_a = np.zeros(2**self.Nr)\n step_check = 10000\n for step in range(max_steps):\n # choose a mixture vector according to substrate probabilities\n m = (np.random.random(self.Ns) < prob_s)\n \n # get the associated output ...\n a = np.dot(self.sens_mat, m).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n # increment counter for this output\n count_a[a] += 1\n\n if step == step_check - 1:\n # do an extrapolation step\n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n # save the data \n steps.append(step)\n MIs.append(MI)\n \n # do the extrapolation\n if len(steps) >= 3:\n a2, a1, a0 = MIs[-3:]\n MI_ext = (a0*a2 - a1*a1)/(a0 - 2*a1 + a2)\n# MI_ext = self._get_extrapolated_mutual_information(steps, MIs)\n print((step, MIs[-1], MI_ext))\n \n step_check += 10000\n \n else:\n # count_a contains the number of times output pattern a was observed.\n # We can thus construct P_a(a) from count_a. \n \n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n\n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI",
"def mutual_information_estimate(self, approx_prob=False):\n \n # this might be not the right approach\n q_n = self.receptor_activity_estimate(approx_prob=approx_prob)\n q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob)\n \n # calculate the approximate mutual information\n return self._estimate_MI_from_q_values(q_n, q_nm)",
"def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):\n if co_freq > 0:\n if mitype is not None:\n if mitype == \"expected\":\n mi = math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)\n ) * (co_freq / total_instances)\n elif mitype == \"normalized\":\n alpha = - math.log2(co_freq / total_instances)\n mi = (\n (math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)) / alpha)\n if alpha != 0 else 0\n )\n elif mitype == \"pmi2\":\n mi = math.log2((co_freq ** 2) / (s_freq * t_freq))\n elif mitype == \"pmi3\":\n mi = math.log2(\n (co_freq ** 3) / (s_freq * t_freq * total_instances))\n else:\n raise ValueError(\n \"Provided Mutual information score type (mitype) is not \"\n \"supported. Provide one value from the following list \"\n \"['expected', 'normalized','pmi2', 'pmi3'] \")\n else:\n mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))\n else:\n mi = 0\n return mi if mi > 0 else 0",
"def mutual_information(pi, pj, pij):\n p_i = 1 - pi\n p_j = 1 - pj\n p_ij = pj - pij\n pi_j = pi - pij\n p_i_j = 1 - pi - pj + pij\n \n log_pi = log(pi)\n log_pj = log(pj)\n log_p_i = log(p_i)\n log_p_j = log(p_j)\n \n mi = pij * (log(pij) - log_pi - log_pj) + \\\n pi_j * (log(pi_j) - log_pi - log_p_j) + \\\n p_i_j * (log(p_i_j) - log_p_i - log_p_j)\n if p_ij != 0: # For language groups and features, this is the only probability that could be zero, and lim_x->0[x*log(x)] = 0 \n mi += p_ij * (log(p_ij) - log_p_i - log_pj)\n \n return mi",
"def _expectation_maximization(self, y, responsibility=None, **kwargs): \n\n # Calculate log-likelihood and initial expectation step.\n __init_responsibility, ll, dl = self._expectation(y, **kwargs)\n if responsibility is None:\n responsibility = __init_responsibility\n\n ll_dl = [(ll.sum(), dl)]\n\n meta = dict(warnflag=False)\n for iteration in range(self.max_em_iterations):\n\n # M-step.\n self._maximization(y, responsibility, **kwargs)\n\n # E-step.\n responsibility, ll, dl = self._expectation(y, **kwargs)\n\n # Check for convergence.\n lls = ll.sum()\n prev_ll, prev_dl = ll_dl[-1]\n change = (lls - prev_ll)/prev_ll\n ll_dl.append([lls, dl])\n\n #print(\"E-M\", iteration, change, self.threshold)\n\n if abs(change) <= self.threshold:\n break\n\n else:\n meta.update(warnflag=True)\n logger.warn(\"Maximum number of E-M iterations reached ({})\"\\\n .format(self.max_em_iterations))\n\n meta.update(log_likelihood=lls, message_length=dl)\n\n return (responsibility, meta)",
"def responseProb(obs, dt, n1, n2, pc, scaling, prevInternalState, reward, costM, costS,\n pRes): \n #0 is default, 1 is cue\n respond = 2; internalState = np.nan; payofftoA = 0; payofftoD = 0\n p = np.full((len(obs)+1,2), np.nan) #array of posterior prob for default, cue\n fs = np.full((len(obs)+1,2), np.nan) #array of scaled f values for default, cue\n \n transition1 = np.array([[1, 0],[0,1]]) #transition probabilities in general\n e = np.array([[n1,1-n1],[1-n2,n2]]) #emission probabilities\n foreperiodSteps = int((6/dt)+1)\n \n \n fs[0,:] = np.array([1,0])\n p[0,:] = fs[0,:]/np.sum(fs[0,:])\n \n #inference process \n for i in range(len(obs)):\n if i < foreperiodSteps:\n r = 1/(foreperiodSteps-i)\n #print(r, i, sep= ' ')\n transition2 = np.array([[1-pc*r,pc*r],[0,1]])\n #transition probability in foreperiod, before transition\n fs[i+1, :] = scaling*e[:,int(obs[i])]*np.matmul(fs[i,:], transition2)\n #calculaitng joint probabilities\n else:\n fs[i+1, :] = scaling*e[:,int(obs[i])]*np.matmul(fs[i,:], transition1)\n #calculaitng joint probabilities\n \n p[i+1, :] = fs[i+1,:]/np.sum(fs[i+1,:]) #posterior probabilites\n \n #response process\n \n #calculating payoffs\n if prevInternalState == 'default' :\n payofftoA = p[len(obs),1]*pRes[1,1]*reward + p[len(obs),0]*pRes[0,1]*reward - costS\n payofftoD = p[len(obs),0]*pRes[0,0]*reward + p[len(obs),1]*pRes[1,0]*reward\n elif prevInternalState == 'active' :\n payofftoA = p[len(obs),1]*pRes[1,1]*reward + p[len(obs),0]*pRes[0,1]*reward - costM\n payofftoD = p[len(obs),0]*pRes[0,0]*reward + p[len(obs),1]*pRes[1,0]*reward\n \n \n #deciding internal state based on payoffs\n if payofftoA > payofftoD :\n internalState = 'active'\n k = np.random.binomial(1,pRes[1,1]) #probabilistic response in A\n if k == 1:\n respond = 1\n elif k == 0:\n respond = 0\n \n elif payofftoA < payofftoD :\n internalState = 'default'\n k = np.random.binomial(1,pRes[0,0]) #probabilistic response in D\n if k == 1:\n respond = 0\n elif k == 0:\n respond = 1\n \n \n return respond, internalState, p",
"def calc_mutual_information(probability_mat):\n\n marginals = sp.outer(\n sp.sum(probability_mat, axis=1), sp.sum(probability_mat, axis=0))\n p = probability_mat[probability_mat != 0.0]\n m = marginals[probability_mat != 0.0]\n return sp.sum(p * sp.log(p / m))",
"def _debug_mutual_info(self, labels: np.array, outputs: np.array, mi: float):\n print(f'MI={mi} between \\nl\\t[{\",\".join(map(str, labels))}] and \\no\\t[{\",\".join(map(str, outputs))}]')\n\n label_to_output = {}\n output_to_label = {}\n\n for cls in range(0, self._num_classes):\n label_to_output[cls] = set()\n output_to_label[cls] = set()\n\n for label, output in zip(labels, outputs):\n label_to_output[label].add(output)\n output_to_label[output].add(label)\n\n logger.debug(f'label->output: {label_to_output}')\n logger.debug(f'output->label: {output_to_label}')\n\n if SpLearningConvergenceExperimentTemplate._representation_perfect(label_to_output, output_to_label):\n logger.debug(f'representation is perfect, mutual info should be 1 and is {mi}')\n if mi > 1.000001 or mi < 0.99999:\n logger.error(f'Mutual info is {mi} but should be 1!')",
"def step(self):\n\n \"\"\" First updates the variables values of the current time form the environment \"\"\"\n self.update_crispval(self.env.context)\n\n \"\"\"\n here the decision making of the agent\n to determine which activity to suggest to the patient\n i apply the creative controller to the current context\n \"\"\"\n curr_input = sample_inputs(False, 0, self.curr_interaction, self.variables_default_val, self.action_var,\n self.fuzzysets_values, self.variables_universe)\n c_out, rules_activations, is_cc_exception = self.creative_controller.computeOutput(curr_input, False)\n\n \"\"\" i obtain a number of ouput crisp values.\n i determine which one achieves the max expected output w.r.t. the a-rules \"\"\"\n best_a = None\n best_a_val = -1000\n best_a_exphapp = 5\n if self.verbose > Constants.VERBOSE_BASIC:\n print(\"rules activations\")\n for a in rules_activations:\n if rules_activations[a] > 0:\n print(str(a) + \"\\n\\t\\t\\t-> \" + str(rules_activations[a]))\n for item in c_out.items(): # for each pair <activity, crisp output>\n if self.verbose > Constants.VERBOSE_BASIC:\n print(item)\n if not item[\n 0] in self.curr_iter_suggestions: # if i didn't suggest the same activity already in the same interaction\n inputs = dict(curr_input) # I create a copy fo the dict\n inputs[item[0]] = item[1]\n assessor_id = self.actions_to_ti[item[0]]\n self.assessors[assessor_id].feed_inputs(inputs)\n is_ac_exception = False\n assout = []\n try:\n a_out, a_rules_activations, is_ac_exception = self.assessors[assessor_id].compute(verbose=False)\n assout = [a_out[ao] for ao in a_out]\n except:\n is_ac_exception = True\n traceback.print_exc()\n # todo the following assumes that every assessor controller has same eval var\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n if len(assout) == 0:\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n w_ta = self.weights_therapeutic_interventions[self.actions_to_ti[item[0]]]\n\n avg_credit_rules_that_suggested_action = 1.0\n nr_rules_that_suggested_action = 0\n for r in rules_activations:\n if (rules_activations[r] > 0) and (str(item[0]) in str(r)):\n avg_credit_rules_that_suggested_action = avg_credit_rules_that_suggested_action + \\\n self.rules_credits[str(r)]\n nr_rules_that_suggested_action = nr_rules_that_suggested_action + 1\n if nr_rules_that_suggested_action > 0:\n avg_credit_rules_that_suggested_action = (\n avg_credit_rules_that_suggested_action - 1.0) / nr_rules_that_suggested_action\n repetition_cost = 1.0\n a_val = (mean(assout) * w_ta * avg_credit_rules_that_suggested_action) / repetition_cost\n if (a_val > best_a_val) and (\n item[1] >= (self.variables_default_val[item[0]] + self.range_step[item[0]])):\n best_a = item\n best_a_val = a_val\n best_a_exphapp = mean(assout)\n\n \"\"\"I suggest the activity with best expected outcome and store the information to populate the interactions \n memory \"\"\"\n self.proposeActivity(best_a)\n if not best_a is None:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"proposing activity\" + str(best_a) + \" which has expected feedback: \" + str(\n best_a_exphapp) + \", which weighted is \" + str(best_a_val))\n self.curr_iter_suggestions.append(best_a[0])\n self.last_suggestion = best_a\n else:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"the activity proposed is \" + str(\n best_a) + \" so I don't suggest anything. 
I will ask a question instead\")\n self.last_suggestion = []\n self.expected_feedback = best_a_exphapp\n self.last_context = self.env.context.copy()\n self.last_rules_activations = rules_activations",
"def mr_pairs_have_less_mi_exp(filename=None):\n trials = 500\n matrix = [[0,0,0,0] for i in range(L)]\n motif = [random_site(L) for i in range(n)]\n scale = 0.01 #use this to prevent overflows in anneal\n scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale\n annealed_system = lambda :anneal(scaled_sse,\n lambda(matrix,motif):propose(matrix,motif),\n (matrix,motif),\n verbose=True,\n iterations=100000,\n stopping_crit = 0.1*scale)\n systems = [annealed_system() for i in xrange(500)]\n motifs = map(second,systems)\n ics = map(motif_ic,motifs)\n control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]\n mis = map(total_motif_mi,motifs)\n control_mis = map(total_motif_mi,control_motifs)\n plt.scatter(mis,control_mis)\n plt.xlabel(\"M-R System Mutual Information (bits)\")\n plt.ylabel(\"Annealed Motif Mutual Information (bits)\")\n plt.plot([0,5],[0,5])\n maybesave(filename)\n #mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)\n return mis,control_mis",
"def mr_effect_estimate(phenotypes, outcome, exposure, n_iter=1000,\n y_g_test=\"linear\", x_g_test=\"linear\"):\n def _estimate_beta(phen):\n # Regress big_gamma = Y ~ G\n stats = regress(\"{} ~ grs\".format(outcome), y_g_test, phen)\n big_gamma = stats[\"beta\"]\n\n # Regress small_gamma = X ~ G\n stats = regress(\"{} ~ grs\".format(exposure), x_g_test, phen)\n small_gamma = stats[\"beta\"]\n\n # Ratio estimate is beta = big_gamma / small_gamma\n return big_gamma / small_gamma\n\n # Using the percentile method to compute a confidence interval.\n df = phenotypes._phenotypes\n beta = _estimate_beta(phenotypes)\n\n betas = np.empty(n_iter, dtype=float)\n n = phenotypes.get_nb_samples()\n for i in range(n_iter):\n idx = np.random.choice(n, size=n, replace=True)\n phenotypes._phenotypes = df.iloc[idx, :]\n betas[i] = _estimate_beta(phenotypes)\n\n # Find the critical values\n # 95% CI -> 2.5% and 97.5%\n low, high = np.percentile(betas, [2.5, 97.5])\n\n # p-value\n # This method to calculate the p-value is derived from:\n # An Introduction to the Bootstrap. 1993. doi:10.1007/978-1-4899-4541-9\n # Efron B., Tibshirani RJ.\n #\n # Section 15.4: Relationship of hypothesis tests to confidence intervals\n # and the bootstrap.\n # TODO verify...\n # p = np.sum(betas < 0) / n_iter\n\n return beta, low, high, None",
"def mutual_information_penalty(\n structured_generator_inputs,\n predicted_distributions,\n weights=1.0,\n scope=None,\n add_summaries=False):\n #print('cat shape', log_prob_cat.shape) \n q_cont = predicted_distributions\n sigma_cont = tf.ones_like(q_cont)\n q_cont = ds.Normal(loc=q_cont, scale=sigma_cont)\n log_prob_con = tf.reduce_mean(q_cont.log_prob(structured_generator_inputs), axis = 0)\n\n loss = -1 * losses.compute_weighted_loss(log_prob_con, weights, scope)\n\n return loss",
"def receptor_activity_brute_force(self, ret_correlations=False):\n S_ni = self.sens_mat\n Z = 0\n r_n = np.zeros(self.Nr)\n if ret_correlations:\n r_nm = np.zeros((self.Nr, self.Nr))\n \n # iterate over all mixtures\n for c, prob_c in self._iterate_mixtures():\n # get the activity vector associated with m\n a_n = (np.dot(S_ni, c) >= 1)\n Z += prob_c\n\n r_n[a_n] += prob_c\n if ret_correlations:\n r_nm[np.outer(a_n, a_n)] += prob_c\n \n # return the normalized output\n r_n /= Z\n if ret_correlations:\n r_nm /= Z\n return r_n, r_nm\n else:\n return r_n",
"def get_mutual_information_table(self, dims_to_use=None, ignore_negative_values=True, use_correlation=False):\n from mlabwrap import mlab\n bad_dims = self.get_markers('surface_ignore')\n bad_dims.append('Cell Length')\n bad_dims.append('Time')\n bad_dims.append('191-DNA')\n bad_dims.append('193-DNA')\n bad_dims.append('103-Viability')\n bad_dims.append('cluster_name')\n bad_dims.append('stim')\n bad_dims.append('cluster_num')\n if not dims_to_use:\n dims_to_use = self.dims[:]\n dims_to_use = [d for d in dims_to_use if not d in bad_dims] \n num_dims = len(dims_to_use)\n res = np.zeros((num_dims, num_dims))\n logging.info(\n 'Calculating mutual information for %d pairs...' % ((num_dims ** 2 - num_dims) / 2))\n timer = MultiTimer((num_dims ** 2 - num_dims) / 2)\n for i in xrange(num_dims):\n for j in xrange(i):\n arr = self.get_points(dims_to_use[i], dims_to_use[j])\n if ignore_negative_values:\n arr = arr[np.all(arr > 0, axis=1)]\n if arr.shape[0] < 100:\n logging.warning('Less than 100 cells in MI calculation for (%s, %s)' % (dims_to_use[i], dims_to_use[j]))\n res[j,i] = 0\n res[i,j] = 0\n continue\n if use_correlation:\n res[i,j] = np.corrcoef(arr.T[0], arr.T[1])[0,1]\n else:\n res[i,j] = mlab.mutualinfo_ap(arr, nout=1)\n res[j,i] = res[i,j]\n timer.complete_task('%s, %s' % (dims_to_use[i], dims_to_use[j]))\n return DataTable(res, dims_to_use)",
"def nmi(y_pred, y_true, average_method='geometric'):\n return metrics.normalized_mutual_info_score(y_true, y_pred, average_method=average_method)",
"def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n axis=0)\n return mutual_info",
"def mite_2m(train_df, test_df, features, outcome, treatment, exposure, clf_t, clf_c, clf_er):\n np.random.seed(0)\n\n train_exposed_df, train_not_exposed_df = split_treatment_control(train_df, exposure)\n train_t_df, _ = split_treatment_control(train_df, treatment)\n\n clf_t_trained = clf_t.fit(train_exposed_df[features], train_exposed_df[outcome])\n clf_c_trained = clf_c.fit(train_not_exposed_df[features], train_not_exposed_df[outcome])\n clf_er_trained = clf_er.fit(train_t_df[features], train_t_df[exposure])\n\n test_f_df = test_df[features]\n return clf_er_trained.predict_proba(test_f_df)[:, 1] * \\\n (clf_t_trained.predict_proba(test_f_df)[:, 1] - clf_c_trained.predict_proba(test_f_df)[:, 1])",
"def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')",
"def mutual_information(x, y, logfunc=np.log2, nperms=1e4):\n def entropy(freqDict):\n return -np.array([p*logFunc(p) for p in freqDict.values()]).sum()\n freqx = objhist(x)\n freqy = objhist(y)\n \n Hx = freqx.entropy()\n Hy = freqy.entropy()\n Hxy = objhist(zip(x,y)).entropy()\n M = Hx + Hy - Hxy\n Mstar = 2*M / (Hx+Hy)\n\n if len(freqx)==1 or len(freqy)==1:\n p = 1\n elif np.all([xi==yi for xi,yi in zip(x,y)]):\n p = 0\n else:\n Mperms = np.array([Hx + Hy - objhist(zip(permutation(x),y)).entropy() for i in np.arange(nperms)])\n p = (Mperms >= M).sum() / nperms\n\n return M, Mstar, p, Hx, Hy, Hxy",
"def fitness(self, *model_parameters):\r\n\r\n try:\r\n model_instance = self.fitted_model(*model_parameters)\r\n except FloatingPointError:\r\n message = utils.errorResp()\r\n logger = logging.getLogger('Fitter')\r\n logger.warning(\r\n u\"{0}\\n. Abandoning fitting with parameters: {1} Returning an action choice probability for each trialstep of {2}\".format(message,\r\n repr(\r\n self.get_model_parameters(\r\n *model_parameters)),\r\n repr(\r\n self.float_error_response_value)))\r\n return np.ones(np.array(self.participant_rewards).shape) * self.float_error_response_value\r\n except ValueError as e:\r\n logger = logging.getLogger('Fitter')\r\n logger.warn(\r\n \"{0} in fitted model. Abandoning fitting with parameters: {1} Returning an action choice probability for each trialstep of {2} - {3}, - {4}\".format(\r\n type(e),\r\n repr(self.get_model_parameters(*model_parameters)),\r\n repr(self.float_error_response_value),\r\n e.message,\r\n e.args))\r\n return np.ones(np.array(self.participant_rewards).shape) * self.float_error_response_value\r\n\r\n # Pull out the values to be compared\r\n model_data = model_instance.returnTaskState()\r\n model_choice_probabilities = model_data[self.model_fitting_variable]\r\n\r\n if self.fit_subset_described is None:\r\n model_performance = model_choice_probabilities\r\n else:\r\n model_performance = model_choice_probabilities[self.fit_subset_described]\r\n\r\n if np.isnan(model_performance).any():\r\n logger = logging.getLogger('Fitter')\r\n message = \"model performance values contain ``Not a Number`` (NaN), i.e. the model had a problem.\"\r\n logger.warning(message + \".\\n Abandoning fitting with parameters: \"\r\n + repr(self.get_model_parameters(*model_parameters))\r\n + \" Returning an action choice probability for each trialstep of \"\r\n + repr(self.float_error_response_value))\r\n return np.ones(np.array(self.participant_rewards).shape) * self.float_error_response_value\r\n\r\n return model_performance",
"def ite_2m(train_df, test_df, features, outcome, treatment, clf_t, clf_c):\n np.random.seed(0)\n\n train_t_df, train_c_df = split_treatment_control(train_df, treatment)\n\n clf_t_trained = clf_t.fit(train_t_df[features], train_t_df[outcome])\n clf_c_trained = clf_c.fit(train_c_df[features], train_c_df[outcome])\n\n test_f_df = test_df[features]\n return clf_t_trained.predict_proba(test_f_df)[:, 1] - clf_c_trained.predict_proba(test_f_df)[:, 1]",
"def nmi(ypred, y):\n# print (ypred)\n# print (y)\n return normalized_mutual_info_score(y,ypred)",
"def pmi(cls, *marginals):\n return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -\n _log2(_product(marginals[UNIGRAMS])))",
"def adjusted_mutual_info(self):\n # Prepare row totals and check for special cases\n row_totals = np.fromiter(self.iter_row_totals(), dtype=np.int64)\n col_totals = np.fromiter(self.iter_col_totals(), dtype=np.int64)\n R = len(row_totals)\n C = len(col_totals)\n if R == C == 1 or R == C == 0:\n # No clustering since the data is not split. This is a perfect match\n # hence return 1.0.\n return 1.0\n\n # In one step, calculate entropy for each labeling and mutual\n # information\n h_true, h_pred, mi = self._entropies()\n mi_max = max(h_true, h_pred)\n\n # Calculate the expected value for the MI\n emi = emi_from_margins(row_totals, col_totals)\n\n # Calculate the adjusted MI score\n ami = (mi - emi) / (mi_max - emi)\n return ami",
"def compute_empirical_mutual_info_nats(var1_values, var2_values):\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n\n empirical_mutual_info_nats = 0.0\n \n var1_distribution = compute_empirical_distribution(var1_values)\n var2_distribution = compute_empirical_distribution(var2_values)\n joint_distribution = compute_empirical_distribution(list(zip(var1_values,var2_values)))\n \n empirical_mutual_info_nats = 0\n for var1 in var1_distribution:\n for var2 in var2_distribution:\n empirical_mutual_info_nats += joint_distribution[(var1, var2)] \\\n * np.log(joint_distribution[(var1,var2)]/(var1_distribution[var1]*var2_distribution[var2]))\n \n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return empirical_mutual_info_nats",
"def _responsibility_matrix(y, mean, covariance, weight, covariance_type):\n\n precision = _compute_precision_cholesky(covariance, covariance_type)\n weighted_log_prob = np.log(weight) + \\\n _estimate_log_gaussian_prob(y, mean, precision, covariance_type)\n\n log_likelihood = scipy.misc.logsumexp(weighted_log_prob, axis=1)\n with np.errstate(under=\"ignore\"):\n log_responsibility = weighted_log_prob - log_likelihood[:, np.newaxis]\n\n responsibility = np.exp(log_responsibility).T\n \n return (responsibility, log_likelihood)",
"def mutual_information_max(self):\n return np.log2(special.comb(self.Nr, self.coding_receptors))",
"def mutual_information(pred, true):\n \n #for now , only for univariate forecasting. So reshapes entire batch of K timesteps into vector as if single feature\n MI = mutual_info_regression(true.detach().numpy().flatten().reshape(-1,1), pred.detach().numpy().flatten())[0]\n return torch.tensor(MI)",
"def obtain_consistent_marginals(self, priv_marginal_config, priv_split_method) -> Marginals:\n\n # generate_all_pub_marginals() generates all the one and two way marginals of the public set which is implemented in DataLoader.py\n if self.data.pub_ref:\n pub_marginals = self.data.generate_all_pub_marginals()\n \n # get_noisy_marginals() is in synthesizer.py\n # which first calls generate_..._by_config(), and computes on priv_data to return marginal_sets, epss\n # (note that 'marginal_key' could be 'priv_all_one_way' or 'priv_all_two_way')\n # later it calls anonymize() which add noises to marginals\n # (what decides noises is 'priv_split_method') \n # priv_split_method[set_key]='lap' or....\n # Step 1: generate noisy marginals\n noisy_marginals = self.get_noisy_marginals(priv_marginal_config, priv_split_method)\n\n # since calculated on noisy marginals\n # we use mean function to estimate the number of synthesized records\n num_synthesize_records = np.mean([np.sum(x.values) for _, x in noisy_marginals.items()]).round().astype(np.int)\n print(\"------------------------> now we get the estimate of records' num by averaging from nosiy marginals:\", num_synthesize_records)\n \n \n \n # the list of all attributes' name(str) except the identifier attribute\n self.attr_list = self.data.obtain_attrs()\n # domain_list is an array recording the count of each attribute's candidate values\n self.domain_list = np.array([len(self.data.encode_schema[att]) for att in self.attr_list])\n \n # map the attribute str to its index in attr_list, for possible use\n # use enumerate to return Tuple(index, element) \n self.attr_index_map = {att: att_i for att_i, att in enumerate(self.attr_list)}\n\n\n # views are wrappers of marginals with additional functions for consistency\n # if there exist public dataset to refer to\n if self.data.pub_ref:\n pub_onehot_view_dict, pub_attr_view_dict = self.construct_views(pub_marginals)\n # Step 2: create some data structures\n noisy_onehot_view_dict, noisy_attr_view_dict = self.construct_views(noisy_marginals)\n \n # all_views is one-hot to view dict, views_dict is attribute to view dict\n # they have different format to satisfy the needs of consistenter and synthesiser\n # to fit in code when we do not have public things to utilize \n if not self.data.pub_ref:\n pub_onehot_view_dict = noisy_onehot_view_dict\n pub_attr_view_dict = noisy_attr_view_dict\n\n self.onehot_view_dict, self.attrs_view_dict = self.normalize_views(\n pub_onehot_view_dict,\n pub_attr_view_dict,\n noisy_attr_view_dict,\n self.attr_index_map,\n num_synthesize_records)\n\n # consist the noisy marginals to submit to some rules\n consistenter = Consistenter(self.onehot_view_dict, self.domain_list)\n consistenter.consist_views()\n\n # consistenter uses unnormalized counts;\n # after consistency, synthesizer uses normalized counts\n for _, view in self.onehot_view_dict.items():\n view.count /= sum(view.count)\n\n return noisy_marginals, num_synthesize_records"
] | [
"0.7677321",
"0.6729323",
"0.6610971",
"0.62965286",
"0.5475816",
"0.54467",
"0.53803253",
"0.53383917",
"0.5304403",
"0.52832675",
"0.5273865",
"0.52435094",
"0.5235883",
"0.52353334",
"0.51856315",
"0.5179638",
"0.516652",
"0.51329374",
"0.5128437",
"0.5115408",
"0.50964516",
"0.50935835",
"0.50890595",
"0.5055294",
"0.5049285",
"0.5039296",
"0.5037484",
"0.5037215",
"0.5026958",
"0.49881944"
] | 0.78565335 | 0 |
calculate the mutual information by constructing all possible mixtures | def mutual_information_brute_force(self, ret_prob_activity=False):
base = 2 ** np.arange(0, self.Nr)
# prob_a contains the probability of finding activity a as an output.
prob_a = np.zeros(2**self.Nr)
for c, prob_c in self._iterate_mixtures():
# get the associated output ...
a = np.dot(self.sens_mat, c).astype(np.bool)
# ... and represent it as a single integer
a = np.dot(base, a)
prob_a[a] += prob_c
# normalize the output to make it a probability distribution
prob_a /= prob_a.sum()
# calculate the mutual information
MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)
if ret_prob_activity:
return MI, prob_a
else:
return MI | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))",
"def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * weight_c\n hist2d += np.outer(c, c) * weight_c\n \n # calculate the frequency and the correlations \n ci_mean = hist1d / Z\n cij = hist2d / Z\n cij_corr = cij - np.outer(ci_mean, ci_mean)\n \n ci_var = np.diag(cij_corr)\n return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,\n 'cov': cij_corr}",
"def _iterate_mixtures(self):\n \n if self._iterate_steps > self.parameters['max_steps']:\n raise RuntimeError('The iteration would take more than %g steps'\n % self.parameters['max_steps'])\n \n hi = self.commonness\n Jij = self.correlations\n\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n # iterate over all mixtures\n for c in itertools.product((0, 1), repeat=self.Ns):\n c = np.array(c, np.uint8)\n weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))\n yield c, weight_c\n \n elif mixture_size == 0:\n # special case which is not covered by the iteration below\n yield np.zeros(self.Ns, np.uint8), 1\n \n elif mixture_size == self.Ns:\n # special case which is not covered by the iteration below\n yield np.ones(self.Ns, np.uint8), 1\n \n else:\n # iterate over all mixtures with constant number of substrates\n c = np.zeros(self.Ns, np.uint8)\n for nz in itertools.combinations(range(self.Ns), mixture_size):\n c[:] = 0\n c[np.array(nz)] = 1\n weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))\n yield c, weight_c",
"def mutual_information_3d(self,max_lag,percent_calc=.5,digitize=True):\n\n if digitize:\n M = utilities.mi_digitize(self.X)\n else:\n M = self.X\n\n rs, cs, zs = np.shape(M)\n\n rs_iters = int(rs*percent_calc)\n cs_iters = int(cs*percent_calc)\n\n r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)\n c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)\n\n\n # The r_picks are used to calculate the MI in the columns\n # and the c_picks are used to calculate the MI in the rows\n\n c_mi = np.zeros((rs_iters,max_lag))\n r_mi = np.zeros((cs_iters,max_lag))\n\n for i in range(rs_iters):\n for j in range(max_lag):\n\n rand_z = np.random.randint(0,zs)\n ind = j+1\n unshift = M[r_picks[i],ind:,rand_z]\n shift = M[r_picks[i],:-ind,rand_z]\n c_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n for i in range(cs_iters):\n for j in range(max_lag):\n\n rand_z = np.random.randint(0,zs)\n ind=j+1\n unshift = M[ind:, c_picks[i],rand_z]\n shift = M[:-ind, c_picks[i],rand_z]\n r_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n #for the z dimension\n rs,cs = np.where(np.random.rand(rs,cs)<percent_calc)\n z_mi = np.zeros( (len(rs),max_lag) )\n\n for i, (rs,cs) in enumerate(zip(r_picks,c_picks)):\n for j in range(max_lag):\n\n ind=j+1\n\n unshift = M[rs, cs, ind:]\n shift = M[rs, cs, :-ind]\n z_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n r_mut = np.mean(r_mi,axis=0)\n c_mut = np.mean(c_mi,axis=0)\n z_mut = np.mean(z_mi,axis=0)\n\n return r_mut, c_mut, z_mut",
"def make_mixture_parameters(param_dict):\n compound1 = param_dict['compound1_name']\n compound2 = param_dict['compound2_name']\n compound1_mw = param_dict[compound1]['mw']\n compound2_mw = param_dict[compound2]['mw']\n n_fractions = param_dict['n_fractions']\n compound1_frac_range = np.linspace(0,1,n_fractions)\n total_mass = param_dict['total_mass'] #grams\n output_mass = {}\n output_mass[compound1] = np.zeros(n_fractions)\n output_mass[compound2] = np.zeros(n_fractions) \n compound_mw_array = np.array([compound1_mw, compound2_mw])\n for i, frac in enumerate(compound1_frac_range):\n fractions = np.linalg.solve([compound_mw_array,[1.0-frac, -1.0*frac]],[10, 0])\n output_mass[compound1][i] = fractions[0]*compound1_mw\n output_mass[compound2][i] = fractions[1]*compound2_mw\n return output_mass",
"def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def mutual_information(transposed, transposed_2 = False):\n\tmi = []\n\tlength = range(len(transposed))\n\tfor i in length:\n\t\tentropy_i = entropy(transposed[i])\n\t\tmi_list = []\n\t\tif transposed_2 == False:\n\t\t\tfor j in length:\n\t\t\t\tentropy_j = entropy(transposed[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\n\t\telse:\n\t\t\tlength_2 = range(len(transposed_2))\n\t\t\tfor j in length_2:\n\t\t\t\tentropy_j = entropy(transposed_2[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed_2[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\treturn mi",
"def mutual_information(transposed, transposed_2 = False):\n\tmi = []\n\tlength = range(len(transposed))\n\tfor i in length:\n\t\tentropy_i = entropy(transposed[i])\n\t\tmi_list = []\n\t\tif transposed_2 == False:\n\t\t\tfor j in length:\n\t\t\t\tentropy_j = entropy(transposed[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\n\t\telse:\n\t\t\tlength_2 = range(len(transposed_2))\n\t\t\tfor j in length_2:\n\t\t\t\tentropy_j = entropy(transposed_2[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed_2[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\treturn mi",
"def get_mixture(data, components):\n from jcvi.apps.base import popen\n\n probs, mus, sigmas = [], [], []\n fw = must_open(\"tmp\", \"w\")\n log_data = [log(x) for x in data if x > .05]\n data = \"\\n\".join([\"%.4f\" % x for x in log_data]).replace(\"inf\\n\", \"\")\n fw.write(data)\n fw.close()\n\n cmd = \"gmm-bic {0} {1} {2}\".format(components, len(log_data), fw.name)\n pipe = popen(cmd)\n\n for row in pipe:\n if row[0] != '#':\n continue\n\n atoms = row.split(\",\")\n a, b, c = atoms[1:4]\n a = float(a)\n b = float(b)\n c = float(c)\n\n mus.append(a)\n sigmas.append(b)\n probs.append(c)\n\n os.remove(fw.name)\n return probs, mus, sigmas",
"def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. 
Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info",
"def results_muscles(self):\n\n angle = self.res[:, 0]\n\n m1_state = self.res[:, 2:4]\n m2_state = self.res[:, 4:6]\n\n # Initializing the muscles results dictionary\n res_muscles = {'muscle1': np.empty(\n (len(angle), 7)), 'muscle2': np.empty((len(angle), 7))}\n\n # Get the muscle objects\n m1 = self.sys.muscle_sys.Muscle1\n m2 = self.sys.muscle_sys.Muscle2\n\n # Iterate over the states to re compute the paramters\n for i, angle_ in enumerate(angle):\n\n delta_length = self.sys.muscle_sys.delta_length_from_angle(angle_)\n\n # Muscle 1\n res_muscles['muscle1'][i, :] = m1.ode_result(\n m1_state[i, 0], m1_state[i, 1], delta_length[0])\n # Muscle 2\n res_muscles['muscle2'][i, :] = m2.ode_result(\n m2_state[i, 0], m2_state[i, 1], delta_length[1])\n\n return res_muscles",
"def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = [email protected]()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)",
"def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))",
"def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))",
"def pairwiseMutualInformation(align, nperms=1e4):\n L=len(align[align.index[0]])\n columns = [align.map(lambda s: s[i]) for i in np.arange(L)]\n M = np.nan*np.zeros((L, L))\n p = np.nan*np.zeros((L, L))\n Mstar = np.nan*np.zeros((L, L))\n for xi, yi in itertools.combinations(np.arange(L), 2):\n freqx = objhist(columns[xi])\n freqy = objhist(columns[yi])\n\n tmpM, tmpMstar, tmpp, Hx, Hy, Hxy= mutual_information(columns[xi],\n columns[yi],\n logfunc=np.log2,\n nperms=nperms)\n \n \"\"\"We wouldn't need to test invariant sites or a site with itself\"\"\"\n if len(freqx) == 1 or len(freqy) == 1:\n tmpp = np.nan\n elif xi == yi:\n tmpp = np.np.nan\n\n M[xi, yi] = tmpM\n p[xi, yi] = tmpp\n Mstar[xi, yi] = tmpMstar\n q = adjustnonnan(p)\n\n return M, Mstar, p, q",
"def check_mixture_health(self):\n h = HealthDict()\n h['mole_fraction_too_low'] = []\n h['mole_fraction_too_high'] = []\n conc = mole_summation(phase=self)\n lo = np.where(conc < 1.0)[0]\n hi = np.where(conc > 1.0)[0]\n if len(lo) > 0:\n h['mole_fraction_too_low'] = lo\n if len(hi) > 0:\n h['mole_fraction_too_high'] = hi\n return h",
"def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n axis=0)\n return mutual_info",
"def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):\n if co_freq > 0:\n if mitype is not None:\n if mitype == \"expected\":\n mi = math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)\n ) * (co_freq / total_instances)\n elif mitype == \"normalized\":\n alpha = - math.log2(co_freq / total_instances)\n mi = (\n (math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)) / alpha)\n if alpha != 0 else 0\n )\n elif mitype == \"pmi2\":\n mi = math.log2((co_freq ** 2) / (s_freq * t_freq))\n elif mitype == \"pmi3\":\n mi = math.log2(\n (co_freq ** 3) / (s_freq * t_freq * total_instances))\n else:\n raise ValueError(\n \"Provided Mutual information score type (mitype) is not \"\n \"supported. Provide one value from the following list \"\n \"['expected', 'normalized','pmi2', 'pmi3'] \")\n else:\n mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))\n else:\n mi = 0\n return mi if mi > 0 else 0",
"def calculate_mixture_features(data_type):\n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n \n fs = config.sample_rate\n \n if data_type == 'train':\n snr = config.Tr_SNR\n elif data_type == 'test':\n snr = config.Te_SNR \n else:\n raise Exception(\"data_type must be train | test!\")\n \n \n # Open mixture csv. \n mixture_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n with open(mixture_csv_path, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n lis = list(reader)\n \n t1 = time.time()\n cnt = 0\n for i1 in range(1, len(lis)):\n [speech_na, noise_na, noise_onset, noise_offset] = lis[i1]\n noise_onset = int(noise_onset)\n noise_offset = int(noise_offset)\n \n # Read speech audio. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path, target_fs=fs)\n \n # Read noise audio. \n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path, target_fs=fs)\n \n # Repeat noise to the same length as speech. \n if len(noise_audio) < len(speech_audio):\n n_repeat = int(np.ceil(float(len(speech_audio)) / float(len(noise_audio))))\n noise_audio_ex = np.tile(noise_audio, n_repeat)\n noise_audio = noise_audio_ex[0 : len(speech_audio)]\n # Truncate noise to the same length as speech. \n else:\n noise_audio = noise_audio[noise_onset : noise_offset]\n \n # Scale speech to given snr. \n scaler = get_amplitude_scaling_factor(speech_audio, noise_audio, snr=snr)\n speech_audio *= scaler\n \n # Get normalized mixture, speech, noise. \n (mixed_audio, speech_audio, noise_audio, alpha) = additive_mixing(speech_audio, noise_audio)\n\n # Write out mixed audio. \n out_bare_na = os.path.join(\"%s.%s\" % \n (os.path.splitext(speech_na)[0], os.path.splitext(noise_na)[0]))\n out_audio_path = os.path.join(workspace, \"mixed_audios\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.wav\" % out_bare_na)\n create_folder(os.path.dirname(out_audio_path))\n write_audio(out_audio_path, mixed_audio, fs)\n\n # Extract spectrogram. \n mixed_complx_x = calc_sp(mixed_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n noise_x = calc_sp(noise_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.p\" % out_bare_na)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, noise_x, alpha, out_bare_na]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def get_mutual_information(filename):\n categories = {} #{category: speakers of this category}\n features = {} #{feat: speakers who use this feature}\n pos_categories_features = {} #{category: {feat: speakers of category who use this feat}}\n neg_categories_features = {} #{category: {feat: speakers of category who do not use this feat}}\n users = set() #set of all users in data\n \n for line in open(filename):\n userid, c, date, statusid, rawtweet, toktweet, tagtweet = line.split('\\t')\n users.add(userid)\n \n if c not in categories:\n categories[c] = set()\n pos_categories_features[c] = {}\n categories[c].add(userid)\n \n feats = set(toktweet.lower().split()) #lowercase tweet and split into words\n\n for feat in feats:\n if feat not in pos_categories_features[c]:\n pos_categories_features[c][feat] = set()\n pos_categories_features[c][feat].add(userid)\n \n if feat not in features:\n features[feat] = set()\n features[feat].add(userid)\n\n print \"Parsed data\"\n\n numfeats = len(features) #num of features\n print numfeats, \"features\"\n numusers = len(users) #num of users \n print numusers, \"users\"\n\n #keep sizes of sets, not sets themselves\n for feat in features:\n features[feat] = len(features[feat])\n for c in categories:\n categories[c] = len(categories[c])\n for c in pos_categories_features:\n for feat in features:\n if feat in pos_categories_features[c]:\n pos_categories_features[c][feat] = len(pos_categories_features[c][feat])\n else:\n pos_categories_features[c][feat] = 0\n\n for c in categories:\n print c, categories[c], \"users\"\n\n print \"Computed counts\"\n \n mi = {}\n for feat in features:\n mi[feat] = 0.0\n for c in categories:\n #print c, feat, features[feat], pos_categories_features[c][feat]\n \n catprob = categories[c]/numusers\n\n #prob of speakers of category c using feat\n featprob = features[feat]/numusers\n jointprob = pos_categories_features[c][feat]/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n \n #prob of speakers of category c NOT using feat\n featprob = 1 - featprob\n jointprob = (categories[c] - pos_categories_features[c][feat])/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n\n print \"Computed mutual information\"\n\n feature_scores = sorted(mi.items(), key=lambda x:x[1], reverse=True)\n refcat = categories.keys()[0] #pick one of the categories\n print 'Feature\\tMI\\tP({0}|Feature)\\tNum. users'.format(refcat)\n for feat, score in feature_scores[:200]:\n prob = pos_categories_features[refcat][feat]/features[feat]\n print '{0}\\t{1:.3f}\\t{2:.3f}\\t{3}'.format(feat, score, prob, features[feat])",
"def test_measure_deterministic_multi_qubit_with_sampling(self):\n shots = 100\n qobj = ref_measure.measure_circuits_qobj_deterministic(allow_sampling=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_measure.measure_counts_qobj_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def variations():",
"def mutual_information_union(p1, p2, measure=normalized_mutual_info_score):\n nodes = sorted(set(p1.keys()) | set(p2.keys()))\n if nodes == []: return 0\n return measure(\n [p1[n] if n in p1 else np.random.randint(1e12) for n in nodes],\n [p2[n] if n in p2 else np.random.randint(1e12) for n in nodes]\n )",
"def get_mutual_information_table(self, dims_to_use=None, ignore_negative_values=True, use_correlation=False):\n from mlabwrap import mlab\n bad_dims = self.get_markers('surface_ignore')\n bad_dims.append('Cell Length')\n bad_dims.append('Time')\n bad_dims.append('191-DNA')\n bad_dims.append('193-DNA')\n bad_dims.append('103-Viability')\n bad_dims.append('cluster_name')\n bad_dims.append('stim')\n bad_dims.append('cluster_num')\n if not dims_to_use:\n dims_to_use = self.dims[:]\n dims_to_use = [d for d in dims_to_use if not d in bad_dims] \n num_dims = len(dims_to_use)\n res = np.zeros((num_dims, num_dims))\n logging.info(\n 'Calculating mutual information for %d pairs...' % ((num_dims ** 2 - num_dims) / 2))\n timer = MultiTimer((num_dims ** 2 - num_dims) / 2)\n for i in xrange(num_dims):\n for j in xrange(i):\n arr = self.get_points(dims_to_use[i], dims_to_use[j])\n if ignore_negative_values:\n arr = arr[np.all(arr > 0, axis=1)]\n if arr.shape[0] < 100:\n logging.warning('Less than 100 cells in MI calculation for (%s, %s)' % (dims_to_use[i], dims_to_use[j]))\n res[j,i] = 0\n res[i,j] = 0\n continue\n if use_correlation:\n res[i,j] = np.corrcoef(arr.T[0], arr.T[1])[0,1]\n else:\n res[i,j] = mlab.mutualinfo_ap(arr, nout=1)\n res[j,i] = res[i,j]\n timer.complete_task('%s, %s' % (dims_to_use[i], dims_to_use[j]))\n return DataTable(res, dims_to_use)",
"def mutual_information_monte_carlo_extrapolate(self, ret_prob_activity=False):\n if self.is_correlated_mixture:\n raise NotImplementedError('Not implemented for correlated mixtures')\n \n base = 2 ** np.arange(0, self.Nr)\n prob_s = self.substrate_probabilities\n\n max_steps = self._sample_steps\n steps, MIs = [], []\n\n # sample mixtures according to the probabilities of finding\n # substrates\n count_a = np.zeros(2**self.Nr)\n step_check = 10000\n for step in range(max_steps):\n # choose a mixture vector according to substrate probabilities\n m = (np.random.random(self.Ns) < prob_s)\n \n # get the associated output ...\n a = np.dot(self.sens_mat, m).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n # increment counter for this output\n count_a[a] += 1\n\n if step == step_check - 1:\n # do an extrapolation step\n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n # save the data \n steps.append(step)\n MIs.append(MI)\n \n # do the extrapolation\n if len(steps) >= 3:\n a2, a1, a0 = MIs[-3:]\n MI_ext = (a0*a2 - a1*a1)/(a0 - 2*a1 + a2)\n# MI_ext = self._get_extrapolated_mutual_information(steps, MIs)\n print((step, MIs[-1], MI_ext))\n \n step_check += 10000\n \n else:\n # count_a contains the number of times output pattern a was observed.\n # We can thus construct P_a(a) from count_a. \n \n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n\n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI",
"def age_group_mixing():\n p = []\n for j in np.linspace(1,5,5):\n for k in np.linspace(1,5,5):\n if j == k:\n p.append(1)\n else:\n p.append(0.2**np.abs(j+1-k))\n p /= sum(p)\n return p",
"def compute_empirical_mutual_info_nats(var1_values, var2_values):\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n\n empirical_mutual_info_nats = 0.0\n \n var1_distribution = compute_empirical_distribution(var1_values)\n var2_distribution = compute_empirical_distribution(var2_values)\n joint_distribution = compute_empirical_distribution(list(zip(var1_values,var2_values)))\n \n empirical_mutual_info_nats = 0\n for var1 in var1_distribution:\n for var2 in var2_distribution:\n empirical_mutual_info_nats += joint_distribution[(var1, var2)] \\\n * np.log(joint_distribution[(var1,var2)]/(var1_distribution[var1]*var2_distribution[var2]))\n \n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return empirical_mutual_info_nats",
"def test_measure_deterministic_multi_qubit_without_sampling(self):\n shots = 100\n qobj = ref_measure.measure_circuits_qobj_deterministic(allow_sampling=False)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_measure.measure_counts_qobj_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def _multiple_entity_mutated(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n if self.version == \"canc\":\n variant = \"gene\"\n else:\n variant = \"DepMap_ID\"\n\n out_dict = {\"names\": lambda x: list(set(x[self.by[self.version]])), #functions for returning specific data types\n \"dataframe\": lambda x: x}\n\n if output == \"dict\":\n out = {k:mut_dat[self.by[self.version]].loc[v].unique() for k,v in mut_dat.groupby(variant).groups.items()}\n else:\n out = out_dict[output](mut_dat)\n\n return out",
"def mixing2(M_tot, N_tot, alpha, gamma, sigma_mixed, sigma_sulfate, sigma_carbon,\n rho_sulfate, rho_carbon, kappa_sulfate, kappa_carbon, diam_ratio=1.4):\n import numpy as np\n from parcel_model.parcel import AerosolSpecies\n from parcel_model.lognorm import Lognorm\n\n # 1) Compute mixed mode density from prescribed values\n epsilon = 1./(gamma+1)\n rho_mixed = (1.-gamma)*rho_sulfate + gamma*rho_carbon\n kappa_mixed = (1.-gamma)*kappa_sulfate + gamma*kappa_carbon\n\n # 2) Separate internal/external masses\n M_ext = alpha*M_tot\n M_mixed = M_tot - M_ext\n\n # 3) Apportion between sulfate and carbon external modes\n M_sulfate = (epsilon/(1.+epsilon))*M_ext\n M_carbon = M_ext - M_sulfate\n\n # 4) Compute original (alpha = 0) mixed distribution parameters\n #mu_cubed = M_int*(3./(4.*np.pi))*(1./rho_mixed)*(1./N_int)*np.exp((-9./2.)*np.log(sigma_mixed)**2)\n #mu_mixed = mu_cubed**(1./3.) # cm\n mu_mixed = MakeAerosols.calc_mu(M_tot, N_tot, rho_mixed, sigma_mixed)\n\n # Compute N_mixed\n N_mixed = M_mixed/((4.*np.pi/3.)*rho_mixed*mu_mixed**3)*np.exp(-(9./2.)*np.log(sigma_mixed)**2)\n\n # 5) Compute number cocentration of external modes\n weighting_factor = (rho_carbon/rho_sulfate)*(diam_ratio**-3.)\n N_external = N_tot - N_mixed\n N_carbon = N_external/(1. + epsilon*weighting_factor)\n N_sulfate = N_external - N_carbon\n\n ## Finalize distributions\n # Mixed\n mixed = AerosolSpecies('mixed',\n Lognorm(mu=mu_mixed*1e4, sigma=sigma_mixed, N=N_mixed),\n kappa=kappa_mixed, bins=200)\n mixed.rho = rho_mixed\n\n ## Sulfate\n mu_sulfate = MakeAerosols.calc_mu(M_sulfate, N_sulfate, rho_sulfate, sigma_sulfate)\n sulfate = AerosolSpecies('sulfate',\n Lognorm(mu=mu_sulfate*1e4, sigma=sigma_sulfate, N=N_sulfate),\n kappa=kappa_sulfate, bins=200)\n sulfate.rho = rho_sulfate\n\n ## Carbon\n mu_carbon = MakeAerosols.calc_mu(M_carbon, N_carbon, rho_carbon, sigma_carbon)\n carbon = AerosolSpecies('carbon',\n Lognorm(mu=mu_carbon*1e4, sigma=sigma_carbon, N=N_carbon),\n kappa=kappa_carbon, bins=200)\n carbon.rho = rho_carbon\n\n return mixed, sulfate, carbon"
] | [
"0.68045783",
"0.6167079",
"0.61281043",
"0.61196005",
"0.60409695",
"0.5897231",
"0.5874367",
"0.5874367",
"0.58720165",
"0.58610725",
"0.5851441",
"0.5790839",
"0.5739831",
"0.57007676",
"0.56505895",
"0.5636512",
"0.56320953",
"0.56164134",
"0.5601524",
"0.5584634",
"0.5565742",
"0.5525422",
"0.5481202",
"0.5479883",
"0.5472012",
"0.5461158",
"0.5458487",
"0.5456151",
"0.5442749",
"0.5435687"
] | 0.6359313 | 1 |
returns a simple estimate of the mutual information. `approx_prob` determines whether the probabilities of encountering substrates in mixtures are calculated exactly or only approximative, which should work for small probabilities. | def mutual_information_estimate(self, approx_prob=False):
# this might be not the right approach
q_n = self.receptor_activity_estimate(approx_prob=approx_prob)
q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob)
# calculate the approximate mutual information
return self._estimate_MI_from_q_values(q_n, q_nm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutual_information_monte_carlo_extrapolate(self, ret_prob_activity=False):\n if self.is_correlated_mixture:\n raise NotImplementedError('Not implemented for correlated mixtures')\n \n base = 2 ** np.arange(0, self.Nr)\n prob_s = self.substrate_probabilities\n\n max_steps = self._sample_steps\n steps, MIs = [], []\n\n # sample mixtures according to the probabilities of finding\n # substrates\n count_a = np.zeros(2**self.Nr)\n step_check = 10000\n for step in range(max_steps):\n # choose a mixture vector according to substrate probabilities\n m = (np.random.random(self.Ns) < prob_s)\n \n # get the associated output ...\n a = np.dot(self.sens_mat, m).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n # increment counter for this output\n count_a[a] += 1\n\n if step == step_check - 1:\n # do an extrapolation step\n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n # save the data \n steps.append(step)\n MIs.append(MI)\n \n # do the extrapolation\n if len(steps) >= 3:\n a2, a1, a0 = MIs[-3:]\n MI_ext = (a0*a2 - a1*a1)/(a0 - 2*a1 + a2)\n# MI_ext = self._get_extrapolated_mutual_information(steps, MIs)\n print((step, MIs[-1], MI_ext))\n \n step_check += 10000\n \n else:\n # count_a contains the number of times output pattern a was observed.\n # We can thus construct P_a(a) from count_a. \n \n # calculate the mutual information from the result pattern\n prob_a = count_a / step\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n\n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI",
"def mean_log_prob_approx(self, y=None, name='mean_log_prob_approx'):\n with self._name_and_control_scope(name):\n return approx_expected_log_prob_sigmoid(\n self.loc, self.scale, y,\n MONAHAN_MIX_PROB[self.num_probit_terms_approx],\n MONAHAN_INVERSE_SCALE[self.num_probit_terms_approx])",
"def estimate_moment(self):\n # Due to the optimization, we may store more than k elements in the\n # sample. The following removes excessive elements if needed.\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n\n # The inclusion threshold (highest seed of element in the sample) is\n # used to compute the inclusion probabilities for the other elements\n # in the sample.\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n\n # Computes and sums the inverse-probability estimator for all keys\n # in the sample.\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n # Warns us if we may run into float precision issues.\n # TODO(ofirg): change this warning to something more robust than\n # a print (and maybe use other approximations of exp() that are\n # better for this case).\n if (count**self.sample_p) * threshold < 2.0**(-24):\n print(\"(count**self.sample_p) * threshold < 2^{-24}\")\n print((count**self.sample_p) * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * (count**self.sample_p) * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n\n return sum_estimator",
"def estimate_moment(self):\n # Due to the optimization, we may store more than k elements in the\n # sample. The following removes excessive elements if needed.\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n\n # The inclusion threshold (highest seed of element in the sample) is\n # used to compute the inclusion probabilities for the other elements\n # in the sample.\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n\n # Computes and sums the inverse-probability estimator for all keys\n # in the sample.\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n weight = self.func_of_freq(self.advice_obj.predict(key))\n # Warns us if we may run into float precision issues.\n # TODO(ofirg): change this warning to something more robust than\n # a print (and maybe use other approximations of exp() that are\n # better for this case).\n if weight * threshold < 2.0**(-24):\n print(\"weight * threshold < 2^{-24}\")\n print(weight * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * weight * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n\n return sum_estimator",
"def test_iimi1():\n iimi = interactive_intrinsic_mutual_information(n_mod_m(3, 2), rvs=[[0], [1]], crvs=[2], rounds=1)\n assert iimi == pytest.approx(0.0)",
"def mutual_information_brute_force(self, ret_prob_activity=False):\n base = 2 ** np.arange(0, self.Nr)\n\n # prob_a contains the probability of finding activity a as an output.\n prob_a = np.zeros(2**self.Nr)\n for c, prob_c in self._iterate_mixtures():\n # get the associated output ...\n a = np.dot(self.sens_mat, c).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n\n prob_a[a] += prob_c\n \n # normalize the output to make it a probability distribution\n prob_a /= prob_a.sum()\n \n # calculate the mutual information\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI",
"def multivariate_gauss_prob(observed, mean, covariance):\n\n return None",
"def calculate_mu(partdict):\n m = 1.67262e-27 * partdict['m']\n v_perp = np.sqrt(partdict['vR']**2+partdict['vz']**2)\n B = np.sqrt(partdict['Bphi']**2+partdict['BR']**2+partdict['Bz']**2)\n mu = m*v_perp**2/(2.*B)\n return mu",
"def calc_mutual_information(probability_mat):\n\n marginals = sp.outer(\n sp.sum(probability_mat, axis=1), sp.sum(probability_mat, axis=0))\n p = probability_mat[probability_mat != 0.0]\n m = marginals[probability_mat != 0.0]\n return sp.sum(p * sp.log(p / m))",
"def expected_improvement(ymin, mu, sig):\n p_imp = norm.cdf((ymin-mu)/sig)\n p_ymin = norm.pdf((ymin-mu)/sig)\n ei = (ymin-mu)*p_imp + sig*p_ymin\n return ei",
"def variational_expectation_(self, y, m, v, cubature=None):\n return variational_expectation_cubature(self, y, m, v, cubature)",
"def test_prob_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.probs(wires=[0, 1])\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (5, 2)\r\n\r\n expected = (\r\n np.array(\r\n [\r\n [-2 * np.sin(x), 0],\r\n [\r\n -(np.cos(y / 2) ** 2 * np.sin(x)),\r\n -(np.cos(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n -(np.sin(x) * np.sin(y / 2) ** 2),\r\n (np.cos(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n (np.sin(x) * np.sin(y / 2) ** 2),\r\n (np.sin(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n (np.cos(y / 2) ** 2 * np.sin(x)),\r\n -(np.sin(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n ]\r\n )\r\n / 2\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def sample_propensities(mutated_params: torch.Tensor) -> torch.Tensor:\n return torch.softmax(mutated_params, -1)",
"def nmi(y_pred, y_true, average_method='geometric'):\n return metrics.normalized_mutual_info_score(y_true, y_pred, average_method=average_method)",
"def mutual_information(pi, pj, pij):\n p_i = 1 - pi\n p_j = 1 - pj\n p_ij = pj - pij\n pi_j = pi - pij\n p_i_j = 1 - pi - pj + pij\n \n log_pi = log(pi)\n log_pj = log(pj)\n log_p_i = log(p_i)\n log_p_j = log(p_j)\n \n mi = pij * (log(pij) - log_pi - log_pj) + \\\n pi_j * (log(pi_j) - log_pi - log_p_j) + \\\n p_i_j * (log(p_i_j) - log_p_i - log_p_j)\n if p_ij != 0: # For language groups and features, this is the only probability that could be zero, and lim_x->0[x*log(x)] = 0 \n mi += p_ij * (log(p_ij) - log_p_i - log_pj)\n \n return mi",
"def estimate(self, U, mu=None):\n raise NotImplementedError",
"def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):\n if co_freq > 0:\n if mitype is not None:\n if mitype == \"expected\":\n mi = math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)\n ) * (co_freq / total_instances)\n elif mitype == \"normalized\":\n alpha = - math.log2(co_freq / total_instances)\n mi = (\n (math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)) / alpha)\n if alpha != 0 else 0\n )\n elif mitype == \"pmi2\":\n mi = math.log2((co_freq ** 2) / (s_freq * t_freq))\n elif mitype == \"pmi3\":\n mi = math.log2(\n (co_freq ** 3) / (s_freq * t_freq * total_instances))\n else:\n raise ValueError(\n \"Provided Mutual information score type (mitype) is not \"\n \"supported. Provide one value from the following list \"\n \"['expected', 'normalized','pmi2', 'pmi3'] \")\n else:\n mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))\n else:\n mi = 0\n return mi if mi > 0 else 0",
"def test_approximation_estimate(self):\n real_sigmas = np.linalg.svd(self._A, full_matrices=False, compute_uv=False)\n estimate_error = np.linalg.norm(self._A - self._approximation)\n expected_bound = 10 * np.sqrt(self._n * (self._k + self._increment) * self._m * self._k)\n expected_bound *= real_sigmas[self._k]\n self.assertLessEqual(estimate_error, expected_bound)",
"def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))",
"def get_estimate(self, mag=False):\n if mag is False:\n return super(PhotoSamplers,self).get_estimate()\n \n return self._magsamples.get_estimate()",
"def adaptive_parzen_estimator(\n mus: numpy.ndarray | Sequence,\n low: float,\n high: float,\n prior_weight: float = 1.0,\n equal_weight: bool = False,\n flat_num: int = 25,\n) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:\n mus = numpy.asarray(mus)\n\n prior_mu = (low + high) * 0.5\n prior_sigma = (high - low) * 1.0\n\n size = len(mus)\n if size > 1:\n order = numpy.argsort(mus)\n sorted_mus = mus[order]\n prior_mu_pos = numpy.searchsorted(sorted_mus, prior_mu)\n\n weights = ramp_up_weights(size, flat_num, equal_weight)\n\n mixture_mus = numpy.zeros(size + 1)\n mixture_mus[:prior_mu_pos] = sorted_mus[:prior_mu_pos]\n mixture_mus[prior_mu_pos] = prior_mu\n mixture_mus[prior_mu_pos + 1 :] = sorted_mus[prior_mu_pos:]\n\n mixture_weights = numpy.ones(size + 1)\n mixture_weights[:prior_mu_pos] = weights[:prior_mu_pos]\n mixture_weights[prior_mu_pos] = prior_weight\n mixture_weights[prior_mu_pos + 1 :] = weights[prior_mu_pos:]\n\n sigmas = numpy.ones(size + 1)\n sigmas[0] = mixture_mus[1] - mixture_mus[0]\n sigmas[-1] = mixture_mus[-1] - mixture_mus[-2]\n sigmas[1:-1] = numpy.maximum(\n (mixture_mus[1:-1] - mixture_mus[0:-2]),\n (mixture_mus[2:] - mixture_mus[1:-1]),\n )\n sigmas = numpy.clip(\n sigmas, prior_sigma / max(10, numpy.sqrt(size)), prior_sigma\n )\n\n else:\n if prior_mu < mus[0]:\n\n mixture_mus = numpy.array([prior_mu, mus[0]])\n sigmas = numpy.array([prior_sigma, prior_sigma * 0.5])\n mixture_weights = numpy.array([prior_weight, 1.0])\n else:\n mixture_mus = numpy.array([mus[0], prior_mu])\n sigmas = numpy.array([prior_sigma * 0.5, prior_sigma])\n mixture_weights = numpy.array([1.0, prior_weight])\n\n weights = mixture_weights / mixture_weights.sum()\n\n return mixture_mus, sigmas, weights",
"def has_approx_support(m, m_hat, prob=0.01):\n m_nz = np.flatnonzero(np.triu(m, 1))\n m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))\n\n upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))\n not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)\n\n intersection = np.in1d(m_hat_nz, m_nz) # true positives\n not_intersection = np.in1d(m_hat_nz, not_m_nz) # false positives\n\n true_positive_rate = 0.0\n if len(m_nz):\n true_positive_rate = 1. * np.sum(intersection) / len(m_nz)\n true_negative_rate = 1. - true_positive_rate\n\n false_positive_rate = 0.0\n if len(not_m_nz):\n false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)\n\n return int(np.less_equal(true_negative_rate + false_positive_rate, prob))",
"def probSimultanea(self, a: str, b: str) -> float:\n \n return self.mat[a][b] * self.probIn[a]",
"def mch_approximation( samples, dlamda ):\n dE = calc_e(samples,dlamda)\n dE -= dE.min()\n ZFraction = 1. / np.mean(np.exp(-dE))\n predsisj = pair_corr( samples, weights=np.exp(-dE)/len(dE) )[1] * ZFraction \n assert not (np.any(predsisj<-1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj",
"def ProbCorrect(efficacy, difficulty, a=1):\n return 1 / (1 + math.exp(-a * (efficacy - difficulty)))",
"def _compute_population_estimate(cls, certificates):\n assert isinstance(certificates, list)\n assert len(certificates) >= cls.certificate_sample_length\n\n sum_means = 0\n sum_waits = 0\n for certificate in certificates[:cls.certificate_sample_length]:\n sum_waits += certificate.duration - cls.minimum_wait_time\n sum_means += certificate.local_mean\n\n avg_wait = sum_waits / len(certificates)\n avg_mean = sum_means / len(certificates)\n\n return avg_mean / avg_wait",
"def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total",
"def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n axis=0)\n return mutual_info",
"def get_random_approx_discrete(m,n):\n\n return np.random.choice([-0.99,0.99], size=(m,n))",
"def variational_expectation(self, y, m, v, cubature=None):\n\n # align shapes and compute mask\n y = y.reshape(-1, 1, 1)\n m = m.reshape(-1, 1, 1)\n v = np.diag(v).reshape(-1, 1, 1)\n mask = np.isnan(y)\n y = np.where(mask, m, y)\n\n # compute variational expectations and their derivatives\n var_exp, dE_dm, d2E_dm2 = vmap(self.variational_expectation_, (0, 0, 0, None))(y, m, v, cubature)\n\n # apply mask\n var_exp = np.where(np.squeeze(mask), 0., np.squeeze(var_exp))\n dE_dm = np.where(mask, np.nan, dE_dm)\n d2E_dm2 = np.where(mask, np.nan, d2E_dm2)\n\n return var_exp, np.squeeze(dE_dm, axis=2), np.diag(np.squeeze(d2E_dm2, axis=(1, 2)))"
] | [
"0.5583002",
"0.5569024",
"0.55195194",
"0.54732513",
"0.5439062",
"0.5311237",
"0.5172197",
"0.5165276",
"0.51317364",
"0.50231713",
"0.48275942",
"0.4811054",
"0.48098183",
"0.48028716",
"0.4798505",
"0.47701705",
"0.47494784",
"0.47418514",
"0.4734358",
"0.47275934",
"0.47266895",
"0.4720692",
"0.46792528",
"0.46554664",
"0.46547255",
"0.46459135",
"0.46445152",
"0.46389246",
"0.4625254",
"0.46191573"
] | 0.75532436 | 0 |
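The estimate described in the record above combines per-receptor activities q_n with pairwise crosstalk q_nm. The minimal sketch below shows one way such an estimate can be assembled; the binary-entropy sum and the simple crosstalk penalty are illustrative assumptions and not the library's actual `_estimate_MI_from_q_values` formula.

import numpy as np

def estimate_mi_from_q_values(q_n, q_nm):
    # Illustrative only: treat each receptor as an independent binary
    # channel with activity probability q_n and sum their entropies,
    # then subtract a heuristic penalty for pairwise crosstalk q_nm.
    q_n = np.clip(np.asarray(q_n, dtype=float), 1e-12, 1 - 1e-12)
    H_n = -(q_n * np.log2(q_n) + (1 - q_n) * np.log2(1 - q_n))
    MI = H_n.sum()
    q_nm = np.asarray(q_nm, dtype=float)
    MI -= 0.5 * np.sum(np.triu(np.abs(q_nm), k=1))  # assumed penalty form
    return MI

# In the record above, q_n and q_nm would come from
# receptor_activity_estimate() and receptor_crosstalk_estimate().
print(estimate_mi_from_q_values([0.4, 0.5, 0.3], np.zeros((3, 3))))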
calculates the usefulness of each receptor, measured by how much information it adds to the total mutual information. `method` determines how the mutual information is calculated. `multiprocessing` determines whether multiprocessing is used for determining the mutual information of all subsystems. | def receptor_score(self, method='auto', multiprocessing=False):
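# first job: mutual information of the full receptor array (used as the baseline)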
init_arguments = self.init_arguments
init_arguments['parameters']['initialize_state']['sensitivity'] = 'exact'
init_arguments['parameters']['sensitivity_matrix'] = self.sens_mat
joblist = [(copy.deepcopy(self.init_arguments), 'mutual_information',
{'method': method})]
# add one job for each receptor
for n in range(self.Nr):
init_arguments = self.init_arguments
init_arguments['num_receptors'] -= 1
# modify the current state and add it to the job list
sens_mat = np.delete(self.sens_mat, n, axis=0)
init_arguments['parameters']['sensitivity_matrix'] = sens_mat
joblist.append((copy.deepcopy(init_arguments), 'mutual_information',
{'method': method}))
if multiprocessing:
# calculate all results in parallel
pool = mp.Pool(processes=self.get_number_of_cores())
results = pool.map(_run_job, joblist)
else:
# compute the results sequentially in this process
results = [_run_job(job) for job in joblist]
# find the scores of all receptors
scores = results[0] - np.array(results[1:])
return scores | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutual_information(self, excitation_method='auto', **kwargs):\n if excitation_method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n excitation_method = 'brute_force'\n else:\n excitation_method = 'monte_carlo'\n \n if excitation_method == 'brute_force' or excitation_method == 'brute-force':\n return self.mutual_information_brute_force(**kwargs)\n elif excitation_method == 'monte_carlo' or excitation_method == 'monte-carlo':\n return self.mutual_information_monte_carlo(**kwargs)\n elif excitation_method == 'estimate':\n return self.mutual_information_estimate(**kwargs)\n else:\n raise ValueError('Unknown excitation_method `%s`.' % excitation_method)",
"def evaluate_clustering_methods(methods):\r\n results = {}\r\n for m in methods:\r\n res = results[m['name']] = {}\r\n prec = 3\r\n res['Adjusted Rand Score'] = round(sklearn.metrics.adjusted_rand_score(m['target'], m['clustering']),prec)\r\n res['Normalized Mutual Information'] = round(sklearn.metrics.normalized_mutual_info_score(m['target'], m['clustering']),prec)\r\n res['Adjusted Mutual Information'] = round(sklearn.metrics.adjusted_mutual_info_score(m['target'], m['clustering']),prec)\r\n return np.transpose(results)",
"def apply_method_to_multiple_sinograms(data, method, para, ncore=None,\n prefer=\"threads\"):\n if ncore is None:\n ncore = np.clip(mp.cpu_count() - 1, 1, None)\n else:\n ncore = np.clip(ncore, 1, None)\n if not isinstance(para, list):\n para = tuple(list([para]))\n else:\n para = tuple(para)\n (depth, height, width) = data.shape\n if method in dir(remo):\n method_used = getattr(remo, method)\n elif method in dir(filt):\n method_used = getattr(filt, method)\n elif method in dir(rec):\n method_used = getattr(rec, method)\n else:\n raise ValueError(\"Can't find the method: '{}' in the namespace\"\n \"\".format(method))\n data_out = Parallel(n_jobs=ncore, prefer=prefer)(\n delayed(method_used)(data[:, i, :], *para) for i in range(height))\n data_out = np.moveaxis(np.asarray(data_out), 0, 1)\n return data_out",
"def receptor_activity(self, method='auto', ret_correlations=False, **kwargs):\n if method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute_force'\n else:\n method = 'monte_carlo'\n \n if method == 'brute_force' or method == 'brute-force':\n return self.receptor_activity_brute_force(ret_correlations, **kwargs)\n elif method == 'monte_carlo' or method == 'monte-carlo':\n return self.receptor_activity_monte_carlo(ret_correlations, **kwargs)\n elif method == 'estimate':\n return self.receptor_activity_estimate(ret_correlations, **kwargs)\n else:\n raise ValueError('Unknown method `%s`.' % method)",
"def mixture_statistics(self, method='auto'):\n\n if method == 'auto':\n fixed_mixture_size = self.parameters['fixed_mixture_size']\n \n if self.is_correlated_mixture or fixed_mixture_size is not None:\n # mixture has correlations => we do Metropolis sampling\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute-force'\n else:\n method = 'monte-carlo'\n else:\n # the estimate is exact for mixtures without correlations\n method = 'estimate'\n\n if method == 'brute-force' or method == 'brute_force':\n return self.mixture_statistics_brute_force()\n elif method == 'monte-carlo' or method == 'monte_carlo':\n return self.mixture_statistics_monte_carlo()\n elif method == 'estimate':\n return self.mixture_statistics_estimate()\n else:\n raise ValueError('Unknown method `%s` for mixture statistics'\n % method)",
"def get_nmi_mod(method, print_summary=True):\n\n methods = ['snf', 'rbf']\n if method not in methods:\n raise ValueError(f'Provided `method` {method} invalid.')\n\n scales = [f'scale{f}' for f in ['033', '060', '125', '250', '500']]\n keys = [\n 'cortical_thickness',\n 'subcortical_volume',\n 'dat_scans',\n 'csf_assays',\n 'behavioral_measures',\n 'all'\n ]\n\n # iterate over all CT dimensionalities and generate NMI / mod estimates\n nmi, mod = [], []\n for scale in scales:\n # get data for provided scale\n fname = op.join(directories.snf, f'{scale}_deterministic.h5')\n hdf = structures.Frog(fname)\n pd_data = [hdf.load(f'/processed/pd_{key}') for key in keys[:-1]]\n\n # generate affinity matrix and cluster labels\n # if we're using SNF we can just pre-load the matrices + labels\n if method == 'snf':\n path = '/snf/processed/{}/sqeuclidean/gridsearch/{}'\n affinities = [\n hdf.load(path.format(key, 'fusion_avg')) for key in keys\n ]\n labels = [\n hdf.load(path.format(key, 'consensus')) for key in keys\n ]\n # otherwise, we have to generate the affinities using cosine similarity\n # and then use spectral clustering to generate the labels\n elif method == 'rbf':\n affinities = [\n metrics.pairwise.cosine_similarity(\n sstats.zscore(f)\n ) + 1 for f in pd_data\n ] + [\n metrics.pairwise.cosine_similarity(\n sstats.zscore(np.column_stack(pd_data))\n ) + 1\n ]\n labels = [\n spectral_clustering(aff, n_clusters=3, random_state=1234)\n for aff in affinities\n ]\n\n # get NMI + modularity estimates\n nmi.append(snf.metrics.nmi(labels)[-1, :-1])\n mod.append(list(gen_mod(affinities[:-1], labels[-1])))\n\n nmi, mod = np.asarray(nmi), np.asarray(mod)\n\n if print_summary:\n _print_summary(nmi, 'NMI')\n print()\n _print_summary(mod, 'modularity')\n print()\n\n return nmi, mod",
"def test_method_mode():\r\n\r\n M = Module()\r\n M.x = T.dvector()\r\n M.f = Method([M.x], M.x*4, mode='FAST_COMPILE')\r\n M.g = Method([M.x], M.x*4)\r\n M.h = Method([M.x], M.x*4)\r\n m = M.make(mode='FAST_RUN')\r\n\r\n assert m.f.maker.mode != m.g.maker.mode\r\n assert m.h.maker.mode == m.g.maker.mode\r\n assert numpy.all(m.f([1,2]) == m.g([1,2]))",
"def run(method = 'ParetoMTL', num = 10):\r\n \r\n pf = create_pf()\r\n f_value_list = []\r\n \r\n weights = circle_points([1], [num])[0]\r\n \r\n\r\n \r\n for i in range(num):\r\n \r\n print(i)\r\n \r\n if method == 'ParetoMTL':\r\n x, f = pareto_mtl_search(ref_vecs = weights,i = i)\r\n if method == 'MOOMTL':\r\n x, f = moo_mtl_search()\r\n if method == 'Linear':\r\n x, f = linear_scalarization_search()\r\n \r\n f_value_list.append(f)\r\n \r\n \r\n f_value = np.array(f_value_list)\r\n plt.plot(pf[:,0],pf[:,1])\r\n plt.scatter(f_value[:,0], f_value[:,1], c = 'r', s = 80)",
"def intersection_score(method1,method2):\n\tpass",
"def _pool(array: np.ndarray, method: str) -> float:\n if method == \"fro\":\n return np.linalg.norm(array)\n if method == \"mean\":\n return np.mean(array)\n if method == \"median\":\n return np.median(array)\n return np.linalg.norm(array)",
"def calculate_parameters_magnitudes(self, method=None):\n assert method is not None, \"No method was chosen to calculate the parameters' magnitudes.\"\n\n # Get the parameters for every key\n param_keys = {}\n parameters_magnitudes_dict = {}\n parameters_magnitudes = []\n\n for parameter in self.optimizable_parameters:\n if parameter.param_key in param_keys:\n param_keys[parameter.param_key].append(parameter.value)\n else:\n param_keys[parameter.param_key] = []\n param_keys[parameter.param_key].append(parameter.value)\n\n if method.lower() == \"geometric\":\n # Compute the geometric mean\n for param_key in param_keys:\n geometric_mean = 1.0\n n = 0.0\n for value in param_keys[param_key]:\n if abs(value) > 1e-8:\n # If value is not zero\n geometric_mean = geometric_mean * np.abs(value)\n n = n + 1\n if abs(geometric_mean) > 1e-8 and n > 0:\n geometric_mean = geometric_mean ** (1.0 / n)\n parameters_magnitudes_dict[param_key] = geometric_mean\n else:\n parameters_magnitudes_dict[param_key] = self.parameters_magnitudes[param_key]\n\n elif method.lower() == \"arithmetic\":\n # Arithmetic mean\n for param_key in param_keys:\n arithmetic_mean = 0.0\n n = 0.0\n for value in param_keys[param_key]:\n arithmetic_mean = arithmetic_mean + np.abs(value)\n n = n + 1\n\n if abs(arithmetic_mean) > 1e-8 and n > 0:\n arithmetic_mean = arithmetic_mean / n\n parameters_magnitudes_dict[param_key] = arithmetic_mean\n else:\n parameters_magnitudes_dict[param_key] = self.parameters_magnitudes[param_key]\n\n elif method.lower() == \"default\":\n for param_key in param_keys:\n parameters_magnitudes_dict[param_key] = self.parameters_magnitudes[param_key]\n else:\n raise NotImplementedError(\n \"\\t * Mean type {} not available to guess the prior widths.\".format(method))\n\n for parameter in self.optimizable_parameters:\n parameters_magnitudes.append(parameters_magnitudes_dict[parameter.param_key])\n\n # Convert to numpy array\n prior_widths = np.asarray(parameters_magnitudes)\n\n return parameters_magnitudes_dict, prior_widths",
"def benchmark(self):\n nsites = []\n for m in self.methods:\n for name, structure in self.test_structures.items():\n cns = []\n if self.unique_sites:\n es = SpacegroupAnalyzer(structure).get_symmetrized_structure().equivalent_sites\n sites = [structure.index(x[0]) for x in es]\n else:\n sites = range(len(structure))\n\n for key, val in self.hi.items():\n if name == key:\n for j in sites:\n if isinstance(m, NearNeighbors):\n tmpcn = m.get_cn_dict(structure, j, self.use_weights)\n else:\n tmpcn = m.compute(structure, j)\n if tmpcn == \"null\":\n continue\n if self.nround:\n self._roundcns(tmpcn, self.nround)\n cns.append((structure[j].species_string, tmpcn))\n if self.cation_anion:\n for mat, cat in self.cations.items():\n if (name == mat) and cat:\n cns = self._popel(cns, cat)\n elif self.anion_cation:\n for mat, an in self.anions.items():\n if name == mat:\n cns = self._popel(cns, an)\n m._cns[name] = cns\n nsites.append(len(cns))\n self.nsites = max(nsites)",
"def score(self, method: str = \"\"):\n if not (method):\n if isinstance(self.steps[-1][1], Regressor):\n method = \"r2\"\n else:\n method = \"accuracy\"\n return self.steps[-1][1].score(method)",
"def _get_met_classes(self, spec_like, method='mix'):\n # assess if spectrum or molfam\n is_spectrum = isinstance(spec_like, Spectrum)\n\n # gather classes for spectra, using right method\n # choose the main method here by including it as 'main' in the method parameter\n use_canopus = ('main' in method or 'canopus' in method\n or 'mix' in method) and 'canopus' in self.method_options\n use_mne = ('molnetenhancer' in method or 'mix' in method) and \\\n 'molnetenhancer' in self.method_options\n spec_like_classes, spec_like_classes_names, \\\n spec_like_classes_names_inds = (None, None, None)\n # the order in which the classes are read, determines the priority (now: first canopus, then mne)\n if use_canopus and not spec_like_classes:\n if is_spectrum:\n # list of list of tuples/None - todo: add to spectrum object?\n # take only 'best' (first) classification per ontology level\n all_classes = self.npl.chem_classes.canopus. \\\n spectra_classes.get(spec_like.spectrum_id)\n if all_classes:\n spec_like_classes = [\n cls_per_lvl for lvl in all_classes\n for i, cls_per_lvl in enumerate(lvl) if i == 0\n ]\n spec_like_classes_names_inds = self.npl.chem_classes.canopus. \\\n spectra_classes_names_inds\n else: # molfam\n fam_id = spec_like.family_id\n if fam_id.startswith(\"singleton-\"): # account for singleton families\n fam_id += f'_{spec_like.spectra[0].spectrum_id}'\n all_classes = self.npl.chem_classes.canopus.molfam_classes.get(\n fam_id)\n if all_classes:\n spec_like_classes = [\n cls_per_lvl for lvl in all_classes\n for i, cls_per_lvl in enumerate(lvl) if i == 0\n ]\n spec_like_classes_names_inds = self.npl.chem_classes.canopus. \\\n molfam_classes_names_inds\n if use_mne and not spec_like_classes:\n # if mne or when main/canopus does not get classes\n if is_spectrum:\n spec_like_classes = self.npl.chem_classes.molnetenhancer. \\\n spectra_classes(spec_like.spectrum_id)\n else: # molfam\n fam_id = spec_like.family_id\n if fam_id.startswith(\"singleton\"): # account for singleton families\n fam_id += f'_{spec_like.spectra[0].spectrum_id}'\n spec_like_classes = self.npl.chem_classes.molnetenhancer. \\\n molfam_classes.get(fam_id)\n # classes are same for molfam and spectrum so names are irrespective of is_spectrum\n spec_like_classes_names_inds = self.npl.chem_classes.\\\n molnetenhancer.spectra_classes_names_inds\n return spec_like_classes, spec_like_classes_names_inds",
"def mutual_information_estimate(self, approx_prob=False):\n \n # this might be not the right approach\n q_n = self.receptor_activity_estimate(approx_prob=approx_prob)\n q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob)\n \n # calculate the approximate mutual information\n return self._estimate_MI_from_q_values(q_n, q_nm)",
"def test_valid_method(method: str) -> None:\n mapie = MapieClassifier(method=method)\n mapie.fit(X_toy, y_toy)\n check_is_fitted(\n mapie,\n [\n \"single_estimator_\",\n \"n_features_in_\",\n \"n_samples_val_\",\n \"scores_\"\n ]\n )",
"def test_valid_method(method: str) -> None:\n mapie = MapieRegressor(method=method)\n mapie.fit(X_toy, y_toy)\n check_is_fitted(\n mapie,\n [\n \"n_features_in_\",\n \"single_estimator_\",\n \"estimators_\",\n \"k_\",\n \"residuals_\"\n ]\n )",
"def simulate(self, observation_matrix, method=\"smoother\"):\n if method == \"filter\":\n means = self.filtered_state_means\n covariances = self.filtered_state_covariances\n else:\n means = self.smoothed_state_means\n covariances = self.smoothed_state_covariances\n simulated_means = []\n simulated_variances = []\n for t, _ in enumerate(means):\n simulated_means.append(np.dot(observation_matrix, means[t]))\n var = np.diag(np.dot(observation_matrix,\n np.dot(covariances[t], observation_matrix.T)))\n # prevent variances to become less than 0\n simulated_variances.append(np.maximum(var, 0))\n return (simulated_means, simulated_variances)",
"def _process_method(self, method):\n return method",
"def mantra_simulation(lineup, module, mode='ST'):\n\n def try_optimal_solution(module, n_of_players_with_vote):\n\n \"\"\"\n If an optimal solution exists this function assign it to the\n variable \"final\" which is defined inside MANTRA_simulation but not\n globally. That's why we refers to it later by using \"nonlocal\".\n \"\"\"\n\n nonlocal all_lineups\n nonlocal final_field\n nonlocal malus\n\n # For each candidate\n for candidate in all_lineups:\n\n # We create the list where each player in the combination has only\n # 1 role\n candidates_single_role = all_lineups_single_role(candidate)\n\n # And test each of these combinations\n for new_cand in candidates_single_role:\n\n # If we find a solution we store the result\n if find_solution(new_cand, module, n_of_players_with_vote):\n final_field = new_cand\n break\n\n # And stop the iteration over the other condidates\n if final_field:\n malus = 0\n break\n\n def try_efficient_solution(module, n_of_players_with_vote):\n\n \"\"\"\n If an optimal solution is not found we look for an efficient one.\n In case an efficient solution exists we store the lineup and the\n module.\n \"\"\"\n\n modules_for_efficient_solution = copy.copy(all_modules)\n modules_for_efficient_solution.remove(module)\n\n nonlocal all_lineups\n nonlocal final_field\n nonlocal efficient_module\n nonlocal malus\n\n # Iterate over all the candidates\n for candidate in all_lineups:\n candidates_single_role = all_lineups_single_role(candidate)\n for new_cand in candidates_single_role:\n\n # And over all the modules\n for a_module in modules_for_efficient_solution:\n\n # If we find a solution we store the result\n if find_solution(new_cand, a_module,\n n_of_players_with_vote):\n final_field = new_cand\n efficient_module = a_module\n break\n\n # Stop the iteration over the other permutations\n if final_field:\n break\n\n # Stop the iteration over the other candidates\n if final_field:\n malus = 0\n break\n\n def try_adapted_solution(n_of_players_with_vote):\n\n \"\"\"\n If an efficient solution is not found we look for an adapted one.\n In case it exists we store the lineup, the module, the number of\n malus assigned and the other modules that are equally valid.\n \"\"\"\n\n modules_for_adapted_solution = copy.copy(all_modules)\n\n nonlocal all_lineups\n nonlocal final_field\n nonlocal adapted_module\n nonlocal malus\n\n # As for the efficient case we iterate over:\n\n # 1. All the candidates\n for candidate in all_lineups:\n candidates_single_role = all_lineups_single_role(candidate)\n\n # 2. Each candidate transformed in single role\n for new_cand in candidates_single_role:\n\n # 3. 
All the modules\n for a_module in modules_for_adapted_solution:\n\n n_malus = find_adapted_solution(\n new_cand, a_module, n_of_players_with_vote)\n\n # If a solution for this candidate with this module exists\n # AND n_malus is decreased, we store the number of malus,\n # the module and the lineup\n if n_malus and n_malus < malus:\n malus = n_malus\n adapted_module = a_module\n final_field = new_cand\n\n if malus == 1:\n # In this case we break the loop because 1 is the\n # minimum number of malus possible so we are not\n # interested in iterating over the remaining\n # candidates\n break\n\n if malus == 1:\n break\n if malus == 1:\n break\n\n def look_for_solution(module, n_of_players_with_vote):\n\n \"\"\"\n It sequentially applies the three functions to look for the right\n solution.\n \"\"\"\n\n try_optimal_solution(module, n_of_players_with_vote)\n if not final_field:\n try_efficient_solution(module, n_of_players_with_vote)\n if not final_field:\n try_adapted_solution(n_of_players_with_vote)\n\n def solve_gkeeper():\n\n \"\"\"\n Goal keeper substitution has to be the first thing to solve, if\n needed. Here we modify field, bench and n_subst depending on whether\n the gkeepers has vote or not.\n \"\"\"\n\n nonlocal field\n nonlocal bench\n nonlocal n_subst\n\n # If the goal keeper n the field received a vote we delete all the\n # remaining goal keepers from the bench\n if find_gkeeper(field):\n bench = delete_gkeeper(bench)\n\n # If the gkeeper in the field has no vote but the there is at least one\n # gkeeper in the bench with vote we make the substitution and delete\n # all the remaining gkeepers from the lineup, if there is any. We\n # finally decrease the n_subst\n elif not find_gkeeper(field) and find_gkeeper(bench):\n gkeeper = find_gkeeper(bench)\n field.insert(0, gkeeper)\n bench = delete_gkeeper(bench)\n n_subst -= 1\n\n # If there is no gkeeper with vote neither in the field nor in the\n # bench than we just decrease the n_subst\n elif not find_gkeeper(field) and not find_gkeeper(bench):\n n_subst -= 1\n\n def calculation(a_number):\n\n \"\"\"\n This is the function that is recursively applied to find the correct\n lineup. The input 'a_number' is an integer which represents the\n number of players (gkeeper excluded) who will partecipate in the\n lineup calculation. In case the algorithm does not find any solution\n after the first iteration it repeats the process considering 1\n substitution and 1 player less.\n \"\"\"\n\n nonlocal field\n nonlocal bench\n nonlocal module\n nonlocal n_subst\n nonlocal all_lineups\n\n all_lineups = valid_lineups(field, bench, n_subst)\n look_for_solution(module, a_number)\n\n if not final_field:\n n_subst -= 1\n return calculation(a_number-1)\n\n clean_lineup = [(player[0], modify_player_name(player[1]), player[2])\n for player in lineup]\n\n # Select the players with vote and store the number of substitutions needed\n if mode == 'FG':\n field, bench = players_with_vote(clean_lineup, 'FG')\n else:\n field, bench = players_with_vote(clean_lineup)\n\n n_subst = 11 - len(field)\n\n # In case no substitutions are needed, no calculation is started. 
In this\n # case malus can only be due to errors coming from the fantaplayers when\n # creating the lineup\n if not n_subst:\n malus = 0\n ref_roles = schemes[module]\n field[0] = (field[0][0], field[0][1], 'Por')\n\n for x in range(10):\n roles_available = ref_roles[x].split('/')\n roles_player = field[x + 1][2]\n roles_shared = list(set(roles_available).intersection(\n roles_player))\n if roles_shared:\n field[x + 1] = (field[x + 1][0], field[x + 1][1],\n roles_shared[0])\n else:\n malus += 1\n for role in roles_player:\n temp_roles = malus_roles[role]\n temp_roles = [role.split('/') for role in temp_roles]\n temp_roles = [single_role for element in temp_roles for\n single_role in element]\n temp_roles = ['W' if role in ('W1', 'W2') else role for\n role in temp_roles]\n temp_roles = list(set(temp_roles))\n roles_shared = list(set(roles_available).intersection(\n temp_roles))\n field[x + 1] = (field[x + 1][0], field[x + 1][1],\n roles_shared[0])\n break\n\n return field, bench, malus\n\n # Initialize all the parameters. We chose 10 for malus just because it is\n # a number high enough and we look for the solution with the lower number\n # of malus\n final_field = [] # The final lineup\n efficient_module = 0 # Valid module in case of eff solution\n adapted_module = 0 # Valid module in case of adp solution\n malus = 10 # Number of malus assigned\n magic_number = 10 # N. of players considered in the lineup\n all_lineups = 0 # All candidates\n\n # We need all the modules to be able to iterate over them in case an\n # efficient or adapted solution is needed. We also remove the module chosen\n # by the fantaplayer and then insert it as first element. In this way, this\n # module will be the first to be checked\n all_modules = ['343', '3412', '3421', '352', '442', '433',\n '4312', '4321', '4231', '4411', '4222']\n all_modules.remove(module)\n all_modules.insert(0, module)\n\n # Set the right magic_number value if n_subst > 3\n if n_subst > 3:\n magic_number = 13 - n_subst\n n_subst = 3\n\n # Handle the goal keeper issue\n solve_gkeeper()\n if find_gkeeper(field):\n gkeeper = field[0]\n field.remove(gkeeper)\n else:\n gkeeper = 0\n\n calculation(magic_number)\n\n if gkeeper:\n gkeeper = (gkeeper[0], gkeeper[1], gkeeper[2][0])\n final_field.insert(0, gkeeper)\n\n # Create the bench\n field_names = [player[1] for player in final_field]\n final_bench = [player for player in clean_lineup[11:] if player[1] not in\n field_names]\n\n return final_field, final_bench, malus, efficient_module, adapted_module",
"def method_info_specialization(self, method: ProtoServiceMethod) -> None:",
"def run(self):\n # load_data\n layers = self.load_all_data() # list of tuples (file_name, feature_matrix)\n\n # check variable types\n if len(self.method) == 1:\n self.method = [self.method[0]] * len(layers)\n elif len(layers) != len(self.method):\n raise ValueError(\"Number of matrices extracted from input files and number of similarity methods \" +\n \"does not correspond\")\n\n # check missing value parameter\n if len(self.missing) == 1:\n self.logger.info(\"#Setting all 'missing' parameters to {}\".format(self.missing[0]))\n self.missing = [self.missing[0]] * len(layers)\n elif len(layers) != len(self.missing):\n raise ValueError(\"Number of matrices extracted from input files and number of given missing parameters \" +\n \"does not correspond\")\n\n # extract sample names\n all_samples = set()\n for layer_data in layers:\n all_samples = all_samples.union({name for name in layer_data[1].columns})\n self.logger.info(\"#Total number of unique samples: {}\".format(len(all_samples)))\n\n out_arrays = {}\n adj_matrices = []\n\n # create adjacency matrices\n for i in range(len(layers)):\n self.logger.info(\"#Layer: {}\".format(i))\n layer_data = layers[i][1]\n\n # add missing samples layer\n samples = {name for name in layer_data.columns}\n for name in all_samples - samples:\n layer_data[name] = np.nan\n\n # sort data frame by sample names\n layer_data.sort_index(axis=1, inplace=True)\n\n # extract feature matrices\n f = layer_data.values.T\n self.logger.info(\"Feature matrix: ({} samples x {} features)\".format(f.shape[0], f.shape[1]))\n\n # check if feature matrix values are correct\n ncat = check_categories(f)\n if ncat != [0, 1]:\n standardized = is_standardized(f, axis=0, atol=self.atol)\n if not standardized[0]:\n raise ValueError(\"Incorrect values in feature matrix. Mean of features in \" +\n \"({},{}) \".format(round(standardized[1][0], 3), round(standardized[1][1], 3)) +\n \"range. Standard deviation of features in \" +\n \"({}, {}) \".format(round(standardized[2][0], 3), round(standardized[2][1], 3)) +\n \"range. Please, supply either binary dataset \" +\n \"(0 or 1 feature values) or continuous values standardized feature-wise. \" +\n \"Alternatively for almost standardized continuous data, \" +\n \"increase '-atol' parameter value (currently {}).\".format(self.atol))\n else:\n self.logger.debug(\"Data is correctly standardized\")\n else:\n self.logger.debug(\"Found two unique categories in data: [0, 1]\")\n if self.method[i] != 'cosine':\n self.logger.info(\"Using '{}' similarity for [0, 1] data. 
\".format(self.method[i]) +\n \"Suggested better measure: cosine similarity.\")\n\n # create adjacency matrix\n a = feature_to_adjacency(f, missing=self.missing[i], method=self.method[i], n=self.k, alpha=self.alpha)\n self.logger.info('Adjacency matrix {} created [similarity method: {}]'.format(a.shape, self.method[i]))\n\n # plot adjacency matrix\n plot_path = self.plot_base + \"_\" + str(i) + \".png\" if self.plot else self.plot_base\n plot_heatmap_seaborn(a, title=\"Layer {} (source:{})\".format(i, layers[i][0]), file_path=plot_path)\n if self.plot:\n self.logger.info(\"Adjacency matrix plot saved to {}\".format(plot_path))\n\n # add matrices to output arrays\n out_arrays[str(i)] = a\n adj_matrices.append(a)\n out_arrays[\"f\" + str(i)] = f\n\n # check if there are samples not accessible in any layer\n missing_samples = []\n for a in adj_matrices:\n missing_samples += [i for i in range(a.shape[1]) if np.all(np.isnan(a[:, i]))]\n\n samples_to_drop = [sample for sample, val in Counter(missing_samples).items() if val == len(adj_matrices)]\n if samples_to_drop:\n # drop inaccessible samples\n self.logger.info(\"Found samples inaccessible in every layer of graph. \" +\n \"Try changing '-missing' parameter or inspect your data \")\n sample_names = np.array(sorted(list(all_samples)))[np.array(samples_to_drop)]\n self.logger.info(\"Dropped samples: {}\".format(list(sample_names)))\n updated_out_arrays = {}\n selector = np.array([x for x in range(len(all_samples)) if x not in samples_to_drop])\n for i in range(len(out_arrays.keys())):\n if str(i) not in out_arrays.keys():\n break\n updated_out_arrays[str(i)] = out_arrays[str(i)][selector[:, None], selector]\n updated_out_arrays[\"f\" + str(i)] = out_arrays[\"f\" + str(i)][selector, :]\n\n # create output file\n updated_out_arrays[\"samples\"] = np.array(sorted(list(all_samples)))[selector]\n save_arrays_to_npz(data=updated_out_arrays, file_path=self.outfile)\n\n else:\n # create output file\n out_arrays[\"samples\"] = np.array(sorted(list(all_samples)))\n save_arrays_to_npz(data=out_arrays, file_path=self.outfile)\n\n self.logger.info(\"#Output file {} created\".format(self.outfile))",
"def _detect_method(self) -> None:\n if isinstance(self.flm, np.ndarray):\n _logger.info(\"harmonic sum method selected\")\n self.method = \"harmonic_sum\"\n elif isinstance(self.f, np.ndarray) and not isinstance(self.mask, np.ndarray):\n _logger.info(\"integrating the whole sphere method selected\")\n self.method = \"integrate_sphere\"\n elif isinstance(self.f, np.ndarray):\n _logger.info(\"integrating a region on the sphere method selected\")\n self.method = \"integrate_region\"\n else:\n raise RuntimeError(\n \"need to pass one off harmonic coefficients, real pixels \"\n \"or real pixels with a mask\",\n )",
"def sim_meas(self, pauli):\n return [term for term in self.layer._procspec.model_terms if pauli.simultaneous(term)]",
"def time_analysis(self, method='MEAN'):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_coordinate_analysis(cube, \n self.time_coord, \n method))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('time_analysis')\n return self.cubelist",
"def test_coherency_regularized():\r\n\r\n for method in methods:\r\n f, c = tsa.coherency_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())",
"def optimise(\n cls,\n species: \"Species\",\n method: \"Method\",\n n_cores: Optional[int] = None,\n coords: Optional[OptCoordinates] = None,\n **kwargs,\n ) -> None:",
"def receptor_crosstalk(self, method='auto', ret_receptor_activity=False,\n **kwargs):\n if method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute_force'\n else:\n method = 'monte_carlo'\n \n if method == 'estimate':\n # estimate receptor crosstalk directly\n q_nm = self.receptor_crosstalk_estimate(**kwargs)\n if ret_receptor_activity:\n q_n = self.receptor_activity_estimate(**kwargs)\n \n else:\n # calculate receptor crosstalk from the observed probabilities\n r_n, r_nm = self.receptor_activity(method, ret_correlations=True,\n **kwargs)\n q_n = r_n\n q_nm = r_nm - np.outer(r_n, r_n)\n if kwargs.get('clip', False):\n np.clip(q_nm, 0, 1, q_nm)\n \n if ret_receptor_activity:\n return q_n, q_nm\n else:\n return q_nm",
"def test_member_method_inputs(self):\r\n\r\n # test that explicit Method inputs don't use shared storage\r\n M = Module()\r\n M.x = T.dscalar()\r\n M.y = T.dscalar()\r\n M.f = Method([M.x], M.x + M.y)\r\n M.g = Method([M.y], M.x - M.y)\r\n m = M.make()\r\n m.y = 77\r\n assert m.f(23) == 100\r\n assert m.x is None\r\n m.x = 1000\r\n assert m.g(23) == 977\r\n assert m.y == 77\r\n assert m.x == 1000",
"def run_methods(self):\n results = {}\n methods = self.converter.available_methods[:] # a copy !\n\n if self.include_dummy:\n methods += ['dummy']\n\n if self.to_include:\n methods = [x for x in methods if x in self.to_include]\n elif self.to_exclude:\n methods = [x for x in methods if x not in self.to_exclude]\n\n for method in methods:\n print(\"\\nEvaluating method %s\" % method)\n times = []\n pb = Progress(self.N)\n for i in range(self.N):\n with Timer(times):\n self.converter(method=method)\n pb.animate(i+1)\n results[method] = times\n self.results = results"
] | [
"0.6172455",
"0.5696483",
"0.55845743",
"0.5454974",
"0.5411215",
"0.5398892",
"0.53274125",
"0.5234939",
"0.5197039",
"0.5172683",
"0.51339173",
"0.50398976",
"0.4996997",
"0.4983228",
"0.49595007",
"0.49515915",
"0.49353585",
"0.49350932",
"0.49265435",
"0.49167734",
"0.48881906",
"0.488084",
"0.4868348",
"0.48617855",
"0.48394793",
"0.47974652",
"0.47874483",
"0.47783047",
"0.4767731",
"0.4764834"
] | 0.72649634 | 0 |
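The record above scores each receptor by a leave-one-out comparison: the mutual information of the full array minus the mutual information with receptor n removed. The standalone sketch below mirrors only that scoring logic; the toy_mi estimator is a hypothetical stand-in for the model's mutual_information method.

import numpy as np

def leave_one_out_scores(sens_mat, mutual_information):
    # Score each receptor (row of sens_mat) by how much mutual
    # information is lost when that row is removed from the array.
    mi_full = mutual_information(sens_mat)
    scores = np.empty(len(sens_mat))
    for n in range(len(sens_mat)):
        reduced = np.delete(sens_mat, n, axis=0)  # drop receptor n
        scores[n] = mi_full - mutual_information(reduced)
    return scores

def toy_mi(sens_mat):
    # Hypothetical stand-in estimator: enumerate all binary mixtures,
    # map them to binary output patterns, and count distinct patterns.
    Ns = sens_mat.shape[1]
    mixtures = (np.arange(2 ** Ns)[:, None] >> np.arange(Ns)) & 1
    outputs = (mixtures @ sens_mat.T > 0).astype(int)
    return np.log2(len(np.unique(outputs, axis=0)))

S = np.array([[1, 0, 1], [0, 1, 1]])
print(leave_one_out_scores(S, toy_mi))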
optimizes the current library to maximize the result of the target function using gradient descent. By default, the function returns the best value and the associated interaction matrix as result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how many optimization steps we try. `multiprocessing` is a flag deciding whether multiple processes are used to calculate the result; note that this has an overhead and might actually decrease overall performance for small problems. `ret_info` determines whether extra information is returned from the optimization. `args` is a dictionary of additional arguments that is passed to the target function. | def optimize_library_descent(self, target, direction='max', steps=100,
multiprocessing=False, ret_info=False,
args=None):
# get the target function to call
target_function = getattr(self, target)
if args is not None:
target_function = functools.partial(target_function, **args)
# initialize the optimizer
value = target_function()
value_best, state_best = value, self.sens_mat.copy()
if ret_info:
# store extra information
start_time = time.time()
info = {'values': {}}
values_count = self.parameters['optimizer_values_count']
values_step = max(1, steps // values_count)
if multiprocessing:
# run the calculations in multiple processes
pool_size = self.get_number_of_cores()
pool = mp.Pool(processes=pool_size)
if ret_info:
values_step = max(1, values_step // pool_size)
# iterate for given number of steps
for step in range(int(steps) // pool_size):
joblist = []
init_arguments = self.init_arguments
for _ in range(pool_size):
# modify the current state and add it to the job list
i = random.randrange(self.sens_mat.size)
self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]
params = init_arguments['parameters']
params['sensitivity_matrix'] = self.sens_mat
params['initialize_state']['sensitivity'] = 'exact'
joblist.append((copy.deepcopy(init_arguments), target))
self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]
# run all the jobs
results = pool.map(_run_job, joblist)
# find the best result
if direction == 'max':
res_best = np.argmax(results)
if results[res_best] > value_best:
value_best = results[res_best]
state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']
# use the best state as a basis for the next iteration
self.sens_mat = state_best
elif direction == 'min':
res_best = np.argmin(results)
if results[res_best] < value_best:
value_best = results[res_best]
state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']
# use the best state as a basis for the next iteration
self.sens_mat = state_best
else:
raise ValueError('Unsupported direction `%s`' % direction)
if ret_info and step % values_step == 0:
info['values'][step * pool_size] = results[res_best]
else:
# run the calculations in this process
for step in range(int(steps)):
# modify the current state
i = random.randrange(self.sens_mat.size)
self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]
# get the value of the new state
value = target_function()
improved = ((direction == 'max' and value > value_best) or
(direction == 'min' and value < value_best))
if improved:
# save the state as the new best value
value_best, state_best = value, self.sens_mat.copy()
else:
# undo last change
self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]
if ret_info and step % values_step == 0:
info['values'][step] = value_best
# sort the best state and store it in the current object
state_best = self.sort_sensitivity_matrix(state_best)
self.sens_mat = state_best.copy()
if ret_info:
info['total_time'] = time.time() - start_time
info['states_considered'] = steps
info['performance'] = steps / info['total_time']
return value_best, state_best, info
else:
return value_best, state_best | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def optimize_library_descent_multiple(self, target, direction='max',\n trials=4, multiprocessing=False,\n ret_error=False, **kwargs):\n \n # pass some parameters down to the optimization function to call\n kwargs['target'] = target\n kwargs['direction'] = direction\n \n # initialize the list of jobs with an optimization job starting from the\n # current interaction matrix\n joblist = [(self.init_arguments, 'optimize_library_descent', kwargs)]\n sens_mat = self.sens_mat #< store matrix to restore it later\n\n # set the ensemble of sensitivity matrices to try\n self.choose_sensitivity_matrix(density='auto')\n self.parameters['initialize_state']['sensitivity'] = 'ensemble'\n\n # add additional jobs with random initial interaction matrices\n init_arguments = self.init_arguments\n for _ in range(trials - 1):\n joblist.append((copy.deepcopy(init_arguments),\n 'optimize_library_descent', kwargs))\n \n # restore interaction matrix of this object\n self.sens_mat = sens_mat\n \n if multiprocessing:\n # calculate all results in parallel\n pool = mp.Pool(processes=self.get_number_of_cores())\n result_iter = pool.imap_unordered(_run_job, joblist)\n \n else:\n # create a generator over which we iterate later\n result_iter = (_run_job(job) for job in joblist)\n \n # find the best result by iterating over all results\n result_best, values = None, []\n for result in result_iter:\n values.append(result[0])\n # check whether this run improved the result\n if result_best is None:\n result_best = result\n elif ((direction == 'max' and result[0] > result_best[0]) or\n (direction == 'min' and result[0] < result_best[0])):\n result_best = result\n \n # sort the best state and store it in the current object\n state = self.sort_sensitivity_matrix(result_best[1])\n self.sens_mat = state.copy()\n\n if ret_error:\n # replace the best value by a tuple of the best value and its error\n value_best = result_best[0]\n value_err = np.abs(value_best - np.median(values))\n result_best = ((value_best, value_err), ) + result_best[1:]\n return result_best",
"def optimize_library_anneal(self, target, direction='max', steps=100,\n ret_info=False, args=None):\n # lazy import\n from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport\n \n # prepare the class that manages the simulated annealing\n annealer = ReceptorOptimizerAnnealer(self, target, direction, args,\n ret_info=ret_info)\n annealer.steps = int(steps)\n annealer.Tmax = self.parameters['anneal_Tmax']\n annealer.Tmin = self.parameters['anneal_Tmin']\n if self.parameters['verbosity'] == 0:\n annealer.updates = 0\n\n # do the optimization\n MI, state = annealer.optimize()\n\n # sort the best state and store it in the current object\n state = self.sort_sensitivity_matrix(state)\n self.sens_mat = state.copy()\n \n if ret_info:\n return MI, state, annealer.info\n else:\n return MI, state",
"def optimize_library(self, target, method='descent', direction='max',\n **kwargs):\n if method == 'descent':\n return self.optimize_library_descent(target, direction, **kwargs)\n elif method == 'descent_multiple' or method == 'descent-multiple':\n return self.optimize_library_descent_multiple(target, direction,\n **kwargs)\n elif method == 'anneal':\n return self.optimize_library_anneal(target, direction, **kwargs)\n \n else:\n raise ValueError('Unknown optimization method `%s`' % method)",
"def Optimization(*args, **kwargs):\n from warnings import warn\n\n warn(\n \"Optimization has been renamed to OptimizationResult and will be removed as soon as v0.13.0\", DeprecationWarning\n )\n return OptimizationResult(*args, **kwargs)",
"def optimize(self, max_iters=1e3, messages=False, use_counter=False,\\\n factr=1e7, pgtol=1e-05):\n logger.debug('Beginning MLE to optimize hyperparams. grad_method=%s'\\\n % self.grad_method)\n\n # setup the optimization\n try:\n x0 = self._transform_parameters(self.parameters)\n assert np.all(np.isfinite(x0))\n except:\n logger.error('Transformation failed for initial values. '\\\n + 'Ensure constraints are met or the value is not too small.')\n raise\n\n # filter out the fixed parameters\n free = np.logical_not(self._fixed_indicies)\n x0 = x0[free]\n\n # setup the counter\n if use_counter:\n self._counter = solver_counter(disp=True)\n else:\n self._counter = None\n\n # run the optimization\n try:\n x_opt, f_opt, opt = fmin_l_bfgs_b(func=self._objective_grad, x0=x0,\\\n factr=factr, pgtol=pgtol, maxiter=max_iters, disp=messages)\n except (KeyboardInterrupt,IndexError):\n logger.info('Keyboard interrupt raised. Cleaning up...')\n if self._counter is not None and self._counter.backup is not None:\n self.parameters = self._counter.backup[1]\n logger.info('will return best parameter set with'\\\n + 'log-likelihood = %.4g' % self._counter.backup[0])\n else:\n logger.info('Function Evals: %d. Exit status: %s' % (f_opt, opt['warnflag']))\n # extract the optimal value and set the parameters to this\n transformed_parameters = self._previous_parameters \n transformed_parameters[free] = x_opt\n self.parameters = self._untransform_parameters(transformed_parameters)\n return opt",
"def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par",
"def opt_wrapper(m, **kwargs):\r\n m.optimize(**kwargs)\r\n return m.optimization_runs[-1]",
"def opt_wrapper(m, **kwargs):\n m.optimize(**kwargs)\n return m.optimization_runs[-1]",
"def __call__(self, origin, state, **kwargs):\n fun = kwargs['function']\n d = state['direction']/np.linalg.norm(state['direction'])\n # filter directions that are too large\n if self.filter:\n ndabs_log = -np.log10(np.abs(d))\n mean_log = np.mean(ndabs_log)\n #print \"\\n ** MEAN =\", mean_log\n direction = (ndabs_log > mean_log-1.5).astype(int)*d\n else:\n direction = d\n state['direction'] = direction\n## for pos, d in enumerate(direction):\n## use_dir = self.use_dirs[pos]\n## if use_dir * d < 0:\n## # directions don't match so don't move in this direction\n## direction[pos] = 0\n maxStepSize = self.maxStepSize\n if np.isscalar(maxStepSize):\n stepSize = maxStepSize\n else:\n stepfacs = np.zeros(self.dim)\n for d in range(self.dim):\n # explicit loop so as to catch any ZeroDivisionErrors\n try:\n stepfacs[d] = abs(maxStepSize[d] / direction[d])\n except ZeroDivisionError:\n # Direction is orthogonal to this parameter direction,\n # so ensure won't choose this as the minimum step size\n stepfacs[d] = Inf\n # Stop stepping with giant sizes if direction vector has strong\n # separation of scales\n stepSize = min(stepfacs)\n# print \"direction = \", direction\n# print \"step = \", step\n i = 1\n old_value = state['old_value']\n not_done = True\n# print \"** TEMP: Hardwiring step size to be 0.0005\"\n# stepSize = 0.0005\n init_step = stepSize\n while not_done:\n print(\"\\nLinestep: i =\", i, \"step size =\", stepSize, \"direction =\\n\", end='')\n print(direction)\n p = origin + i * stepSize * direction\n print(\"Testing p = \", p)\n new_value = fun(p)\n if new_value < old_value:\n i += 1\n old_value = new_value\n else:\n if i == 1:\n # don't shrink step size to be less than 1/maxReduceFac of initial\n if stepSize*self.maxReduceFac < init_step:\n not_done = False\n p = origin + (i-1) * stepSize * direction\n else:\n stepSize /= self.stepMod\n else:\n # had found a working step but it's no longer stepping to lower residuals\n not_done = False\n p = origin + (i-1) * stepSize * direction\n state['alpha_step'] = stepSize\n return p",
"def optimization_step(self):\n \n if \"CSS\" in self.algorithm:\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set]\n \n if (self.num_samples > 0) and (not self.mixture):\n \n if ((self.mf_steps > 0) and self.alpha >0) or\\\n self.gibbs_steps > 0: \n \n var_list.append(self.sampler_theta)\n \n elif \"CD\" in self.algorithm:\n \n input_dict = {self.x : self.train_inputs[self.minibatch_set,:]} \n \n var_list = [self.minibatch_set]\n \n var_list.append(self.learning_rate)\n \n if self.use_momentum:\n \n var_list.append(self.momentum)\n \n output_vars = [self.pseudo_cost]\n \n if self.report_p_tilda:\n \n output_vars.append(self.p_tilda)\n \n else:\n \n output_vars.append(theano.shared(0))\n \n opt_step = theano.function(inputs = var_list,\n outputs = output_vars,\n updates = self.updates,\n givens = input_dict,\n on_unused_input='warn')\n \n return opt_step",
"def optimize(maxiter: int = 1000,\n tol = None,\n save_steps: int = 1,\n c0: float = 0.62,\n c1: float = 0.1,\n c2: float = 0.602,\n c3: float = 0.101,\n c4: float = 0):\n _spsa_vars = [c0, c1, c2, c3, c4]\n theta = self.vectorizer.vector\n nb_params = len(theta)\n use_exact_grads = 'grads' in self._method\n \n if save_steps:\n theta_vec = [theta]\n cost_vec = [self.vectorized_value_and_grad(theta)[0]]\n \n \n pbar = tqdm(total=maxiter, disable=not self.progbar)\n def callback(_):\n pbar.clear()\n pbar.update()\n val = round(self.loss, 5)\n pbar.set_description(str(val))\n\n if self.loss_target is not None:\n if self.loss < self.loss_target:\n # returning True doesn't terminate optimization\n pbar.close()\n raise KeyboardInterrupt\n \n for ii in range(maxiter):\n \n a_spsa = float(_spsa_vars[0]) / ((ii + 1 + _spsa_vars[4])**_spsa_vars[2])\n c_spsa = float(_spsa_vars[1]) / ((ii + 1)**_spsa_vars[3])\n delta = 2 * randint(0, 2, size=nb_params) - 1\n # plus and minus directions\n \n if use_exact_grads:\n raise NotImplementedError('Will use grad calc to project on to SP-direction')\n else:\n theta_plus = theta + c_spsa * delta\n theta_minus = theta - c_spsa * delta\n\n cost_plus = self.vectorized_value_and_grad(theta_plus)[0]\n cost_minus = self.vectorized_value_and_grad(theta_minus)[0]\n # derivative estimate\n g_spsa = (cost_plus - cost_minus) * delta / (2.0 * c_spsa)\n # updated theta\n theta = theta - a_spsa * g_spsa\n \n callback(ii)\n \n if tol is not None:\n if (cost_plus + cost_minus)/2 < tol:\n pbar.close()\n break\n \n if save_steps:\n theta_vec.append(theta)\n cost_vec.append(cost_plus/2+cost_minus/2)\n \n \n result_dict = {'hyper_parameters':_spsa_vars,\n 'maxiter':maxiter,\n 'theta_opt':theta,\n 'cost_opt':self.vectorized_value_and_grad(theta)[0],\n 'grad_opt':self.vectorized_value_and_grad(theta)[1]}\n if save_steps:\n result_dict['theta_history'] = theta_vec\n result_dict['cost_history'] = cost_vec\n self.result_dict = result_dict\n pbar.close()\n\n return self.inject_res_vector_and_return_tn()",
"def optimize(self, X, y, *args):\n self.loss_function.set_space(X, y, *args)\n self.betas_ = fastgradalgo(\n self.loss_function,\n t_init=self.eta_init,\n max_iter=self.max_iter\n )\n return self.betas_",
"def optimize(self, coords_batch: SameSizeCoordsBatch, energy_helper):\n\n a_coords = coords_batch.coords\n n_total_confs = a_coords.shape[0]\n n_atoms = a_coords.shape[1]\n \n func_evals = 0\n\n # evaluate initial f(x) and df/dx\n loss, std = energy_helper.compute_energy() # loss[nConf]\n st = OptState(self, n_total_confs, n_atoms, loss.dtype)\n st.loss = loss\n st.std = std\n min_loss = st.loss.detach().clone()\n \n minE_no_constraints = energy_helper.energy_no_constraint().detach().clone()\n min_std = torch.full_like(minE_no_constraints, -1)\n st.flat_grad = energy_helper.compute_grad().reshape(st.n_confs,-1)\n min_grad_square_max = torch.full((n_total_confs,), 9e20, dtype=a_coords.dtype, device=self.device)\n #st.abs_grad_sum = st.flat_grad.abs().sum(1)\n \n status = torch.zeros((n_total_confs,),dtype=torch.uint8, device=self.device)\n is_active = torch.ones((n_total_confs,), dtype=torch.uint8, device=self.device).bool()\n conf_steps = torch.full((n_total_confs,), -1, dtype=torch.int16, device=self.device)\n minE_coords = a_coords.detach().clone()\n minE_grad = torch.full((n_total_confs,n_atoms*3), -999, dtype=a_coords.dtype, device=self.device)\n \n current_evals = 1\n func_evals += 1\n n_iter = 0\n\n # optimize for a max of max_iter iterations\n while n_iter < self.convergence_opts.max_iter:\n # keep track of nb of iterations\n n_iter += 1\n\n ############################################################\n # compute gradient descent direction\n ############################################################\n if n_iter == 1:\n st.d = st.flat_grad.neg()\n else:\n # d: direction of step\n # s: step = direction * trust of step\n # y: delta gradient in step (grad - prev_grad) = vector of gradient change\n # ys: sum(y * step) \n # do lbfgs update (update memory)\n y = st.flat_grad.sub(st.prev_flat_grad)\n \n s = st.d*st.t.reshape(-1,1)\n \n ys = torch.sum(y * s, dim=1)\n \n is_valid_step = ys > 1e-10 # DIAL BACK TO 10E-6,4,5, look at RFO, rational function optimization\n # reach out to lee-ping or roland king (optking)\n # try occasionally setting h_diag to 1\n # look into getting code from psi4 to convert, (little bit of a mess) cartesian to internal pyoptking\n # pyberny: has nicer code for internal coordinates\n # maybe can get initial hessian guess in internal coordinates and project back to xyz and use as first guess\n st.old_dirs.push_if(is_valid_step, y)\n st.old_stps.push_if(is_valid_step, s)\n y = y[is_valid_step] \n st.H_diag[is_valid_step] = ys[is_valid_step] / torch.sum(y * y, dim=1)\n d_not_valid_steps = st.flat_grad[~is_valid_step].neg() #d[~is_valid_step]\n \n # compute the approximate (L-BFGS) inverse Hessian\n # multiplied by the gradient\n \n ro = 1. 
/ torch.sum(st.old_dirs.container * st.old_stps.container, dim=2)\n ro[torch.isinf(ro)] = 1e-10\n\n al = torch.zeros((self.history_size,st.n_confs), dtype=loss.dtype, device=self.device)\n \n num_old = st.old_dirs.count_hist.max()\n# log.debug(\"old_dirs {}\\n{}\".format(num_old, st.old_dirs.container[0:num_old]))\n \n q = st.flat_grad.neg()\n for i in range(num_old):\n al[i] = torch.sum(st.old_stps.container[i]* q, dim=1) * ro[i]\n q.add_(-al[i].reshape(-1,1) * st.old_dirs.container[i])\n st.d = r = q * st.H_diag.reshape(-1,1) \n# log.debug(\"al {}\".format(al[0:num_old]))\n# log.debug(\"q {}\".format(q))\n# log.debug(\"d {}\".format(st.d))\n# log.debug(\"H_diag {}\".format(st.H_diag))\n# \n for i in range(num_old - 1, -1, -1):\n be_i = torch.sum(st.old_dirs.container[i] * r, dim=1) * ro[i]\n# log.debug(\"{} od {}\".format(i,st.old_dirs.container[i]))\n# log.debug(\"{} r {}\".format(i,r))\n# log.debug(\"{} ro {}\".format(i,ro[i]))\n# log.debug(\"{} bei {}\".format(i,be_i))\n r.add_((al[i] - be_i).reshape(-1,1) * st.old_stps.container[i])\n# log.debug(\"{} r {}\".format(i,r))\n# st.d[~is_valid_step] = d_not_valid_steps\n\n if st.prev_flat_grad is None:\n st.prev_flat_grad = st.flat_grad.clone()\n else:\n st.prev_flat_grad.copy_(st.flat_grad)\n st.prev_loss = st.loss\n\n ############################################################\n # compute step length\n ############################################################\n # reset initial guess for trust\n if n_iter == 1:\n st.t = self.trust_by_step_size(a_coords, st.d, 0.1) \n else:\n# log.debug(lr)\n st.t = st.lr.clone()\n #if n_iter > 10: st.t = st.t + st.t * random.gauss(0, 0.1)\n\n if self.line_search_fn == \"Armijo\":\n ls_func_evals = self.armijo_line_search(n_iter, a_coords, st, energy_helper)\n \n elif self.line_search_fn == \"Wolfe\":\n ls_func_evals =self.wolfe_lineSearch(n_iter, a_coords, st, energy_helper)\n \n else:\n # directional derivative\n #gtd = torch.sum(st.flat_grad * st.d, dim=1) # g * d\n\n # no line search, simply move with fixed-step\n st.t = self._add_grad(a_coords, st.t, st.d)\n \n if n_iter != self.convergence_opts.max_iter:\n # re-evaluate function only if not in last iteration\n # the reason we do this: in a stochastic setting,\n # no use to re-evaluate that function here\n st.loss, st.std = energy_helper.compute_energy()\n st.flat_grad = energy_helper.compute_grad().reshape(st.n_confs,-1)\n #st.abs_grad_sum = st.flat_grad.abs().sum(1) # not needed\n ls_func_evals = 1\n\n\n # update func eval\n current_evals += ls_func_evals\n func_evals += ls_func_evals\n\n\n ############################################################\n # check conditions\n ############################################################\n #\n # active conformers are conformers that have not convereged\n # all variable in st. 
(OptState) are limited to active conformers\n #\n # local variables that have only elements for active conformers in the following evaluation\n # code have an \"a_\" prefix\n #\n \n status[is_active] = 0\n \n \n # a_ prefix flags tensors on active conformers only, just as st.\n a_flat_grad_sqare = st.flat_grad.pow(2)\n a_flat_grad_MSE = a_flat_grad_sqare.sum(1) / n_atoms \n a_flat_grad_square_max = a_flat_grad_sqare.max(1)[0]\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug('{} loss: {}, max_grad: {} a_coords[0:5,1,0] {}'.format(\n n_iter, \n st.loss[0:5].detach().cpu().numpy(),\n a_flat_grad_square_max.sqrt().detach().cpu().numpy(),\n a_coords[0:5,0,0].detach().cpu().numpy()))\n\n a_deltaLoss = st.loss - min_loss[is_active]\n st.last_decreased[a_deltaLoss < 0] = n_iter\n\n # check if energy is converged\n a_e_decreased = a_deltaLoss * a_deltaLoss < self.convergence_opts.convergence_es\n e_decreased_idx = st.in_confIdx[a_e_decreased]\n status[e_decreased_idx] |= Status.ENERGY_CONVERGED\n \n # flag geometry as \"decreased\" if it went down or stated ~ same but gradient decreased \n # allow for 10x more tolerance because if forces are decreased\n a_deltaGrad = a_flat_grad_square_max - min_grad_square_max[is_active]\n a_e_decreased = ( (a_deltaLoss < 0)\n | ((a_deltaLoss <= self.convergence_opts.convergence_es * 10) \n &(a_deltaGrad < 0))) \n e_decreased_idx = st.in_confIdx[a_e_decreased]\n \n # store best geometry when geom is found\n if e_decreased_idx:\n minE_coords[e_decreased_idx] = a_coords[a_e_decreased].detach_()\n minE_no_constraints[e_decreased_idx] = \\\n energy_helper.energy_no_constraint()[a_e_decreased].detach_()\n minE_grad[e_decreased_idx] = st.flat_grad[a_e_decreased].detach_()\n min_loss [e_decreased_idx] = st.loss[a_e_decreased].detach().clone()\n if not st.std is None: min_std[e_decreased_idx] = st.std[a_e_decreased]\n min_grad_square_max[e_decreased_idx] = a_flat_grad_square_max[a_e_decreased]\n \n dummy = n_iter - st.last_decreased >= self.convergence_opts.max_it_without_decrease\n status[st.in_confIdx[dummy]] |= Status.ENERGY_NOT_DECREASING\n \n dummy = (a_flat_grad_MSE < self.convergence_opts.convergence_gms) \\\n & (a_flat_grad_square_max < self.convergence_opts.convergence_gmaxs)\n status[st.in_confIdx[dummy]] |= Status.GRADIENT_CONVERGED \n\n dt_square = st.d*st.t.reshape(-1,1)\n dt_square *= dt_square\n dummy = (dt_square.sum(1)/ n_atoms <= self.convergence_opts.convergence_dms) \\\n & (dt_square.max(1)[0] <= self.convergence_opts.convergence_dmaxs)\n status[st.in_confIdx[dummy]] |= Status.GEOMETRY_CONVERGED\n \n if self.plot_data is not None:\n rec = torch.full((n_total_confs,), float('nan'), dtype=st.loss.dtype, device=self.device)\n rec[is_active] = st.loss\n self.plot_data.append([n_iter, rec])\n \n actives_finished = (status[is_active] >= Status.ALL_CONVERGED) \n\n actives_finished_in_idx = st.in_confIdx[actives_finished]\n # set conf_steps for not-finished conformations\n conf_steps[actives_finished_in_idx] = n_iter\n\n if (~actives_finished).sum() == 0:\n log.info(f\"all finished (nIter={n_iter}): {minE_no_constraints}\") \n break\n\n if n_iter == self.convergence_opts.max_iter:\n log.info(f\"MAX_ITER reached: {minE_no_constraints}\")\n status[ st.in_confIdx[~actives_finished] ] |= Status.ITERATIONS_EXCEEDED\n break\n if current_evals >= self.convergence_opts.max_iter * 3: \n status[ st.in_confIdx[~actives_finished] ] |= Status.ITERATIONS_EXCEEDED \n log.info(f\"MAX_EVAL reached: {minE_no_constraints}\") \n break\n \n # filter out completed 
conformations\n if actives_finished.sum() > 0:\n a_future_actives = ~actives_finished\n is_active[actives_finished_in_idx] = 0\n st.filter_(a_future_actives)\n energy_helper.filter_(a_future_actives) # also filters coords_batch\n a_coords = coords_batch.coords\n\n if n_iter % self.prune_high_energy_freq == 0 and n_iter > 5:\n # this is a global minimum search, to speed up: prune \n # conformations with the highest energy\n drop_count = int(st.loss.shape[0] * self.prune_high_energy_fract)\n if drop_count < 1: continue\n a_to_drop = st.loss.argsort(descending=True)[0:int(st.loss.shape[0] * self.prune_high_energy_fract)]\n to_drop_idx = st.in_confIdx[a_to_drop]\n is_active[to_drop_idx] = 0\n status[to_drop_idx] = Status.HIGH_ENERGY\n conf_steps[to_drop_idx] = n_iter\n a_future_actives = torch.ones_like(st.loss, dtype=torch.uint8).bool()\n a_future_actives[a_to_drop] = 0\n st.filter_(a_future_actives)\n energy_helper.filter_(a_future_actives) # also filters coords_batch\n a_coords = coords_batch.coords\n \n if self.plot_data:\n self.plot(n_total_confs)\n\n # set conf_steps for not-finished conformations\n conf_steps[conf_steps == -1] = n_iter\n \n if log.isEnabledFor(logging.DEBUG):\n log.debug(f'lbfgs completed e={minE_no_constraints}, maxgrad^2={min_grad_square_max}')\n status[(status > Status.ALL_CONVERGED) & (status < Status.HIGH_ENERGY)] = Status.ALL_CONVERGED\n\n if st.std is None: min_std = None\n return minE_coords, minE_no_constraints, minE_grad, min_std, status, conf_steps",
"def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best",
"def optimize(self):\n \n if self.verbose:\n print('Starting grid search with bounds: [' + \\\n ';'.join(['%5g to %5g']*len(self.steps))%tuple([(self.steps[i][0], self.steps[i][-1]) for i in range(len(self.steps))]) +']')\n\n for params in self._get_next_point():\n self.transform.set_params(params)\n\n v, _ = self.measure.value_and_derivatives(self.transform)\n\n if v < self.best_value:\n self.best_value = v\n self.best_params = params\n# print('New best value %2.4f at ('%v, ', '.join(['%8.3f']*len(params))%tuple(params), ')')\n\n self.value_history.append(v)\n self.last_value = v\n self.iteration += 1\n\n if self.report_freq > 0 and (self.iteration % self.report_freq == 0) and self.report_func is not None:\n self.report_func(self)\n\n # Set the best transform\n self.transform.set_params(self.best_params)\n self.last_value = self.best_value\n return self.best_value",
"def optimization_manager(config):\n def optimize(state,\n grad,\n warmup=config.optim.warmup,\n grad_clip=config.optim.grad_clip):\n \"\"\"Optimizes with warmup and gradient clipping (disabled if negative).\"\"\"\n lr = state.lr\n if warmup > 0:\n lr = lr * jnp.minimum(state.step / warmup, 1.0)\n if grad_clip >= 0:\n # Compute global gradient norm\n grad_norm = jnp.sqrt(\n sum([jnp.sum(jnp.square(x)) for x in jax.tree_leaves(grad)]))\n # Clip gradient\n clipped_grad = jax.tree_map(\n lambda x: x * grad_clip / jnp.maximum(grad_norm, grad_clip), grad)\n else: # disabling gradient clipping if grad_clip < 0\n clipped_grad = grad\n return state.optimizer.apply_gradient(clipped_grad, learning_rate=lr)\n\n return optimize",
"def solve(self, topology, algorithm=\"SLSQP\", grad=\"AD\", step_size=1e-6, iters=100, eps=1e-6, kappa=1e-8, tmax=100, eta=1e-6, verbose=False):\n if verbose:\n print(\"----------\")\n print(\"Optimization with {} started!\".format(algorithm))\n print(f\"# Parameters: {self.number_of_parameters()}, # Constraints {self.number_of_constraints()}\")\n\n # test for bad stuff before going any further\n self.check_optimization_sanity()\n\n # compose gradient and objective functions\n if grad not in (\"AD\", \"FD\"):\n raise ValueError(f\"Gradient method {grad} is not supported!\")\n if grad == \"AD\":\n if verbose:\n print(\"Computing gradients using automatic differentiation!\")\n x_func = partial(self._optimize_form, topology=topology.copy(), tmax=tmax, eta=eta)\n grad_func = partial(grad_autograd, grad_func=agrad(x_func)) # x, grad, x_func\n\n elif grad == \"FD\":\n if verbose:\n print(f\"Warning: Calculating gradients using finite differences with step size {step_size}. This may take a while...\")\n grad_func = self.gradient_func(grad_finite_differences, topology.copy(), tmax, eta, step_size)\n\n # grad_func = self.gradient_func(grad_func, topology.copy(), tmax, eta, step_size)\n obj_func = self.objective_func(topology, grad_func, tmax, eta)\n\n # generate optimization variables\n x = self.optimization_parameters(topology)\n\n # extract the lower and upper bounds to optimization variables\n bounds_low, bounds_up = self.optimization_bounds(topology)\n\n # stack keyword arguments\n hyper_parameters = {\"f\": obj_func,\n \"algorithm\": algorithm,\n \"dims\": self.number_of_parameters(),\n \"bounds_low\": bounds_low,\n \"bounds_up\": bounds_up,\n \"iters\": iters,\n \"eps\": eps,\n \"ftol\": kappa}\n\n # assemble optimization solver\n solver = nlopt_solver(**hyper_parameters)\n\n # solve optimization problem\n x_opt = None\n start = time()\n try:\n x_opt = solver.optimize(x)\n if verbose:\n print(\"Optimization ended correctly!\")\n except RoundoffLimited:\n print(\"Optimization was halted because roundoff errors limited progress\")\n print(\"Results may still be useful though!\")\n x_opt = self.optimization_parameters(topology)\n except RuntimeError:\n print(\"Optimization failed due to a runtime error!\")\n print(f\"Optimization total runtime: {round(time() - start, 4)} seconds\")\n return static_equilibrium(topology)\n\n # fetch last optimum value of loss function\n time_opt = time() - start\n loss_opt = solver.last_optimum_value()\n evals = solver.get_numevals()\n status = nlopt_status(solver.last_optimize_result())\n\n # set optimizer attributes\n self.time_opt = time_opt\n self.x_opt = x_opt\n self.penalty = loss_opt\n self.evals = evals\n self.status = status\n\n # set norm of the gradient\n # NOTE: np.zeros is a dummy array (signature requirement set by nlopt)\n self.gradient = grad_func(x_opt, np.zeros(x_opt.size))\n self.gradient_norm = np.linalg.norm(self.gradient)\n\n if verbose:\n print(f\"Optimization total runtime: {round(time_opt, 6)} seconds\")\n print(\"Number of evaluations incurred: {}\".format(evals))\n print(f\"Final value of the objective function: {round(loss_opt, 6)}\")\n print(f\"Norm of the gradient of the objective function: {round(self.gradient_norm, 6)}\")\n print(f\"Optimization status: {status}\".format(status))\n print(\"----------\")\n\n # exit like a champion\n return static_equilibrium(topology)",
"def step_func_cat(X, args, Y, info, Ytarget, err, tols, iter, maxIter):\n [XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA] = args[\"cat\"]\n\n # if abs( err[1] + ZF ) < 0.0001:\n # breakpoint()\n\n [alpha_min, alpha0, alphaR] = args[\n \"step\"\n ] # get minimum alpha, initial alpha, and alpha reduction rate from passed arguments\n\n # J = info['jacobian']\n # dX = -np.matmul(np.linalg.inv(J), err)\n dX = -np.matmul(info[\"stiffnessB\"], err)\n\n # ! Reduce dHF by factor (between 1 at I = 1 and 0 at I = MaxIter) that reduces linearly with iteration count\n # to ensure that we converge on a solution even in the case were we obtain a nonconvergent cycle about the\n # correct solution (this happens, for example, if we jump to quickly between a taut and slack catenary)\n\n alpha = 1.0 # M<<<<<<<< np.max([alpha_min, alpha0*(1.0 - alphaR*iter/maxIter)])\n\n # exponential approach alpha = alpha0 * np.exp( iter/maxIter * np.log(alpha_min/alpha0 ) )\n\n dX[0] = dX[0] * alpha # dHF*( 1.0 - Tol*I )\n dX[1] = dX[1] * alpha # dVF*( 1.0 - Tol*I )\n\n # To avoid an ill-conditioned situation, make sure HF does not go less than or equal to zero by having a lower limit of Tol*HF\n # [NOTE: the value of dHF = ( Tol - 1.0 )*HF comes from: HF = HF + dHF = Tol*HF when dHF = ( Tol - 1.0 )*HF]\n # dX[0] = max( dX[0], ( tol - 1.0 )*info['HF']);\n\n # To avoid an ill-conditioned situation, make sure HF does not get too close to zero, by forcing HF >= tols[0]\n # if info['HF'] + dX[0] <= tol*abs(info['VF']+dX[1]):\n # if info['HF'] + dX[0] <= tols[0]\n if X[0] + dX[0] <= tols[0]:\n # dX[0] = tol*abs(info['VF']+dX[1]) - info['HF']\n # dX[0] = tols[0] - info['HF']\n dX[0] = tols[0] - X[0]\n\n # To avoid an ill-conditioned situation where the line is nearly all on the seabed but the solver gets stuck,\n # if np.abs(err[1] + ZF)/ZF < tol:\n # breakpoint()\n # deltaHFVF = info['HF'] - info['VF']\n # dX[0] = dX[0] - 0.5*deltaHFVF\n # dX[1] = dX[1] + 0.5*deltaHFVF\n\n # prevent silly situation where a line with weight and positive ZF considers a negative VF\n if info[\"ProfileType\"] == 2:\n if X[1] + dX[1] <= tols[1]: # if vertical force is within tolerance of being zero/negative\n VFtarget = (L - info[\"LBot\"]) * W # set next VF value to be the weight of portion of line that's suspended\n dX[1] = VFtarget - X[1]\n\n return dX # returns dX (step to make)",
"def maximize(func, grad_func, x, y, theta_0, alpha_0=0.01, max_it=100):\n return minimize(negate(func), negate_all(grad_func), x, y, theta_0, alpha_0=0.01, max_it=100)",
"def optimize(self,\n max_grad_norm=0.5,\n learning_rate=7e-4,\n rms_decay=0.99,\n rms_epsilon=1e-5):\n grads = tf.gradients(tf.negative(self.objective), self.variables)\n if max_grad_norm is not None:\n grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)\n trainer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,\n decay=rms_decay,\n epsilon=rms_epsilon)\n return trainer.apply_gradients(list(zip(grads, self.variables)))",
"def checkgrad(self, target_param = None, verbose=False, step=1e-6, tolerance = 1e-3):\n\n x = self._get_params_transformed().copy()\n\n if not verbose:\n #just check the global ratio\n dx = step*np.sign(np.random.uniform(-1,1,x.size))\n\n #evaulate around the point x\n f1, g1 = self.objective_and_gradients(x+dx)\n f2, g2 = self.objective_and_gradients(x-dx)\n gradient = self.objective_function_gradients(x)\n\n numerical_gradient = (f1-f2)/(2*dx)\n global_ratio = (f1-f2)/(2*np.dot(dx,gradient))\n\n if (np.abs(1.-global_ratio)<tolerance) and not np.isnan(global_ratio):\n return True\n else:\n return False\n else:\n #check the gradient of each parameter individually, and do some pretty printing\n try:\n names = self._get_param_names_transformed()\n except NotImplementedError:\n names = ['Variable %i'%i for i in range(len(x))]\n\n # Prepare for pretty-printing\n header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical']\n max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])\n float_len = 10\n cols = [max_names]\n cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])\n cols = np.array(cols) + 5\n header_string = [\"{h:^{col}}\".format(h = header[i], col = cols[i]) for i in range(len(cols))]\n header_string = map(lambda x: '|'.join(x), [header_string])\n separator = '-'*len(header_string[0])\n print '\\n'.join([header_string[0], separator])\n\n if target_param is None:\n param_list = range(len(x))\n else:\n param_list = self.grep_param_names(target_param)\n\n for i in param_list:\n xx = x.copy()\n xx[i] += step\n f1, g1 = self.objective_and_gradients(xx)\n xx[i] -= 2.*step\n f2, g2 = self.objective_and_gradients(xx)\n gradient = self.objective_function_gradients(x)[i]\n\n numerical_gradient = (f1-f2)/(2*step)\n ratio = (f1-f2)/(2*step*gradient)\n difference = np.abs((f1-f2)/2/step - gradient)\n\n if (np.abs(ratio-1)<tolerance):\n formatted_name = \"\\033[92m {0} \\033[0m\".format(names[i])\n else:\n formatted_name = \"\\033[91m {0} \\033[0m\".format(names[i])\n r = '%.6f' % float(ratio)\n d = '%.6f' % float(difference)\n g = '%.6f' % gradient\n ng = '%.6f' % float(numerical_gradient)\n grad_string = \"{0:^{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}\".format(formatted_name,r,d,g, ng, c0 = cols[0]+9, c1 = cols[1], c2 = cols[2], c3 = cols[3], c4 = cols[4])\n print grad_string",
"def nn_ga_optimise_from_args(func_caller, worker_manager, max_capital, mode, mutation_op,\n crossover_op=None, options=None, reporter='default'):\n if options is None:\n reporter = get_reporter(reporter)\n options = load_options(ga_opt_args, reporter=reporter)\n options.mode = mode\n return (NNGAOptimiser(func_caller, worker_manager, mutation_op, crossover_op,\n options, reporter)).optimise(max_capital)",
"def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3):\r\n\r\n x = self._get_params_transformed().copy()\r\n\r\n if not verbose:\r\n # just check the global ratio\r\n\r\n #choose a random direction to find the linear approximation in\r\n if x.size==2:\r\n dx = step * np.ones(2) # random direction for 2 parameters can fail dure to symmetry\r\n else:\r\n dx = step * np.sign(np.random.uniform(-1, 1, x.size))\r\n\r\n # evaulate around the point x\r\n f1, g1 = self.objective_and_gradients(x + dx)\r\n f2, g2 = self.objective_and_gradients(x - dx)\r\n gradient = self.objective_function_gradients(x)\r\n\r\n numerical_gradient = (f1 - f2) / (2 * dx)\r\n global_ratio = (f1 - f2) / (2 * np.dot(dx, np.where(gradient==0, 1e-32, gradient)))\r\n\r\n return (np.abs(1. - global_ratio) < tolerance) or (np.abs(gradient - numerical_gradient).mean() < tolerance)\r\n else:\r\n # check the gradient of each parameter individually, and do some pretty printing\r\n try:\r\n names = self._get_param_names_transformed()\r\n except NotImplementedError:\r\n names = ['Variable %i' % i for i in range(len(x))]\r\n\r\n # Prepare for pretty-printing\r\n header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical']\r\n max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])\r\n float_len = 10\r\n cols = [max_names]\r\n cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])\r\n cols = np.array(cols) + 5\r\n header_string = [\"{h:^{col}}\".format(h=header[i], col=cols[i]) for i in range(len(cols))]\r\n header_string = map(lambda x: '|'.join(x), [header_string])\r\n separator = '-' * len(header_string[0])\r\n print '\\n'.join([header_string[0], separator])\r\n\r\n if target_param is None:\r\n param_list = range(len(x))\r\n else:\r\n param_list = self.grep_param_names(target_param, transformed=True, search=True)\r\n if not np.any(param_list):\r\n print \"No free parameters to check\"\r\n return\r\n\r\n\r\n for i in param_list:\r\n xx = x.copy()\r\n xx[i] += step\r\n f1, g1 = self.objective_and_gradients(xx)\r\n xx[i] -= 2.*step\r\n f2, g2 = self.objective_and_gradients(xx)\r\n gradient = self.objective_function_gradients(x)[i]\r\n\r\n numerical_gradient = (f1 - f2) / (2 * step)\r\n ratio = (f1 - f2) / (2 * step * np.where(gradient==0, 1e-312, gradient))\r\n difference = np.abs((f1 - f2) / 2 / step - gradient)\r\n\r\n if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:\r\n formatted_name = \"\\033[92m {0} \\033[0m\".format(names[i])\r\n else:\r\n formatted_name = \"\\033[91m {0} \\033[0m\".format(names[i])\r\n r = '%.6f' % float(ratio)\r\n d = '%.6f' % float(difference)\r\n g = '%.6f' % gradient\r\n ng = '%.6f' % float(numerical_gradient)\r\n grad_string = \"{0:^{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}\".format(formatted_name, r, d, g, ng, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4])\r\n print grad_string",
"def fit(self, start_params=None, method='newton', maxiter=100,\n full_output=True, disp=True, fargs=(), callback=None, retall=False,\n skip_hessian=False, **kwargs):\n Hinv = None # JP error if full_output=0, Hinv not defined\n\n start_params = self._get_start_params(start_params)\n\n # TODO: separate args from nonarg taking score and hessian, ie.,\n # user-supplied and numerically evaluated estimate frprime doesn't take\n # args in most (any?) of the optimize function\n\n nobs = self.endog.shape[0]\n # f = lambda params, *args: -self.loglike(params, *args) / nobs\n\n def f(params, *args):\n return -self.loglike(params, *args) / nobs\n\n if method == 'newton':\n # TODO: why are score and hess positive?\n def score(params, *args):\n return self.score(params, *args) / nobs\n\n def hess(params, *args):\n return self.hessian(params, *args) / nobs\n else:\n def score(params, *args):\n return -self.score(params, *args) / nobs\n\n def hess(params, *args):\n return -self.hessian(params, *args) / nobs\n\n warn_convergence = kwargs.pop('warn_convergence', True)\n optimizer = Optimizer()\n xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,\n fargs, kwargs,\n hessian=hess,\n method=method,\n disp=disp,\n maxiter=maxiter,\n callback=callback,\n retall=retall,\n full_output=full_output)\n\n # NOTE: this is for fit_regularized and should be generalized\n cov_params_func = kwargs.setdefault('cov_params_func', None)\n if cov_params_func:\n Hinv = cov_params_func(self, xopt, retvals)\n elif method == 'newton' and full_output:\n Hinv = np.linalg.inv(-retvals['Hessian']) / nobs\n # TODO: try/except for non-invertible hessian?\n elif not skip_hessian:\n H = -1 * self.hessian(xopt)\n invertible = False\n if np.all(np.isfinite(H)):\n eigvals, eigvecs = np.linalg.eigh(H)\n if np.min(eigvals) > 0:\n invertible = True\n\n if invertible:\n Hinv = eigvecs.dot(np.diag(1.0 / eigvals)).dot(eigvecs.T)\n Hinv = np.asfortranarray((Hinv + Hinv.T) / 2.0)\n else:\n warnings.warn('Inverting hessian failed, no bse or cov_params '\n 'available', HessianInversionWarning)\n Hinv = None\n\n if 'cov_type' in kwargs:\n cov_kwds = kwargs.get('cov_kwds', {})\n kwds = {'cov_type': kwargs['cov_type'], 'cov_kwds': cov_kwds}\n else:\n kwds = {}\n if 'use_t' in kwargs:\n kwds['use_t'] = kwargs['use_t']\n # TODO: add Hessian approximation and change the above if needed\n mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)\n\n # TODO: hardcode scale?\n if isinstance(retvals, dict):\n mlefit.mle_retvals = retvals\n if warn_convergence and not retvals['converged']:\n warnings.warn(\"Maximum Likelihood optimization failed to \"\n \"converge. Check mle_retvals\",\n ConvergenceWarning)\n\n mlefit.mle_settings = optim_settings\n return mlefit",
"def _optimize(optimizer, regularization_losses, scope, **kwargs):\n sum_loss = _gather_loss(regularization_losses, scope)\n grad = None\n if sum_loss is not None:\n grad = optimizer.compute_gradients(sum_loss, **kwargs)\n return sum_loss, grad",
"def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)",
"def minimizer(f, x, optimizer, grad_f, hess_f=None,\n args=(),\n maxiter=None, tol=1e-5,\n stepsize=1, adaptive=True,\n bounds=None,\n disp=False):\n min_obj = {'steepest': SteepestDescent,\n 'conjugate': ConjugateDescent,\n 'newton': NewtonDescent,\n 'cg': ScipyCG,\n 'ncg': ScipyNCG,\n 'bfgs': ScipyBFGS,\n 'lbfgs': ScipyLBFGS}\n\n if optimizer not in min_obj.keys():\n raise ValueError('unknown optimizer')\n local_meth = optimizer in ('steepest', 'conjugate', 'newton')\n\n if local_meth:\n proj = None\n if not bounds is None:\n if callable(bounds):\n proj = bounds\n else:\n proj = bounds_to_proj(bounds)\n\n return min_obj[optimizer](f, x, grad_f, hess_f=hess_f,\n maxiter=maxiter, tol=tol,\n stepsize=stepsize, adaptive=adaptive,\n proj=proj)\n\n if not bounds is None and optimizer != 'lbfgs':\n raise NotImplementedError('%s optimization method does not accept constraints' % optimizer)\n \n return min_obj[optimizer](f, x, grad_f, hess_f=hess_f,\n maxiter=maxiter, tol=tol,\n bounds=bounds, disp=disp)",
"def run_step_blackbox_optimizer(config,\n current_input,\n blackbox_optimizer,\n proposed_perturbations,\n finished_dnas,\n results,\n logging_data=None):\n core_hyperparameters = blackbox_optimizer.get_hyperparameters()\n function_values = [0.0] * len(proposed_perturbations)\n rewards_for_controller = []\n perturbations = proposed_perturbations\n evaluation_stats = []\n current_value_exact = 0.0\n current_value_exact_counter = 0\n\n for i in range(len(results)):\n rewards_for_controller.append(results[i]['function_value'])\n tag = results[i]['tag']\n index = 0\n if tag > 0:\n if config.est_type == 'antithetic':\n index = (tag - 1) * 2\n function_values[index] += results[i]['function_value']\n else:\n index = tag - 1\n function_values[index] += results[i]['function_value']\n if tag < 0:\n index = (-tag - 1) * 2 + 1\n function_values[index] += results[i]['function_value']\n if tag == 0:\n current_value_exact += results[i]['function_value']\n current_value_exact_counter += 1\n current_value_exact /= float(current_value_exact_counter)\n\n for result in results:\n evaluation_stat = list(result['evaluation_stat'])\n evaluation_stats.append(evaluation_stat)\n\n function_values_reshaped = np.array(function_values)\n perturbations_reshaped = np.array(perturbations)\n\n logging.info('LIST OF FUNCTION VALUES')\n logging.info(function_values_reshaped)\n\n logging.info('MAX VALUE SEEN CURRENTLY')\n logging.info(np.max(function_values_reshaped))\n\n logging.info('MEAN OF VALUES')\n logging.info(np.mean(function_values_reshaped))\n\n if logging_data is not None:\n iteration = logging_data['iteration']\n best_value = logging_data['best_value']\n iteration = logging_data['iteration']\n best_input = logging_data['best_input']\n best_core_hyperparameters = logging_data['best_core_hyperparameters']\n optimizer_state = blackbox_optimizer.get_state()\n\n if current_value_exact > best_value[0]:\n best_value[0] = current_value_exact\n best_input = current_input\n best_core_hyperparameters = core_hyperparameters\n\n # Writing logs.\n if iteration % config.log_frequency == 0:\n util.log_row(config.params_file, current_input)\n util.log_row(config.best_params_file, best_input)\n util.log_row(config.best_core_hyperparameters_file,\n best_core_hyperparameters)\n util.log_row(config.best_value_file, best_value)\n util.log_row(config.optimizer_internal_state_file, optimizer_state)\n util.log_row(config.current_values_list_file, [current_value_exact])\n util.log_row(config.best_values_list_file, [best_value[0]])\n util.log_row(config.fvalues_file, function_values_reshaped)\n util.log_row(config.iteration_file, [iteration])\n\n print('Current exact value estimate:')\n print(current_value_exact)\n sys.stdout.flush()\n\n new_current_input = blackbox_optimizer.run_step(perturbations_reshaped,\n function_values_reshaped,\n current_input,\n current_value_exact)\n config.controller.collect_rewards_and_train(rewards_for_controller,\n finished_dnas)\n\n evaluation_stats_reduced = [sum(x) for x in zip(*evaluation_stats)]\n blackbox_optimizer.update_state(evaluation_stats_reduced)\n\n return [True, new_current_input]",
"def solve(self, **kwargs) -> OptimizationResult:\n raise NotImplementedError",
"def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit"
] | [
"0.651425",
"0.60995185",
"0.59783125",
"0.5357869",
"0.5349476",
"0.53483707",
"0.5300958",
"0.5280729",
"0.5255571",
"0.51577455",
"0.51000774",
"0.50822264",
"0.5080444",
"0.50465363",
"0.50355375",
"0.50104886",
"0.4993036",
"0.49904832",
"0.49328518",
"0.49109367",
"0.49103644",
"0.49053922",
"0.48841238",
"0.48546988",
"0.4821072",
"0.48169795",
"0.47867662",
"0.4776207",
"0.4774509",
"0.47604144"
] | 0.77414405 | 0 |
Optimizes the current library to maximize the result of the target function using simulated annealing. By default, the function returns the best value and the associated interaction matrix as the result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how many optimization steps to try. `ret_info` determines whether extra information is returned from the optimization. `args` is a dictionary of additional arguments that is passed to the target function. | def optimize_library_anneal(self, target, direction='max', steps=100,
ret_info=False, args=None):
# lazy import
from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport
# prepare the class that manages the simulated annealing
annealer = ReceptorOptimizerAnnealer(self, target, direction, args,
ret_info=ret_info)
annealer.steps = int(steps)
annealer.Tmax = self.parameters['anneal_Tmax']
annealer.Tmin = self.parameters['anneal_Tmin']
if self.parameters['verbosity'] == 0:
annealer.updates = 0
# do the optimization
MI, state = annealer.optimize()
# sort the best state and store it in the current object
state = self.sort_sensitivity_matrix(state)
self.sens_mat = state.copy()
if ret_info:
return MI, state, annealer.info
else:
return MI, state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n target_function = functools.partial(target_function, **args)\n\n # initialize the optimizer\n value = target_function()\n value_best, state_best = value, self.sens_mat.copy()\n \n if ret_info:\n # store extra information\n start_time = time.time()\n info = {'values': {}}\n values_count = self.parameters['optimizer_values_count']\n values_step = max(1, steps // values_count)\n \n if multiprocessing:\n # run the calculations in multiple processes\n pool_size = self.get_number_of_cores()\n pool = mp.Pool(processes=pool_size)\n if ret_info:\n values_step = max(1, values_step // pool_size)\n \n # iterate for given number of steps\n for step in range(int(steps) // pool_size):\n joblist = []\n init_arguments = self.init_arguments\n for _ in range(pool_size):\n # modify the current state and add it to the job list\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n params = init_arguments['parameters'] \n params['sensitivity_matrix'] = self.sens_mat\n params['initialize_state']['sensitivity'] = 'exact'\n \n joblist.append((copy.deepcopy(init_arguments), target))\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # run all the jobs\n results = pool.map(_run_job, joblist)\n \n # find the best result \n if direction == 'max':\n res_best = np.argmax(results)\n if results[res_best] > value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n elif direction == 'min':\n res_best = np.argmin(results)\n if results[res_best] < value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n else:\n raise ValueError('Unsupported direction `%s`' % direction)\n \n if ret_info and step % values_step == 0:\n info['values'][step * pool_size] = results[res_best]\n \n else:\n # run the calculations in this process\n for step in range(int(steps)):\n # modify the current state\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # get the value of the new state\n value = target_function()\n \n improved = ((direction == 'max' and value > value_best) or\n (direction == 'min' and value < value_best))\n if improved:\n # save the state as the new best value\n value_best, state_best = value, self.sens_mat.copy()\n else:\n # undo last change\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n if ret_info and step % values_step == 0:\n info['values'][step] = value_best\n\n # sort the best state and store it in the current object\n state_best = self.sort_sensitivity_matrix(state_best)\n self.sens_mat = state_best.copy()\n\n if ret_info:\n info['total_time'] = time.time() - start_time \n info['states_considered'] = steps\n info['performance'] = steps / info['total_time']\n return value_best, state_best, info\n else:\n return value_best, state_best",
"def optimize_library_descent_multiple(self, target, direction='max',\n trials=4, multiprocessing=False,\n ret_error=False, **kwargs):\n \n # pass some parameters down to the optimization function to call\n kwargs['target'] = target\n kwargs['direction'] = direction\n \n # initialize the list of jobs with an optimization job starting from the\n # current interaction matrix\n joblist = [(self.init_arguments, 'optimize_library_descent', kwargs)]\n sens_mat = self.sens_mat #< store matrix to restore it later\n\n # set the ensemble of sensitivity matrices to try\n self.choose_sensitivity_matrix(density='auto')\n self.parameters['initialize_state']['sensitivity'] = 'ensemble'\n\n # add additional jobs with random initial interaction matrices\n init_arguments = self.init_arguments\n for _ in range(trials - 1):\n joblist.append((copy.deepcopy(init_arguments),\n 'optimize_library_descent', kwargs))\n \n # restore interaction matrix of this object\n self.sens_mat = sens_mat\n \n if multiprocessing:\n # calculate all results in parallel\n pool = mp.Pool(processes=self.get_number_of_cores())\n result_iter = pool.imap_unordered(_run_job, joblist)\n \n else:\n # create a generator over which we iterate later\n result_iter = (_run_job(job) for job in joblist)\n \n # find the best result by iterating over all results\n result_best, values = None, []\n for result in result_iter:\n values.append(result[0])\n # check whether this run improved the result\n if result_best is None:\n result_best = result\n elif ((direction == 'max' and result[0] > result_best[0]) or\n (direction == 'min' and result[0] < result_best[0])):\n result_best = result\n \n # sort the best state and store it in the current object\n state = self.sort_sensitivity_matrix(result_best[1])\n self.sens_mat = state.copy()\n\n if ret_error:\n # replace the best value by a tuple of the best value and its error\n value_best = result_best[0]\n value_err = np.abs(value_best - np.median(values))\n result_best = ((value_best, value_err), ) + result_best[1:]\n return result_best",
"def optimize_library(self, target, method='descent', direction='max',\n **kwargs):\n if method == 'descent':\n return self.optimize_library_descent(target, direction, **kwargs)\n elif method == 'descent_multiple' or method == 'descent-multiple':\n return self.optimize_library_descent_multiple(target, direction,\n **kwargs)\n elif method == 'anneal':\n return self.optimize_library_anneal(target, direction, **kwargs)\n \n else:\n raise ValueError('Unknown optimization method `%s`' % method)",
"def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par",
"def simulatedAnnealing(problem, maxSteps, userInteraction, beQuiet):\n\n import random\n from math import exp\n\n currentState = problem.state\n steps = 0\n bestYet = currentState\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n\n while steps<maxSteps:\n if problem.isGlobalOptimum(currentState):\n return steps, bestYet\n temperature = tempSchedule(steps, maxSteps)\n # print(temperature)\n if temperature == 0:\n return currentState\n neighbour = problem.getRandomNeighbour(currentState)\n changeInObj = problem.getObjValue(neighbour) - \\\n problem.getObjValue(currentState)\n if changeInObj > 0:\n # if the neighbour is better, jump\n currentState = neighbour\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n print(\"Greedy step taken.\")\n problem.visualize(currentState)\n steps+=1\n\n currentVal = problem.getObjValue(currentState)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(currentVal, bestYetVal):\n bestYet = currentState\n\n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n\n else:\n # if the neighbour is worse, jump with some probability\n if random.random() < exp(-1*changeInObj/temperature):\n \n currentState = neighbour\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n print(\"Step in a worse direction taken.\")\n problem.visualize(currentState)\n steps+=1\n\n currentVal = problem.getObjValue(currentState)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(currentVal, bestYetVal):\n bestYet = currentState\n\n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n return steps, bestYet",
"def process_solve_kwargs(**kwargs):\n\n tol = kwargs.get('tol', DEFAULT_TOL)\n maxiter = kwargs.get('maxiter', MAX_ITER)\n Ainv = kwargs.get('Ainv', None)\n verbose = kwargs.get('verbose', False)\n\n if VERBOSE:\n print(\"tol:\", tol)\n print(\"maxiter:\", maxiter)\n print(\"Ainv:\", Ainv)\n\n return tol, int(maxiter), Ainv, verbose",
"def __call__(self, origin, state, **kwargs):\n fun = kwargs['function']\n d = state['direction']/np.linalg.norm(state['direction'])\n # filter directions that are too large\n if self.filter:\n ndabs_log = -np.log10(np.abs(d))\n mean_log = np.mean(ndabs_log)\n #print \"\\n ** MEAN =\", mean_log\n direction = (ndabs_log > mean_log-1.5).astype(int)*d\n else:\n direction = d\n state['direction'] = direction\n## for pos, d in enumerate(direction):\n## use_dir = self.use_dirs[pos]\n## if use_dir * d < 0:\n## # directions don't match so don't move in this direction\n## direction[pos] = 0\n maxStepSize = self.maxStepSize\n if np.isscalar(maxStepSize):\n stepSize = maxStepSize\n else:\n stepfacs = np.zeros(self.dim)\n for d in range(self.dim):\n # explicit loop so as to catch any ZeroDivisionErrors\n try:\n stepfacs[d] = abs(maxStepSize[d] / direction[d])\n except ZeroDivisionError:\n # Direction is orthogonal to this parameter direction,\n # so ensure won't choose this as the minimum step size\n stepfacs[d] = Inf\n # Stop stepping with giant sizes if direction vector has strong\n # separation of scales\n stepSize = min(stepfacs)\n# print \"direction = \", direction\n# print \"step = \", step\n i = 1\n old_value = state['old_value']\n not_done = True\n# print \"** TEMP: Hardwiring step size to be 0.0005\"\n# stepSize = 0.0005\n init_step = stepSize\n while not_done:\n print(\"\\nLinestep: i =\", i, \"step size =\", stepSize, \"direction =\\n\", end='')\n print(direction)\n p = origin + i * stepSize * direction\n print(\"Testing p = \", p)\n new_value = fun(p)\n if new_value < old_value:\n i += 1\n old_value = new_value\n else:\n if i == 1:\n # don't shrink step size to be less than 1/maxReduceFac of initial\n if stepSize*self.maxReduceFac < init_step:\n not_done = False\n p = origin + (i-1) * stepSize * direction\n else:\n stepSize /= self.stepMod\n else:\n # had found a working step but it's no longer stepping to lower residuals\n not_done = False\n p = origin + (i-1) * stepSize * direction\n state['alpha_step'] = stepSize\n return p",
"def opt_wrapper(m, **kwargs):\r\n m.optimize(**kwargs)\r\n return m.optimization_runs[-1]",
"def opt_wrapper(m, **kwargs):\n m.optimize(**kwargs)\n return m.optimization_runs[-1]",
"def MAXED(N, sigma2, R, f_def, params):\n\n # pull out algorithm-specific parameters\n Omega = params['Omega']\n\n # create the function that we will maximize, Z\n def Z(lam, N, sigma2, R, f_def, Omega):\n \"\"\"A function, the maximization of which is equivalent to the\n maximization of \"\"\"\n\n A = - np.sum(f_def * np.exp(- np.sum((lam * R.T).T, axis=0)))\n B = - (Omega * np.sum(lam**2 * sigma2))**(0.5)\n C = - np.sum(N * lam)\n\n # negate because it's a minimization\n return - (A + B + C)\n\n # create a lambda\n lam = np.ones(len(N))\n\n # apply the simulated annealing to the Z\n mk = {'args': (N, sigma2, R, f_def, Omega)}\n lam = basinhopping(Z, lam, minimizer_kwargs=mk).x\n\n # back out the spectrum values from the lam\n return f_def * np.exp(-np.sum((lam * R.T).T, axis=0))",
"def test_optimalagentfinder () :\n def valNetwork (s) : \n s = s.float()\n v = reduce(model.withReluDropout, model.v[:-1], s)\n v = model.v[-1](v)\n return v\n acrobotBases = acrobotRewardBases(np.pi / 8, np.pi / 8)\n fn = random.sample(acrobotBases, k=1).pop()\n agent = findOptimalAgent(fn)\n model = agent.model\n toExternal = lambda x, y : toExternalStateRep([x, y, 0, 0])\n valFn = reduce(compose, [float, valNetwork, torch.tensor, toExternal])\n RFn = compose(fn, toExternal)\n xRange = np.arange(-np.pi, np.pi, 0.1)\n yRange = np.arange(-np.pi, np.pi, 0.1)\n plotFunction(RFn, xRange, yRange, 'theta1', 'theta2', 'R')\n plotFunction(valFn, xRange, yRange, 'theta1', 'theta2', 'V')",
"def Optimization(*args, **kwargs):\n from warnings import warn\n\n warn(\n \"Optimization has been renamed to OptimizationResult and will be removed as soon as v0.13.0\", DeprecationWarning\n )\n return OptimizationResult(*args, **kwargs)",
"def optimize(self):\n \n if self.verbose:\n print('Starting grid search with bounds: [' + \\\n ';'.join(['%5g to %5g']*len(self.steps))%tuple([(self.steps[i][0], self.steps[i][-1]) for i in range(len(self.steps))]) +']')\n\n for params in self._get_next_point():\n self.transform.set_params(params)\n\n v, _ = self.measure.value_and_derivatives(self.transform)\n\n if v < self.best_value:\n self.best_value = v\n self.best_params = params\n# print('New best value %2.4f at ('%v, ', '.join(['%8.3f']*len(params))%tuple(params), ')')\n\n self.value_history.append(v)\n self.last_value = v\n self.iteration += 1\n\n if self.report_freq > 0 and (self.iteration % self.report_freq == 0) and self.report_func is not None:\n self.report_func(self)\n\n # Set the best transform\n self.transform.set_params(self.best_params)\n self.last_value = self.best_value\n return self.best_value",
"def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best",
"def nn_ga_optimise_from_args(func_caller, worker_manager, max_capital, mode, mutation_op,\n crossover_op=None, options=None, reporter='default'):\n if options is None:\n reporter = get_reporter(reporter)\n options = load_options(ga_opt_args, reporter=reporter)\n options.mode = mode\n return (NNGAOptimiser(func_caller, worker_manager, mutation_op, crossover_op,\n options, reporter)).optimise(max_capital)",
"def produce_max(self, *args, **kwargs):\n raise NotImplementedError('This interaction has no produce_max method yet!')",
"def run_step_blackbox_optimizer(config,\n current_input,\n blackbox_optimizer,\n proposed_perturbations,\n finished_dnas,\n results,\n logging_data=None):\n core_hyperparameters = blackbox_optimizer.get_hyperparameters()\n function_values = [0.0] * len(proposed_perturbations)\n rewards_for_controller = []\n perturbations = proposed_perturbations\n evaluation_stats = []\n current_value_exact = 0.0\n current_value_exact_counter = 0\n\n for i in range(len(results)):\n rewards_for_controller.append(results[i]['function_value'])\n tag = results[i]['tag']\n index = 0\n if tag > 0:\n if config.est_type == 'antithetic':\n index = (tag - 1) * 2\n function_values[index] += results[i]['function_value']\n else:\n index = tag - 1\n function_values[index] += results[i]['function_value']\n if tag < 0:\n index = (-tag - 1) * 2 + 1\n function_values[index] += results[i]['function_value']\n if tag == 0:\n current_value_exact += results[i]['function_value']\n current_value_exact_counter += 1\n current_value_exact /= float(current_value_exact_counter)\n\n for result in results:\n evaluation_stat = list(result['evaluation_stat'])\n evaluation_stats.append(evaluation_stat)\n\n function_values_reshaped = np.array(function_values)\n perturbations_reshaped = np.array(perturbations)\n\n logging.info('LIST OF FUNCTION VALUES')\n logging.info(function_values_reshaped)\n\n logging.info('MAX VALUE SEEN CURRENTLY')\n logging.info(np.max(function_values_reshaped))\n\n logging.info('MEAN OF VALUES')\n logging.info(np.mean(function_values_reshaped))\n\n if logging_data is not None:\n iteration = logging_data['iteration']\n best_value = logging_data['best_value']\n iteration = logging_data['iteration']\n best_input = logging_data['best_input']\n best_core_hyperparameters = logging_data['best_core_hyperparameters']\n optimizer_state = blackbox_optimizer.get_state()\n\n if current_value_exact > best_value[0]:\n best_value[0] = current_value_exact\n best_input = current_input\n best_core_hyperparameters = core_hyperparameters\n\n # Writing logs.\n if iteration % config.log_frequency == 0:\n util.log_row(config.params_file, current_input)\n util.log_row(config.best_params_file, best_input)\n util.log_row(config.best_core_hyperparameters_file,\n best_core_hyperparameters)\n util.log_row(config.best_value_file, best_value)\n util.log_row(config.optimizer_internal_state_file, optimizer_state)\n util.log_row(config.current_values_list_file, [current_value_exact])\n util.log_row(config.best_values_list_file, [best_value[0]])\n util.log_row(config.fvalues_file, function_values_reshaped)\n util.log_row(config.iteration_file, [iteration])\n\n print('Current exact value estimate:')\n print(current_value_exact)\n sys.stdout.flush()\n\n new_current_input = blackbox_optimizer.run_step(perturbations_reshaped,\n function_values_reshaped,\n current_input,\n current_value_exact)\n config.controller.collect_rewards_and_train(rewards_for_controller,\n finished_dnas)\n\n evaluation_stats_reduced = [sum(x) for x in zip(*evaluation_stats)]\n blackbox_optimizer.update_state(evaluation_stats_reduced)\n\n return [True, new_current_input]",
"def run_migrad(self, fitarg, **kwargs):\n self.fitarg = fitarg\n kwargs['up'] = 1.\n\n\n logging.debug(self._par_names)\n logging.debug(self.__wrap_likelihood(list(fitarg['pinit'].values())))\n\n if kwargs['scipy']:\n self._res = op.minimize(self.__wrap_likelihood,\n list(fitarg['pinit'].values()),\n bounds=list(fitarg['limits'].values()),\n method='TNC',\n #method='Powell',\n options={'maxiter': kwargs['ncall']} #'xtol': 1e-20, 'eps' : 1e-20, 'disp': True}\n #tol=None, callback=None,\n #options={'disp': False, 'minfev': 0, 'scale': None,\n #'rescale': -1, 'offset': None, 'gtol': -1,\n #'eps': 1e-08, 'eta': -1, 'maxiter': kwargs['ncall'],\n #'maxCGit': -1, 'mesg_num': None, 'ftol': -1, 'xtol': -1, 'stepmx': 0,\n #'accuracy': 0}\n )\n logging.info(self._res)\n for i, k in enumerate(self._par_names):\n fitarg[k] = self._res.x[i]\n\n logging.debug(fitarg)\n\n cmd_string = \"lambda {0}: self.__calcLikelihood({0})\".format(\n (\", \".join(self._par_names), \", \".join(self._par_names)))\n\n string_args = \", \".join(self._par_names)\n global f # needs to be global for eval to find it\n f = lambda *args: self.__calc_likelihood(*args)\n\n cmd_string = \"lambda %s: f(%s)\" % (string_args, string_args)\n logging.debug(cmd_string)\n\n # work around so that the parameters get names for minuit\n self._minimize_f = eval(cmd_string, globals(), locals())\n self._minimize_f.errordef = minuit.Minuit.LEAST_SQUARES\n\n self._m = minuit.Minuit(self._minimize_f,\n #list(fitarg['pinit'].values()),\n **fitarg['pinit'],\n #names=self._par_names\n )\n# print_level=kwargs['verbosity'],\n# errordef=kwargs['up'],\n# pedantic=kwargs['pedantic'],\n #**fitarg)\n\n for p in self._par_names:\n self._m.fixed[p] = fitarg['fix'][p]\n self._m.limits[p] = fitarg['limits'][p]\n self._m.errors[p] = fitarg['error'][p]\n\n self._m.tol = kwargs['tol']\n self._m.strategy = kwargs['strategy']\n\n logging.debug(\"tol {0:.2e}, strategy: {1:n}\".format(\n self._m.tol, self._m.strategy.strategy))\n\n self._m.migrad(ncall=kwargs['ncall']) #, precision = kwargs['precision'])",
"def solve(self, **kwargs) -> OptimizationResult:\n raise NotImplementedError",
"def optimize(self, objective_sense=None, **kwargs):\n\n if objective_sense:\n self.objective.direction = objective_sense\n\n try:\n # self._hidden_optimize_call(kwargs)\n Model.optimize(self, **kwargs)\n solution = self.get_solution()\n self.solution = solution\n return solution\n except SolverError as SE:\n status = self.solver.status\n self.logger.error(SE)\n self.logger.warning('Solver status: {}'.format(status))\n raise (SE)",
"def getMaximum(self, arguments, maximum):\n\n self.sequence.append({\"type\": \"findingMaximum\", \"coords\": arguments})\n\n self.sequence.append({\"type\": \"foundMaximum\", \"coord\": maximum})",
"def maximize(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'max',method,**kwargs)",
"def _random_max_wrap(*args):\n _, opt_pt = random_maximise(*args)\n return opt_pt",
"def minimize_neldermead(func, x0, args=(), callback=None,\n maxiter=None, maxfev=None, disp=False,\n return_all=False, initial_simplex=None,\n xatol=1e-4, fatol=1e-4, **unknown_options):\n maxfun = maxfev\n retall = return_all\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n\n if initial_simplex is None:\n N = len(x0)\n\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n\n maxiter = 10\n maxfun = 10\n\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n # sort so sim[0,:] has the lowest function value\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n\n iterations = 1\n\n while iterations < maxiter:\n if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and\n numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0,\n status=warnflag, success=(warnflag == 0),\n message=None, x=x, final_simplex=(sim, fsim))\n return result",
"def expectimax_move(game, method='score'):\n\n if method == 'score':\n def val(g):\n return g[1]\n elif method == 'empty':\n val = empty_squares\n elif method == 'gradient':\n val = gradient_value\n else:\n print('Invalid method given to expectimax function')\n exit(1)\n\n _, move = expectimax(game, 2, val)\n return move",
"def optimize(self, max_iters=1e3, messages=False, use_counter=False,\\\n factr=1e7, pgtol=1e-05):\n logger.debug('Beginning MLE to optimize hyperparams. grad_method=%s'\\\n % self.grad_method)\n\n # setup the optimization\n try:\n x0 = self._transform_parameters(self.parameters)\n assert np.all(np.isfinite(x0))\n except:\n logger.error('Transformation failed for initial values. '\\\n + 'Ensure constraints are met or the value is not too small.')\n raise\n\n # filter out the fixed parameters\n free = np.logical_not(self._fixed_indicies)\n x0 = x0[free]\n\n # setup the counter\n if use_counter:\n self._counter = solver_counter(disp=True)\n else:\n self._counter = None\n\n # run the optimization\n try:\n x_opt, f_opt, opt = fmin_l_bfgs_b(func=self._objective_grad, x0=x0,\\\n factr=factr, pgtol=pgtol, maxiter=max_iters, disp=messages)\n except (KeyboardInterrupt,IndexError):\n logger.info('Keyboard interrupt raised. Cleaning up...')\n if self._counter is not None and self._counter.backup is not None:\n self.parameters = self._counter.backup[1]\n logger.info('will return best parameter set with'\\\n + 'log-likelihood = %.4g' % self._counter.backup[0])\n else:\n logger.info('Function Evals: %d. Exit status: %s' % (f_opt, opt['warnflag']))\n # extract the optimal value and set the parameters to this\n transformed_parameters = self._previous_parameters \n transformed_parameters[free] = x_opt\n self.parameters = self._untransform_parameters(transformed_parameters)\n return opt",
"def poll(target, step, args=(), kwargs=None, timeout=None, max_tries=None, check_success=is_truthy,\n step_function=step_constant, ignore_exceptions=(), poll_forever=False, collect_values=None, *a, **k):\n\n logging.info('Starting Polling')\n\n assert (timeout is not None or max_tries is not None) or poll_forever, \\\n ('You did not specify a maximum number of tries or a timeout. Without either of these set, the polling '\n 'function will poll forever. If this is the behavior you want, pass \"poll_forever=True\"')\n\n assert not ((timeout is not None or max_tries is not None) and poll_forever), \\\n 'You cannot specify both the option to poll_forever and max_tries/timeout.'\n\n kwargs = kwargs or dict()\n values = collect_values or Queue()\n\n max_time = Time.time() + timeout if timeout else None\n tries = 0\n logging.debug('Max Time: ' + str(max_time))\n logging.debug('Max Tries: ' + str(max_tries))\n\n last_item = None\n while True:\n\n if max_tries is not None and tries >= max_tries:\n raise MaxCallException(values, last_item)\n\n try:\n logging.debug('Arguments: ' + str(args))\n val = target(*args, **kwargs)\n logging.debug('Results from Target running: ' + str(val))\n last_item = val\n logging.info('Try #:' + str(tries))\n DisplayMessage('Try #:' + str(tries))\n except ignore_exceptions as e:\n logging.error(str(e))\n last_item = e\n else:\n # Condition passes, this is the only \"successful\" exit from the polling function\n if check_target_success(val):\n return val\n\n logging.debug('last result from poll: ' + str(last_item))\n # Condition passes, this is the only \"successful\" exit from the polling function\n if check_target_success(val):\n return val\n else:\n values.put(last_item)\n tries += 1\n # Check the time after to make sure the poll function is called at least once\n if max_time is not None and Time.time() >= max_time:\n # raise TimeoutException(values, last_item)\n logging.info('Time out reached.')\n logging.info('Checking status of job: ' + val)\n logging.info('Job will now sleep for an additional: ' + step)\n logging.debug('Step value: ' + str(step))\n Time.sleep(step)\n step = step_function(step)",
"def optimise_fn(self, x):\n\n success = self._set_material_parameters(x)\n if not success:\n return self._bad_metric()\n\n # some iterations are repeated so cache the results to avoid unnecessary iterations\n cached_result_key = tuple(x)\n metric_value = self.cached_results.get(cached_result_key)\n\n if metric_value is None:\n print('--> Optimiser: {}'.format(self.material_model))\n\n sim_result = fs.run_simulation(stoma_cfg=self.stoma_cfg,\n from_optimiser=True)\n\n # when the simulation fails we want a non-constant measure for the optimiser to use\n metric_value = sim_result.metric_value if sim_result.success else self._bad_metric()\n\n self.cached_results[cached_result_key] = metric_value\n\n print('--> Optimiser: {} - metric={}'.format(self.material_model, metric_value))\n else:\n print('--> Optimiser: {} - metric={} (cached result)'.format(self.material_model, metric_value))\n\n return metric_value",
"def max_(*args, **kwargs):\n ...",
"def test_maximax():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.maximax(f, maximise=True)\n expected = np.asarray(\n [1.0, 0.69])\n assert np.allclose(R, expected)\n R = common_metrics.maximax(f, maximise=False)\n expected = np.asarray(\n [-0.5, -0.6])\n assert np.allclose(R, expected)"
] | [
"0.70528316",
"0.59501183",
"0.5642754",
"0.5085643",
"0.50599",
"0.5013388",
"0.5011954",
"0.5007961",
"0.5003236",
"0.49108404",
"0.48720664",
"0.48720354",
"0.48174542",
"0.4795054",
"0.47906741",
"0.47627977",
"0.47517008",
"0.46950796",
"0.46797842",
"0.46576664",
"0.464577",
"0.46434075",
"0.46383366",
"0.45940447",
"0.4570762",
"0.45672405",
"0.4563032",
"0.45611793",
"0.4514062",
"0.45022818"
] | 0.72034293 | 0 |
generator function that samples mixtures according to the `model`. `steps` determines how many mixtures are sampled. `dtype` determines the dtype of the resulting concentration vector | def _sample_binary_mixtures(model, steps, dtype=np.uint):
mixture_size = model.parameters['fixed_mixture_size']
if not model.is_correlated_mixture and mixture_size is None:
# use simple monte carlo algorithm
prob_s = model.substrate_probabilities
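        # uncorrelated case: substrate i enters each sampled mixture independently
        # with probability prob_s[i]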
for _ in range(int(steps)):
# choose a mixture vector according to substrate probabilities
yield (np.random.random(model.Ns) < prob_s).astype(dtype)
elif mixture_size is None:
# go through all mixtures and don't keep the size constant
# use metropolis algorithm
hi = model.commonness
Jij = model.correlations
# start with a random concentration vector
c = np.random.randint(0, 2, model.Ns).astype(dtype)
E_last = -np.dot(np.dot(Jij, c) + hi, c)
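        # energy E(c) = -(c.Jij.c + hi.c); accepting moves with probability
        # min(1, exp(E_last - Ei)) below samples mixtures proportional to exp(-E(c))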
for _ in range(int(steps)):
i = random.randrange(model.Ns)
c[i] = 1 - c[i] #< switch the entry
Ei = -np.dot(np.dot(Jij, c) + hi, c)
if Ei < E_last or random.random() < np.exp(E_last - Ei):
# accept the new state
E_last = Ei
else:
# reject the new state and revert to the last one
c[i] = 1 - c[i]
yield c
elif mixture_size == 0:
# special case which is not covered by the iteration below
c_zero = np.zeros(model.Ns, dtype)
        for _ in range(int(steps)):
yield c_zero
elif mixture_size == model.Ns:
# special case which is not covered by the iteration below
c_ones = np.ones(model.Ns, dtype)
        for _ in range(int(steps)):
yield c_ones
else:
# go through mixtures with keeping their size constant
# use metropolis algorithm
hi = model.commonness
Jij = model.correlations
# create random concentration vector with fixed substrate count
c = np.r_[np.ones(mixture_size, dtype),
np.zeros(model.Ns - mixture_size, dtype)]
np.random.shuffle(c)
E_last = -np.dot(np.dot(Jij, c) + hi, c)
for _ in range(int(steps)):
# find the next mixture by swapping two items
i0 = random.choice(np.flatnonzero(c == 0)) #< find 0
i1 = random.choice(np.flatnonzero(c)) #< find 1
c[i0], c[i1] = 1, 0 #< swap entries
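            # the 0 <-> 1 swap keeps exactly `mixture_size` substrates present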
Ei = -np.dot(np.dot(Jij, c) + hi, c)
if Ei < E_last or random.random() < np.exp(E_last - Ei):
# accept the new state
E_last = Ei
else:
# reject the new state and revert to the last one
c[i0], c[i1] = 0, 1
yield c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _sample_mixtures(self, steps=None, dtype=np.uint):\n if steps is None:\n steps = self._sample_steps\n \n return _sample_binary_mixtures(self, steps, dtype)",
"def _iterate_mixtures(self):\n \n if self._iterate_steps > self.parameters['max_steps']:\n raise RuntimeError('The iteration would take more than %g steps'\n % self.parameters['max_steps'])\n \n hi = self.commonness\n Jij = self.correlations\n\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n # iterate over all mixtures\n for c in itertools.product((0, 1), repeat=self.Ns):\n c = np.array(c, np.uint8)\n weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))\n yield c, weight_c\n \n elif mixture_size == 0:\n # special case which is not covered by the iteration below\n yield np.zeros(self.Ns, np.uint8), 1\n \n elif mixture_size == self.Ns:\n # special case which is not covered by the iteration below\n yield np.ones(self.Ns, np.uint8), 1\n \n else:\n # iterate over all mixtures with constant number of substrates\n c = np.zeros(self.Ns, np.uint8)\n for nz in itertools.combinations(range(self.Ns), mixture_size):\n c[:] = 0\n c[np.array(nz)] = 1\n weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))\n yield c, weight_c",
"def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r",
"def generate_data(model, n_batches=5, samples_per_batch=200):\n\tcalcium, spec, yn_samples, yb_samples = [], [], [], []\n\twith torch.no_grad():\n\t\tfor i in range(n_batches):\n\t\t\tz_sample = torch.randn(samples_per_batch,1,1,model.z_dim).to(model.device)\n\t\t\tyn_sample = model.sample_yn(z_sample)\n\t\t\tyb_sample = model.sample_yb(z_sample)\n\t\t\tc_μ = model.decode_calcium(yn_sample).squeeze()\n\t\t\tif model.model_type == 'sparse_poe_finch':\n\t\t\t\tspec_rec, _ = model.decode_spec(yb_sample)\n\t\t\telse:\n\t\t\t\tspec_rec = model.decode_spec(yb_sample)\n\t\t\tspec_rec = spec_rec.squeeze()\n\t\t\tcalcium.append(c_μ)\n\t\t\tspec.append(spec_rec)\n\t\t\tyn_samples.append(yn_sample.squeeze(1).squeeze(1))\n\t\t\tyb_samples.append(yb_sample.squeeze(1).squeeze(1))\n\t\tcalcium = torch.cat(calcium, dim=0).detach().cpu().numpy()\n\t\tspec = torch.cat(spec, dim=0).detach().cpu().numpy()\n\t\tyn_samples = torch.cat(yn_samples, dim=0).detach().cpu().numpy()\n\t\tyb_samples = torch.cat(yb_samples, dim=0).detach().cpu().numpy()\n\treturn calcium, spec, yn_samples, yb_samples",
"def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def seq_data_iter_sequential(corpus, batch_size, num_steps):\n # Start with a random offset to partition a sequence\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y",
"def _sample_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if not self.is_correlated_mixture and mixture_size is None:\n return self.get_steps('monte_carlo')\n else:\n return self.get_steps('metropolis')",
"def seq_data_iter_random(corpus, batch_size, num_steps): #@save\n # Start with a random offset (inclusive of `num_steps - 1`) to partition a\n # sequence\n corpus = corpus[random.randint(0, num_steps - 1):]\n # Subtract 1 since we need to account for labels\n num_subseqs = (len(corpus) - 1) // num_steps\n # The starting indices for subsequences of length `num_steps`\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n # In random sampling, the subsequences from two adjacent random\n # minibatches during iteration are not necessarily adjacent on the\n # original sequence\n random.shuffle(initial_indices)\n\n def data(pos):\n # Return a sequence of length `num_steps` starting from `pos`\n return corpus[pos: pos + num_steps]\n\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n # Here, `initial_indices` contains randomized starting indices for\n # subsequences\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)",
"def create_mixture_csv(data_type):\n \n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n magnification = config.magnification\n fs = config.sample_rate\n \n speech_names = [na for na in os.listdir(speech_dir) if na.lower().endswith(\".wav\")]\n noise_names = [na for na in os.listdir(noise_dir) if na.lower().endswith(\".wav\")]\n \n rs = np.random.RandomState(0)\n out_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n create_folder(os.path.dirname(out_csv_path))\n \n cnt = 0\n f = open(out_csv_path, 'w')\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (\"speech_name\", \"noise_name\", \"noise_onset\", \"noise_offset\"))\n for speech_na in speech_names:\n # Read speech. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path)\n len_speech = len(speech_audio)\n \n # For training data, mix each speech with randomly picked #magnification noises. \n if data_type == 'train':\n selected_noise_names = rs.choice(noise_names, size=magnification, replace=False)\n # For test data, mix each speech with all noises. \n elif data_type == 'test':\n selected_noise_names = noise_names\n else:\n raise Exception(\"data_type must be train | test!\")\n\n # Mix one speech with different noises many times. \n for noise_na in selected_noise_names:\n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path)\n \n len_noise = len(noise_audio)\n\n if len_noise <= len_speech:\n noise_onset = 0\n nosie_offset = len_speech\n # If noise longer than speech then randomly select a segment of noise. \n else:\n noise_onset = rs.randint(0, len_noise - len_speech, size=1)[0]\n nosie_offset = noise_onset + len_speech\n \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n f.write(\"%s\\t%s\\t%d\\t%d\\n\" % (speech_na, noise_na, noise_onset, nosie_offset))\n f.close()\n print(out_csv_path)\n print(\"Create %s mixture csv finished!\" % data_type)",
"def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r",
"def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"",
"def calculate_mixture_features(data_type):\n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n \n fs = config.sample_rate\n \n if data_type == 'train':\n snr = config.Tr_SNR\n elif data_type == 'test':\n snr = config.Te_SNR \n else:\n raise Exception(\"data_type must be train | test!\")\n \n \n # Open mixture csv. \n mixture_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n with open(mixture_csv_path, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n lis = list(reader)\n \n t1 = time.time()\n cnt = 0\n for i1 in range(1, len(lis)):\n [speech_na, noise_na, noise_onset, noise_offset] = lis[i1]\n noise_onset = int(noise_onset)\n noise_offset = int(noise_offset)\n \n # Read speech audio. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path, target_fs=fs)\n \n # Read noise audio. \n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path, target_fs=fs)\n \n # Repeat noise to the same length as speech. \n if len(noise_audio) < len(speech_audio):\n n_repeat = int(np.ceil(float(len(speech_audio)) / float(len(noise_audio))))\n noise_audio_ex = np.tile(noise_audio, n_repeat)\n noise_audio = noise_audio_ex[0 : len(speech_audio)]\n # Truncate noise to the same length as speech. \n else:\n noise_audio = noise_audio[noise_onset : noise_offset]\n \n # Scale speech to given snr. \n scaler = get_amplitude_scaling_factor(speech_audio, noise_audio, snr=snr)\n speech_audio *= scaler\n \n # Get normalized mixture, speech, noise. \n (mixed_audio, speech_audio, noise_audio, alpha) = additive_mixing(speech_audio, noise_audio)\n\n # Write out mixed audio. \n out_bare_na = os.path.join(\"%s.%s\" % \n (os.path.splitext(speech_na)[0], os.path.splitext(noise_na)[0]))\n out_audio_path = os.path.join(workspace, \"mixed_audios\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.wav\" % out_bare_na)\n create_folder(os.path.dirname(out_audio_path))\n write_audio(out_audio_path, mixed_audio, fs)\n\n # Extract spectrogram. \n mixed_complx_x = calc_sp(mixed_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n noise_x = calc_sp(noise_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.p\" % out_bare_na)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, noise_x, alpha, out_bare_na]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):\n block_size = model.get_block_size()\n model.eval()\n for k in range(steps):\n x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed\n logits, _ = model(x_cond)\n # pluck the logits at the final step and scale by temperature\n logits = logits[:, -1, :] / temperature\n # optionally crop probabilities to only the top k options\n if top_k is not None:\n logits = top_k_logits(logits, top_k)\n # apply softmax to convert to probabilities\n probs = F.softmax(logits, dim=-1)\n # sample from the distribution or take the most likely\n if sample:\n ix = torch.multinomial(probs, num_samples=1)\n else:\n _, ix = torch.topk(probs, k=1, dim=-1)\n # append to the sequence and continue\n x = torch.cat((x, ix), dim=1)\n\n return x",
"def gen(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = Variable(torch.from_numpy(seq))\n\n # The input includes an additional channel used for the delimiter\n inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)",
"def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = [email protected]()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)",
"def bootstrap_sample_generator_3D(samples: Union[NumpyFloatArray, NumpyIntArray]):\n n_samples = samples.shape[1]\n array_shape = samples.shape[1:]\n num_cols = samples.shape[2]\n cols = np.arange(num_cols)\n\n while True:\n _indices = np.random.randint(0, high=n_samples, size=array_shape)\n\n yield samples[:, _indices, cols]",
"def generate(model, n_samples, device, shape, levels):\n channels, height, width = shape[1], shape[2], shape[3]\n x_shapes = create_x_shapes(channels, height, width, levels)\n temperature = 0.7\n x_sample = []\n for ch, h, w in x_shapes:\n x_random = torch.randn(n_samples, ch, h, w) * temperature\n x_sample.append(x_random.to(device))\n x = model.reverse(x_sample)\n #x /= 0.6 # attempt to make it brighter, seen as rescaling it to reverse the effect of using temperature\n return x",
"def generate_samples(model, num_samples, latent):\n\n noise = tf.random.normal([num_samples, latent])\n samples = model(noise)[:, :, 0]\n return np.array(samples)",
"def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)",
"def generate(self, num_steps):\n music = []\n with tf.variable_scope('batch_size'):\n batch_size = tf.shape(self._inputs)[0]\n\n intro_states = []\n # Generators' forward pass\n for i in range(self.num_tracks):\n with tf.variable_scope(f'inputs/{self.tracks[i]}'):\n inputs = tf.concat([self._x_encoded[i], self._x_feedback], axis=-1)\n\n with tf.variable_scope(f'intro_state/{self.tracks[i]}'):\n state = self.generators[i].steps(inputs)\n intro_states.append(state)\n\n #\n with tf.variable_scope('feedback_sampler'):\n samples_h, _, _ = tf.scan(\n self._feedback_recurrence,\n tf.zeros((num_steps, 1)),\n initializer=(\n tf.zeros((batch_size, self._num_dims_generator, self.num_tracks)),\n intro_states,\n self._feedback_final_state)\n )\n\n with tf.variable_scope('samples/encoded/'):\n samples_h = tf.unstack(tf.transpose(samples_h, [1, 0, 2, 3]), axis=-1)\n\n for i in range(self.num_tracks):\n # Decoding inputs into the original format\n with tf.variable_scope(f'samples/{self.tracks[i]}/'):\n _, samples = self.encoders[i].decode(samples_h[i])\n\n music.append(samples)\n\n with tf.variable_scope('samples/'):\n return tf.stack(music, axis=3, name='music')",
"def generate_fake_samples(generator_model : Model, dataset : np.ndarray, patch_shape : int) -> np.ndarray:\n X = generator_model.predict(dataset)\n y = generate_patch_labels(len(X), patch_shape, label=0)\n return X, y",
"def __getitem__(self, item):\n c_ex = self.examples[item]\n # randomly select ref mic\n mics = [x for x in c_ex.keys()]\n if self.train:\n np.random.shuffle(mics) # randomly permute during training to change ref mics\n\n mixtures = []\n sources = []\n for i in range(len(mics)):\n c_mic = c_ex[mics[i]]\n\n if self.segment:\n offset = 0\n if c_mic[\"length\"] > int(self.segment * self.sample_rate):\n offset = np.random.randint(\n 0, c_mic[\"length\"] - int(self.segment * self.sample_rate)\n )\n\n # we load mixture\n mixture, fs = sf.read(\n c_mic[\"mixture\"],\n start=offset,\n stop=offset + int(self.segment * self.sample_rate),\n dtype=\"float32\",\n )\n spk1, fs = sf.read(\n c_mic[\"spk1\"],\n start=offset,\n stop=offset + int(self.segment * self.sample_rate),\n dtype=\"float32\",\n )\n spk2, fs = sf.read(\n c_mic[\"spk2\"],\n start=offset,\n stop=offset + int(self.segment * self.sample_rate),\n dtype=\"float32\",\n )\n else:\n mixture, fs = sf.read(c_mic[\"mixture\"], dtype=\"float32\") # load all\n spk1, fs = sf.read(c_mic[\"spk1\"], dtype=\"float32\")\n spk2, fs = sf.read(c_mic[\"spk2\"], dtype=\"float32\")\n\n mixture = torch.from_numpy(mixture).unsqueeze(0)\n spk1 = torch.from_numpy(spk1).unsqueeze(0)\n spk2 = torch.from_numpy(spk2).unsqueeze(0)\n\n assert fs == self.sample_rate\n mixtures.append(mixture)\n sources.append(torch.cat((spk1, spk2), 0))\n\n mixtures = torch.cat(mixtures, 0)\n sources = torch.stack(sources)\n # we pad till max_mic\n valid_mics = mixtures.shape[0]\n if mixtures.shape[0] < self.max_mics:\n dummy = torch.zeros((self.max_mics - mixtures.shape[0], mixtures.shape[-1]))\n mixtures = torch.cat((mixtures, dummy), 0)\n sources = torch.cat((sources, dummy.unsqueeze(1).repeat(1, sources.shape[1], 1)), 0)\n return mixtures, sources, valid_mics",
"def batch_generator(batch_size, sequence_length,\n x_train_scaled, y_train_scaled, num_x_signals, num_y_signals, num_train):\n # Infinite loop.\n while True:\n # Allocate a new array for the batch of input-signals.\n x_shape = (batch_size, sequence_length, num_x_signals)\n x_batch = np.zeros(shape=x_shape, dtype=np.float16)\n\n # Allocate a new array for the batch of output-signals.\n y_shape = (batch_size, sequence_length, num_y_signals)\n y_batch = np.zeros(shape=y_shape, dtype=np.float16)\n\n # Fill the batch with random sequences of data.\n for i in range(batch_size):\n # Get a random start-index.\n # This points somewhere into the training-data.\n idx = np.random.randint(num_train - sequence_length)\n\n # Copy the sequences of data starting at this index.\n x_batch[i] = x_train_scaled[idx:idx + sequence_length]\n y_batch[i] = y_train_scaled[idx:idx + sequence_length]\n yield x_batch, y_batch\n # return x_batch, y_batch",
"def my_generator(batch_size, img_dir):\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size",
"def generate_samples(mu1,cov,number_of_samples):\n samples = np.random.multivariate_normal(mu1, cov,number_of_samples)\n return samples",
"def collect_data(self, cue_size=10, sigma=0.1, test_factors=[2], \n cue_offset=9*pi/8, **kwargs):\n self.results['cue_size'] = cue_size = (pi/180) * cue_size\n self.results['cue_offset'] = cue_offset\n self.results['test_factors'] = test_factors = [0, 1] + test_factors\n self.results['sigma'] = sigma\n \n # Set up model parameters\n pdict = dict( N_outputs=500, \n N_theta=1000,\n C_W=0.05,\n N_cues_local=1, \n N_cues_distal=1, \n local_cue_std=cue_size,\n cue_offset=cue_offset,\n init_random=False,\n gamma_distal=0, \n num_trials=2*len(test_factors),\n monitoring=True )\n pdict.update(kwargs)\n \n # Create the simulation object and save the cue peak (gamma)\n self.out('Running training simulation...')\n model = VMONoiseModel(**pdict)\n if 'T' in kwargs:\n model.T = kwargs['T']\n cue_gamma = model.gamma_local\n \n # Simulate the phase noise test trials without, then with, cue\n for gamma in 0.0, cue_gamma:\n model.gamma_local = gamma\n for factor in test_factors:\n model.sigma = sigma * factor\n model.advance()\n \n # Compute responses and save session data\n self.out('Computing and saving session data files...')\n sessions = VMOSession.get_session_list(model)\n VMOSession.save_session_list(sessions, os.path.join(self.datadir, 'sessions'))\n \n # Save raw simulation data file and clean up\n model.post_mortem().tofile(os.path.join(self.datadir, 'data'))\n \n # Compute population and population lap matrices and save to data directory\n self.out('Computing and saving population responses...')\n clusts = np.arange(pdict['N_outputs'])\n R = [SD.get_population_matrix(clusters=clusts, inplace=True) for SD in sessions]\n R_laps = [SD.get_population_lap_matrix(clusters=clusts, inplace=True) for SD in sessions]\n np.save(os.path.join(self.datadir, 'R_session'), np.asarray(R))\n np.save(os.path.join(self.datadir, 'R_laps'), np.asarray(R_laps))\n \n # All done!\n self.out('Good bye!')",
"def generator(data_dir, samples, batch_size=32):\n num_samples = len(samples)\n while 1:\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n measurements = []\n for batch_sample in batch_samples:\n filename = csv_log_to_image_filename(data_dir,\n batch_sample[0])\n image = cv2.imread(filename)\n if image is not None:\n images.append(image)\n measurements.append(batch_sample[1])\n else:\n print(\"File \" + filename + \" is missing.\")\n\n X_data = np.array(images)\n y_data = np.array(measurements)\n yield sklearn.utils.shuffle(X_data, y_data)",
"def _generate(self, **kwargs):\n self._samples = numpy.array(list(itertools.product(*self.parameter_schema.values())), dtype=object)\n super()._generate()"
] | [
"0.7261067",
"0.6262869",
"0.5684544",
"0.56400836",
"0.55678874",
"0.5419031",
"0.5419031",
"0.5378836",
"0.53703797",
"0.5349273",
"0.5279914",
"0.52595407",
"0.5239342",
"0.52295643",
"0.5188037",
"0.51616156",
"0.515846",
"0.5093254",
"0.50914216",
"0.5061559",
"0.5050774",
"0.5044631",
"0.50437707",
"0.4996947",
"0.49931344",
"0.49859464",
"0.4971209",
"0.49706495",
"0.4952079",
"0.49388003"
] | 0.7653979 | 0 |
test the performance of the brute-force and Monte Carlo methods | def performance_test(Ns=15, Nr=3):
num = 2**Ns
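    # 2**Ns is the number of possible binary mixtures; using it as the Monte Carlo
    # sample count presumably keeps the two timings below comparable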
hs = np.random.random(Ns)
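    # hs: random per-substrate parameters (presumably the substrate commonness h_i)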
model = LibraryBinaryNumeric(Ns, Nr, hs)
start = time.time()
model.mutual_information_brute_force()
time_brute_force = time.time() - start
print('Brute force: %g sec' % time_brute_force)
start = time.time()
model.mutual_information_monte_carlo(num)
time_monte_carlo = time.time() - start
print('Monte carlo: %g sec' % time_monte_carlo) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n hash_str = '5411ba21c470e12d49f351a2d240e43618032950'\n salt = '0d71906d0f735e6196c80d0a7cb1748e'\n encrypted_code = 'Ul5SR0ISYFxUXl8OOxITFBFWVlIRQVtRXV4bHQs4ExQREhMUERJDRlhcRxwWZltdQhJaRxFTE0BUQUcUXF1XQV1XFB07EhMUE' \\\n 'RITFBFCQV1fRhsTZVdAQBFhRldSV0BHV0dfGhYbOQ=='\n bruter = CodeBrute(hash_str, salt)\n start = timer()\n _key, iterations = bruter.brute()\n print('Found', _key)\n print('Attempts:', iterations)\n elapsed = timer() - start\n print('Time:', elapsed)\n bruter.decrypt_and_execute(encrypted_code, str(_key))",
"def bruteForceTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 8\n answer = bruteForceAdvisor(subjects, maxWork)\n end_time = time.time()\n printSubjects(answer)\n print 'Time taken: ', end_time - start_time\n return None",
"def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)",
"def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)",
"def mbruteforce(func, alphabet, length, method = 'upto', start = None, threads = None):\n\n def bruteforcewrap(func, alphabet, length, method, start, databag):\n oldloglevel = context.log_level\n context.log_level = 'critical'\n res = bruteforce(func, alphabet, length, method=method, start=start, databag=databag)\n context.log_level = oldloglevel\n databag[\"result\"] = res\n\n if start == None:\n start = (1, 1)\n\n if threads == None:\n try:\n threads = multiprocessing.cpu_count()\n except NotImplementedError:\n threads = 1\n\n h = log.waitfor('MBruteforcing')\n processes = [None] * threads\n shareddata = [None] * threads\n\n (i2, N2) = start\n totalchunks = threads * N2\n\n for i in range(threads):\n shareddata[i] = multiprocessing.Manager().dict()\n shareddata[i]['result'] = None\n shareddata[i]['current_item'] = \"\"\n shareddata[i]['items_done'] = 0\n shareddata[i]['items_total'] = 0\n\n chunkid = (i2-1) + (i * N2) + 1\n\n processes[i] = multiprocessing.Process(target=bruteforcewrap,\n args=(func, alphabet, length, method, (chunkid, totalchunks),\n shareddata[i]))\n processes[i].start()\n\n done = False\n\n while not done:\n # log status\n current_item_list = \",\".join([\"\\\"%s\\\"\" % x[\"current_item\"]\n for x in shareddata if x != None])\n items_done = sum([x[\"items_done\"] for x in shareddata if x != None])\n items_total = sum([x[\"items_total\"] for x in shareddata if x != None])\n\n progress = 100.0 * items_done / items_total if items_total != 0 else 0.0\n\n h.status('Trying %s -- %0.3f%%' % (current_item_list, progress))\n\n # handle finished threads\n for i in range(threads):\n if processes[i] and processes[i].exitcode != None:\n # thread has terminated\n res = shareddata[i][\"result\"]\n processes[i].join()\n processes[i] = None\n\n # if successful, kill all other threads and return success\n if res != None:\n for i in range(threads):\n if processes[i] != None:\n processes[i].terminate()\n processes[i].join()\n processes[i] = None\n h.success('Found key: \"%s\"' % res)\n return res\n\n if all([x == None for x in processes]):\n done = True\n time.sleep(0.3)\n h.failure('No matches found')",
"def _brute_force(self):\n if self.N > 9:\n #print(\"Input set is too big for brute force estimation.\")\n self.best_path = None\n else:\n #print(\"Number of permutations to check: {}\".format(math.factorial(self.N)))\n #init = \n A = self._P + np.finfo(np.float).eps\n A = (A + (1-A).T)/2\n for i in range(A.shape[0]):\n A[i,i] = np.finfo(np.float).eps\n init = (A>0.5).sum(axis=1).argsort()[::-1]\n #--- use log(p(Y=1\\mid s',s)) to shift multiplication to sum\n lP = np.log(A)\n for i in range(lP.shape[0]):\n lP[i,i] = 0\n #init_cost = 0\n ##--- lP[x:x+1] está MAL hay que sumar respecto a i+1 en z, no en lP.\n #for i in range(len(init)-1):\n # init_cost += lP[init[i],init[i+1]:].sum()\n z_star = []\n z_cost = -np.inf\n for z in permutations(range(self.N)):\n cost = 0\n for i in range(len(z)-1):\n cost += lP[z[i],z[i+1:]].sum()\n if cost > z_cost:\n z_cost = cost\n z_star = z\n self.best_path = np.array(z_star)",
"def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)",
"def brute_force(city_list):\n start = time.time()*1000\n shortest = exhaustive_search(city_list,6)\n stop = time.time()*1000\n print(\"Shortest tour for 6 first cities:\", tour_distance(shortest))\n print (\"Time spent on 6 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,7)\n stop = time.time()*1000\n print(\"Shortest tour for 7 first cities:\", tour_distance(shortest))\n print (\"Time spent on 7 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,8)\n stop = time.time()*1000\n print(\"Shortest tour for 8 first cities:\", tour_distance(shortest))\n print (\"Time spent on 8 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,9)\n stop = time.time()*1000\n print(\"Shortest tour for 9 first cities:\", tour_distance(shortest))\n print (\"Time spent on 9 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,10)\n stop = time.time()*1000\n print(\"Shortest tour for 10 first cities:\", tour_distance(shortest))\n print (\"Time spent on 10 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\" \")",
"def evaluate_all_points():\n start_time = timeit.default_timer()\n mua, vra = pgen.get_pdf()\n slack = ptsl.D\n\n all_alloc = list(itertools.product(range(1,ptsl.M+1),repeat=ptsl.NPH))\n riska = []\n f2 = open(\"risk-file-D216-NPH5.csv\",\"w\")\n f2.write(\"alloc1,alloc2,alloc3,alloc4,alloc5,risk,util\\n\")\n count = 0\n for a in all_alloc :\n a1, a2, a3, a4, a5 = a\n r = compute_risk(mua, vra, a, slack)\n \n if r > 0.00001 and r < 1 - 0.00001 :\n riska.append(r)\n util = a1 * mua[a1-1] + a2 * mua[a2-1] + a3 * mua[a3-1] + a4 * mua[a4-1] + a5 * mua[a5-1]\n f2.write(\"%d,%d,%d,%d,%d,%f,%f\\n\"%(a1,a2,a3,a4,a5,r,util))\n count = count + 1\n f2.close()\n np.save(\"stored_risk\",riska)\n elapsed = timeit.default_timer() - start_time\n print(\"Brute Force Evaluation Time for %d points : %fs\"%(count,elapsed))",
"def test_bottleneck(self):\n # import the experiment variable from the example\n exp = bottleneck_example(20, 5, render=False)\n\n # run the experiment for a few time steps to ensure it doesn't fail\n exp.run(1, 5)",
"def bruteforce(func, alphabet, length, method = 'upto', start = None, databag = None):\n\n if method == 'upto' and length > 1:\n iterator = product(alphabet, repeat = 1)\n for i in xrange(2, length + 1):\n iterator = chain(iterator, product(alphabet, repeat = i))\n\n elif method == 'downfrom' and length > 1:\n iterator = product(alphabet, repeat = length)\n for i in xrange(length - 1, 1, -1):\n iterator = chain(iterator, product(alphabet, repeat = i))\n\n elif method == 'fixed':\n iterator = product(alphabet, repeat = length)\n\n else:\n raise TypeError('bruteforce(): unknown method')\n\n if method == 'fixed':\n total_iterations = len(alphabet) ** length\n else:\n total_iterations = (len(alphabet) ** (length + 1) / (len(alphabet) - 1)) - 1\n\n if start is not None:\n i, N = start\n if i > N:\n raise ValueError('bruteforce(): invalid starting point')\n\n i -= 1\n chunk_size = total_iterations / N\n rest = total_iterations % N\n starting_point = 0\n\n for chunk in range(N):\n if chunk >= i:\n break\n if chunk <= rest:\n starting_point += chunk_size + 1\n else:\n starting_point += chunk_size\n\n if rest >= i:\n chunk_size += 1\n\n total_iterations = chunk_size\n\n h = log.waitfor('Bruteforcing')\n cur_iteration = 0\n if start != None:\n consume(i, iterator)\n for e in iterator:\n cur = ''.join(e)\n cur_iteration += 1\n if cur_iteration % 2000 == 0:\n progress = 100.0 * cur_iteration / total_iterations\n h.status('Trying \"%s\", %0.3f%%' % (cur, progress))\n if databag:\n databag[\"current_item\"] = cur\n databag[\"items_done\"] = cur_iteration\n databag[\"items_total\"] = total_iterations\n res = func(cur)\n if res:\n h.success('Found key: \"%s\"' % cur)\n return cur\n if start != None:\n consume(N - 1, iterator)\n\n h.failure('No matches found')",
"def test_stress(self):\n primorial100 = 4711930799906184953162487834760260422020574773409675520188634839616415335845034221205289256705544681972439104097777157991804380284218315038719444943990492579030720635990538452312528339864352999310398481791730017201031090\n for i in range(10000):\n self.assertEqual(primorial(100), primorial100)",
"def speed():\r\n\r\n algo = ['logistic_sgd', 'logistic_cg', 'mlp', 'convolutional_mlp',\r\n 'dA', 'SdA', 'DBN', 'rbm', 'rnnrbm']\r\n to_exec = [True] * len(algo)\r\n# to_exec = [False] * len(algo)\r\n# to_exec[-1] = True\r\n do_float64 = True\r\n do_float32 = True\r\n do_gpu = True\r\n\r\n algo_executed = [s for idx, s in enumerate(algo) if to_exec[idx]]\r\n #Timming expected are from the buildbot that have an i7-920 @\r\n # 2.67GHz with hyperthread enabled for the cpu, 12G of ram. An GeForce GTX\r\n # 285 for the GPU. OS=Fedora 14, gcc=4.5.1, python/BLAS from EPD\r\n # 7.1-2 (python 2.7.2, mkl unknow). BLAS with only 1 thread.\r\n\r\n expected_times_64 = numpy.asarray([10.0, 22.5, 76.1, 73.7, 116.4,\r\n 346.9, 381.9, 558.1, 186.3])\r\n expected_times_32 = numpy.asarray([11.6, 29.6, 42.5, 66.5, 71,\r\n 191.2, 226.8, 432.8, 176.2])\r\n\r\n # Number with just 1 decimal are new value that are faster with\r\n # the Theano version 0.5rc2 Other number are older. They are not\r\n # updated, as we where faster in the past!\r\n # TODO: find why and fix this!\r\n\r\n# Here is the value for the buildbot on February 3th 2012.\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n# gpu times[3.72957802, 9.94316864, 29.1772666, 9.13857198, 25.91144657,\r\n# 18.30802011, 53.38651466, 285.41386175]\r\n# expected [3.076634879, 7.555234910, 18.99226785, 9.58915591, 24.130070450,\r\n# 24.77524018, 92.66246653, 322.340329170]\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n#expected/get [0.82492841, 0.75984178, 0.65092691, 1.04930573, 0.93125138\r\n# 1.35324519 1.7356905 1.12937868]\r\n expected_times_gpu = numpy.asarray([3.07663488, 7.55523491, 18.99226785,\r\n 9.6, 24.13007045,\r\n 20.4, 56, 302.6, 315.4])\r\n expected_times_64 = [s for idx, s in enumerate(expected_times_64)\r\n if to_exec[idx]]\r\n expected_times_32 = [s for idx, s in enumerate(expected_times_32)\r\n if to_exec[idx]]\r\n expected_times_gpu = [s for idx, s in enumerate(expected_times_gpu)\r\n if to_exec[idx]]\r\n\r\n def time_test(m, l, idx, f, **kwargs):\r\n if not to_exec[idx]:\r\n return\r\n print algo[idx]\r\n ts = m.call_time\r\n try:\r\n f(**kwargs)\r\n except Exception, e:\r\n print >> sys.stderr, 'test', algo[idx], 'FAILED', e\r\n l.append(numpy.nan)\r\n return\r\n te = m.call_time\r\n l.append(te - ts)\r\n\r\n def do_tests():\r\n m = theano.compile.mode.get_default_mode()\r\n l = []\r\n time_test(m, l, 0, logistic_sgd.sgd_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 1, logistic_cg.cg_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 2, mlp.test_mlp, n_epochs=5)\r\n time_test(m, l, 3, convolutional_mlp.evaluate_lenet5, n_epochs=5,\r\n nkerns=[5, 5])\r\n time_test(m, l, 4, dA.test_dA, training_epochs=2,\r\n output_folder='tmp_dA_plots')\r\n time_test(m, l, 5, SdA.test_SdA, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 6, DBN.test_DBN, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 7, rbm.test_rbm, training_epochs=1, batch_size=300,\r\n n_chains=1, n_samples=1, output_folder='tmp_rbm_plots')\r\n time_test(m, l, 8, rnnrbm.test_rnnrbm, num_epochs=1)\r\n return numpy.asarray(l)\r\n\r\n #test in float64 in FAST_RUN mode on the cpu\r\n import theano\r\n if do_float64:\r\n theano.config.floatX = 'float64'\r\n theano.config.mode = 'FAST_RUN'\r\n float64_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % 
expected/get', (\r\n expected_times_64 / float64_times)\r\n\r\n #test in float32 in FAST_RUN mode on the cpu\r\n theano.config.floatX = 'float32'\r\n if do_float32:\r\n float32_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n\r\n #test in float32 in FAST_RUN mode on the gpu\r\n import theano.sandbox.cuda\r\n if do_gpu:\r\n theano.sandbox.cuda.use('gpu')\r\n gpu_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n\r\n if (do_float64 + do_float32 + do_gpu) > 1:\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n if do_float64:\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n if do_float32:\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n if do_gpu:\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64 and do_float32:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n if do_float64 and do_gpu:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n print >> sys.stderr, 'expected float64/gpu', (\r\n expected_times_64 / gpu_times)\r\n if do_float32 and do_gpu:\r\n print >> sys.stderr, 'float32/gpu', float32_times / gpu_times\r\n print >> sys.stderr, 'expected float32/gpu', (\r\n expected_times_32 / gpu_times)\r\n\r\n def compare(x, y):\r\n ratio = x / y\r\n # If there is more then 5% difference between the expected\r\n # time and the real time, we consider this an error.\r\n return sum((ratio < 0.95) + (ratio > 1.05))\r\n\r\n if do_float64:\r\n err = compare(expected_times_64, float64_times)\r\n print >> sys.stderr, 'speed_failure_float64=' + 
str(err)\r\n if do_float32:\r\n err = compare(expected_times_32, float32_times)\r\n print >> sys.stderr, 'speed_failure_float32=' + str(err)\r\n if do_gpu:\r\n err = compare(expected_times_gpu, gpu_times)\r\n print >> sys.stderr, 'speed_failure_gpu=' + str(err)\r\n\r\n assert not numpy.isnan(gpu_times).any()",
"def printAlgorithmResults (test_case, key_size, plaintext, ciphertext, e, n, d):\r\n brute_force_time = -1\r\n\r\n if test_case < 7:\r\n start = time.time()\r\n d_actual_bf = crackRsaBruteForce(e, n)\r\n end = time.time()\r\n brute_force_time = '%.3f'%(end-start)\r\n\r\n start = time.time()\r\n d_actual = crackRsaPrivateKey(e, n)\r\n end = time.time()\r\n cracking_time = '%.3f'%(end-start)\r\n\r\n print (\" **************** Test Case \"+str(test_case+1)+\" - \"+str(key_size)+\" bits **************** \\n\")\r\n print (\"d expected: \"+str(d))\r\n print (\"d actual: \"+str(d_actual))\r\n print (\"plaintext (expected): \"+str(plaintext))\r\n print (\"plaintext (actual): \"+str(decryptRSA(d_actual,n,ciphertext)))\r\n print (\"ciphertext (expected): \"+str(ciphertext))\r\n print (\"ciphertext (actual): \"+str(encryptRSA(e,n,plaintext)))\r\n print (\"cracking took \"+str(cracking_time)+\" seconds\")\r\n\r\n if brute_force_time == -1:\r\n print (\"could not crack the key using brute force\\n\")\r\n else:\r\n print (\"brute force took \"+str(brute_force_time)+\" seconds\\n\")",
"def test_handcrafted_examples(self):\n for i in range(1000):\n self.assertEqual(perfectd(0), True)\n self.assertEqual(prime(0), False)\n self.assertEqual(prime(2), True)\n self.assertEqual(prime(7), True)\n self.assertEqual(prime(15), False)\n self.assertEqual(perfectd(6), True)\n self.assertEqual(perfectd(15), False)",
"def basic_experiments(k, p, num_iter, transforms, train_path, test_path):\n start_time = time.time()\n print(\"Generating advice\")\n with open(train_path, \"r\") as f:\n train_data = f.readlines()\n advice = generate_advice(train_data, transforms)\n print(\"Done: \" + str(time.time() - start_time) + \"\\n\")\n\n print(\"Generating real count for test data\")\n with open(test_path, \"r\") as f:\n test_data = f.readlines()\n real_count = generate_advice(test_data, transforms)\n print(\"Done: \" + str(time.time() - start_time))\n actual = [x.moment(p) for x in real_count]\n print(\"Actual moments:\")\n print(actual)\n\n print(\"Estimating using noisy advice:\")\n est_with_advice = []\n for i in range(num_iter):\n seed = str(random.randint(1, 2**64 + 1)) + str(random.randint(1, 2**64 + 1))\n this_hash_exp = lambda x: hash_exp(x, seed)\n est_with_advice.append(\n estimate_using_advice(test_data, transforms, k, p, this_hash_exp,\n advice))\n print(\"Done: \" + str(time.time() - start_time))\n print(est_with_advice)\n\n print(\"Estimating using perfect advice:\")\n est_perfect_advice = []\n for i in range(num_iter):\n seed = str(random.randint(1, 2**64 + 1)) + str(random.randint(1, 2**64 + 1))\n this_hash_exp = lambda x: hash_exp(x, seed)\n est_perfect_advice.append(\n estimate_using_advice(test_data, transforms, k, p, this_hash_exp,\n real_count))\n print(\"Done: \" + str(time.time() - start_time))\n print(est_perfect_advice)\n\n print(\"Estimating using PPSWOR:\")\n est_ppswor = []\n for i in range(num_iter):\n cur = []\n for x in real_count:\n cur.append(ppswor_estimate_moment(x.counts.items(), k, p))\n est_ppswor.append(cur)\n print(\"Done: \" + str(time.time() - start_time))\n print(est_ppswor)\n\n print(\"Estimating using ell_2 PPSWOR:\")\n est_ppswor_l2 = []\n for i in range(num_iter):\n cur = []\n for x in real_count:\n cur.append(ppswor_estimate_moment(x.counts.items(), k, p, 2))\n est_ppswor_l2.append(cur)\n print(\"Done: \" + str(time.time() - start_time))\n print(est_ppswor_l2)",
"def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)",
"def random_test(self, source):\r\n ret = 1\r\n for seed in range(1, 40):\r\n if source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x)**2+10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}) != \\\r\n source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x) ** 2 + 10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}):\r\n ret = 0\r\n if ret == 0:\r\n if self.verbosity > 0:\r\n print(\"ERROR: Random seed non functional, results cannot be replicated.\")\r\n return 0\r\n else:\r\n if self.verbosity > 1:\r\n print(\"Random seed functional, results replicable if a seed is used.\")\r\n return 1",
"def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}",
"def test_rand_func2(self):\n for i in range(0, 100000):\n num = random.randint(0, 32535143990)\n func2_comp(num)",
"def run_and_evaluate():\n tsp_problems = read_all_problems()\n # Empty list of metrics\n results = []\n for problem in tqdm.tqdm(tsp_problems):\n # As random factors are involved repeat experiments a couple of times\n best_routes_base = []\n best_routes_af = []\n best_routes_ms = []\n base_times = []\n af_times = []\n ms_times = []\n for i in range(10):\n # Base solution\n start_time = timeit.default_timer()\n best_route_base = solve_tsp_basic(problem)\n base_time = timeit.default_timer() - start_time\n best_routes_base.append(Fitness(route=best_route_base).route_distance())\n base_times.append(base_time)\n\n # AF clustering solution\n start_time = timeit.default_timer()\n best_route_af = solve_tsp_affinity_propagation(problem)\n af_time = timeit.default_timer() - start_time\n best_routes_af.append(Fitness(route=best_route_af).route_distance())\n af_times.append(af_time)\n\n # MS solution\n start_time = timeit.default_timer()\n best_route_ms = solve_mean_shift(problem)\n ms_time = timeit.default_timer() - start_time\n best_routes_ms.append(Fitness(route=best_route_ms).route_distance())\n ms_times.append(ms_time)\n\n results.append(\n {\n \"problem name\": problem.name,\n \"optimal solution\": find_route_optimal_route_length(problem),\n \"baseline tour length\": mean(best_routes_base),\n \"af clustering tour length\": mean(best_routes_af),\n \"ms clustering tour length\": mean(best_routes_ms),\n \"baseline algorithm time\": mean(base_times),\n \"af clustering algorithm time\": mean(af_times),\n \"ms clustering algorithm time\": mean(ms_times),\n }\n )\n # Create dataframe and safe results\n df = pd.DataFrame(results)\n df.to_csv(\"results.csv\", index=False)\n return df",
"def challenge2(self):\n # Create emulator, with 6 registers. Set register 0 to 1\n emulator = Emulator(6)\n emulator.registers[0] = 1\n\n # Running this program seems to take forever... let's see if there's a pattern.\n # OK, so after dumping out lots of instructions, this pattern is repeated a lot:\n # [ 3] mulr 1 5 3\n # [ 4] eqrr 3 2 3\n # [ 5] addr 3 4 4\n # [ 6] addi 4 1 4\n # [ 8] addi 5 1 5\n # [ 9] gtrr 5 2 3\n # [10] addr 4 3 4\n # [11] seti 2 2 4\n # In this program, register 4 is the IP register. (IP is shown in [] above)\n # So instruction 6 is just jumping to instruction 8, we can optimize that out\n # And seti 2 2 4 will act as a jump back to instruction 3 (= 2 + 1)\n # Registers look like this after instruction 3:\n # IP: 4 Reg: [0, 1, 10551376, 145834, 3, 145834]\n # So effectively we have, per loop iteration:\n # reg[3] = reg[1] * reg[5]\n # reg[3] = 1 if reg[3] == reg[2] else 0\n # reg[4] = reg[3] + reg[4] => If the above was false, we go to instruction 6.\n # Otherwise we go to instruction 7 (outside the normal loop flow) and do:\n # addr 1 0 0 => reg[0] = reg[0] + reg[1], then proceed as below.\n # If we stay in the loop, or just fallen through now, we are at instruction 8:\n # reg[5] = reg[5] + reg[1]\n # reg[3] = 1 if reg[5] > reg[2] else 0\n # reg[4] = reg[3] + reg[4] => If the above was false, we go to instruction 11 => back to the start.\n # Otherwise, we jump outside the loop to instruction 12.\n # So to write the above in Python code, with registers named R0 etc:\n # while R5 <= R2:\n # R3 = R1 * R5\n # if R3 == R2:\n # R0 += R1\n # R5 += R1\n #\n # This appears to be a brute force way of checking if R1 is a factor of R2 - and if it is, adding it to R0\n # After this loop, if we go to instruction 12:\n # addi 1 1 1 => R1 += 1\n # gtrr 1 2 3 => R3 = 1 if R1 > R2 else 0\n # addr 3 4 4\n # seti 1 4 4\n # mulr 4 4 4\n # These last three mean \"jump to instruction 2 if R1 <= R2 else terminate (by squaring the IP)\"\n # So putting this together, along with instructions 2 and 3 of:\n # seti 1 8 1 => R1 = 1\n # seti 1 3 5 => R5 = 1\n # We are brute-force finding all the factors of R2 and adding all of them together into R0\n # As R2 contains 10551376, we can do this in a cleverer, non brute force way to find the final value of R0!\n number_to_factorize = 10551376\n \n def factors(n): \n return set(reduce(list.__add__, \n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))\n\n sum_factors = sum(factors(number_to_factorize))\n print(f\"Final value of register 0: {sum_factors}\")",
"def test_compare(self):\n config = {\n 'num_components': 512,\n 'num_features': 128,\n 'covariance': 'spherical'\n }\n\n samples = self.generate_samples(config, 100_000)\n sklearn_time = np.mean([self.train_sklearn(config, samples) for _ in range(3)])\n ours_cpu_time = np.mean([self.train_ours(config, samples) for _ in range(3)])\n ours_gpu_time = np.mean([\n self.train_ours(config, samples.cuda(), gpu=True) for _ in range(3)\n ])\n\n print(f\"-------------------------------------\")\n print(f\"Speedup of CPU implementation: {sklearn_time / ours_cpu_time:.2f}\")\n print(f\"Speedup of GPU implementation: {sklearn_time / ours_gpu_time:.2f}\")\n print(f\"-------------------------------------\")",
"def test_strategy_evaluate(self, MetricClass, seed):\n m = MetricClass()\n strategy = RandomTrader(seed=seed).run(make_randomwalk(seed=seed))\n result0 = np.array(m.result(strategy)) # from metric method\n result1 = np.array(strategy.evaluate(m)) # from strategy method\n assert np.equal(result0, result1).all()",
"def run_tests(test_count=1000, buyer_count=10):\n\n found_error = False\n\n for i in range(test_count):\n\n bp, sp, bw = get_preferences(buyer_count)\n matches = get_matches(bp, sp, bw)\n\n if not check_stability(bp, sp, matches):\n print('ERROR!!!')\n found_error = True\n\n if not found_error:\n print('Executed {} tests without errors'.format(test_count))",
"def test_custom_works_fine(self):\n\n ba_custom = BatAlgorithm(NP=20, A=0.5, r=0.5, Qmin=0.0, Qmax=2.0, seed=self.seed)\n ba_customc = BatAlgorithm(NP=20, A=0.5, r=0.5, Qmin=0.0, Qmax=2.0, seed=self.seed)\n AlgorithmTestCase.algorithm_run_test(self, ba_custom, ba_customc, MyBenchmark())",
"def test_run_jackknifed_beta_diversity_parallel(self):\r\n\r\n run_jackknifed_beta_diversity(\r\n self.test_data['biom'][0],\r\n self.test_data['tree'][0],\r\n 20,\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n self.test_data['map'][0],\r\n parallel=True,\r\n status_update_callback=no_status_updates)\r\n\r\n weighted_unifrac_upgma_tree_fp = join(self.test_out,\r\n 'weighted_unifrac',\r\n 'upgma_cmp', 'jackknife_named_nodes.tre')\r\n unweighted_unifrac_upgma_tree_fp = join(\r\n self.test_out, 'unweighted_unifrac', 'upgma_cmp',\r\n 'jackknife_named_nodes.tre')\r\n weighted_unifrac_emperor_index_fp = join(\r\n self.test_out, 'weighted_unifrac', 'emperor_pcoa_plots',\r\n 'index.html')\r\n unweighted_unifrac_emperor_index_fp = join(\r\n self.test_out, 'unweighted_unifrac', 'emperor_pcoa_plots',\r\n 'index.html')\r\n\r\n input_file_basename = splitext(split(self.test_data['biom'][0])[1])[0]\r\n unweighted_unifrac_dm_fp = join(self.test_out,\r\n 'unweighted_unifrac_%s.txt' % input_file_basename)\r\n weighted_unifrac_dm_fp = join(self.test_out,\r\n 'weighted_unifrac_%s.txt' % input_file_basename)\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(weighted_unifrac_upgma_tree_fp) > 0)\r\n self.assertTrue(getsize(unweighted_unifrac_upgma_tree_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_emperor_index_fp) > 0)\r\n self.assertTrue(getsize(unweighted_unifrac_emperor_index_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)",
"def test_v2_runtime(self):\r\n\r\n start_time = time.time()\r\n\r\n for n in range(1, 30000):\r\n prime_numbers_v2(n)\r\n\r\n elapsed_time = round(time.time() - start_time, 3)\r\n\r\n print(f\"v2, time required: {elapsed_time}\")",
"def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))",
"def test_huge_answers(self):\n self.init_player(\n '0', 'Welcome to Oppia!', 'do you know where the name \\'Oppia\\'')\n self.submit_and_compare(\n '0', '', 'In fact, the word Oppia means \\'learn\\'.')\n # This could potentially cause errors in stats_models when the answer\n # is persisted to the backend.\n self.submit_and_compare(\n 'a' * 1000500, 'Sorry, nope, we didn\\'t get it', '')"
] | [
"0.6661952",
"0.6618818",
"0.64001924",
"0.63427407",
"0.6263836",
"0.6232404",
"0.6223552",
"0.59330684",
"0.59220713",
"0.59127235",
"0.58657014",
"0.58575463",
"0.5836921",
"0.58361584",
"0.58269995",
"0.5803118",
"0.5761818",
"0.573099",
"0.57286465",
"0.57004774",
"0.56819415",
"0.5672517",
"0.56461215",
"0.5611996",
"0.5586655",
"0.55782086",
"0.5568866",
"0.55688524",
"0.5551811",
"0.5546691"
] | 0.6652606 | 1 |
Returns the Classroom in a readable representation for the user | def __str__(self):
return 'Classroom {} has a capacity of {} persons and ' \
'has the following equipment: {}.'.format(
self.number, str(self.capacity), ', '.join(self.equipment)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self):\n return \"Classroom('{}', {}, {})\".format(self.number, self.capacity,\n str(self.equipment))",
"def __str__(self):\n return self.room_name",
"def __str__(self):\n return self.room.name",
"def json(self):\n return {'name': self.neighbourhood_group, 'neighbourhood': self.room_type}",
"def __repr__(self):\n return f'ResidenciaModel(name={self.neighbourhood_group}, neighbourhood={self.room_type})'",
"def format_room(room):\n new = {}\n new[\"name\"] = \"Room\"\n new[\"info\"] = {}\n for key in room:\n if key != \"containedSpaces\" and key != \"topLevelSpace\" and key != \"parentSpace\" and key != 'description' and key != 'id' and key!= 'type':\n new[\"info\"][key] = room[key]\n return new",
"def get_print_room_data(self, room):\n habitants = self.all_rooms[room]['occupants']\n\n return {'room': room, 'names': habitants}",
"def __str__(self):\n fields = []\n try:\n walls = self._walls\n if walls != 0:\n field = \"walls=\"\n if walls & Direction.North:\n field += \"N\"\n if walls & Direction.South:\n field += \"S\"\n if walls & Direction.East:\n field += \"E\"\n if walls & Direction.West:\n field += \"W\"\n fields.append(field)\n except Exception:\n pass\n try:\n egress = self._egress\n if egress:\n fields.append(\"egress=\" + str(egress))\n except Exception:\n pass\n try:\n distance = self._distance\n if distance:\n fields.append(\"distance=\" + str(distance))\n except Exception:\n pass\n try:\n self._check()\n except Exception as excp:\n fields.append(\"error=\" + repr(excp))\n result = \"Room(\" + \", \".join(fields) + \")\"\n return result",
"def addClassroom(classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomName\"] == classroomName:\n print(\"Two classrooms can not have same name\")\n return False\n\n if classroomEntities==[]:\n lastSavedIdNumber = \"0\"\n else:\n lastSavedId=classroomEntities[-1][\"classroomId\"] #update classroomId as first element in classroomEntities list\n lastSavedIdNumber=lastSavedId[2:]\n numberOfDigitsInID = 3\n if lastSavedIdNumber == \"9\" * len(lastSavedIdNumber):\n numberOfDigitsInID = len(lastSavedIdNumber) + 1\n classroomId=\"CR\"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,\"0\")\n\n # add the new Classroom\n newClassroom = {}\n newClassroom[\"classroomId\"] = classroomId\n newClassroom[\"classroomName\"] = classroomName\n newClassroom[\"capacity\"] = capacity\n newClassroom[\"location\"] = location\n classroomEntities.append(newClassroom)\n print(f\"Class Room is added into the system, Class Room id is {classroomId}.\")\n return True",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n 'room' : self.room.name,\n 'description' : self.description,\n 'price' : self.price,\n }",
"def helpClassroom(classroomId):\n selectedClassroomCopy = getClassroomById(classroomId)\n print(\"Class Id: \" + selectedClassroomCopy[\"classroomId\"])\n print(\"Name: \" + selectedClassroomCopy[\"classroomName\"])\n print(\"Capacity: \" + selectedClassroomCopy[\"capacity\"])\n print(\"Location: \" + selectedClassroomCopy[\"location\"])\n return True",
"def __str__(self):\n return self.nivel_participacao",
"def getClassroomById(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n return classroom.copy()\n return None",
"def __str__(self):\n return '{0}: {1} \"{2}\" {3}'.format(\n self.race, self.name, self.nick, self.surname)",
"def us_citizen(self, instance):\r\n return instance.user.profile.us_citizen",
"def retrieve_google_classroom_data(request, sociallogin, **kwargs):\n # updates user's extra data\n user = User.objects.get(pk=request.user.id)\n user.name = sociallogin.account.extra_data['name']\n user.avatar = sociallogin.account.extra_data['picture']\n user.save()\n\n # get the user's authorization token\n token = SocialToken.objects.filter(account__user=request.user, account__provider='google')[0].token\n\n save_courses(user, token)",
"def to_representation(self, instance):\n\n return instance.courier_type",
"def serialize(self):\n return {\n \"grade\": self.grade,\n \"student\": {\n \"id\": self.student_id,\n \"fname\": self.student.fname,\n \"lname\": self.student.lname,\n \"dob\": self.student.dob,\n \"grad_year\": self.student.grad_year,\n \"gpa\": self.student.gpa,\n \"occupation\": self.student.occupation.serialize\n },\n \"course\": {\n \"id\": self.course_id,\n \"course_name\": self.course.course_name,\n \"field\": self.course.field.serialize,\n \"faculty\": self.course.faculty.serialize,\n \"semester\": self.course.semester.serialize\n }\n }",
"def test_user_only_sees_own_classroom_state(self):\n self.client.force_authenticate(self.global_user_1)\n data = self.client.get(self.api_classroom_detail_url, {'user': 'current'}).data\n self.assertTrue(data.get('enrolled'))\n\n not_enrolled_user = get_user_model().objects.annotate(\n classrooms_count=Count('classrooms_states'),\n ).filter(classrooms_count=0)[0]\n self.client.force_authenticate(not_enrolled_user)\n data = self.client.get(self.api_classroom_detail_url, {'user': 'current'}).data\n self.assertFalse(data.get('enrolled'))",
"def to_string(self):\n return \"{base_msg} Courses: {courses}\".format(\n base_msg=super().to_string(),\n courses=self.courses_string()\n )",
"def __str__(self):\n return f\"{self.semester} | {self.school} | {self.position} | {self.class_name}\"",
"def get_joint_gensec(self, obj):\n serializer = UserProfileSerializer(obj.joint_gensec, many=True)\n return serializer.data",
"def __str__(self):\n return self.meeting_type",
"def __str__(self):\n print(\"Welcome to our house\")\n for room in self.rooms:\n print(room.name, room.sqr_ft)",
"def __repr__(self):\n return f\"<Tutor {self.first_name.title()} {self.last_name.title()}>\"",
"def serialize(self):\n return {'professor_id': self.professor_id,\n 'attendance': self.attendance,\n 'clarity_color': self.clarity_color,\n 'easy_color': self.easy_color,\n 'help_color': self.help_color,\n 'help_count': self.help_count,\n 'not_help_count': self.not_help_count,\n 'online_class': self.online_class,\n 'quality': self.quality,\n 'clarity': self.clarity,\n 'course': self.course,\n 'comments': self.comments,\n 'date': self.date,\n 'easy': self.easy,\n 'easy_string': self.easy_string,\n 'helpful': self.helpful,\n 'interest': self.interest,\n 'overall': self.overall,\n 'overall_string': self.overall_string,\n 'status': self.status,\n 'text_book_use': self.text_book_use,\n 'would_take_again': self.would_take_again,\n 'sid': self.sid,\n 'taken_for_credit': self.taken_for_credit,\n 'teacher': self.teacher,\n 'teacher_grade': self.teacher_grade,\n 'teacher_rating_tags': self.teacher_rating_tags,\n 'unuseful_grouping': self.unuseful_grouping,\n 'useful_grouping': self.useful_grouping}",
"def __repr__(self):\r\n\r\n return f\"<User info: id = {self.user_id}, name = {self.fname} {self.lname} email = {self.email}>\"",
"def __str__(self):\n return str(self.user)",
"def __str__(self):\n return str(self.user)",
"def __str__(self):\n return str(self.user)"
] | [
"0.6388747",
"0.58620846",
"0.5832608",
"0.55203956",
"0.5508624",
"0.5427314",
"0.54227597",
"0.5291813",
"0.5283396",
"0.52249587",
"0.5194744",
"0.5179897",
"0.51505965",
"0.51442033",
"0.5131981",
"0.5128399",
"0.51204616",
"0.5106739",
"0.5105534",
"0.5094171",
"0.50742364",
"0.50669646",
"0.5043601",
"0.5028339",
"0.5016841",
"0.50076425",
"0.49862003",
"0.49831268",
"0.49831268",
"0.49831268"
] | 0.5878091 | 1 |
Classroom, Classroom -> bool Returns True if the first room has a bigger capacity than the second room | def is_larger(self, room2):
return self.capacity > room2.capacity | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __gt__(self, other: Card) -> bool:\n return not self.__le__(other)",
"def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result",
"def __gt__(self, other):\n return self.weight() > other.weight()",
"def __gt__(self, other):\n if other.num_of_certified_applications > self.num_of_certified_applications:\n return True\n elif other.num_of_certified_applications < self.num_of_certified_applications:\n return False\n elif other.name < self.name:\n return True\n else:\n return False",
"def __gt__(self, other):\n student1 = self.calculate_total()\n student2 = other.calculate_total()\n\n if student1 > student2:\n return True\n else:\n return False",
"def __gt__(self, other):\n return self.abs2phy.__gt__(other)",
"def __gt__(self, other):\n return self.abs2phy.__gt__(other)",
"def __gt__(self, other):\n return self.weight > other.weight",
"def __ge__(self, other: Card) -> bool:\n return not self.__lt__(other)",
"def __gt__(self, other):\n if other.groupnumber > self.groupnumber:\n return True\n else:\n return False",
"def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True",
"def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True",
"def __gt__(self, vs) -> bool:\n return vs <= self",
"def save(self, force_insert=False, force_update=False, using=None,\n\t\t\t update_fields=None):\n\t\tif (self.capacity - self.occupied_sits) < 0:\n\t\t\traise ValueError(\"all sits in this classroom are occupied try other classes\")\n\t\telse:\n\t\t\tsuper(ClassRoom, self).save()",
"def has_vacancy(self):\n return len(self.occupants) < self.capacity",
"def __gt__(self, other):\n return self.estimated_cost > other.estimated_cost",
"def __ge__(self, other):\n return int(self.rank) >= int(other.rank)",
"def is_balanced(self,delta=0.030):\n mincell = self.get_min_cell_voltage()\n maxcell = self.get_max_cell_voltage()\n if abs(maxcell-mincell) > delta:\n return False\n else:\n return True",
"def __gt__(self,other):\n self_bounds = self.Bounds\n ndim = self.InferSpatialDimension()\n\n if isinstance(other,Mesh):\n other_bounds = other.Bounds\n mins = (self_bounds[0,:] < other_bounds[0,:]).all()\n maxs = (self_bounds[1,:] > other_bounds[1,:]).all()\n return mins and maxs\n elif isinstance(other,np.ndarray):\n # Otherwise check if an element is within a given bounds\n assert other.shape == (2,ndim)\n mins = (self_bounds[0,:] < other[0,:]).all()\n maxs = (self_bounds[1,:] > other[1,:]).all()\n return mins and maxs\n else:\n raise ValueError(\"Cannot compare mesh with {}\".format(type(other)))",
"def is_full(self) -> bool:\n\n if self._current_pax + 1 <= self._capacity:\n\n # aircraft currently has reached the maximum capacity of passengers\n return False\n\n return True",
"def __gt__(self, other):\n return int(self.rank) > int(other.rank)",
"def __gt__(self, other):\n if self.i1 > other.i1:\n return True\n elif self.i1 == other.i1:\n if self.i2 > other.i2:\n return True\n elif self.i2 == other.i2 and self.axial > other.axial:\n return True\n return False",
"def __le__(self, other: Card) -> bool:\n return compare_map[self.number] <= compare_map[other.number]",
"def __gt__(self,other):\n if isinstance(other, RegularPoly):\n return(self.vert_count > other.vert_count)\n else:\n raise NotImplementedError('Incorrect data type')",
"def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False",
"def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2",
"def __gt__(self, other):\n return self.eval_score < other.eval_score",
"def __gt__(self, other):\n return self.__ge__(other) and self.__ne__(other)",
"def __gt__(self, transposon):\n return self.score > transposon.score",
"def __gt__(self,other):\r\n\t\tsorted_self = sorted(self.vector, reverse=True) #sort both lists in descending order\r\n\t\tsorted_other = sorted(other, reverse=True) \r\n\t\tcmpflag = False\r\n\t\tfor li1, li2 in zip(sorted_self, sorted_other):\r\n\t\t\tif(li1 > li2):\r\n\t\t\t\tcmpflag = True\r\n\t\treturn cmpflag"
] | [
"0.61225253",
"0.5890521",
"0.5887941",
"0.58851635",
"0.5883985",
"0.5871",
"0.5871",
"0.58323383",
"0.57970667",
"0.57674",
"0.57477105",
"0.57477105",
"0.5667831",
"0.5658871",
"0.56532055",
"0.5647148",
"0.56317246",
"0.56295174",
"0.5624304",
"0.55907315",
"0.5584328",
"0.55796695",
"0.5568212",
"0.5559307",
"0.5554279",
"0.5547663",
"0.55410206",
"0.5536703",
"0.55292314",
"0.55155945"
] | 0.73677325 | 0 |
Classroom, Classroom -> list Returns the equipment in the first room that is missing in the second | def equipment_differences(self, room2):
return sorted(list(set(self.equipment).difference(room2.equipment))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_rooms(self, exclude=[]):\n stmt = Session.query(Lesson.room, Lesson.day, Lesson.order,\n Lesson.schedule_id)\n stmt = stmt.group_by(Lesson.room, Lesson.order, Lesson.day, Lesson.schedule_id)\n stmt = stmt.having(func.count(Lesson.room)>1)\n stmt = stmt.filter(not_(Lesson.room.in_(exclude)))\n stmt = stmt.subquery()\n q = Session.query(Lesson).join((stmt, and_(\n Lesson.room == stmt.c.room,\n Lesson.day == stmt.c.day,\n Lesson.order == stmt.c.order,\n Lesson.schedule_id == stmt.c.schedule_id)))\n q = q.order_by(Lesson.day, Lesson.order, Lesson.room)\n\n conflicts = q.all()\n if len(conflicts) == 0:\n return []\n rooms = [[conflicts.pop(0), conflicts.pop(0)]]\n for c in conflicts:\n prev = rooms[-1][-1]\n if c.room == prev.room and c.day == prev.day and c.order == \\\n prev.order and c.schedule_id == prev.schedule_id:\n rooms[-1].append(c)\n else:\n rooms.append([c])\n return rooms",
"def check_for_unguarded_rooms(museum):\r\n\tempty_rooms = []\r\n\r\n\tfor row_idx in range(len(museum)):\t\r\n\t\t\r\n\t\tfor item_idx in range(len(museum[row_idx])): #Go back and fix this to be enumerate instead\r\n\t\t\t\r\n\t\t\tif museum[row_idx][item_idx] == \"0\":\r\n\t\t\t\tempty_rooms.append([row_idx, item_idx])\r\n\r\n\t# for row_idx, row_value in enumerate(museum):\r\n\t# \tfor item_idx, item_value in enumerate(row):\r\n\t# \t\tif item_value == \" \":\r\n\t# \t\t\tprint(item)\r\n\t# \t\t\tempty_rooms.append([row_idx, item_idx]) # need index\r\n\r\n\tif not empty_rooms:\r\n\t\tprint(\"true\")\r\n\t\r\n\telse:\r\n\t\tprint(\"false\")\r\n\r\n\t\tfor room in empty_rooms:\r\n\t\t\tprint(str(room[0]) + \" \" + str(room[1]))",
"def check_room(rooms):\n free_rooms = []\n booked_rooms = [] \n for element in rooms:\n if element['booked'] == False:\n free_rooms.append(element)\n else:\n booked_rooms.append(element)\n return free_rooms, booked_rooms",
"def captain_room(room_list):\n captain_room = '' \n #store the list in total_rooms\n total_rooms = room_list \n #get the unique rooms without repetition of any room\n unique_rooms_num = set(total_rooms) \n #Remove the unique room from the list of total rooms\n for n in unique_rooms_num:\n total_rooms.remove(n) \n without_captain_room = total_rooms\n #The original total room list does not contain captain room number anymore\n #check by print(total_rooms)\n\n #Now, Compare the unique room number: that contains captain number with\n #list without_captain_room\n for i in unique_rooms_num:\n if i not in without_captain_room: \n captain_room = i\n \n return captain_room",
"def findUnoccupied( board, occupation):\n return [ j for j in xrange(len(board.positions))\n if not occupation.has_key(j) ]",
"def get_room_list():\n room_list = []\n # 0\n room = Room(\n \"Patio. You are in a patio, it is surrounded by four long walls.\\n\\\n The south wall has a metal door.\",\n north = None,\n east = None,\n south = 2,\n west = None\n )\n room_list.append(room)\n # 1\n room = Room(\n \"Bedroom. You are in a bedroom, there is nothing uncommon.\\n\\\n The door lead to east\",\n north = None,\n east = 2,\n south = None,\n west = None\n )\n room_list.append(room)\n # 2\n room = Room(\n \"Kitchen. You are in a kitchen, there is some porridge on the oven.\\n\\\n There are three doors, north, west, south\",\n north = 0,\n east = None,\n south = 4,\n west = 1\n )\n room_list.append(room)\n # 3\n room = Room(\n \"Tv room. You are in the tv room, there is an old tv set, \\n\\\n seems like it hasn't being used for years.\\n\\\n The door lead to east\",\n north = None,\n east = 4,\n south = None,\n west = None\n )\n room_list.append(room)\n # 4\n room = Room(\n \"Dinning room. You are in a dinning room, there is a big round table.\\n\\\n There are three doors, north, west, south\",\n north = 2,\n east = None,\n south = 6,\n west = 3\n )\n room_list.append(room)\n # 5\n room = Room(\n \"Master bedroom. You are in the master bedroom, there are many religious things all over.\\n\\\n The door lead to east\",\n north = None,\n east = 6,\n south = None,\n west = None\n )\n room_list.append(room)\n # 6\n room = Room(\n \"Living room. You are in the living room, you can see the exit door to the south.\\n\\\n There are three doors, north, west, south\",\n north = 4,\n east = None,\n south = 7,\n west = 5\n )\n room_list.append(room)\n return room_list",
"def rooms_opposite(self, room):\n if room in self.adjacencies:\n room_side = self.adjacencies[room][0].self_edge_index\n other_side = 0 if room_side == 2 else 2\n return self.adjacencies.filter(lambda obj, info: isinstance(obj, Room) and info.self_edge_index == other_side)",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if len(rooms)==0:\n return rooms\n n, m = len(rooms), len(rooms[0])\n for i in range(n):\n for j in range(m):\n if rooms[i][j] == 0:\n q= [(i,j,0)]\n while q:\n ci, cj , d = q.pop(0)\n for ni, nj in [(ci+1,cj),(ci-1,cj),(ci,cj+1),(ci,cj-1)]:\n if 0<=ni<n and 0<=nj<m and rooms[ni][nj]!=-1:\n if rooms[ni][nj]>d+1:\n rooms[ni][nj] = d+1\n q.append((ni,nj,d+1))\n return rooms",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n def get_gates(rooms):\n res = []\n M, N = len(rooms), len(rooms[0])\n for i in range(M):\n for j in range(N):\n if rooms[i][j] == 0:\n res.append((i, j))\n return res\n \n que = [(i, j, 0) for i, j in get_gates(rooms)]\n #seen = set()\n deltas = [[0, 1], [0, -1], [-1, 0], [1, 0]]\n R, C = len(rooms), len(rooms[0])\n INF = 2**31 - 1\n while que:\n i, j, distance = que.pop(0)\n for dx, dy in deltas:\n x, y = i + dx, j + dy\n if 0 <= x < R and 0 <= y < C and rooms[x][y] == INF:\n rooms[x][y] = distance + 1\n que.append((x, y, distance + 1))",
"def find_all_available_rooms(self) -> webelement:\n\n available_rooms = list()\n room_listing_section = self.__get_room_listing_section()\n location_div_list = room_listing_section.find_elements(\n By.XPATH,\n \".//div[contains(@class, 'room_listing_bg')]\"\n )\n for location_div in location_div_list:\n house_name = location_div.find_element(\n By.XPATH,\n \".//div[1]/h2[@class='room_data_headline']\"\n ).text\n room_list_table = location_div.find_element(\n By.XPATH,\n \".//table[contains(@class, 'room_data_table')]\"\n )\n room_tr_list = room_list_table.find_elements(By.TAG_NAME, \"tr\")\n room_tr_list = room_tr_list[1:]\n for room_tr in room_tr_list:\n room_type = room_tr.find_element(By.XPATH, \".//td[1]\").text\n number_of_persons = room_tr.find_element(By.XPATH, \".//td[2]\").text\n free_at = room_tr.find_element(By.XPATH, \".//td[3]\").text\n free_at = re.sub(r'^([^\\s]*)\\s+', r'\\1, ', free_at)\n price_euro = room_tr.find_element(By.XPATH, \".//td[4]\").text\n size_square_meter = room_tr.find_element(By.XPATH, \".//td[5]\").text\n floor = room_tr.find_element(By.XPATH, \".//td[6]\").text\n selection_radios = room_tr.find_elements(By.XPATH, \".//td[7]/input[@type='radio']\")\n if len(selection_radios) > 0:\n radio_value = selection_radios[0].get_attribute(\"value\")\n available_rooms.append(\n RoomInfo(\n house_name,\n room_type,\n number_of_persons,\n free_at,\n price_euro,\n size_square_meter,\n floor,\n radio_value\n )\n )\n\n return available_rooms",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n rows = len(rooms)\n if rows == 0:\n return\n cols = len(rooms[0])\n GATE = 0\n EMPTY = pow(2, 31) - 1\n DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n q = deque([])\n for i in range(rows):\n for j in range(cols):\n if rooms[i][j] == GATE:\n q.append((i, j))\n while q:\n x, y = q.popleft()\n for idx, idy in DIRECTIONS:\n if x + idx < 0 or x + idx >= rows or y + idy < 0 or y + idy >= cols or rooms[x + idx][y + idy] != EMPTY:\n continue\n rooms[x + idx][y + idy] = rooms[x][y] + 1\n q.append((x + idx, y + idy))",
"def possible_rooms(self):\r\n return self.rooms",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n offsets = [[0,1],[1,0],[0,-1],[-1,0]]\n size = [len(rooms), len(rooms[0])]\n q = []\n for i in range(len(rooms)):\n for j in range(len(rooms[i])):\n if rooms[i][j] == 0:\n q.append([i,j])\n while len(q) != 0:\n cur = q.pop(0)\n for i in offsets:\n x = cur[0]+i[0]\n y = cur[1]+i[1]\n if x >= size[0] or x<0 or y>=size[1] or y<0 or rooms[x][y] != 2147483647:\n continue\n rooms[x][y] = rooms[cur[0]][cur[1]]+1\n q.append([x,y])",
"def get_equipment_from_inventory(self):\n return [x for x in self.inventory if x.is_equip()]",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if not rooms or not rooms[0]:\n return \n \n queue = deque([])\n for i in range(len(rooms)):\n for j in range(len(rooms[i])):\n if rooms[i][j] == 0:\n queue.append((i,j, 0))\n \n visited = set()\n while queue:\n i, j, distance = queue.popleft()\n \n if not (0 <= i < len(rooms) and 0 <= j < len(rooms[0])):\n continue\n \n if rooms[i][j] == -1:\n continue\n \n if (i, j) in visited:\n continue\n \n visited.add((i, j))\n if rooms[i][j] != 0:\n rooms[i][j] = distance\n \n for offset in OFFSETS:\n queue.append((i + offset[0], j + offset[1], distance + 1))",
"def find_missing_pos_in_motif_otherclass(motifs, motname_li,dict_class,current_class):\n motifs_def=[]\n n=0\n for motname in motname_li:\n found=[]\n found_ranges=[]\n not_found=[]\n for e in motifs:\n if e[0]==motname:\n if not e[2]:\n not_found.append(e[1])\n else:\n motpos= e[1] + current_class.lower() + \" = \" + e[4] +dict_class.lower()\n found.append(motpos)\n found_ranges.append(e[3])\n \n num_nf=len(not_found)\n ranges_all=\",\".join(found_ranges)\n if num_nf ==0:\n motifs_def.append([motname,\" ; \".join(found),(ranges_all)])\n elif num_nf ==3:\n motifs_def.append([motname,\"Motif not found\",\"None\"])\n else:\n motifs_def.append([motname,(\" ; \".join(found) + \" (\" + \" , \".join(not_found) + \" not found)\"),(ranges_all)])\n n+=1\n return motifs_def",
"def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r",
"def get_room_names(soup: bs4.BeautifulSoup) -> Iterable[str]:\n return set(x.string for x in soup.Lecture.find_all(\"RaumBez\"))",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n lenm = len(rooms)\n lenn = len(rooms[0])\n \n queue = []\n visited = set()\n \n direction = [(0,1),(1,0),(0,-1),(-1,0)]\n \n for i in range(lenm):\n for j in range(lenn):\n if rooms[i][j] == 0:\n queue.append(((i,j),0))\n visited.add((i,j))\n while queue:\n n = len(queue)\n for _ in range(n):\n (a,b),val = queue.pop(0)\n if rooms[a][b] != -1:\n rooms[a][b] = val\n for x,y in direction:\n i = a+x\n j = b+y\n if i <0 or i>=lenm or j<0 or j>=lenn or rooms[i][j] == -1:\n continue\n if (i,j) not in visited:\n visited.add((i,j))\n queue.append(((i,j),val+1))\n \n \n \n \n return rooms",
"def __repr__(self):\n return \"Classroom('{}', {}, {})\".format(self.number, self.capacity,\n str(self.equipment))",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\r\n if not rooms:\r\n return rooms\r\n \r\n ind_0s = []\r\n len_row = len(rooms)\r\n len_col = len(rooms[0])\r\n for i in range(len_row):\r\n for j in range(len_col):\r\n if rooms[i][j] == 0:\r\n ind_0s.append((i,j))\r\n que = ind_0s\r\n while que:\r\n i,j = que.pop()\r\n neighbors = [(i,j-1), (i-1,j), (i,j+1), (i+1,j)]\r\n if rooms[i][j] == 0:\r\n parent_distance = 0\r\n else:\r\n parent_distance = rooms[i][j]\r\n for x, y in neighbors:\r\n if x>=0 and x<len_row and y>=0 and y<len_col and rooms[x][y]!=-1 and rooms[x][y] and \\\r\n parent_distance+1 < rooms[x][y]:\r\n que.append((x,y))",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n ## bfs:\n queue= []\n for r in range(len(rooms)):\n for c in range(len(rooms[0])):\n if rooms[r][c]==0:\n queue.append((r,c, 0)) \n \n while queue:\n r,c, l= queue.pop(0)\n for new_r, new_c in [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]:\n if new_r>=0 and new_c>=0 and new_r<len(rooms) and \\\n new_c<len(rooms[0]) and rooms[new_r][new_c]==2**31 -1:\n rooms[new_r][new_c]=l+1\n queue.append((new_r, new_c, l+1))\n return rooms",
"def _find_homeless_mps(self):\n mps = Person.objects.filter(\n active=True,\n house__name=HOUSE_OF_COMMONS,\n constituency=None,\n )\n\n self.stdout('MPs with missing constituency:')\n for mp in mps:\n self.stdout(f' [{mp.parliamentdotuk}] {mp.name} has no constituency')",
"def _find_memberless_constituencies(self):\n constituencies = Constituency.objects.filter(\n end=None, # Constituency currently exists/is not historical\n mp=None,\n )\n\n self.stdout('Constituencies with missing MP:')\n for constituency in constituencies:\n self.stdout(f'[{constituency.parliamentdotuk}] {constituency.name} {constituency.start}')",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n # start with the gate and search the empty cells it can reach\n # then we take the smallest one\n if not rooms:\n return\n\n m, n = len(rooms), len(rooms[0])\n \n queue = []\n for i in range(m):\n for j in range(n):\n if rooms[i][j] == 0:\n queue.append((i, j))\n\n\n for x, y in queue:\n dist = rooms[x][y] + 1\n\n for dx, dy in ((-1, 0), (1, 0), (0, 1), (0, -1)):\n new_x, new_y = x+dx, y+dy\n if 0 <= new_x < m and 0 <= new_y < n and rooms[new_x][new_y] == 2147483647:\n rooms[new_x][new_y] = dist\n queue.append((new_x, new_y))",
"def get_partidos_espera_equipos_no_jugando_list(fase):\n\tpartidos_espera_list = get_partidos_espera_list(fase)\n\tpartidos_jugando_list = get_partidos_jugando_list(fase)\n\tequipos_jugando = []\n\tfor partido_jugando in partidos_jugando_list:\n\t\tequipos_jugando.append(partido_jugando.equipo_local)\n\t\tequipos_jugando.append(partido_jugando.equipo_visitante)\n\tpartidos_espera_equipos_no_jugando_list = partidos_espera_list.exclude(equipo_local__in=equipos_jugando) \\\n\t\t.exclude(equipo_visitante__in=equipos_jugando)\n\treturn partidos_espera_equipos_no_jugando_list",
"def get_item_bedrooms(self, soup: BeautifulSoup) -> None:\n try:\n bedrooms = (\n soup.find(\"div\", class_=\"_kqh46o\")\n .find_all(\"span\", class_=\"_3hmsj\")[1]\n .get_text()\n )\n try:\n bedrooms = re.findall(\"[0-9]+\", bedrooms)[0]\n studio = 0\n except IndexError:\n bedrooms = 1\n studio = 1\n except (AttributeError, IndexError):\n studio = None\n bedrooms = None\n self.__collected_dic[\"studio\"].append(studio)\n self.__collected_dic[\"bedrooms\"].append(bedrooms)",
"def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]",
"def find_missing_pos_in_motif(motifs, motname_li):\n motifs_def=[]\n n=0\n for motname in motname_li:\n found=[]\n found_ranges=[]\n not_found=[]\n for e in motifs:\n if e[0]==motname:\n if not e[2]:\n not_found.append(e[1])\n else:\n found.append(e[1])\n found_ranges.append(e[3])\n \n num_nf=len(not_found)\n ranges_all=\",\".join(found_ranges)\n if num_nf ==0:\n motifs_def.append([motname,\" , \".join(found),(ranges_all)])\n elif num_nf ==3:\n motifs_def.append([motname,\"Motif not found\",\"None\"])\n else:\n motifs_def.append([motname,(\" , \".join(not_found)+\" not found\"),(ranges_all)])\n n+=1\n return motifs_def",
"def availableSquares(self):\n List2=[]\n for item in self.all:\n if item.retrieve()==\"\":\n List2.append(item.name())\n return List2"
] | [
"0.6358367",
"0.6313788",
"0.6150663",
"0.60647494",
"0.5886441",
"0.5728688",
"0.5633727",
"0.5608109",
"0.55511093",
"0.53285015",
"0.5248454",
"0.523609",
"0.5177248",
"0.5149604",
"0.51473695",
"0.5124947",
"0.5094668",
"0.50736094",
"0.50731784",
"0.50704306",
"0.50624216",
"0.50523317",
"0.50450975",
"0.50313354",
"0.50245416",
"0.501638",
"0.49762192",
"0.4965608",
"0.49499065",
"0.49391097"
] | 0.6854201 | 0 |
Takes a datetime object and returns POSIX UTC in nanoseconds | def date_to_nano(ts):
return calendar.timegm(ts.utctimetuple()) * int(1e3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)",
"def get_epoch_time(utc_datetime=None):\n if not utc_datetime:\n utc_datetime = datetime.datetime.utcnow()\n return math.ceil((utc_datetime - EPOCH_START).total_seconds())",
"def datetime_to_gpstimestamp_nanoseconds(date):\n timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple()))\n nanosecond = date.microsecond * 1000\n\n return timestamp, nanosecond",
"def get_utc_timestamp(utc_datetime=None):\n if utc_datetime is None:\n utc_datetime = get_now_utc()\n diff = utc_datetime - TIMESTAMP_0\n return int(diff.total_seconds() * 10**6)",
"def get_utc_timestamp(utc_datetime=None):\n if utc_datetime is None:\n utc_datetime = get_now_utc()\n diff = utc_datetime - TIMESTAMP_0\n return int(diff.total_seconds() * 10**6)",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def datetime_to_epoch_microseconds(obj: \"datetime\") -> float:\n td = datetime_to_epoch_timedelta(obj)\n return (td.days * 86400 + td.seconds) * 10**6 + td.microseconds",
"def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)",
"def utc_timestamp(d):\n if isinstance(d, pd.tslib.Timestamp):\n # Pandas datetime timestamp whatever thing\n if d.tz != pytz.utc:\n d = d.tz_localize('UTC')\n else:\n # Normal datetime object\n if d.tzinfo != pytz.utc:\n d = pytz.utc.localize(d)\n # Yes, you have to write it out like this, there is no convenient method.\n # See http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python\n # Datetimes in python really are a mess...\n return (d - datetime(1970, 1, 1, tzinfo=timezone('UTC'))) / timedelta(seconds=1)",
"def utctime(self) -> datetime:\n return datetime.utcfromtimestamp(float(self.ns_since_epoch) / 1e9)",
"def timestampfromutc(utc):\n return (utc - datetime(1970, 1, 1)).total_seconds()",
"def datetime_to_utc(timestamp):\n\n epoch = datetime.utcfromtimestamp(0)\n delta = timestamp-epoch\n\n return long(delta.total_seconds() * 1000)",
"def utc_millisecond_timestamp():\n return __date_to_millisecond_ts(utc())",
"def isotime(dt):\n return dt.astimezone(utc).strftime(\"%Y-%m-%dT%H:%M:%SZ\")",
"def getutv(self):\n t = datetime.datetime.now()\n utc_seconds = (time.mktime(t.timetuple()))\n utc_seconds = int(utc_seconds * 1000)\n return str(utc_seconds)",
"def utctime(stamp):\n return stamp + utc_offset",
"def utctotimestamp(dt):\n return calendar.timegm(dt.utctimetuple())",
"def unixTimeMs(dateAndTime):\n dateAndTime = dateAndTime + datetime.timedelta(hours=HOUR_ADJUSTMENT)\n return int((dateAndTime - EPOCH).total_seconds() * 1000.0)",
"def datetime_to_timestamp(dt):\n\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n return (dt - epoch).total_seconds()",
"def utcnow_ts():\r\n return calendar.timegm(utcnow().timetuple())",
"def timestamp(dt):\n\tif dt.tzinfo is None:\n\t\treturn time.mktime((\n\t\t\tdt.year, dt.month, dt.day,\n\t\t\tdt.hour, dt.minute, dt.second,\n\t\t\t-1, -1, -1)) + dt.microsecond / 1e6\n\telse:\n\t\treturn (dt - _EPOCH).total_seconds()",
"def timestamp(self):\n # this only returns second precision, which is why we don't use it\n #now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())\n\n # this returns microsecond precision\n # http://bugs.python.org/msg180110\n epoch = datetime.datetime(1970, 1, 1)\n return (self - epoch).total_seconds()",
"def datetime_to_microseconds(dt):\n seconds = calendar.timegm(dt.utctimetuple())\n return seconds * 1000000 + dt.microsecond",
"def timestamp_from_datetime(date):\n if getattr(date, 'tzinfo', None) is None:\n return (date - datetime.datetime(1970, 1, 1)).total_seconds()\n else:\n return (date - datetime.datetime(\n 1970, 1, 1, tzinfo=pytz.utc)).total_seconds()",
"def to_timestamp(date_time: datetime, unit: TimeUnit = TimeUnit.SECONDS) -> float:\n return date_time.replace(tzinfo=timezone.utc).timestamp() * (1000 ** int(unit))",
"def getUnixTime(pool=\"time.apple.com\"):\n time_offset = ntplib.NTPClient().request(pool).offset\n return float(time.time()+time_offset)",
"def EpochNano():\n return int(time.time() * 1000000000)",
"def datetime_to_timestamp(dt):\n delta = dt - datetime.utcfromtimestamp(0)\n return delta.seconds + delta.days * 24 * 3600",
"def db_datetime_utc():\n t = datetime.datetime.utcnow()\n return time.mktime(t.timetuple())",
"def timestamp():\n return round(datetime.datetime.utcnow().timestamp())"
] | [
"0.7196696",
"0.6794556",
"0.67814964",
"0.6744819",
"0.6744819",
"0.67223793",
"0.6684839",
"0.6624673",
"0.65977186",
"0.6539144",
"0.6514163",
"0.65055406",
"0.64669174",
"0.64562154",
"0.63818485",
"0.6372414",
"0.6349268",
"0.62804013",
"0.62467533",
"0.6239043",
"0.6236769",
"0.6234971",
"0.62253135",
"0.620618",
"0.6202328",
"0.62020224",
"0.6190668",
"0.6166496",
"0.6133824",
"0.6131081"
] | 0.6813911 | 1 |
Crop a square from a random location in an image | def crop_square(image, size):
width, height = image.size
top = random.randint(0, max(0, height-size))
left = random.randint(0, max(0, width-size))
bottom = min(top + size, height)
right = min(left + size, width)
return image.crop((left, top, right, bottom)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __randomCrop(self, img):\n limit = self.PROCESSING_DIM - self.INPUT_DIM\n # pick 2 random integers less than this limit as the origin of the cropped image\n x_start = np.random.randint(limit)\n y_start = np.random.randint(limit)\n return img.crop((x_start, y_start, x_start + self.INPUT_DIM, y_start + self.INPUT_DIM))",
"def img_agu_crop(img_):\n\tscale_ = 5\n\txmin = max(0, random.randint(0, scale_))\n\tymin = max(0, random.randint(0, scale_))\n\txmax = min(img_.shape[1]-1, img_.shape[1]-random.randint(0, scale_))\n\tymax = min(img_.shape[0]-1, img_.shape[0]-random.randint(0, scale_))\n\treturn img_[ymin : ymax, xmin : xmax , : ]",
"def crop_img(img, random_tab):\n dy, dx = (i / 6 for i in img.shape[:2])\n x1 = int(random_tab[0] * dx)\n x2 = int((random_tab[1] + 5) * dx)\n y1 = int(random_tab[2] * dy)\n y2 = int((random_tab[1] + 5) * dy)\n img = img[y1:y2, x1:x2]\n return img",
"def random_crop(image, ratio = 0.75):\n reshape_size = image.shape[0]\n width = int(reshape_size * ratio)\n height = int(reshape_size * ratio)\n x = random.randint(0, reshape_size - width)\n y = random.randint(0, reshape_size - height)\n image = image[y:y+height, x:x+width, :] \n return image",
"def square_image(img):\r\n x,y = img.size\r\n while y > x:\r\n #slice 10px at a time until square\r\n slice_height = min(y - x, 10)\r\n\r\n bottom = img.crop((0, y - slice_height, x, y))\r\n top = img.crop((0, 0, x, slice_height))\r\n\r\n #remove the slice with the least entropy\r\n if image_entropy(bottom) < image_entropy(top):\r\n img = img.crop((0, 0, x, y - slice_height))\r\n else:\r\n img = img.crop((0, slice_height, x, y))\r\n\r\n x,y = img.size\r\n\r\n return img",
"def random_crop(img, target_shape):\n rest = imgproc._get_crop2d_rest(img, target_shape)\n start = _rand_2dshape(rest)\n return imgproc._crop2d(img, start, target_shape)",
"def __crop(img, pos, size):\n ow, oh = img.size\n x1, y1 = pos\n tw = th = size\n if (ow > tw or oh > th):\n return img.crop((x1, y1, x1 + tw, y1 + th))\n return img",
"def crop_random(crop_size_x, crop_size_y, image, corrupted_im=None):\r\n h, w = image.shape\r\n limit_x, limit_y = h - crop_size_x, w - crop_size_y\r\n start_x = random.randint(0, limit_x)\r\n start_y = random.randint(0, limit_y)\r\n cropped_im = image[start_x: start_x + crop_size_x, start_y: start_y + crop_size_y]\r\n if corrupted_im is not None:\r\n corrupted_im = corrupted_im[start_x: start_x + crop_size_x, start_y: start_y + crop_size_y]\r\n return cropped_im, corrupted_im",
"def crop_random(X,Y,random_crop=False,size_crop=_size_crop):\n b = size_crop//2\n shape = tf.shape(X)\n if random_crop: \n cx = tf.random.uniform(shape=(1,),minval=b,maxval=(shape[0]-b),dtype=tf.int32)[0]\n cy = tf.random.uniform(shape=(1,),minval=b,maxval=(shape[1]-b),dtype=tf.int32)[0]\n return X[cx-b:cx+b,cy-b:cy+b,...], Y[cx-b:cx+b,cy-b:cy+b,...]\n else: \n return crop(X,size_crop=size_crop),crop(Y,size_crop=size_crop)",
"def crop_to_square(self, image):\n orig_height, orig_width, orig_channels = image.shape\n if orig_height > orig_width:\n return image[:orig_width, ...]\n elif orig_height < orig_width:\n return image[:, :orig_height, ...]\n return image",
"def random_crop(image, gt, crop_height, crop_width, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n height, width = image.shape[:2]\n\n y = random_state.randint(0, height - crop_height)\n x = random_state.randint(0, width - crop_width)\n\n cropped_image = image[y:y + crop_height, x:x + crop_width, :]\n cropped_gt = gt[y:y + crop_height, x:x + crop_height]\n\n cropped_image = cv2.resize(cropped_image, (width, height), interpolation=cv2.INTER_NEAREST)\n cropped_gt = cv2.resize(cropped_gt, (width, height), interpolation=cv2.INTER_NEAREST)\n\n return cropped_image, cropped_gt",
"def random_crop(self, img, output_img_h = 0.5, output_img_w = 0.5, p = 0.5):\n if self.decision(p):\n height, width, channels = img.shape\n new_height = random.randint(int(height * output_img_h), height)\n new_width = random.randint(int(width * output_img_w), width)\n y = random.randint(0, height - new_height)\n x = random.randint(0, width - new_width)\n roi = img[y:y + new_height, x:x + new_width]\n # check if cut is ahve to much dark pixels, more then 20 %\n non_zeros = np.count_nonzero(roi)\n non_zeros_procent = non_zeros / roi.size\n if non_zeros_procent < 0.8:\n pass\n else:\n img = roi\n return img",
"def crop_image(image):\r\n return image[40:-20, :]",
"def random_crop(image, steering = 0.0, tx_lower = -20, tx_upper = 20, ty_lower = -2, ty_upper = 2, rand = True):\n\n shape = image.shape\n (col_start, col_end) = (abs(tx_lower), shape[1] - tx_upper)\n horizon = 60\n bonnet = 136\n if rand:\n tx = np.random.randint(tx_lower, tx_upper + 1)\n ty = np.random.randint(ty_lower, ty_upper + 1)\n else:\n (tx, ty) = (0, 0)\n\n crop = image[horizon + ty: bonnet + ty, col_start + tx: col_end + tx, :]\n image = cv2.resize(crop, (320, 160), cv2.INTER_AREA)\n # the steering variable needs to be updated to counteract the shift \n if tx_lower != tx_upper:\n dsteering = -tx / (tx_upper - tx_lower) / 3.0\n else:\n dsteering = 0\n steering += dsteering\n\n return image, steering",
"def doCrop(image, x, y, w, h):\n\tcrop_height = int((config.FACE_HEIGHT / float(config.FACE_WIDTH)) * w)\n\tmidy = y + h/2\n\ty1 = max(0, midy-crop_height/2)\n\ty2 = min(image.shape[0]-1, midy+crop_height/2)\n\treturn image[y1:y2, x:x+w]",
"def crop(img, size, point=(0, 0)):\n y, x = point\n w, h = size\n hf, wf, _ = img.shape\n\n if not isinstance(x, int):\n y = min(int(wf * y), wf)\n x = min(int(hf * x), hf)\n\n if not isinstance(w, int):\n w = int(wf * w)\n h = int(hf * h)\n\n x2 = min(x + h, hf) - 1\n y2 = min(y + w, wf) - 1\n log.debug(\"w = %d, x2=%d, %s\" % (w, x2, img.shape))\n img2 = img[x:x2, y:y2, :].copy()\n return img2",
"def test_random_crop(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomCrop(size=(64, 64))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, image.shape[2])\n assert _label.shape == (64, 64, label.shape[2])\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomCrop(size=(64, 64, 8))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, 8, image.shape[3])\n assert _label.shape == (64, 64, 8, label.shape[3])",
"def crop_img(image, bound):\n scale = 1.01 # 1%\n return image.crop((bound.vertices[0].x // scale, bound.vertices[0].y // scale,\n int(bound.vertices[2].x * scale), int(bound.vertices[2].y) * scale))",
"def crop(img, x, y, w, h):\n check_type(img)\n return img.crop((x, y, x + w, y + h))",
"def crop_image(image):\n delta = .05\n rand_top_ratio = random.uniform(default_top_ratio - delta,\n default_top_ratio + delta)\n rand_bot_ratio = random.uniform(default_bot_tatio - delta,\n default_bot_tatio + delta)\n image = preprocess(image, top_ratio=rand_top_ratio, bot_ratio=rand_bot_ratio)\n\n return image",
"def random_crop_params(self, img, output_size):\n w, h = img.size\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw) \n return i, j, th, tw",
"def crop_to_square(image):\n\n if image is None:\n return None\n w, h = (image.shape[1], image.shape[0])\n w = float(w)\n h = float(h)\n\n # only crop images automatically if the aspect ratio is not bigger than 2 or not smaller than 0.5\n aspectRatio = w / h\n if aspectRatio > 3 or aspectRatio < 0.3:\n return None\n if aspectRatio == 1.0:\n return image\n \n # the shortest edge is the edge of our new square. b is the other edge\n a = min(w, h)\n b = max(w, h)\n\n # get cropping position\n x = (b - a) / 2.0\n\n # depending which side is longer we have to adjust the points\n # Heigth is longer\n if h > w:\n upperLeft = (0, x) \n else:\n upperLeft = (x, 0)\n cropW = cropH = a \n return crop_image(image, upperLeft[0], upperLeft[1], cropW, cropH)",
"def sample_crop_box(self, img_size, results):\n\n assert isinstance(img_size, tuple)\n h, w = img_size[:2]\n\n key_masks = results[self.instance_key].masks\n x_valid_array = np.ones(w, dtype=np.int32)\n y_valid_array = np.ones(h, dtype=np.int32)\n\n selected_mask = key_masks[np.random.randint(0, len(key_masks))]\n selected_mask = selected_mask[0].reshape((-1, 2)).astype(np.int32)\n max_x_start = max(np.min(selected_mask[:, 0]) - 2, 0)\n min_x_end = min(np.max(selected_mask[:, 0]) + 3, w - 1)\n max_y_start = max(np.min(selected_mask[:, 1]) - 2, 0)\n min_y_end = min(np.max(selected_mask[:, 1]) + 3, h - 1)\n\n for key in results.get('mask_fields', []):\n if len(results[key].masks) == 0:\n continue\n masks = results[key].masks\n for mask in masks:\n assert len(mask) == 1\n mask = mask[0].reshape((-1, 2)).astype(np.int32)\n clip_x = np.clip(mask[:, 0], 0, w - 1)\n clip_y = np.clip(mask[:, 1], 0, h - 1)\n min_x, max_x = np.min(clip_x), np.max(clip_x)\n min_y, max_y = np.min(clip_y), np.max(clip_y)\n\n x_valid_array[min_x - 2:max_x + 3] = 0\n y_valid_array[min_y - 2:max_y + 3] = 0\n\n min_w = int(w * self.min_side_ratio)\n min_h = int(h * self.min_side_ratio)\n\n x1, x2 = self.sample_valid_start_end(x_valid_array, min_w, max_x_start,\n min_x_end)\n y1, y2 = self.sample_valid_start_end(y_valid_array, min_h, max_y_start,\n min_y_end)\n\n return np.array([x1, y1, x2, y2])",
"def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))",
"def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))",
"def random_crop(img, mask):\n if str(img.dtype) != 'uint8':\n img = (img * 255).astype(np.uint8)\n if str(mask.dtype) != 'uint8':\n mask = (mask * 255).astype(np.uint8)\n img = Image.fromarray(img)\n mask = Image.fromarray(mask)\n x, y = img.size\n matrix = 256\n img_list = []\n label_list = []\n for i in range(CROP_NUM):\n x1 = randrange(0, x - matrix)\n y1 = randrange(0, y - matrix)\n img_list.append(img.crop((x1, y1, x1 + matrix, y1 + matrix)))\n label_list.append(mask.crop((x1, y1, x1 + matrix, y1 + matrix)))\n\n return img_list, label_list",
"def crop(image, dimX, dimY):\n # TODO\n return image",
"def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"",
"def crop_image(input_image, output_image, start_x, start_y, width, height):\n box = (start_x, start_y, start_x + width, start_y + height)\n output_img = img.crop(box)\n output_img.save(output_image +\".png\")",
"def test_crop(self):\r\n u = Uploader()\r\n size = (100, 100)\r\n im = Image.new('RGB', size)\r\n folder = tempfile.mkdtemp()\r\n u.upload_folder = folder\r\n im.save(os.path.join(folder, 'image.png'))\r\n coordinates = (0, 0, 50, 50)\r\n file = FileStorage(filename=os.path.join(folder, 'image.png'))\r\n with patch('pybossa.uploader.Image', return_value=True):\r\n err_msg = \"It should crop the image\"\r\n assert u.crop(file, coordinates) is True, err_msg\r\n\r\n with patch('pybossa.uploader.Image.open', side_effect=IOError):\r\n err_msg = \"It should return false\"\r\n assert u.crop(file, coordinates) is False, err_msg"
] | [
"0.7516474",
"0.7385669",
"0.7284761",
"0.72695327",
"0.7268751",
"0.7198979",
"0.71195275",
"0.709904",
"0.70559186",
"0.69418275",
"0.6898815",
"0.6874871",
"0.68389267",
"0.6755586",
"0.67538106",
"0.6685173",
"0.66399777",
"0.66174954",
"0.65900636",
"0.65848845",
"0.6583423",
"0.65732896",
"0.65438795",
"0.65225226",
"0.65225226",
"0.65165293",
"0.64842105",
"0.64750713",
"0.64726853",
"0.6467434"
] | 0.82719237 | 0 |
Generates an SGF file in the temp directory containing the provided game | def _get_input_filepath(self, game_id: int) -> str:
with self._db_connection as connection:
with connection.cursor() as cursor:
cursor.execute('SELECT sgf_content FROM games WHERE id=%s', (game_id,))
if cursor.rowcount == 0:
raise GameNotFoundError()
sgf_content, = cursor.fetchone()
file_descriptor, filepath = tempfile.mkstemp('.sgf')
with open(file_descriptor, 'wb') as file:
file.write(sgf_content)
return filepath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def instantiate_for_spirv_args(self, testcase):\n shader, self.filename = tempfile.mkstemp(\n dir=testcase.directory, suffix=self.suffix)\n shader_object = os.fdopen(shader, 'w')\n shader_object.write(self.source)\n shader_object.close()\n return self.filename",
"def main():\n\n args = parseArgs()\n\n path = args.path\n is_open_gl = args.g\n\n success, failure = genFiles(path, is_open_gl)\n\n print(\"Success: \", \", \".join(success))\n print(\"Failure: \", \", \".join(failure))\n\n ratio = len(success) / (len(success) + len(failure))\n\n print(\"%% success = %.2f\" % (100 * ratio))",
"def gen_fps():\n global data_src ,output_dir \n logger = TaskFileLogger(\"GenFP\")\n\n h_vars = load_hydro_var()\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for h_type,var_d in h_vars.items():\n print \"considering %s\" %h_type\n\n t_output_dir = os.path.join(output_dir,h_type)\n if not os.path.exists(t_output_dir):\n print \"creating path %s\" %t_output_dir\n os.mkdir(t_output_dir)\n logger.log(\"%s started\" %(h_type))\n\n for fname in glob.glob(data_src):\n complex_id = os.path.basename(fname).split('.')[0] \n fp_path = os.path.join(t_output_dir,complex_id + \".fp\" )\n if os.path.exists(fp_path):\n #print \"%s processed\" %complex_id\n continue\n print \"processing %s,fp saved as %s\" %(fname , fp_path )\n c = Complex(fname,hydro_dict = var_d)\n c.get_fp()\n c.write_fp_to_file(fp_path)\n\n logger.log(\"%s finished\" %(h_type))",
"def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)",
"def __init__(self, filename):\r\n self.__output__ = open(format(filename, '08X') + '.gen', 'wb')",
"def newfile(self) :\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\t\tglobal configurer\n\n\t\tfd,name = mkstemp(suffix='.blend')\n\t\tos.close(fd)\n\t\tself.name = name\n\t\tfd = open(name,'wb', configurer.get('ServerBufferSize'))\n\t\tself.fd = fd\n\t\tprint name\n\t\treturn 1",
"def generate_surface_temp(self):\n tcl_name = output_folder + \"/surface_output_\" + str(self.input_pdb_path).split(\"/\")[-1][0:-4] + \"_\" + str(self.current_chain) + \".tcl\"\n opened_file = open(tcl_name, \"w\")\n writeable_string = surface(self.input_pdb_path).surface_template(chain = str(self.current_chain))\n opened_file.write(writeable_string)",
"def makeSpkSetupFile(leapSecondFilePath, outputPath):\n\n # If the file already exists, delete it and rewrite it.\n if os.path.exists(outputPath):\n os.remove(outputPath)\n\n# print 'Generating LRONAC compatible .pvl file ' + halfResFilePath\n f = open(outputPath, 'w')\n f.write(\"\\\\begindata\\n\")\n f.write(\"INPUT_DATA_TYPE = 'STATES'\\n\")\n f.write(\"OUTPUT_SPK_TYPE = 13\\n\")\n f.write(\"OBJECT_ID = -85\\n\") # LRO\n f.write(\"CENTER_ID = 301\\n\") # Moon\n f.write(\"REF_FRAME_NAME = 'J2000'\\n\")\n f.write(\"PRODUCER_ID = 'Lronac Pipeline'\\n\")\n f.write(\"DATA_ORDER = 'epoch x y z vx vy vz'\\n\")\n f.write(\"DATA_DELIMITER = ','\\n\")\n f.write(\"LEAPSECONDS_FILE = '\" + leapSecondFilePath + \"'\\n\")\n f.write(\"LINES_PER_RECORD = 1\\n\")\n f.write(\"TIME_WRAPPER = '# ETSECONDS'\\n\")\n #f.write(\"EPOCH_STR_LENGTH = 16\\n\")\n f.write(\"INPUT_DATA_UNITS = ('ANGLES=DEGREES' 'DISTANCES=km')\\n\")\n f.write(\"POLYNOM_DEGREE = 11\\n\")\n f.write(\"SEGMENT_ID = 'SPK_STATES_13'\\n\")\n# f.write(\"INPUT_DATA_FILE = 'spkDataFile.txt'\")\n# f.write(\"OUTPUT_SPK_FILE = '/home/smcmich1/testSpkFile.bsp'\")\n f.write(\"\\\\begintext\\n\")\n f.close()",
"def generate():",
"def test_save_and_load_generation(logger):\n\n generations = 2\n\n options = {}\n options[\"population_size\"] = 10000\n options[\"in-trees\"] = 0\n options[\"out-trees\"] = 1\n options[\"in-actions\"] = 0\n options[\"out-actions\"] = 3\n options[\"library\"] = False\n options[\"seed\"] = None\n\n for generation_index in range(generations):\n population = []\n population_str = ''\n\n # Generate random strategies to initialize the population\n for i in range(options[\"population_size\"]):\n p = evolve.generate_strategy(logger, options[\"in-trees\"], options[\"out-trees\"], options[\"in-actions\"],\n options[\"out-actions\"],\n options[\"seed\"], environment_id=None)\n actions.utils.parse(str(p), logger)\n population.append(p)\n if i == options[\"population_size\"] - 1:\n population_str += str(p)\n else:\n population_str += str(p) + \"\\n\"\n\n # Write the generation file\n filename = os.path.join(test_files_directory, \"generation\" + str(generation_index))\n evolve.write_generation(filename, population)\n\n check_one_file(logger, options, filename, population)",
"def render_saved_game(saved_game, output_dir, prefix=''):\n if prefix:\n output_dir = os.path.join(output_dir, prefix + '_' + saved_game['id'])\n else:\n output_dir = os.path.join(output_dir, saved_game['id'])\n nb_phases = len(saved_game['phases'])\n svg_count = 0\n\n # Checking if already generated\n # Otherwise, regenerating completely\n if os.path.exists(output_dir):\n nb_svg = len([os.path.join(output_dir, file) for file in os.listdir(output_dir) if file[-4:] == '.svg'])\n if nb_svg == 2 * nb_phases:\n print('Rendered {} (Skipped)'.format(saved_game['id']))\n return\n shutil.rmtree(output_dir, ignore_errors=True)\n os.makedirs(output_dir, exist_ok=True)\n\n # Creating a Game to replay all orders, and a new Game object per phase to validate\n entire_game = Game()\n if saved_game['phases']:\n entire_game.set_state(saved_game['phases'][0]['state'])\n\n # Rendering\n for phase in saved_game['phases']:\n phase_game = Game()\n\n # Setting state\n state = phase['state']\n phase_game.set_state(state)\n entire_game.note = phase_game.note\n\n # Setting orders\n phase_game.clear_orders()\n orders = phase['orders']\n for power_name in orders:\n phase_game.set_orders(power_name, orders[power_name])\n entire_game.set_orders(power_name, orders[power_name])\n\n # Validating that we are at the same place\n for power_name in orders:\n assert sorted(phase_game.get_units(power_name)) == sorted(entire_game.get_units(power_name))\n assert sorted(phase_game.get_centers(power_name)) == sorted(entire_game.get_centers(power_name))\n\n # Rendering with and without orders\n with open(os.path.join(output_dir, '%03d%s' % (svg_count, '.svg')), 'w') as file:\n file.write(entire_game.render(incl_orders=False))\n svg_count += 1\n with open(os.path.join(output_dir, '%03d%s' % (svg_count, '.svg')), 'w') as file:\n file.write(entire_game.render(incl_orders=True))\n\n # Processing (for entire game)\n svg_count += 1\n entire_game.process()\n\n print('Rendered {}'.format(saved_game['id']))",
"def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Generating NUFEB simulation files\")\n\n # create nutrients\n light = Nutrient(1e-1, None, None, \"g\", \"nn\")\n co2 = Nutrient(float(args.co2), 1.9e-09, 44.01, \"l\", \"nn\")\n o2 = Nutrient(0.28125, 2.30e-9, 32, \"l\", \"nn\")\n sucrose = Nutrient(float(args.sucrose), 5.2e-10, 342.3, \"l\", \"nn\")\n gco2 = Nutrient(0, None, 44.01, \"g\", \"nn\")\n TEMPLATES_DIR = (Path(__file__).parent) / \"templates\"\n\n captureRate = round(1000 / args.timestep)\n # define dump parameters\n dump_list = {\n \"vtk_dump\": f\"dump atom_vtk all vtk {captureRate} dump*.vtu id type diameter vx vy vz fx fy fz \\n dump grid_vtk all grid/vtk {captureRate} dump_%_*.vti con\",\n \"image_dump\": f\"dump du_image all image {captureRate} image.*.jpg type diameter zoom 2 bacillus type size 1280 720 view 45 60 \\n dump_modify du_image acolor 1 green acolor 2 red\",\n \"movie_dump\": f\"dump du_mov all movie {captureRate} movie.avi type diameter zoom 1.5 bacillus type size 1280 720 view 0 0 \\n dump_modify du_mov acolor 1 green acolor 2 red\",\n \"hdf_dump\": f\"dump du_h5 all nufeb/hdf5 {captureRate} dump.h5 id type x y z vx vy vz fx fy fz radius conc reac\",\n }\n\n dumps = defaultdict(list)\n for i in range(4):\n tmp = [\"vtk_dump\", \"image_dump\", \"movie_dump\", \"hdf_dump\"]\n dumps[tmp[i]]\n\n for dump, dump_var in zip(\n [args.vtk, args.img, args.movie, args.hdf],\n [\"vtk_dump\", \"image_dump\", \"movie_dump\", \"hdf_dump\"],\n ):\n if dump is True or dump == \"True\":\n dumps[dump_var] = dump_list[dump_var]\n else:\n dumps[dump_var] = \"\"\n\n ## Species-specific parameters\n\n # check for runs folder\n if not os.path.isdir(\"runs\"):\n os.mkdir(\"runs\")\n x = float(args.dims.split(\",\")[0])\n y = float(args.dims.split(\",\")[1])\n z = float(args.dims.split(\",\")[2])\n for n in range(1, int(args.num) + 1):\n culture = Culture(args)\n atoms_list = []\n bacilli_list = []\n # Create list of atoms and bacilli for atom definition file\n for cell in culture.cells:\n atoms_list.append(cell.Atom())\n bacilli_list.append(cell.Bacillus())\n # make atom definition file\n for r in range(1, int(args.reps) + 1):\n L = [\n \" NUFEB Simulation\\r\\n\\n\",\n f\" {args.cells_init} atoms \\n\",\n f\" {len(culture.cell_types)} atom types \\n\",\n f\" {args.cells_init} bacilli \\n\\n\",\n f\" 0.0e-4 {x :.2e} xlo xhi \\n\",\n f\" 0.0e-4 {y :.2e} ylo yhi \\n\",\n f\" 0.0e-4 {z :.2e} zlo zhi \\n\\n\",\n \" Atoms \\n\\n\",\n ]\n atoms = L + atoms_list\n atoms.append(\"\\n\")\n atoms.append(\" Bacilli \\n\\n\")\n atoms = atoms + bacilli_list\n # write atom definition file\n f = open(\n f\"runs/atom_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{r}.in\",\n \"w+\",\n )\n f.writelines(atoms)\n RUN_DIR = (\n Path(\"runs\")\n / f\"Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}\"\n )\n if not os.path.isdir(RUN_DIR):\n os.mkdir(RUN_DIR)\n # os.mkdir(f'runs/Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}')\n # write initial conditions json file\n dumpfile = open(RUN_DIR / \"metadata.json\", \"w\")\n # dumpfile = open(f\"/runs/Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}/metadata.json\",'w')\n json.dump(CellInfo, dumpfile, indent=6)\n dumpfile.close()\n ###\n\n # write Inputscript\n # open the file\n filein = open(TEMPLATES_DIR / \"bacillus.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"Bacillus.txt\")\n # read it\n src = Template(filein.read())\n # do the 
substitution\n result = src.safe_substitute(\n {\n \"n\": args.cells_init,\n \"SucRatio\": culture.SucRatio,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n \"Replicates\": args.reps,\n \"Timesteps\": args.ntimesteps,\n \"ts\": args.timestep,\n \"CYANOGroup\": culture.cyGroup,\n \"ECWGroup\": culture.ecwGroup,\n \"Zheight\": float(args.dims.split(\",\")[2]),\n \"CYANODiv\": culture.cyDiv,\n \"ECWDiv\": culture.ecwDiv,\n \"light\": light.concentration,\n \"co2\": co2.concentration,\n \"o2\": o2.concentration,\n \"sucrose\": sucrose.concentration,\n \"gco2\": gco2.concentration,\n \"CYANOMonod\": culture.cyMonod,\n \"ECWMonod\": culture.ecwMonod,\n \"CYANOcount\": culture.cyanoCount,\n \"ECWcount\": culture.ecwCount,\n \"v_ncyano\": culture.vcyano,\n \"v_necw\": culture.vecw,\n \"vtk_dump\": dumps[\"vtk_dump\"],\n \"image_dump\": dumps[\"image_dump\"],\n \"movie_dump\": dumps[\"movie_dump\"],\n \"hdf_dump\": dumps[\"hdf_dump\"],\n }\n )\n f = open(\n f\"./runs/Inputscript_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}.lmp\",\n \"w+\",\n )\n f.writelines(result)\n\n # write local run script\n # open the file\n filein = open(TEMPLATES_DIR / \"local.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"local.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": n,\n \"SucRatio\": culture.SucRatio,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n \"Reps\": args.reps,\n }\n )\n f = open(\n f\"./runs/local_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}.sh\", \"w+\"\n )\n f.writelines(result)\n # write slurm script\n # open the file\n filein = open(TEMPLATES_DIR / \"slurm_dev.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"Slurm.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": args.cells_init,\n \"job\": f\"NUFEB_cyano{n}\",\n \"USER\": args.user,\n \"Replicates\": args.reps,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n }\n )\n _logger.info(\"Script ends here\")",
"def _generate_output_file(self):\n\n if self.output_static:\n return\n\n if not self.input_file_generated():\n self.output_generation_log = \"Generation failed. Input wasn't generated\"\n self.output_generation_successful = False\n else:\n solution = self.solution\n if solution is None:\n self.output_generation_log = \"Generation failed. No model solution specified.\"\n self.output_generation_successful = False\n else:\n problem_code = self.problem.get_judge_code()\n testcase_code = self.get_judge_code()\n judge = self.problem.get_judge()\n task_type = self.problem.get_task_type()\n if solution.language not in judge.get_supported_languages():\n self.output_generation_log = \\\n \"Generation failed. Solution language is not supported by the judge\"\n self.output_generation_successful = False\n else:\n evaluation_result = task_type.generate_output(\n problem_code=problem_code,\n testcase_code=testcase_code,\n language=solution.language,\n solution_file=(solution.name, solution.code),\n )\n if not evaluation_result.success:\n self.output_generation_log = \\\n \"Generation failed. Judge couldn't execute the solution. Details: {}\".format(\n evaluation_result.message\n )\n self.output_generation_successful = False\n elif evaluation_result.verdict != JudgeVerdict.ok:\n self.output_generation_log = \\\n \"Generation failed. Solution exited with verdict {} on the judge\".format(\n str(evaluation_result.verdict.name)\n )\n self.output_generation_successful = False\n else:\n self.output_generation_log = \"Generation successful\"\n self.output_generation_successful = True\n self._output_generated_file = evaluation_result.output_file\n self.save()",
"def export_to_gsas():\n # Get workflow\n work_flow = my_data.get()\n\n output_file_name = '/tmp/acceptance_test.gda'\n\n # Clear the file if it exists.\n if os.path.exists(output_file_name):\n os.remove(output_file_name)\n\n status = work_flow.export_gsas_file(run_number=80231)\n assert status\n assert os.path.exists(output_file_name)",
"def test_GFD_export_create_file(self):\n filepath = '1.txt'\n gfd = flow_processing_input.GroundFlowData()\n gfd.detector_flow_data = createGFDDataset(1).dataset\n gfd.export_to_file(filepath)\n # Check if file was created at filepath\n self.assertTrue(os.path.exists(filepath))\n os.remove(filepath)",
"def simulation_stage(self, iteration, instance):\n\t\tk = Kernel(name=\"misc.mkfile\")\n\t\tk.arguments = [\"--size=1000\", \"--filename=simulation-{0}-{1}.dat\".format(iteration, instance)]\n\t\treturn k",
"def test_export_stp(self):\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")\n\n self.test_shape.export_stp(\"test_solid.stp\", mode=\"solid\")\n self.test_shape.export_stp(\"test_solid2.stp\")\n self.test_shape.export_stp(\"test_wire.stp\", mode=\"wire\")\n\n assert Path(\"test_solid.stp\").exists() is True\n assert Path(\"test_solid2.stp\").exists() is True\n assert Path(\"test_wire.stp\").exists() is True\n\n assert Path(\"test_solid.stp\").stat().st_size == Path(\"test_solid2.stp\").stat().st_size\n assert Path(\"test_wire.stp\").stat().st_size < Path(\"test_solid2.stp\").stat().st_size\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")",
"def test_export_stp(self):\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")\n\n self.test_shape.export_stp(\"test_solid.stp\", mode=\"solid\")\n self.test_shape.export_stp(\"test_solid2.stp\")\n self.test_shape.export_stp(\"test_wire.stp\", mode=\"wire\")\n\n assert Path(\"test_solid.stp\").exists() is True\n assert Path(\"test_solid2.stp\").exists() is True\n assert Path(\"test_wire.stp\").exists() is True\n\n assert Path(\"test_solid.stp\").stat().st_size == Path(\"test_solid2.stp\").stat().st_size\n assert Path(\"test_wire.stp\").stat().st_size < Path(\"test_solid2.stp\").stat().st_size\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")",
"def save_game_file(self, game_file_name):\r\n SlTrace.lg(f\"save_game_file {game_file_name}\")\r\n with open(game_file_name, \"w\") as fout:\r\n print(f\"# {game_file_name}\", file=fout)\r\n today = date.today()\r\n d2 = today.strftime(\"%B %d, %Y\")\r\n print(f\"# On: {d2}\\n\", file=fout)\r\n print(f\"from dots_commands import *\", file=fout)\r\n print(f\"\", file=fout)\r\n players = self.get_players()\r\n playing_labels = [player.label for player in players]\r\n playing_str = \",\".join(playing_labels)\r\n print(f\"\"\"set_playing(\"{playing_str}\")\"\"\", file=fout)\r\n max_line = 60\r\n indent_str = \" \"\r\n for player in players:\r\n self.print_set_play(player, max_line=max_line, file=fout,\r\n indent_str=indent_str)\r\n print(f\"start_game()\", file=fout) # Required for any game playing commands\r\n move_type_d = {\r\n PlayMove.MARK_EDGE : \"mark\",\r\n PlayMove.SELECT_EDGE : \"select\",\r\n PlayMove.UNDO_MOVE : \"undo\",\r\n PlayMove.REDO_MOVE : \"redo\",\r\n PlayMove.PLAY_MOVE : \"play_move\",\r\n PlayMove.PLAY_MOVE_TILL : \"play_move_till\",\r\n PlayMove.SET_PLAYING : \"set_playing\",\r\n PlayMove.GAME_CHECK : \"game_check\",\r\n PlayMove.SET_PLAY : \"set_play\"\r\n }\r\n for pm in self.play_moves:\r\n if pm.removed: # Skip removed moves\r\n continue\r\n \r\n if pm.move_type not in move_type_d:\r\n raise SelectError(f\"save_file move type: {pm.move_type} uninplemented\")\r\n gfun = move_type_d[pm.move_type]\r\n hv_str = '\"h\"' if pm.hv == PlayMove.HV_H else '\"v\"'\r\n if pm.move_type == PlayMove.MARK_EDGE:\r\n line_str = f\"{gfun}({hv_str}, {pm.row}, {pm.col})\"\r\n elif pm.move_type == PlayMove.SELECT_EDGE:\r\n line_str = f\"{gfun}({hv_str}, {pm.row}, {pm.col})\"\r\n elif pm.move_type == PlayMove.UNDO_MOVE:\r\n line_str = f\"{gfun}()\"\r\n elif pm.move_type == PlayMove.REDO_MOVE:\r\n line_str = f\"{gfun}()\"\r\n elif pm.move_type == PlayMove.SET_PLAY:\r\n if pm.pre_comment is not None:\r\n print(pm.pre_comment, file=fout)\r\n temp_player = SelectPlayer(self, id=0) # Not real\r\n for field in pm.kwargs:\r\n val = pm.kwargs[field]\r\n setattr(temp_player, field, val)\r\n self.print_set_play(temp_player, file=fout)\r\n if pm.line_comment is not None:\r\n print(pm.line_comment, file=fout)\r\n continue # Done with this move\r\n \r\n elif pm.move_type == PlayMove.SET_PLAYING:\r\n playing_str = \"\" if pm.playing is None else f'\"{pm.playing}\"'\r\n line_str = f\"{gfun}({playing_str})\" # Do we drop this ???\r\n elif pm.move_type == PlayMove.GAME_CHECK:\r\n line_str = f\"{gfun}(\"\r\n if pm.mode is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += f'\"{pm.mode}\"'\r\n if pm.row is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += str(pm.row)\r\n if pm.col is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += str(pm.col)\r\n if pm.is_set is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += f\"is_set={pm.is_set}\"\r\n if pm.show_fail is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += f\"show_fail={pm.show_fail}\"\r\n line_str += \")\" \r\n elif pm.move_type == PlayMove.PLAY_MOVE:\r\n line_str = f\"{gfun}()\"\r\n elif pm.move_type == PlayMove.PLAY_MOVE_TILL:\r\n line_str = f\"{gfun}()\"\r\n else:\r\n raise SelectError(f\"save_file move type:\"\r\n f\" {pm.move_type} uninplemented\")\r\n pre_comment = pm.pre_comment\r\n if pre_comment is not None:\r\n print(pre_comment, file=fout)\r\n line_comment = 
pm.line_comment\r\n if line_comment is not None:\r\n line_str += line_comment\r\n print(line_str, file=fout)\r\n \r\n return True",
"def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)",
"def build_GenotypeGVCFs_sbatch(working_dir, combined_gvcf_files, scratch=False, interval=None):\n \n name_batch1 = os.path.basename([item for item in combined_gvcf_files if \"batch1\" in item][0])\n interval_name = \"\"\n #there must be at least one batch so look for it, not elegant but works\n if name_batch1.split(\"batch1\") != \".g.vcf.gz\":\n interval_name = name_batch1.split(\"batch1\")[1].split(\".\")[0]\n job_name = \"GenotypeGVCFs{}\".format(interval_name)\n output_file = \"{}_joincalled{}.g.vcf.gz\".format(CONFIG[\"output_header\"], interval_name)\n #create the sbatch file to analyse the current batch of samples\n sbatch_file = os.path.join(working_dir, \"sbatch\", \"{}.sbatch\".format(job_name))\n with open(sbatch_file, \"w\") as GenotypeGVCFs:\n slurm = slurm_header(CONFIG[\"uppmax_project\"], job_name, working_dir)\n GenotypeGVCFs.write(slurm)\n GenotypeGVCFs.write(\"\\n\")\n #rsync to scratch all samples\n if scratch:\n GenotypeGVCFs.write(\"mkdir -p $SNIC_TMP/{} \\n\".format(job_name)) # create tmp directory\n GenotypeGVCFs.write(\"mkdir -p $SNIC_TMP/{}/VCF/ \\n\".format(job_name)) # create tmp directory\n #now cycle over the samples, build the GATK command\n combined_gvcf_string_input = \"\"\n for combined_gvcf in combined_gvcf_files:\n combined_gvcf_path_dir = combined_gvcf\n if scratch:\n GenotypeGVCFs.write(\"rsync -rptoDLv {}* $SNIC_TMP/{}/\\n\".format(combined_gvcf, job_name))\n combined_gvcf_name = os.path.basename(combined_gvcf)\n combined_gvcf_path_dir = \"$SNIC_TMP/{}/{}\".format(job_name, combined_gvcf_name)\n combined_gvcf_string_input += \"-V {} \\\\\\n\".format(combined_gvcf_path_dir)\n\n GATK_command= \"java -Xmx250g -jar {} -T GenotypeGVCFs \\\\\\n\".format(CONFIG[\"GATK\"])\n for option in CONFIG[\"walkers\"][\"GenotypeGVCFs\"]:\n GATK_command += \"{} \\\\\\n\".format(option)\n GATK_command += \"{} \".format(combined_gvcf_string_input)\n if interval is not None:\n GATK_command += \"-L {} \\\\\\n\".format(interval)\n\n if scratch:\n GATK_command += \"-o $SNIC_TMP/{}/VCF/{}\\n\".format(job_name, output_file)\n #once this is done rsync back to lupus\n GATK_command += \"rsync $SNIC_TMP/{}/VCF/{}* {}/VCF/\\n\".format(job_name, output_file , working_dir)\n else:\n GATK_command += \"-o {}/VCF/{}\\n\\n\".format(working_dir, output_file)\n GenotypeGVCFs.write(GATK_command)\n #return path to sbach file\n return sbatch_file",
"def generate_fimo_bg_file(input_file_path, ouptut_file_path, order=2):\n cmd = [\"sh\", \"generate_fimo_bg_file.sh\",\n \"-i\", input_file_path,\n \"-o\", ouptut_file_path,\n \"-m\", order]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n process.wait()",
"def Template(Fenetre_largeur,Fenetre_hauteur):\r\n li= Select_ligne(\"Nombre de lignes: \",Fenetre_largeur,Fenetre_hauteur)\r\n nom=\"Template\"\r\n fich=\"Template\"\r\n version=0\r\n while Path(\"stages/\"+fich+\".txt\").is_file() == True:\r\n version+=1\r\n fich=nom+str(version)\r\n fichier=open(\"stages/\"+fich+\".txt\",'w')\r\n fichier.write(str(li))\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n for i in range(li):\r\n for j in range(10):\r\n fichier.write(\"0,0|\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"gauche: resistance, droite: bonus\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"resistance max: 3\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"6=barre+\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"7=score+\")\r\n fichier.close()",
"def writeStatsToFile( gfname, sfname, tgraph ):\n ParProbG = graphWithCutoff(gfname, 0.0)\n with open(sfname,'wb') as ofile:\n for u,v in itertools.combinations( tgraph.nodes(), 2 ):\n ofile.write(\"{0} {1}\\n\".format( ParProbG[u][v]['weight'] if ParProbG.has_edge(u,v) else 0.0, 1 if tgraph.has_edge(u,v) else 0) )",
"def create_temp_copy(user, code):\n fname = user + \"_primes.py\"\n user_file = open(fname, 'w')\n user_file.write(code)\n user_file.close()\n return fname",
"def export_ctsdg(cfg):\n generator = Generator(\n image_in_channels=config.image_in_channels,\n edge_in_channels=config.edge_in_channels,\n out_channels=config.out_channels\n )\n generator.set_train(False)\n load_checkpoint(cfg.checkpoint_path, generator)\n\n ckpt_path = Path(cfg.checkpoint_path)\n output_file_name = (ckpt_path.parent / ckpt_path.stem).as_posix()\n file_format = config.file_format\n\n img_dummy = mnp.zeros([1, config.image_in_channels, *cfg.image_load_size],\n dtype=mstype.float32)\n edge_dummy = mnp.zeros([1, 2, *cfg.image_load_size], dtype=mstype.float32)\n mask_dummy = mnp.zeros([1, 1, *cfg.image_load_size], dtype=mstype.float32)\n\n export(generator, img_dummy, edge_dummy, mask_dummy,\n file_name=output_file_name, file_format=file_format)\n\n print(f'{output_file_name}.mindir exported successfully!', flush=True)",
"def write_to_file_x(path):\n path1 = path + \"/x_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x%sy0z0ke%s.mac\" %(dx*x + x_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0\\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x%sy0z0ke%s.root\"\\n' %(dx*x + x_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set %s 0 0\\n\" % (dx*x + x_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")",
"def createGloveDic():\n saveGloveDicIntoFile(_extractGloveVects())",
"def generate_file(name, size):\n print('=> Generating %s file' % name)\n with open(DATASET_DIR+name+DATASET_EXTENSION, 'wb+') as fout:\n fout.write(os.urandom(size))",
"def write_to_file_z(path):\n path1 = path + \"/z_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y0z%ske%s.mac\" %(dz*z + z_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y0z%ske%s.root\"\\n' %(dz*z + z_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 0 %s\\n\" % (dz*z + z_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")"
] | [
"0.5956804",
"0.59355676",
"0.59196234",
"0.586338",
"0.5857218",
"0.58492386",
"0.5716386",
"0.57071114",
"0.5694615",
"0.565109",
"0.56128067",
"0.56030047",
"0.55999684",
"0.5515769",
"0.54997516",
"0.54982835",
"0.5453069",
"0.5453069",
"0.54357845",
"0.5433838",
"0.5382824",
"0.5364694",
"0.5347028",
"0.5344181",
"0.53395617",
"0.53376085",
"0.5335053",
"0.5323215",
"0.532278",
"0.53037673"
] | 0.6236334 | 0 |
return a tuple of (isHit, hitResult). isHit is a Boolean which is true in case of a hit and false in case of a miss. In case of a hit, hitResult is the HPA if request.addr is cached in the tlb; otherwise hitResult is None | def lookup(self, request):
if request.addr in self._addressMap:
return (True, self._addressMap[request.addr])
else:
return (False, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _checkHit( self , bp ):\n hit = 0\n if bp.jpyhits != 0 :\n # check hits context\n bp.hitted = bp.hitted + 1\n if bp.hitStyle == HIT_EQUALS_TO and \\\n bp.hitted == bp.jpyhits :\n hit = 1\n elif bp.hitStyle == HIT_GREATER_THAN and \\\n bp.hitted > bp.jpyhits :\n hit = 1\n elif bp.hitStyle == HIT_MULTIPLE_OF and \\\n bp.hitted % bp.jpyhits == 0 :\n hit = 1\n return hit\n else :\n return 1",
"def cache_hit(self):\n return self._properties.get(\"cacheHit\")",
"def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)",
"def incache(self, query):\n key = genkey(query)\n res = self.memcache.get(key)\n if res and type(res) is int:\n return True\n return False",
"def hit(self):\n return self._hit",
"def cache_has(self, metric_name):\n hit = self._cache_has(metric_name)\n if hit:\n self._hits.inc()\n else:\n self._misses.inc()\n return hit",
"def _hit_start_get(self):\n return self._hit_start",
"def find_image_url(lat_value, long_value):\n global custom_memory, custom_hit, custom_miss, total_custom_memory\n image_tuple = (lat_value, long_value)\n \n #When Latitude Longitude in Cache and HIT\n if image_tuple in custom_memory:\n custom_hit+=1\n custom_memory[image_tuple][1] = datetime.now()\n return custom_memory[image_tuple][0],\"hit\"\n \n #When Latitude Longitude NOT in Cache and MISS\n if len(custom_memory) < total_custom_memory:\n custom_miss+=1\n custom_memory[image_tuple] = [GetImageURL(*image_tuple), datetime.now()]\n return custom_memory[image_tuple][0], \"miss_when_not_full\"\n else:\n custom_memory = sorted([(key, list_vals) for key, list_vals in custom_memory.items()], key=lambda i:i[1][1], reverse=False)\n del custom_memory[0]\n custom_memory = dict(custom_memory)\n custom_miss+=1\n custom_memory[image_tuple] = [GetImageURL(*image_tuple), datetime.now()]\n return custom_memory[image_tuple][0], \"miss_when_after_full\"",
"def get_hit(self, damage_type, damage):\n is_destroyed = self.hp_manager.get_hit(damage_type, damage)\n if (is_destroyed):\n self.mark_for_destruction()\n return is_destroyed",
"def get_cacheable_info(self, url):\n url_of_interest = url.replace('.js', '').split(self.api_url)[1]\n if '/' in url_of_interest:\n # We've got an object request like this:\n # hierarchicalrequirement/5128087372.js\n lookup_tuple = url_of_interest.split('/')\n cache_key = lookup_tuple[0]\n elif '?' in url_of_interest:\n # We've got ourselves a query like this:\n # hierarchicalrequirement.js?query=%28FormattedID%20=%20%22us20%\n # 22%29&pagesize=100&start=1&fetch=true\n lookup_tuple = url_of_interest.split('?')\n # Different cache_key because we want to be able to cache these\n # separately with different timeouts to the actual object type\n cache_key = '{0}_query'.format(lookup_tuple[0])\n\n cache_lookup = lookup_tuple[1]\n\n return cache_key, cache_lookup",
"def _cache_get(self, metric_name):\n try:\n with self._lock:\n metric = self.__cache.get(metric_name, False)\n except KeyError:\n # When metrics expire, we still get a KeyError.\n metric = False\n if metric is False:\n return None, False\n else:\n return metric, True",
"def searchTLB(self, vpn):\n if vpn in self.TLB:\n self.TLBHits += 1\n #Update memory usage for ppn\n for i in range(len(self.memory)):\n page = self.memory[i]\n if page[2] == vpn:\n self.memory[i] = (page[0], self.accesses, vpn)\n break\n return self.TLB.get(vpn)\n else:\n self.TLBMisses += 1\n return None",
"def intersect(self, ray):\n # TODO A5 (Step1) implement this function\n # Copy your implementation from A4\n # Then calculate uv coordinates, to be passed into the Hit initializer\n vs = self.vs\n\n a = vs[0][0] - vs[1][0]\n b = vs[0][1] - vs[1][1]\n c = vs[0][2] - vs[1][2]\n d = vs[0][0] - vs[2][0]\n e = vs[0][1] - vs[2][1]\n f = vs[0][2] - vs[2][2]\n\n ray_dir = ray.direction\n ray_orig = ray.origin\n\n g = ray_dir[0]\n h = ray_dir[1]\n i = ray_dir[2]\n j = vs[0][0] - ray_orig[0]\n k = vs[0][1] - ray_orig[1]\n l = vs[0][2] - ray_orig[2]\n\n M = a * (e * i - h * f) + b * (g * f - d * i) + c * (d * h - e * g)\n\n t = -(f * (a * k - j * b) + e * (j * c - a * l) + d *\n (b * l - k * c)) / M\n\n if (t < ray.start or t > ray.end):\n return no_hit\n\n gamma = (i * (a * k - j * b) + h * (j * c - a * l) + g *\n (b * l - k * c)) / M\n\n if (gamma < 0 or gamma > 1):\n return no_hit\n\n beta = (j * (e * i - h * f) + k * (g * f - d * i) +\n l * (d * h - e * g)) / M\n\n if (beta < 0 or beta > 1 - gamma):\n return no_hit\n\n P = ray_orig + t * ray_dir\n\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n return Hit(t, P, unit_normal, vec([u, v]), self.material)",
"def get_from_cache(self, url):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n\n cache_timeout = self.cache_timeouts.get(cache_key,\n self.default_cache_timeout)\n\n data, access_time = MEM_CACHE[cache_key].get(cache_lookup, (None, 0))\n if data and time.time() - access_time < cache_timeout:\n return data\n return False",
"def _cache_has(self, metric_name):\n pass",
"def retrieve_from_cache(self, x, y):\n return False",
"def hit(self):\n hit = self.delegate.checkHit(100, 0, self.environment)\n assert hit, \"Should always hit\"",
"def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]",
"def _query_cache(self, key):\n if self._cache:\n cache_key = self._make_cache_key(key)\n try:\n val = self._cache[cache_key]\n self._log.debug('cache hit for key {cache_key} ({key}) '.format(\n cache_key=cache_key, key=key))\n return val\n except KeyError:\n self._log.debug('cache miss for key {cache_key} ({key}) '.format(\n cache_key=cache_key, key=key))\n return None\n else:\n self._log.debug('cache disabled (self._cache is None)')\n return None",
"def check_in_store(*args, **kwargs):\n output_ids = cached_inner._get_output_identifiers(\n bound_get_hashed(*args, **kwargs), args, kwargs\n )\n if not cached_inner.store_backend.contains_item(output_ids):\n raise NotCachedError(f\"The given call is not cached: {output_ids}\")\n return IN_STORE",
"def check_in_store(*args, **kwargs):\n output_ids = cached_inner._get_output_identifiers(\n bound_get_hashed(*args, **kwargs), args, kwargs\n )\n if not cached_inner.store_backend.contains_item(output_ids):\n raise NotCachedError(f\"The given call is not cached: {output_ids}\")\n return IN_STORE",
"def isCacheable(self, segments, request):\n\t\treturn False",
"def test_hit(self):\n ship = Ship(self.location)\n self.assertEqual(self.location, ship.location)\n self.assertEqual(1, ship.check_hit(self.hit))\n self.assertEqual(1, len(ship.location))",
"def process_shot(self):\n if self.has_active_ship():\n self.mark = constants.HIT_SHIP_MARK\n self.hit_count += 1\n if self.hit_count == self.ship.power:\n self.mark = constants.DEAD_SHIP_MARK\n return constants.KILL\n else:\n return constants.HIT\n elif not self.occupied or self.mark == constants.MISS_HIT_MARK:\n self.mark = constants.MISS_HIT_MARK\n return constants.MISS",
"def rcachehit(self):\n sql = '''SELECT to_char((1 - (phy.value - lob.value - dir.value) / \n ses.value) * 100, 'FM99999990.9999') retvalue \n FROM v$sysstat ses, v$sysstat lob, \n v$sysstat dir, v$sysstat phy \n WHERE ses.name = 'session logical reads' \n AND dir.name = 'physical reads direct' \n AND lob.name = 'physical reads direct (lob)' \n AND phy.name = 'physical reads' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])",
"def _cache_get(self, metric_name):\n encoded_metric_name = self._encode(metric_name)\n with self.__env.begin(self.__metric_to_metadata_db, write=False) as txn:\n payload = txn.get(encoded_metric_name)\n\n if payload == self._EMPTY:\n return None, True\n\n if payload is not None:\n payload = self._decode(payload)\n\n if not payload:\n # cache miss\n return None, False\n\n # found something in the cache\n split = self.__split_payload(payload)\n\n if split is None:\n # invalid string => evict from cache\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.delete(key=encoded_metric_name)\n return None, False\n\n # valid value => get id and metadata string\n # TODO: optimization: id is a UUID (known length)\n id_str, metadata_str, timestamp = split\n try:\n id = uuid.UUID(id_str)\n except Exception as e:\n logging.debug(str(e))\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.delete(key=encoded_metric_name)\n return None, False\n\n # if the timestamp expired evict it in order to force\n # its recreation for the next time\n if self.__expired_timestamp(timestamp):\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n txn.delete(key=encoded_metric_name)\n\n metadata = self.metadata_from_str(metadata_str)\n return bg_metric.Metric(metric_name, id, metadata), True",
"def get_headers_and_data(self):\n\n if self.config.flag_usecache:\n fpath, fhdr, dirpath = self.get_url_store_paths()\n\n fpath_f = os.path.isfile(fpath)\n fhdr_f = os.path.isfile(fhdr)\n \n if fpath_f and fhdr_f:\n try:\n content = zlib.decompress(open(fpath).read())\n headers = eval(zlib.decompress(open(fhdr).read()))\n\n if self.make_head_request(headers):\n # Update URL from cache\n self.url = self.headers.get('url', self.url)\n \n log.info(self.url, \"==> URL is up-to-date, returning data from cache\")\n\n self.content = content\n self.headers = headers\n\n self.content_type = urlhelper.get_content_type(self.url, self.headers)\n \n eventr = crawlerbase.CrawlerEventRegistry.getInstance() \n # Raise the event for retrieving URL from cache\n eventr.publish(self, 'download_cache',\n message='URL has been retrieved from cache',\n code=304,\n event_key=self.url, \n params=self.__dict__) \n\n return True\n except Exception, e:\n log.error(\"Error in getting URL headers & data for URL\",self.url)\n log.error(\"\\t\",str(e))\n else:\n if not fpath_f:\n log.debug(\"Data file [%s] not present =>\" % fpath, self.url)\n if not fhdr_f:\n log.debug(\"Header file [%s] not present =>\" % fhdr, self.url) \n\n return False",
"def get_hit_info(annotation_log=None, bank_annotations=None, annotation_id=None, hit_type=None, hit_id=None):\n assert(hit_type in [\"gen\", \"val\"])\n assert(annotation_id != None or hit_id != None)\n hit_info = None\n df = pd.read_csv(annotation_log)\n df = df.set_index('annotation_id')\n hit_already_in_log = hit_id != None\n if hit_already_in_log:\n # get the annotation_id for the relevant hit_id from the log\n hit_id_column = 'hitid_gen' if hit_type == 'gen' else 'hitid_validation'\n gen_hit_df = df[df[hit_id_column] == hit_id]\n annotation_id = gen_hit_df.index.tolist()[0]\n question_id = df.loc[annotation_id, 'question_id']\n question_text = df.loc[annotation_id, 'question_text']\n # generation HIT\n if hit_type == \"gen\":\n hit_info = GenerationHITInfo(annotation_id, question_id, question_text)\n # validation HIT\n else:\n decomposition = df.loc[annotation_id, 'decomposition']\n generator_id = df.loc[annotation_id, 'workerid_gen']\n generation_hit_id = df.loc[annotation_id, 'hitid_gen']\n bank_annotation = None\n if hit_already_in_log:\n # retreive relevant bank_id information from bank annotations log\n bank_id = df.loc[annotation_id, 'bank_id_validation']\n bank_annotation = get_bank_annotation(bank_id, BANK_ANNOTATIONS_LOG)\n else:\n # get a random bank annotation for validation quality test\n bank_annotation = random_bank_annotation(BANK_ANNOTATIONS_LOG)\n hit_info = ValidationHITInfo(annotation_id, question_id, question_text, decomposition, generator_id,\\\n generation_hit_id, bank_annotation)\n assert(hit_info != None)\n if hit_id != None:\n hit_info.set_hit_id(hit_id)\n return hit_info",
"def is_route_used(_, hash_arg):\n for hash_object in Hash.objects.all():\n if hash_object.hash == hash_arg:\n return HttpResponse(\n json.dumps({\"Used\": True}), mimetype=\"application/json\")\n\n Hash(hash=hash_arg).save()\n return HttpResponse(\n json.dumps({\"Used\": False}), mimetype=\"application/json\")",
"def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())"
] | [
"0.59976083",
"0.5989675",
"0.5826117",
"0.5499163",
"0.54839325",
"0.5301686",
"0.52203494",
"0.516528",
"0.5153411",
"0.5124234",
"0.50933915",
"0.5003426",
"0.4978922",
"0.49499053",
"0.49425858",
"0.4921342",
"0.4921074",
"0.49039856",
"0.48851854",
"0.48693472",
"0.48693472",
"0.48692507",
"0.48431885",
"0.48273417",
"0.48050424",
"0.4799164",
"0.47909668",
"0.4770025",
"0.47671735",
"0.47649997"
] | 0.64621645 | 0 |
update the tlb with the translation address from updateObj | def update(self, updateObj):
#if we've allocated all free entries in tlb
if len(self._allocatedQ) == self._maxSize:
#remove the old entries from the tlb (fifo order)
oldUpdateObj = self._allocatedQ.popleft()
del self._addressMap[oldUpdateObj.requestAddr]
reqAddr,tranAddr = updateObj.requestAddr, updateObj.translatedAddr
self._addressMap[reqAddr] = tranAddr
self._allocatedQ.append(updateObj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, obj):\n self.identity_map[obj._instance_key] = obj\n self.register_dirty(obj)",
"def _update_object(self, data_dict):\r\n pass",
"def gen_update(self, TL):\r\n pass",
"def update(self, obj):\n self._updater.update(obj)",
"def rs_edit_upd(obj):\n verts = [x.co for x in obj.data.vertices]\n if verts[0] != Vector():\n fix = Vector(verts[0])\n for i in range(len(verts)):\n obj.data.vertices[i].co = obj.data.vertices[i].co - fix\n\n obj.data.update()\n obj.location += fix\n verts[1] = (verts[2] + verts[0]) / 2\n verts[3] = (verts[2] + verts[4]) / 2\n verts[5] = (verts[4] + verts[6]) / 2\n verts[7] = (verts[6] + verts[0]) / 2\n for i in range(len(verts)):\n obj.data.vertices[i].co = verts[i]\n\n obj.data.update()",
"def _update(self, *keys_and_val):\n if len(xxx) < 2:\n raise NotEnoughInfo\n value, *location = xxx[::-1]\n location.reverse()\n final_key = location.pop()\n ptr__target_dct = get_target_dct(location)\n ptr__target_dct[final_key] = value\n return",
"def after_update(self, obj, st):\n pass",
"def update_tlm(self):",
"def remapObject(self,object,newIMod,newIObj=-1):\n (iMod,iObj,objId,objRecords) = object[:4]\n if newIObj == -1: newIObj = iObj\n newObject = (newIMod,newIObj)+object[2:]\n if objRecords and objRecords[0].name == 'MVRF':\n data = cStringIO.StringIO()\n data.write(struct.pack('i',newIObj)[:3])\n data.write(struct.pack('B',newIMod))\n objRecords[0].data = data.getvalue()\n objRecords[0].setChanged(False)\n data.close()\n #print 'Remapped MVRF:',newObject[:3]\n #--Remap any script references\n oldRef = (iMod,iObj)\n if oldRef in self.scptRefs:\n newRef = (newIMod,newIObj)\n for scpt in self.refs_scpt.keys():\n if self.refs_scpt[scpt] == oldRef:\n scpt.setRef(newRef)\n self.refs_scpt[scpt] = newRef\n #print object[:3],newRef, scpt.id\n #--Be sure to call updateScptRefs when finished remapping *all* objects.\n #--Done\n return newObject",
"def unityUpdateObj(objID, objPos, objYaw):\n \n hsc.write(hsc.makeID(objID) + \".transform.position = \" + hsc.vf(objPos) + \";\")\n hsc.write(hsc.makeID(objID) + \".transform.rotation = \" + hsc.qf(objYaw) + \";\")",
"def updateItem(self, object):\n pass",
"def update():",
"def update():",
"def update_object(self, name: str) -> None:",
"def callUpdateTable(self):\r\n self.updateTable()",
"def mapToAtlas(self, obj):\n raise Exception(\"Must be reimplemented in subclass.\")",
"def indirectobject(self, index, version, io):\n self.appendString(\"\\n\")\n self.indirectObjects[index] = self.filesize()\n self.appendString(\"%d %d obj\\n%s\\nendobj\\n\" % (index, version, io))",
"def normal_update(self, game, elfDict):\n self.game = game # update game\n self.my_elves = [elf for elf in elfDict.values() if not elf.elf.already_acted] # update self.my_elves\n self.game = game # update self.game\n self.portals.portals_update(game) # update portals (the object)\n self.my_castle = game.get_my_castle()",
"def update( ):\r\n pass",
"def update(self, obj):\n\n self.cfg.update(obj)",
"def _notify_update(self, cuds_object):",
"def Automaticupdatesobjects():\n pass",
"def update_versioned_target(self, vt):\n self._cache_manager.update(vt.cache_key)",
"def post_migrate_function(obj):\n obj.a = obj.a + u\"-modified\"\n return True",
"def update_to(self, new):\r\n if self.idhex != new.idhex:\r\n plog(\"ERROR\", \"Update of router \"+self.nickname+\"changes idhex!\")\r\n for i in new.__dict__.iterkeys():\r\n if i == \"refcount\" or i == \"_generated\": continue\r\n self.__dict__[i] = new.__dict__[i]",
"def indirectobject(self, index, io):\n if self.indices != '':\n self.indices += ' '\n self.indices += '%d %d' % (index, len(self.ios))\n self.ios += io\n self.objects.append(index)",
"def test_update_virt_realm(self):\n pass",
"def pre_route_target_update(self, resource_id, resource_dict):\n pass",
"def post_physical_interface_update(self, resource_id, resource_dict):\n pass",
"def update_item(self, table, item):"
] | [
"0.57816815",
"0.56557685",
"0.55526066",
"0.5421197",
"0.5410602",
"0.53786093",
"0.5346271",
"0.53038764",
"0.52939814",
"0.5269879",
"0.5261274",
"0.52505845",
"0.52505845",
"0.5190733",
"0.5168241",
"0.51626116",
"0.515856",
"0.51562643",
"0.5152305",
"0.51184386",
"0.50891244",
"0.50512516",
"0.5050154",
"0.5048659",
"0.5030882",
"0.5027314",
"0.49900424",
"0.49263993",
"0.48987272",
"0.48832783"
] | 0.68263865 | 0 |
Create an asset; requires a correct type, title, label and url | def create(self) -> requests.request:
# Check needed values
if None in [self.args.type, self.args.title, self.args.label, self.args.url]:
raise Exception('Provide all parameters for asset creation')
# Check type
if self.args.type not in ['photo', 'video']:
raise Exception('Asset can only be of type photo or video')
# Check URL validity
if self.check_url_invalidity():
raise Exception('Provided URL is not valid')
# Send POST request
return requests.post(
self.REQUEST_URL,
{'type': self.args.type, 'title': self.args.title, 'label': self.args.label, 'url': self.args.url}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_system_asset(self):\n pass",
"def createAsset(assFolder, *args):\n createAssetUI(assFolder)",
"def GenerateAssetForCreateRequest(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec,\n )\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request",
"def saveAsset(*args):\n\n # Get user assigned name\n userName = cmds.textFieldButtonGrp(SpawnObjectsTab.UserField, query=True, text=True)\n\n # Use user's name if there is one or object's if there isn't\n if userName:\n name = userName\n else:\n name = cmds.ls(selection=True)[0]\n\n # Add to Library\n OL.addObjectToLibrary(name)\n # Create icon\n addButtonIcon(name)",
"def create_base_image(self, builder, template, parameters):",
"def test_add_category_to_asset(self):\n pass",
"def test_add_asset_type_assignment_rule(self):\n pass",
"def create(self, validated_data):\n return Asset.objects.create(**validated_data)",
"def create_asset(ocean, publisher):\n sample_ddo_path = get_resource_path(\"ddo\", \"ddo_sa_sample.json\")\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n asset = DDO(json_filename=sample_ddo_path)\n asset.metadata[\"main\"][\"files\"][0][\"checksum\"] = str(uuid.uuid4())\n my_secret_store = \"http://myownsecretstore.com\"\n auth_service = ServiceDescriptor.authorization_service_descriptor(my_secret_store)\n return ocean.assets.create(asset.metadata, publisher, [auth_service])",
"def GenerateAssetForCreateRequestAlpha(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n if args.IsSpecified('resource_read_access_mode'):\n resource_spec.readAccessMode = (\n resource_spec_field.ReadAccessModeValueValuesEnum(\n args.resource_read_access_mode\n )\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec)\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request",
"def post(self, slug = None):\n filename = self.request.form.get(\"filename\")\n imgdata = base64.b64decode(self.request.form['data'])\n stream = StringIO.StringIO(imgdata)\n content_length = len(imgdata)\n content_type = \"image/png\"\n\n asset = self.app.module_map.uploader.add(\n stream, \n filename = filename,\n content_type = content_type,\n content_length = content_length,\n )\n\n asset_id = unicode(asset._id)\n return {\n 'url' : self.url_for(\"asset\", asset_id = asset.variants['medium_user']._id),\n 'status' : \"success\",\n 'asset_id' : asset_id\n }",
"def build_assets():\n\n # templates\n template = open(os.path.join(BASE_PATH, 'AssetLibrary.as.template'), 'r').read()\n\n embed_templates = {\n 'image': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\",\n 'mp3': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\", \n 'xml': \"[Embed(source='%(asset_path)s', mimeType=\\\"application/octet-stream\\\")] private var %(asset_class_name)s:Class;\\n\"\n }\n \n library_element_template = \"'%(asset_id)s': %(asset_class_name)s\"\n\n # load+parse asset xml\n complete_asset_embed_code = \"\"\n complete_asset_data_code = \"\"\n asset_dom = minidom.parse(ASSET_XML_FILE)\n \n asset_nodes = list(asset_dom.getElementsByTagName('asset'))\n \n for asset_node in asset_nodes:\n asset_attrs = dict(asset_node.attributes.items())\n asset_embed_code = embed_templates[asset_attrs['type']] % {\n 'asset_class_name': asset_attrs['name'],\n 'asset_path': ASSET_BASE + asset_attrs['file']\n }\n\n complete_asset_embed_code += asset_embed_code\n \n asset_data_code = library_element_template % {\n 'asset_id': asset_attrs['name'],\n 'asset_class_name': asset_attrs['name']\n }\n\n complete_asset_data_code += asset_data_code\n\n if asset_nodes.index(asset_node) == len(asset_nodes) - 1:\n complete_asset_data_code += \"\\n\"\n else:\n complete_asset_data_code += \",\\n\"\n \n output = template % {\n 'asset_embeds': complete_asset_embed_code,\n 'asset_data': complete_asset_data_code\n }\n \n # render\n output_f = open(os.path.join(BASE_PATH, 'AssetLibrary.as'), 'w')\n output_f.write(output)",
"def create():",
"def create():",
"def make_asset_key(self, asset_type, path):\r\n raise NotImplementedError()",
"def add_asset(self, file, type='', metadata=''):\n\n if not metadata:\n metadata = {}\n elif not isinstance(metadata, dict):\n error_msg = 'Param \"metadata\" must be a Dict or None.'\n logger.exception(error_msg)\n raise ValueError(error_msg)\n else:\n error_msg = 'Param \"metadata\" must be a Dict or None.'\n logger.exception(error_msg)\n raise ValueError(error_msg)\n\n try:\n if os.path.isfile(file) and os.access(file, os.R_OK):\n with open(file, 'rb') as fp:\n filename = os.path.basename(getattr(fp, 'name', None))\n file_content = fp.read()\n else:\n error_msg = \"The file pointed: (%s) is not a file or is unreadable.\"\n logger.error(error_msg, file)\n except IOError as e:\n error_msg = \"Error found when trying to open file: %s\"\n logger.exception(error_msg, file)\n raise e\n else:\n asset = opac_pb2.Asset(\n file=file_content,\n filename=filename,\n type=type,\n metadata=str(metadata)\n )\n\n return self.stub.add_asset(asset).id",
"def create_thumbnail(self, target, format=None):",
"def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))",
"def asset(atype, aname):\n if atype not in ('css', 'js'):\n raise template.TemplateSyntaxError('Type can only be one of css or js.')\n\n if aname not in ASSETS[atype]:\n raise ValueError('Invalid asset: %r' % aname)\n\n meta = ASSETS[atype][aname]\n\n return {\n 'USE_MINIFIED': USE_MINIFIED,\n 'type': atype,\n 'asset': aname,\n 'meta': meta,\n }",
"def assets_publish(ctx, metadata, brizo, price, service_endpoint, timeout):\n from .api.assets import create\n response = create(metadata,\n secret_store=not brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout,\n ocean=ctx.obj['ocean'])\n echo(response)",
"def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n 
subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')",
"def create(self, name, image, command, **kwargs):\n return",
"def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...",
"def __init__(self, url, title, media_type, icon_src, duration, c_type):\n self.uuid = str(uuid.uuid4()).replace(\"-\", \"\")\n if self.uuid[0].isdigit():\n self.uuid = \"x\" + self.uuid\n self.url = url\n self.title = title\n self.media_type = media_type\n self.icon_src = icon_src\n self.duration = duration\n self.content_type = c_type",
"def create(self):",
"def create_resource(self, **kwargs):\n results = self.api.action.resource_create(**kwargs)\n # TODO: use `results` rather than re-download, using an isolation layer to standardize the re-structure\n self.get_ckan_metadata(True)\n if 'id' in results:\n self._import_resource_to_cache(kwargs['upload'], results['id'])\n return results",
"def create(*args):",
"def __init__(\n self,\n hass: HomeAssistant,\n unique_id: str,\n name: str,\n content_type: str,\n image: str,\n ) -> None:\n super().__init__(hass)\n self._attr_content_type = content_type\n self._attr_name = name\n self._attr_unique_id = unique_id\n self._image_filename = image",
"def _create(self, **attributes: Dict[str, object]) -> str:\n pass",
"def store_asset(self, asset, type_, layers, uid_prefix):\n logger.debug('Saving: %s' % asset)\n ci = cdb.CI()\n ci.uid = '%s-%s' % (uid_prefix, asset.id)\n ci.content_object = asset\n ci.type_id = type_\n try:\n # new CI\n ci.save()\n ci.layers = layers\n except IntegrityError:\n # Integrity error - existing CI Already in database.\n # Get CI by uid, and use it for saving data.\n ci = cdb.CI.get_by_content_object(asset)\n ci.name = '%s' % asset.name or unicode(asset)\n if 'barcode' in asset.__dict__.keys():\n ci.barcode = asset.barcode\n if isinstance(asset, db.Device):\n active = not asset.deleted\n else:\n active = True\n ci.state = (\n cdb.CI_STATE_TYPES.ACTIVE if active\n else cdb.CI_STATE_TYPES.INACTIVE\n )\n ci.save()\n return ci"
] | [
"0.671682",
"0.64737254",
"0.6421837",
"0.62232536",
"0.6000631",
"0.59809226",
"0.5915411",
"0.5874975",
"0.5862183",
"0.583357",
"0.580872",
"0.57912344",
"0.5771107",
"0.5771107",
"0.57580495",
"0.56909776",
"0.56909263",
"0.5686929",
"0.5659839",
"0.56492126",
"0.56392914",
"0.5617473",
"0.5613298",
"0.55873895",
"0.55189615",
"0.55093",
"0.5491087",
"0.5488998",
"0.54750484",
"0.5437548"
] | 0.6872285 | 0 |
Update an asset; requires its ID plus the new title, label, and URL. | def update(self) -> requests.request:
# Check if id is set
if self.args.id is None:
raise Exception('Provide id of asset you want to update')
# Check URL validity
if self.args.url is not None and self.check_url_invalidity():
raise Exception('Provided URL is not valid')
# Send PUT request
return requests.put(
self.REQUEST_URL + str(self.args.id),
{'title': self.args.title, 'label': self.args.label, 'url': self.args.url}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_asset(cls, id, asset_data):\n\n return ph_base._update_record('asset', id, asset_data)",
"def test_update_asset(self):\n pass",
"def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string",
"def test_update_asset_content(self):\n pass",
"def update(self, instance, validated_data):\n instance.asset_name = validated_data.get('asset_name', instance.asset_name)\n instance.asset_type = validated_data.get('asset_type', instance.asset_type)\n instance.asset_class = validated_data.get('asset_class', instance.asset_class)\n instance.save()\n return instance",
"def test_update_test_asset(self):\n pass",
"def _update_asset(request, course_key, asset_key):\r\n if request.method == 'DELETE':\r\n # Make sure the item to delete actually exists.\r\n try:\r\n content = contentstore().find(asset_key)\r\n except NotFoundError:\r\n return JsonResponse(status=404)\r\n\r\n # ok, save the content into the trashcan\r\n contentstore('trashcan').save(content)\r\n\r\n # see if there is a thumbnail as well, if so move that as well\r\n if content.thumbnail_location is not None:\r\n # We are ignoring the value of the thumbnail_location-- we only care whether\r\n # or not a thumbnail has been stored, and we can now easily create the correct path.\r\n thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.name)\r\n try:\r\n thumbnail_content = contentstore().find(thumbnail_location)\r\n contentstore('trashcan').save(thumbnail_content)\r\n # hard delete thumbnail from origin\r\n contentstore().delete(thumbnail_content.get_id())\r\n # remove from any caching\r\n del_cached_content(thumbnail_location)\r\n except:\r\n logging.warning('Could not delete thumbnail: %s', thumbnail_location)\r\n\r\n # delete the original\r\n contentstore().delete(content.get_id())\r\n # remove from cache\r\n del_cached_content(content.location)\r\n return JsonResponse()\r\n\r\n elif request.method in ('PUT', 'POST'):\r\n if 'file' in request.FILES:\r\n return _upload_asset(request, course_key)\r\n else:\r\n # Update existing asset\r\n try:\r\n modified_asset = json.loads(request.body)\r\n except ValueError:\r\n return HttpResponseBadRequest()\r\n contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])\r\n # Delete the asset from the cache so we check the lock status the next time it is requested.\r\n del_cached_content(asset_key)\r\n return JsonResponse(modified_asset, status=201)",
"def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)",
"def test_update_test_asset_content(self):\n pass",
"def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")",
"def update_object(self, oid, name, url):\n r = self.request(\n 'put',\n safeformat('registry/objects/{:int}/', oid),\n json.dumps({\n 'description': {\n 'name': name,\n 'url': url\n }\n })\n )\n return self._extract_id_from_batch_response(r, 'oid')",
"def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")",
"def testUpdatePhoto(self):\n photo_id = self._UploadEpisodeWithPhoto()\n\n self._tester.UpdatePhoto(self._cookie, photo_id, caption='An Updated Caption',\n placemark={'iso_country_code': 'US', 'country': 'United States',\n 'state': 'NY', 'locality': 'New York', 'sublocality': 'NoHo',\n 'thoroughfare': 'Broadway', 'subthoroughfare': '682'})",
"def test_update_software_asset_content(self):\n pass",
"def asset(self, asset):\n\n self._asset = asset",
"def put(id: int):\r\n parser = reqparse.RequestParser()\r\n parser.add_argument(\"title\", type=str)\r\n args = parser.parse_args()\r\n if args:\r\n filename = Path(__file__).parent / \"recipe-data.csv\"\r\n files = import_file.Files()\r\n recipe_load = files.import_from_csv(filename)\r\n recipes = Recipes(recipe_load)\r\n a_recipe = recipes.update_recipe(id, args)\r\n files.export_to_csv(recipes, filename)\r\n return jsonify(a_recipe)\r\n else:\r\n return abort(404)",
"def test_update_asset_state(self):\n pass",
"def update_metadata(sess, asset_link):\n get_json = sess.get(asset_link).json()\n update_metadata = get_json['customFields']\n update_metadata['SANDAG Last Access Date'] = datetime.datetime.today().strftime('%D')\n\n sess.patch(asset_link, \n data=json.dumps(update_metadata)).json()",
"def test_update_software_asset(self):\n pass",
"def update(self, src, labels): # real signature unknown; restored from __doc__\n pass",
"def update_resource(self, **kwargs):\n logging.warning('Updating a resource removes all existing data. '\n 'If you wish to keep the existing data, use `CachedCKAN.patch_resource`.')\n results = self.api.action.resource_update(**kwargs)\n self.get_ckan_metadata(True)\n if 'upload' in kwargs:\n resource_id = results['id'] if 'id' in results else kwargs['id']\n self._import_resource_to_cache(kwargs['upload'], resource_id)\n return results",
"def test_update_system_asset(self):\n pass",
"def update(self, title=None, description=None):\n url = (\"https://api.imgur.com/3/image/\"\n \"{0}\".format(self._delete_or_id_hash))\n is_updated = self._imgur._send_request(url, params=locals(),\n method='POST')\n if is_updated:\n self.title = title or self.title\n self.description = description or self.description\n return is_updated",
"def update(self, instance, validated_data):\n instance.href = validated_data.get('href', instance.href)\n instance.title = validated_data.get('title', instance.title)\n instance.datetime = validated_data.get('datetime', instance.datetime)\n instance.content = validated_data.get('content', instance.content)\n instance.coverimg = validated_data.get('coverimg', instance.title)\n instance.save()\n return instance",
"def update(self, instance, validated_data):\n instance.cat_name = validated_data.get('cat_name', instance.cat_name)\n instance.img = validated_data.get('img', instance.img)\n instance.desc = validated_data.get('desc', instance.desc)\n instance.save()\n return instance",
"def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj",
"def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)",
"def update_object(self, name: str) -> None:",
"def put(self, request, pk, format=None):\n graph = self.get_obj(pk)\n serializer = LoadGraphSerializer(graph, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def update(self, resource, id, **data):\n self.request('/' + resource + '/' + str(id), 'PUT', body=urllib.urlencode(data))\n return True"
] | [
"0.73195666",
"0.6679826",
"0.6523849",
"0.6501261",
"0.6478999",
"0.6240539",
"0.6233769",
"0.61489034",
"0.6104539",
"0.60541105",
"0.59750587",
"0.58513236",
"0.5828668",
"0.580936",
"0.5783961",
"0.5755456",
"0.5726288",
"0.571024",
"0.56786734",
"0.5664309",
"0.56492716",
"0.5624746",
"0.5621808",
"0.5610838",
"0.5584892",
"0.55817485",
"0.55547315",
"0.55406463",
"0.5536661",
"0.55253613"
] | 0.73020154 | 1 |
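
A minimal usage sketch for the update flow in the entry above. The AssetClient class name, endpoint URL, and argument names are assumptions introduced only for illustration; just the PUT call mirrors the dataset entry.

# Illustrative harness; only the PUT call mirrors the dataset entry above.
import argparse
import requests

class AssetClient:
    REQUEST_URL = "https://example.com/api/assets/"  # assumed endpoint

    def __init__(self, args: argparse.Namespace) -> None:
        self.args = args

    def update(self) -> requests.Response:
        # PUT the new title/label/url to REQUEST_URL + id, as in the entry above.
        return requests.put(
            self.REQUEST_URL + str(self.args.id),
            {"title": self.args.title, "label": self.args.label, "url": self.args.url},
        )

args = argparse.Namespace(id=42, title="New title", label="new-label", url="https://example.com")
client = AssetClient(args)
# client.update()  # uncommenting this line would send the real PUT request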
Delete an asset; requires its ID. | def delete(self) -> requests.request:
# Check if id is set
if self.args.id is None:
raise Exception('Provide id of asset you want to delete')
# Send DELETE request
return requests.delete(self.REQUEST_URL + str(self.args.id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type, action='DELETE')",
"def test_delete_asset(self):\n pass",
"def delete(self, _id):",
"def delete_url_asset(self, asset_id):\n return self.delete_asset(asset_id, 'URL')",
"def test_delete(self):\n obj = self.provision_single_asset()\n obj_id = obj.id\n self.delete('widget', 200, params={'id': obj_id})\n obj = self.session.query(self.widget_model).filter_by(id=obj_id).first()\n assert obj is None",
"def delete_handle_asset(self, asset_id):\n return self.delete_asset(asset_id, 'HANDLE')",
"def test_delete_system_asset(self):\n pass",
"def delete_asset(location, filename):\r\n try:\r\n content = Transcript.get_asset(location, filename)\r\n contentstore().delete(content.get_id())\r\n log.info(\"Transcript asset %s was removed from store.\", filename)\r\n except NotFoundError:\r\n pass\r\n return StaticContent.compute_location(location.course_key, filename)",
"def delete():",
"def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()",
"def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )",
"def delete_item(id):\n return '', 201",
"def delete_image(self):\n Image.objects.get(id = self.id).delete()",
"def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()",
"def delete(self, cls, id):\n pass",
"def test_delete_asset_type(self):\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n statuses = AssetStatus.objects.filter(asset=get_asset)\n for status in statuses:\n status.delete()\n get_asset.delete()\n self.assertEqual(self.all_assets.count(), 0)",
"def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})",
"def delete(self, resource_id, file_id):\n d = Deposition.get(resource_id, user=current_user)\n\n # Sort files raise ForbiddenAction if not authorized\n df = d.remove_file(file_id)\n if df is None:\n abort(404, message=\"File does not exist\", status=404)\n df.delete()\n d.save()\n return \"\", 204",
"def delete(self,id):\r\n return delete(id=id)",
"def delete(self, id):\n raise NotImplementedError",
"def delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200",
"def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True",
"def delete_phone_asset(self, asset_id):\n return self.delete_asset(asset_id, 'PHONE')",
"def delete(self, req, id):\n context = req.environ['nova.context']\n self._image_service.delete(context, id)\n return webob.exc.HTTPNoContent()",
"def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}",
"def delete(self, id):\r\n try:\r\n self.valid_args()\r\n inst = db.session.query(self.__class__).get(id)\r\n if inst is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).delete(inst)\r\n db.session.delete(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return '', 204\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='DELETE')",
"def delete(self, id):\n delete_entry(id)\n return None, 204",
"def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')",
"def delete(self, *args, **kwargs):\n pass",
"def delete(self, *args, **kwargs):\n pass"
] | [
"0.79241407",
"0.7676571",
"0.7436167",
"0.7214911",
"0.7157785",
"0.7132754",
"0.6974048",
"0.6962282",
"0.69503105",
"0.6911497",
"0.68340725",
"0.6808648",
"0.6807498",
"0.6734712",
"0.6727808",
"0.67007345",
"0.669589",
"0.6653962",
"0.6645524",
"0.6641693",
"0.6624981",
"0.6609732",
"0.6608589",
"0.66009825",
"0.65647197",
"0.65237814",
"0.65211815",
"0.65134895",
"0.6497846",
"0.6497846"
] | 0.7802429 | 1 |
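
A hedged sketch of calling such a delete endpoint and checking the outcome; the base URL and the accepted status codes are assumptions, not part of the entry above.

import requests

def delete_asset(asset_id: int, base_url: str = "https://example.com/api/assets/") -> bool:
    # Issue the DELETE against base_url + id, as the entry above does.
    response = requests.delete(base_url + str(asset_id))
    # REST APIs commonly answer a successful delete with 200 or 204 No Content.
    return response.status_code in (200, 204)

# delete_asset(42)  # would issue the DELETE request against the assumed endpoint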
Returns True if the URL is invalid, False if it is not. | def check_url_invalidity(self) -> bool:
validate = URLValidator()
try:
validate(self.args.url)
return False
except ValidationError:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_url(value):\n\n valid = validators.url(value)\n if valid != True:\n return False",
"def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True",
"def check_url(value):\n\n valid = validators.url(value)\n if valid is not True:\n return False",
"def check_url(url=None, parse_url=None):\n return False",
"def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True",
"def valid(url):\n return 0 < len(urlparse(url)[1])",
"def validate_url(self):\n pass",
"def _validate_url(url):\n if not url or url.count('/') != 1 or url[0] != '@':\n return False\n return True",
"def isValidURL(self, url):\n if \"imdb.com\" in url:\n return True\n else:\n return False",
"def validateURL(url):\n pattern = re.compile(\"^https*:\\/\\/\\w+(\\.\\w+){2}[\\/A-Za-z\\d\\?\\=]*$\")\n match = pattern.match(url)\n\n return True if match else False",
"def is_valid(url):\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)",
"def is_valid(url):\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)",
"def validate_url(path):\n parsed = urlparse(path)\n return bool(parsed.scheme) and bool(parsed.netloc)",
"def url_validator(url: str) -> bool:\n import re\n regex = re.compile(\n r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n return re.match(regex, url) is not None",
"def validate_url(attribute_name, url):\n if not url:\n return\n\n try:\n result = urlparse(url=url)\n if [result.scheme, result.netloc, result.path]:\n return True\n except:\n raise ValueError('{attribute_name}: The given string {url} is not a '\n 'valid url.'\n .format(attribute_name=attribute_name, url=url))",
"def url_check(url):\n \n url_tuple = urlparse.urlparse(url)\n if url_tuple[0] == 'http' or url_tuple[0] == 'https' and url_tuple[1] != \"\":\n return url\n else:\n raise Exception('bad url')",
"def is_valid_url(value):\n regex = re.compile(\n r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|'\n r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n result = regex.match(value)\n return bool(result)",
"def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False",
"def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)",
"def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:",
"def _is_valid(self, url: ParseResult):\n\n if (\n re.match('(.*).' + self.netloc, url.netloc) is None or\n re.match('(.*)\\+[0-9]*$', url.path) is not None or\n re.match('(.*)javascript:(.*)', url.path) is not None\n ):\n return False\n\n return True",
"def is_url_valid(self, url: str) -> bool:\n if self.exclude and re.search(self.exclude, url):\n return False\n\n parts = urllib.parse.urlparse(url)\n\n if parts.scheme not in ('http', 'https'):\n LOGGER.debug(f'skipping non-http scheme in found at {url}')\n return False\n\n host, _ = urllib.parse.splitport(parts.netloc) # type: ignore\n\n if not self.host_okay(host):\n LOGGER.debug(f'skipping non-root host found at {url}')\n return False\n\n return True",
"def validate_url(url):\n\n RE_D = re.compile(r'^(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?$')\n return bool(RE_D.match(url))",
"def urlValidator(url):\n if 'amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 1)')\n else:\n validURL = url\n if 'Amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 2)')\n else:\n validURL = url\n\n return validURL",
"def _validate_url(url):\n if urlparse.urlparse(url).scheme not in VALID_SCHEMES:\n _fail(url, \"Invalid URL\")",
"def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False",
"def _is_valid_url(url):\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?'\n r'|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|' # ...or ipv4\n r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)' # ...or ipv6\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n if regex.search(url):\n return True\n return False",
"def is_valid_url(url: str) -> bool:\n if not url:\n return False\n\n regex = (\"((http|https)://)(www.)?\" +\n \"[a-zA-Z0-9@:%._\\\\+~#?&//=]\" +\n \"{2,256}\\\\.[a-z]\" +\n \"{2,6}\\\\b([-a-zA-Z0-9@:%\" +\n \"._\\\\+~#?&//=]*)\")\n p = re.compile(regex)\n return True if re.search(p, url) else False",
"def is_valid_url(self, url):\n output = self.api.download_is_valid_url(url, non_blocking=False)\n error = ''\n if not output:\n error = 'Invalid url'\n return output, error",
"def validate_url(url_in):\n if url_in == \"\":\n error = \"[ERROR] Input is empty\"\n return False\n elif not url_in.startswith(\"https://\"):\n error = \"[ERROR] Input doesn't start with https://\"\n return False\n elif not url_in.startswith(\"https://github.com/\"):\n error = \"[ERROR] Input is not a GitHub URL\"\n return False\n else:\n error = \"[INFO] Input is a valid URL\"\n return True"
] | [
"0.830075",
"0.8289973",
"0.82301325",
"0.82217",
"0.81955594",
"0.8174947",
"0.8111032",
"0.81069154",
"0.81027186",
"0.80677193",
"0.8043112",
"0.8043112",
"0.80158013",
"0.79966223",
"0.79324365",
"0.78702646",
"0.78689444",
"0.7865635",
"0.7850163",
"0.7827034",
"0.78099304",
"0.7809625",
"0.779003",
"0.7771997",
"0.7757441",
"0.77523106",
"0.7748918",
"0.77206296",
"0.7715752",
"0.76990604"
] | 0.8617458 | 0 |
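
For comparison, a dependency-free check in the same spirit is sketched below; it uses urllib.parse rather than the Django URLValidator relied on in the entry above, so it is an alternative technique, not the same validator.

from urllib.parse import urlparse

def url_is_invalid(url: str) -> bool:
    # Invalid unless the string parses with an http(s) scheme and a network location.
    parsed = urlparse(url)
    return not (parsed.scheme in ("http", "https") and parsed.netloc)

assert url_is_invalid("not-a-url")
assert not url_is_invalid("https://example.com/path")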
Decorator that lifts a unary predicate into a Predicate. | def predicate(f):
wrapper = Predicate(f)
update_wrapper(wrapper, f)
return wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predicate (self, X, * args, ** kw) :\n self.lhs = self.lhs.predicate (X, * args, ** kw)\n return self",
"def visit_unbound_predicate(self, predicate) -> T:",
"def _generate_unary_deferer(op_func):\n\n def deferer(self, *args, **kwargs):\n return type(self)._defer_unary_elementwise(\n self, op_func, *args, **kwargs\n )\n\n return deferer",
"def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator",
"def deco_unary_ufunc(torch_func):\n\n @normalizer\n def wrapped(\n x: ArrayLike,\n /,\n out: Optional[OutArray] = None,\n *,\n where=True,\n casting: Optional[CastingModes] = \"same_kind\",\n order=\"K\",\n dtype: Optional[DTypeLike] = None,\n subok: NotImplementedType = False,\n signature=None,\n extobj=None,\n ):\n if dtype is not None:\n x = _util.typecast_tensor(x, dtype, casting)\n\n if torch_func.__name__ in _fp_unary:\n x = _util.cast_int_to_float(x)\n\n result = torch_func(x)\n result = _ufunc_postprocess(result, out, casting)\n return result\n\n wrapped.__qualname__ = torch_func.__name__\n wrapped.__name__ = torch_func.__name__\n\n return wrapped",
"def unary_wrap(run):\n\n def run_unary(transitions, input, steps):\n return run(transitions, '1' * input, steps)\n\n return run_unary",
"def _build_unary_op(op):\n def unary_op(self):\n \"\"\"`plist` unary operation; applied element-wise to `self`.\n\n `unary_op` is not callable directly from `plist`. It implements the various\n python unary operations: `-`, `~`, `abs`, etc. The unary operators\n can be called directly with their corresponding 'magic' functions,\n `plist.__neg__`, `plist.__invert__`, `plist.__abs__`, etc., but are generally just\n called implicitly.\n\n Examples:\n ```python\n foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])\n (foos.bar == 0).baz = 3 + (foos.bar == 0).foo\n (foos.bar == 1).baz = 6\n\n assert ((-foos.foo).aslist() ==\n [0, -1, -2])\n assert ((~foos.foo).aslist() ==\n [-1, -2, -3])\n\n by_bar = foos.bar.groupby()\n\n assert ((-by_bar.foo).aslist() ==\n [[0, -2], [-1]])\n assert ((~by_bar.foo).aslist() ==\n [[-1, -3], [-2]])\n ```\n\n Returns:\n A new `plist`, where each element of `self` had the operation passed to\n `_build_unary_op` applied to it.\n \"\"\"\n return plist([op(x) for x in self], root=self.__root__)\n\n return unary_op",
"def filter(pred):\n def _filter_xducer(step):\n def _filter_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if pred(x) else r\n return _filter_step\n return _filter_xducer",
"def change_predicate(self, new_predicate):\n raise NotImplementedError",
"def predicate (self, X, * args, ** kw) :\n XR = X.REF (X.ETW, _polymorphic_x = self._polymorphic_x)\n self.predicates = list \\\n (p.predicate (XR, * args, ** kw) for p in self.predicates)\n return self",
"def remove_if(ary, predicate, extra_args=[], preamble=\"\", queue=None, wait_for=None):\n return copy_if(ary, \"!(%s)\" % predicate, extra_args=extra_args,\n preamble=preamble, queue=queue, wait_for=wait_for)",
"def addpredicate(self, pred):\n self._preds.append(pred)",
"def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")",
"def predicated_path_hook(cls, predicate, *a, **kw):\n def predicated_path_hook_for_FileFinder(path):\n \"\"\"path hook for FileFinder\"\"\"\n if not os.path.isdir(path):\n raise ImportError(\"only directories are supported\")\n if not predicate(path):\n raise ImportError(\"predicate not satisfied\")\n return cls(path, *a, **kw)\n return predicated_path_hook_for_FileFinder",
"def visit_unary(spec):",
"def get_numeric_predicate_bounds(predicate: Predicate) -> ConstructivePredicate:\n unchanged = ConstructivePredicate.unchanged(predicate)\n if (\n isinstance(predicate, partial)\n and len(predicate.args) == 1\n and not predicate.keywords\n ):\n arg = predicate.args[0]\n if (\n (isinstance(arg, Decimal) and Decimal.is_snan(arg))\n or not isinstance(arg, (int, float, Fraction, Decimal))\n or math.isnan(arg)\n ):\n return unchanged\n options = {\n # We're talking about op(arg, x) - the reverse of our usual intuition!\n operator.lt: {\"min_value\": arg, \"exclude_min\": True}, # lambda x: arg < x\n operator.le: {\"min_value\": arg}, # lambda x: arg <= x\n operator.eq: {\"min_value\": arg, \"max_value\": arg}, # lambda x: arg == x\n operator.ge: {\"max_value\": arg}, # lambda x: arg >= x\n operator.gt: {\"max_value\": arg, \"exclude_max\": True}, # lambda x: arg > x\n }\n if predicate.func in options:\n return ConstructivePredicate(options[predicate.func], None)\n\n # This section is a little complicated, but stepping through with comments should\n # help to clarify it. We start by finding the source code for our predicate and\n # parsing it to an abstract syntax tree; if this fails for any reason we bail out\n # and fall back to standard rejection sampling (a running theme).\n try:\n if predicate.__name__ == \"<lambda>\":\n source = extract_lambda_source(predicate)\n else:\n source = inspect.getsource(predicate)\n tree: ast.AST = ast.parse(source)\n except Exception:\n return unchanged\n\n # Dig down to the relevant subtree - our tree is probably a Module containing\n # either a FunctionDef, or an Expr which in turn contains a lambda definition.\n while isinstance(tree, ast.Module) and len(tree.body) == 1:\n tree = tree.body[0]\n while isinstance(tree, ast.Expr):\n tree = tree.value\n\n if isinstance(tree, ast.Lambda) and len(tree.args.args) == 1:\n return numeric_bounds_from_ast(tree.body, tree.args.args[0].arg, unchanged)\n elif isinstance(tree, ast.FunctionDef) and len(tree.args.args) == 1:\n if len(tree.body) != 1 or not isinstance(tree.body[0], ast.Return):\n # If the body of the function is anything but `return <expr>`,\n # i.e. as simple as a lambda, we can't process it (yet).\n return unchanged\n argname = tree.args.args[0].arg\n body = tree.body[0].value\n assert isinstance(body, ast.AST)\n return numeric_bounds_from_ast(body, argname, unchanged)\n return unchanged",
"def is_unary(*args):\n return _ida_hexrays.is_unary(*args)",
"def keep(self, predicate=None):\n self.__ff.append(\n lambda c: filter(\n self._as_callable(predicate) if (predicate is not None) else None,\n c\n )\n )\n return self",
"def filter(self, predicate):\n def _filter(iterator):\n while True:\n item = next(iterator)\n if predicate(item):\n return item\n return self.__class__(self, _filter)",
"def visit_UnaryOp(self, node):\n self.generic_visit(node)\n if isinstance(node.operand, ast.Num):\n # Don't transform negations of numeric literals. Just treat them\n # as literals.\n return node\n return to_call(self.op_to_function(node.op), [node.operand])",
"def unary_op(self):\n return plist([op(x) for x in self], root=self.__root__)",
"def starfilter(\n predicate: Callable[..., bool]\n) -> Callable[[AsyncObservable[Any]], AsyncObservable[Any]]:\n\n def handler(\n next: Callable[[Iterable[Any]], Awaitable[None]], args: Iterable[Any]\n ) -> Awaitable[None]:\n if predicate(*args):\n return next(args)\n return aiotools.empty()\n\n return transform(handler)",
"def filterfalse(iterable, predicate):\n for x in iterable:\n if not predicate(x):\n yield x",
"def __parse_predicate(self, predicate):\n try:\n position = int(predicate)\n if self.axis == AXIS_DESCENDANT:\n return PredicateFilter('position', value=position)\n else:\n # use the search limit feature instead of a checker\n self.soup_args['limit'] = position\n self.index = position - 1\n return None\n except ValueError:\n pass\n\n if predicate == \"last()\":\n self.index = -1\n return None\n\n negate = self._re_predicate_not.match(predicate)\n if negate:\n predicate = negate.group(1)\n\n function_match = self._re_predicate_function.match(predicate)\n if function_match:\n name = function_match.group(1)\n arguments = function_match.group(2)\n value = function_match.group(4)\n if value is not None:\n value = function_match.group(5)\n return PredicateFilter(name, arguments, value)\n\n axis_match = self._re_predicate_axis.match(predicate)\n if axis_match:\n axis = axis_match.group(1)\n if axis is None:\n axis = AXIS_CHILD\n elif axis == '@':\n axis = AXIS_ATTRIBUTE\n if axis == AXIS_ATTRIBUTE:\n # use the attribute search feature instead of a checker\n attribute_name = axis_match.group(3)\n if axis_match.group(5) is not None:\n attribute_value = axis_match.group(6)\n elif not negate:\n attribute_value = True\n else:\n attribute_value = None\n if not self.soup_args.has_key('attrs'):\n self.soup_args['attrs'] = {}\n self.soup_args['attrs'][attribute_name] = attribute_value\n return None\n elif axis == AXIS_CHILD:\n node_test = axis_match.group(3)\n node_value = axis_match.group(6)\n return PredicateFilter('axis', node_test, value=node_value,\n negate=negate)\n\n raise NotImplementedError(\"This predicate is not implemented\")",
"def _(obj: UnboundPredicate, visitor: BooleanExpressionVisitor[T]) -> T:\n return visitor.visit_unbound_predicate(predicate=obj)",
"def lift(f):\n @wraps(f)\n def inner(value):\n result = f(value)\n return SuperBool(result, f.__doc__) if not isinstance(result, SuperBool) else result\n return inner",
"def remove(pred):\n def _remove_xducer(step):\n def _remove_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if not pred(x) else r\n return _remove_step\n return _remove_xducer",
"def unary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node:\n input_node = as_node(input_value)\n node = node_factory_function(input_node, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return wrapper",
"def finite_unary_rule(spec):\n\n uspec = spec.copy()\n uspec[\"form\"] = \"finite\"\n \n ret = unary_rule(uspec)\n\n ret[\"mother\"][\"syn\"][\"tense\"] = spec[\"tense\"]\n ret[\"mother\"][\"syn\"][\"agr\"] = spec[\"agr\"]\n\n return ret",
"def activate_predicate(self):\n pass"
] | [
"0.61001146",
"0.59637415",
"0.58849007",
"0.58419997",
"0.5755853",
"0.5751772",
"0.5665722",
"0.5661092",
"0.5583315",
"0.55070066",
"0.5454716",
"0.5451329",
"0.53772974",
"0.5363552",
"0.53583723",
"0.53360695",
"0.5334606",
"0.531459",
"0.52997607",
"0.52853334",
"0.5235893",
"0.5232017",
"0.5227021",
"0.5174233",
"0.5171496",
"0.5088681",
"0.5078662",
"0.5026197",
"0.5022496",
"0.49737477"
] | 0.6924056 | 0 |
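
A minimal sketch of what such a decorator enables. The Predicate class here (with & and ~ combinators) is an assumption standing in for whatever class the original code defines.

from functools import update_wrapper

class Predicate:
    # Assumed stand-in: callable wrapper supporting boolean combinators.
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, x):
        return self.fn(x)

    def __and__(self, other):
        return Predicate(lambda x: self(x) and other(x))

    def __invert__(self):
        return Predicate(lambda x: not self(x))

def predicate(f):
    wrapper = Predicate(f)
    update_wrapper(wrapper, f)
    return wrapper

@predicate
def is_positive(n):
    return n > 0

assert (is_positive & ~Predicate(lambda n: n % 2 == 0))(3)  # positive and odd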
Test the transaction_for_doi method | def test_get_transaction_for_doi(self):
# Submit a reserve, then use the assigned doi to get the transaction record
reserve_kwargs = {
"input": join(self.input_dir, "pds4_bundle_with_contributors.xml"),
"node": "img",
"submitter": "my_user@my_node.gov",
"force": True,
}
doi_label = self._reserve_action.run(**reserve_kwargs)
dois, _ = self._web_parser.parse_dois_from_label(doi_label)
doi = dois[0]
transaction_record = self._list_action.transaction_for_doi(doi.doi)
self.assertIsInstance(transaction_record, dict)
# Make sure the transaction record aligns with the Doi record
self.assertEqual(doi.doi, transaction_record["doi"])
self.assertEqual(doi.pds_identifier, transaction_record["identifier"])
self.assertEqual(doi.status, transaction_record["status"])
self.assertEqual(doi.title, transaction_record["title"])
# Ensure we get an exception when searching for an unknown DOI value
with self.assertRaises(UnknownDoiException):
self._list_action.transaction_for_doi("unknown/doi") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_transaction_for_identifier(self):\n # Submit a reserve, then use the PDS identifier to get the transaction record\n reserve_kwargs = {\n \"input\": join(self.input_dir, \"pds4_bundle_with_contributors.xml\"),\n \"node\": \"img\",\n \"submitter\": \"my_user@my_node.gov\",\n \"force\": True,\n }\n\n doi_label = self._reserve_action.run(**reserve_kwargs)\n\n dois, _ = self._web_parser.parse_dois_from_label(doi_label)\n doi = dois[0]\n\n transaction_record = self._list_action.transaction_for_identifier(doi.pds_identifier)\n\n self.assertIsInstance(transaction_record, dict)\n\n # Make sure the transaction record aligns with the Doi record\n self.assertEqual(doi.doi, transaction_record[\"doi\"])\n self.assertEqual(doi.pds_identifier, transaction_record[\"identifier\"])\n self.assertEqual(doi.status, transaction_record[\"status\"])\n self.assertEqual(doi.title, transaction_record[\"title\"])\n\n # Ensure we get an exception when searching for an unknown ID value\n with self.assertRaises(UnknownIdentifierException):\n self._list_action.transaction_for_identifier(\"urn:unknown_id\")",
"def test_execute_transaction_6(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid '%'\n domain_data2 = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data2[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % domain\")\n domain_data2[\"Description\"] = description\n domain_data2[\"HitID\"] = \"unique_id\"\n statement3 = test_db_utils.domain_stmt(domain_data2)\n # Valid\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 2)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)",
"def test_execute_transaction_7(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid '% w'\n domain_data2 = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data2[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % wdomain\")\n domain_data2[\"Description\"] = description\n domain_data2[\"HitID\"] = \"unique_id\"\n statement3 = test_db_utils.domain_stmt(domain_data2)\n # Valid\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 2)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)",
"def assertAccessToTransaction(self, transaction_id, code=200, user=\"\"):\n\n res = self.get(f\"/associations/transactions/{transaction_id}/\")\n self.assertEqual(\n res.status_code,\n code,\n msg=f\"User {user} cannot access transaction {transaction_id}.\",\n )",
"def test_execute_transaction_3(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data)\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n statements = [statement1, statement2]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)",
"def test_pay_documents_on_transaction_fail(self):\n\n transaction = TransactionFactory.create(\n state=Transaction.States.Pending\n )\n transaction.fail()\n transaction.save()\n\n proforma = transaction.proforma\n invoice = transaction.invoice\n\n self.assertNotEqual(proforma.state, proforma.STATES.PAID)\n self.assertNotEqual(invoice.state, invoice.STATES.PAID)",
"def test_trade(self):\n pass",
"def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )",
"def test_execute_transaction_2(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n statements = [statement]\n result = find_domains.execute_transaction(self.connection, statements)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)",
"def test_submit_iso20022_payment_instruction(self):\n pass",
"def test_execute_transaction_1(self):\n result = find_domains.execute_transaction(self.connection)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 0)",
"def test_ipfs_transaction():\n ipfs_id = b58encode('hello')\n\n transaction = IPFS(ipfs_id)\n transaction.set_type_group(TRANSACTION_TYPE_GROUP.CORE)\n transaction.set_nonce(1)\n transaction.schnorr_sign('testing')\n transaction_dict = transaction.to_dict()\n\n assert transaction_dict['nonce'] == 1\n assert transaction_dict['signature']\n assert transaction_dict['type'] is TRANSACTION_IPFS\n assert transaction_dict['typeGroup'] == 1\n assert transaction_dict['typeGroup'] == TRANSACTION_TYPE_GROUP.CORE.value\n assert transaction_dict['fee'] == 500000000\n assert transaction_dict['asset']['ipfs'] == ipfs_id\n\n transaction.schnorr_verify() # if no exception is raised, it means the transaction is valid",
"def test_retrieve_iso20022_payment_instruction(self):\n pass",
"def test_execute_transaction_4(self):\n domain_data1 = test_data_utils.get_trixie_domain_data()\n test_db_utils.insert_data(DOMAIN, domain_data1)\n domain_table_results1 = test_db_utils.get_data(test_db_utils.domain_table_query)\n # Duplicate HitID\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Valid\n statement3 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results2 = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results1), 1)\n with self.subTest():\n self.assertEqual(len(domain_table_results2), 1)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)",
"def test_get_payments_by_id(self):\n pass",
"def test_qtd(self):\n self.assertEqual(calcular_qtd_acoes_ate_dia_por_ticker(self.investidor, 'BBPO11', datetime.date(2018, 2, 13)), 617)",
"def test_client_tax_information_retrieve(self):\n pass",
"def test_execute_transaction_5(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid\n domain_data2 = test_data_utils.get_trixie_domain_data()\n statement3 = test_db_utils.domain_stmt(domain_data2)\n statement3 = statement3.replace(\"HitID\", \"unique_id\")\n statement3 = statement3.replace(\"Name\", \"Name_invalid\")\n # Valid - function should exit before executing this though.\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertEqual(domain_status, 0)",
"def test_get_pay_in_details(self):\n pass",
"def testTransactions2(self):\n predicate = u\"metadata:predicate_Îñţér\"\n t1 = data_store.DB.Transaction(u\"metadata:row1Îñţér\", token=self.token)\n t2 = data_store.DB.Transaction(u\"metadata:row2Îñţér\", token=self.token)\n\n # This grabs read locks on these transactions\n t1.Resolve(predicate)\n t2.Resolve(predicate)\n\n # Now this should not raise since t1 and t2 are on different subjects\n t1.Set(predicate, \"1\")\n t1.Commit()\n t2.Set(predicate, \"2\")\n t2.Commit()",
"def test_get_note(self):\n pass",
"async def test_txn_get(self):\n self.stream.preset_response(transaction=Mocks.make_txns('1')[0])\n\n response = await self.get_assert_200('/transactions/1')\n self.stream.assert_valid_request_sent(transaction_id='1')\n\n self.assertNotIn('head', response)\n self.assert_has_valid_link(response, '/transactions/1')\n self.assertIn('data', response)\n self.assert_txns_well_formed(response['data'], '1')",
"def test_saving_and_retriving_transaction(self):\n\n transaction = models.Transaction.objects.create(\n type=\"E\",\n value=\"100.00\",\n user=sample_user()\n )\n\n self.assertEqual(str(transaction), transaction.value)",
"def test_sign_tx_fetchai(self):\n tx_hash = Web3.keccak(text=\"some_bytes\")\n\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n ledger_id=self.ledger_id,\n info=self.info,\n signing_payload={\"tx_hash\": tx_hash},\n )\n\n tx_signature = self.decision_maker._sign_tx(tx_message)\n assert tx_signature is not None",
"def assertNoAccessToTransaction(\n self, transaction_id, code=403, codes=None, user=\"\"\n ):\n\n res = self.get(f\"/associations/transactions/{transaction_id}/\")\n\n if codes is not None:\n self.assertIn(\n res.status_code,\n codes,\n msg=f\"User {user} can access transaction {transaction_id}.\",\n )\n else:\n self.assertEqual(\n res.status_code,\n code,\n msg=f\"User {user} can access transaction {transaction_id}.\",\n )",
"def test_handle_transactions(self):\n # =================================================================\n # test: add_transaction doesn't satisfies order (shares left)\n # =================================================================\n\n sell_order = StockOrderWrapper(self.order_5)\n buy_order = StockOrderWrapper(self.order_7)\n share_amount = 3\n share_price = self.order_5.order_price_per_share\n transaction_status = PROCESSED\n\n buy_order.add_transaction(sell_order=sell_order, share_amount=share_amount)\n\n trans_exp_7 = Transaction(buy=self.order_7, sell=self.order_5, share_amount=3,\n share_price=share_price, transaction_status=transaction_status)\n\n trans_real_7 = buy_order.handle_transactions(sell_order)\n\n self.is_equal_transaction(trans_real_7, trans_exp_7)\n self.assertEqual(sell_order.shares_left, 5)\n self.assertEqual(buy_order.shares_left, 7)",
"def test_post_opening_balance_journals(self):\n pass",
"def test_get_uniqueId():\n rep=RentRepository()\n rep.store(\"12\",\"23\",\"1\", \"1\")\n try:\n\n idBook=\"13\"\n idCustomer=\"54\"\n flag=\"1\"\n id=\"1\"\n Validator.get_uniqueId(rep.get_all(),id)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True",
"def test_execute_transaction_8(self, es_mock):\n stmt_result1 = 0\n type_error1 = False\n # TODO make sure this is set correctly\n value_error1 = False\n\n msg1 = \"empty\"\n mock_result1 = (stmt_result1, type_error1, value_error1, msg1)\n\n stmt_result2 = 0\n type_error2 = False\n # TODO make sure this is set correctly\n value_error2 = False\n\n msg2 = 2 # the function expects this to be a string, so this should\n # break the code and trigger the except block.\n mock_result2 = (stmt_result2, type_error2, value_error2, msg2)\n es_mock.side_effect = [mock_result1, mock_result2]\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n\n statements = [statement1, statement2]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertEqual(es_mock.call_count, 2)",
"def test_get_insumo(self):"
] | [
"0.7132114",
"0.59276116",
"0.58578324",
"0.5844066",
"0.5840851",
"0.58291525",
"0.57940173",
"0.57708603",
"0.5729419",
"0.5708635",
"0.569995",
"0.5694162",
"0.56870985",
"0.5622052",
"0.5576013",
"0.55612105",
"0.55599636",
"0.5553421",
"0.5456069",
"0.54547375",
"0.53775835",
"0.5359478",
"0.5354079",
"0.5316291",
"0.52702874",
"0.526724",
"0.52652836",
"0.5260112",
"0.5254513",
"0.5240688"
] | 0.8255177 | 0 |
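
The final assertion in the test above relies on a lookup-or-raise behavior for unknown DOIs. The stand-in below illustrates that pattern only; the exception name and record shape mirror the test but are assumptions, not the real service API.

# Dict-backed stand-in: raise for unknown DOIs, as the assertRaises check expects.
class UnknownDoiException(Exception):
    pass

_records = {"10.0000/example": {"doi": "10.0000/example", "status": "reserved"}}

def transaction_for_doi(doi: str) -> dict:
    try:
        return _records[doi]
    except KeyError:
        raise UnknownDoiException(f"No transaction found for DOI {doi}")

assert transaction_for_doi("10.0000/example")["status"] == "reserved"
try:
    transaction_for_doi("unknown/doi")
except UnknownDoiException:
    pass  # expected: unknown DOIs raise instead of returning a record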
Test the transaction_for_identifier method | def test_get_transaction_for_identifier(self):
# Submit a reserve, then use the PDS identifier to get the transaction record
reserve_kwargs = {
"input": join(self.input_dir, "pds4_bundle_with_contributors.xml"),
"node": "img",
"submitter": "my_user@my_node.gov",
"force": True,
}
doi_label = self._reserve_action.run(**reserve_kwargs)
dois, _ = self._web_parser.parse_dois_from_label(doi_label)
doi = dois[0]
transaction_record = self._list_action.transaction_for_identifier(doi.pds_identifier)
self.assertIsInstance(transaction_record, dict)
# Make sure the transaction record aligns with the Doi record
self.assertEqual(doi.doi, transaction_record["doi"])
self.assertEqual(doi.pds_identifier, transaction_record["identifier"])
self.assertEqual(doi.status, transaction_record["status"])
self.assertEqual(doi.title, transaction_record["title"])
# Ensure we get an exception when searching for an unknown ID value
with self.assertRaises(UnknownIdentifierException):
self._list_action.transaction_for_identifier("urn:unknown_id") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )",
"def test_get_uniqueId():\n rep=RentRepository()\n rep.store(\"12\",\"23\",\"1\", \"1\")\n try:\n\n idBook=\"13\"\n idCustomer=\"54\"\n flag=\"1\"\n id=\"1\"\n Validator.get_uniqueId(rep.get_all(),id)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True",
"def test_companies_company_id_connections_connection_id_data_commerce_transactions_transaction_id_get(self):\n pass",
"def test_execute_transaction_4(self):\n domain_data1 = test_data_utils.get_trixie_domain_data()\n test_db_utils.insert_data(DOMAIN, domain_data1)\n domain_table_results1 = test_db_utils.get_data(test_db_utils.domain_table_query)\n # Duplicate HitID\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Valid\n statement3 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results2 = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results1), 1)\n with self.subTest():\n self.assertEqual(len(domain_table_results2), 1)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)",
"def test_execute_transaction_5(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid\n domain_data2 = test_data_utils.get_trixie_domain_data()\n statement3 = test_db_utils.domain_stmt(domain_data2)\n statement3 = statement3.replace(\"HitID\", \"unique_id\")\n statement3 = statement3.replace(\"Name\", \"Name_invalid\")\n # Valid - function should exit before executing this though.\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertEqual(domain_status, 0)",
"def test_create_identity(self):\n pass",
"def assertAccessToTransaction(self, transaction_id, code=200, user=\"\"):\n\n res = self.get(f\"/associations/transactions/{transaction_id}/\")\n self.assertEqual(\n res.status_code,\n code,\n msg=f\"User {user} cannot access transaction {transaction_id}.\",\n )",
"def test_get_transaction_for_doi(self):\n # Submit a reserve, then use the assigned doi to get the transaction record\n reserve_kwargs = {\n \"input\": join(self.input_dir, \"pds4_bundle_with_contributors.xml\"),\n \"node\": \"img\",\n \"submitter\": \"my_user@my_node.gov\",\n \"force\": True,\n }\n\n doi_label = self._reserve_action.run(**reserve_kwargs)\n\n dois, _ = self._web_parser.parse_dois_from_label(doi_label)\n doi = dois[0]\n\n transaction_record = self._list_action.transaction_for_doi(doi.doi)\n\n self.assertIsInstance(transaction_record, dict)\n\n # Make sure the transaction record aligns with the Doi record\n self.assertEqual(doi.doi, transaction_record[\"doi\"])\n self.assertEqual(doi.pds_identifier, transaction_record[\"identifier\"])\n self.assertEqual(doi.status, transaction_record[\"status\"])\n self.assertEqual(doi.title, transaction_record[\"title\"])\n\n # Ensure we get an exception when searching for an unknown DOI value\n with self.assertRaises(UnknownDoiException):\n self._list_action.transaction_for_doi(\"unknown/doi\")",
"def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass",
"def test_execute_transaction_1(self):\n result = find_domains.execute_transaction(self.connection)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 0)",
"def startTransaction(self) -> int:\n ...",
"def test_execute_transaction_2(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n statements = [statement]\n result = find_domains.execute_transaction(self.connection, statements)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)",
"def test_execute_transaction_7(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid '% w'\n domain_data2 = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data2[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % wdomain\")\n domain_data2[\"Description\"] = description\n domain_data2[\"HitID\"] = \"unique_id\"\n statement3 = test_db_utils.domain_stmt(domain_data2)\n # Valid\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 2)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)",
"def test_execute_transaction_6(self):\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n # Invalid '%'\n domain_data2 = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data2[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % domain\")\n domain_data2[\"Description\"] = description\n domain_data2[\"HitID\"] = \"unique_id\"\n statement3 = test_db_utils.domain_stmt(domain_data2)\n # Valid\n statement4 = get_gene_update_statement(1, TRIXIE_GENEID)\n\n statements = [statement1, statement2, statement3, statement4]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status = gene_table_results[0][\"DomainStatus\"]\n with self.subTest():\n self.assertEqual(len(domain_table_results), 2)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertEqual(domain_status, 1)",
"def test_execute_transaction_3(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data)\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n statements = [statement1, statement2]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)",
"async def test_transaction_commit(database_url):\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n async with database.transaction():\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1",
"def test_execute_transaction(data_manager):\n class NotFred(Exception):\n pass\n\n try:\n with data_manager.dal():\n session = current_context.sqlalchemy\n metadata.create_all(session.bind)\n \n session.execute(text('INSERT INTO test (name) VALUES (:name)'), {'name': 'Fred'})\n record = session.execute(text('SELECT * FROM test')).first()\n assert record.name == 'Fred'\n\n # Now error out to remove \"Fred\"\n raise NotFred('Do not like Fred')\n except NotFred:\n pass\n\n with data_manager.dal():\n session = current_context.sqlalchemy\n record = session.execute(text('SELECT * FROM test')).first()\n # Fred should have been rolled back\n assert not record",
"def test_execute_transaction_8(self, es_mock):\n stmt_result1 = 0\n type_error1 = False\n # TODO make sure this is set correctly\n value_error1 = False\n\n msg1 = \"empty\"\n mock_result1 = (stmt_result1, type_error1, value_error1, msg1)\n\n stmt_result2 = 0\n type_error2 = False\n # TODO make sure this is set correctly\n value_error2 = False\n\n msg2 = 2 # the function expects this to be a string, so this should\n # break the code and trigger the except block.\n mock_result2 = (stmt_result2, type_error2, value_error2, msg2)\n es_mock.side_effect = [mock_result1, mock_result2]\n # Valid\n domain_data1 = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data1)\n # Valid\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = test_db_utils.gene_domain_stmt(gene_domain_data)\n\n statements = [statement1, statement2]\n result = find_domains.execute_transaction(self.connection, statements)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertEqual(es_mock.call_count, 2)",
"def transaction(self, uuid):\r\n return tx.Transaction(self, uuid)",
"def is_transaction(self):\n return self._request.has_var(\"_transid\")",
"async def test_transaction_commit_low_level(database_url):\n\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n transaction = await database.transaction()\n try:\n query = notes.insert().values(text=\"example1\", completed=True)\n await database.execute(query)\n except: # pragma: no cover\n await transaction.rollback()\n else:\n await transaction.commit()\n\n query = notes.select()\n results = await database.fetch_all(query=query)\n assert len(results) == 1",
"def transaction_exists(self, transaction: \"Transaction\") -> bool:\n try:\n super().inspect_transaction(transaction=transaction)\n return True\n except grpc.RpcError as err:\n err: grpc.Call\n if err.code() == grpc.StatusCode.NOT_FOUND:\n return False\n raise err",
"def transaction_failed(self):",
"async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)",
"def get_transaction(tx):\n global INVOKE_COUNTER\n INVOKE_COUNTER = INVOKE_COUNTER + 1\n if INVOKE_COUNTER % 3 == 0:\n return \"\"\n else:\n raise_connection_error()",
"def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)",
"def test_commited_transaction_fire_task(self):\n\n @transaction.commit_on_success\n def do_something():\n my_task.delay()\n\n do_something()\n self.assertTrue(my_global[0] is marker)",
"def test_new_transaction_return_type(self):\n transaction = self.blockchain.new_transaction(self.sender, self.recipient, self.amount)\n self.assertIsInstance(transaction, int)",
"def test_saving_and_retriving_transaction(self):\n\n transaction = models.Transaction.objects.create(\n type=\"E\",\n value=\"100.00\",\n user=sample_user()\n )\n\n self.assertEqual(str(transaction), transaction.value)",
"def transaction(self, context: InjectionContext = None) -> \"ProfileSession\":"
] | [
"0.61464214",
"0.60946786",
"0.6051062",
"0.5957183",
"0.5921026",
"0.5911639",
"0.589465",
"0.586385",
"0.5855458",
"0.5846576",
"0.5818048",
"0.5764767",
"0.57319367",
"0.5727006",
"0.57104456",
"0.5699692",
"0.5688432",
"0.5681126",
"0.567688",
"0.56671363",
"0.5657053",
"0.56534153",
"0.565096",
"0.5641734",
"0.5638223",
"0.5611073",
"0.56058186",
"0.5600972",
"0.5584645",
"0.55846274"
] | 0.75881815 | 0 |
Test the output_label_for_transaction method | def test_get_output_label_for_transaction(self):
# Submit a reserve, then use the PDS identifier to get the transaction record
reserve_kwargs = {
"input": join(self.input_dir, "pds4_bundle_with_contributors.xml"),
"node": "img",
"submitter": "my_user@my_node.gov",
"force": True,
}
doi_label = self._reserve_action.run(**reserve_kwargs)
dois, _ = self._web_parser.parse_dois_from_label(doi_label)
doi = dois[0]
transaction_record = self._list_action.transaction_for_identifier(doi.pds_identifier)
# Now use the transaction record to get the label associated to the transaction
output_label_path = self._list_action.output_label_for_transaction(transaction_record)
# Ensure the path returned corresponds to an actual file
self.assertTrue(os.path.exists(output_label_path))
# Read the output label, its contents should match what was returned from
# the reserve request
with open(output_label_path, "r") as infile:
output_label = infile.read()
self.assertEqual(doi_label, output_label)
# Make sure we get an exception when the transaction record references
# a path that does not exist
transaction_record["transaction_key"] = "/fake/path/output.json"
with self.assertRaises(NoTransactionHistoryForIdentifierException):
self._list_action.output_label_for_transaction(transaction_record) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_output(self, workunit, label, s):\r\n pass",
"def handle_output(self, workunit, label, s):\r\n pass",
"def test_labels(self):\n self.compliance_tester.test_labels(self.oi)",
"def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, None)\n _G_label_3\n \"\"\"))",
"def tests_ti_file_get_label(self):\n super().indicator_get_label()",
"def tests_ti_document_get_label(self):\n super().group_get_label()",
"def label(self):\r\n raise NotImplementedError",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")",
"def test_label(self):\n try:\n t = self.OntTerm(label='diffuse')\n raise AssertionError(f'should fail {t!r}')\n except TypeError:\n pass",
"def test_label_callback():\n release_numbers = dict(a='123')\n data = dict(revision='a', attributes=dict(b='c'))\n data2 = dict(revision='b', attributes=dict(d='e'))\n\n assert _label_callback(data, release_numbers) == u'a\\n- Release: 123\\n- b: c'\n assert _label_callback(data2) == u'b\\n- Release: Unknown\\n- d: e'",
"def test_labels(self):\n return self._test_labels",
"def test_get_task_output(self):\n pass",
"def test_label():\n label_path = pjoin(data_path, \"label\", \"lh.BA1.label\")\n label = read_label(label_path)\n # XXX : test more\n assert_true(np.all(label > 0))",
"def handle_output(self, workunit, label, s):\r\n if not self.is_under_main_root(workunit):\r\n return\r\n\r\n if self._show_output_indented(workunit):\r\n self.emit(self._prefix(workunit, s))\r\n elif self._show_output_unindented(workunit):\r\n self.emit(s)\r\n self.flush()",
"def test_normal_goes_normal(self):\n eq_(self.msg, output(self.msg,\"OUTPUT\"))",
"def test_get_node_outputs(self):\n pass",
"def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep",
"def test_qubit_operator_custom_labels(self, obs, expected):\n dev = QeQiskitDevice(\n wires=[\"a\", \"b\", \"c\"], shots=1000, backend=\"qasm_simulator\", analytic=False\n )\n op_str = dev.qubit_operator_string(obs)\n assert op_str == expected",
"def test_issue_get_label(self):\n pass",
"def test_render_label(self):\n label = self.block.meta.label\n self.assertEqual(label, 'Google Calendar', 'The labels are not the same')",
"def test_get_label_line(self):\r\n\r\n sample_id = \"Sample1\"\r\n fasta_label = \"ABCD1234 region=1 length=254\"\r\n bc = \"AAAA\"\r\n corrected_bc = \"AAAT\"\r\n num_errors = 1\r\n actual_label = get_label_line(sample_id, fasta_label, bc, corrected_bc,\r\n num_errors)\r\n\r\n expected_label = \"Sample1 ABCD1234 orig_bc=AAAA new_bc=AAAT bc_diffs=1\"\r\n self.assertEqual(actual_label, expected_label)",
"def _in_out_label_(self):\n return \"%s|%s\" % (FSMWordSymbol(self.word_in),\n FSMWordSymbol(self.word_out))",
"def write_label(output_file, label, curr_function):\n output_file.write(\"(\" + curr_function[0] + \"$\" + label + \")\" + \"\\n\")",
"def test_issue_create_label(self):\n pass",
"def test_recordlabels_get(self):\n pass",
"def test_workload_get_command_human_readable(\n workload_get_success, workload_get_success_hr\n):\n hr_output = prepare_workload_get_output(workload_get_success)\n assert hr_output == workload_get_success_hr",
"def tests_ti_file_add_label(self):\n super().indicator_add_label()",
"def test_tlabel(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n label = \"T\"\n ax.set_tlabel(label)\n assert ax.get_tlabel() == label"
] | [
"0.6840131",
"0.6840131",
"0.6280704",
"0.60962415",
"0.6002755",
"0.5660651",
"0.5654368",
"0.5635582",
"0.5635582",
"0.5635582",
"0.5634114",
"0.562665",
"0.55732125",
"0.5569116",
"0.55691105",
"0.5563312",
"0.5546234",
"0.5544815",
"0.55269396",
"0.5515246",
"0.5514803",
"0.5507106",
"0.5499732",
"0.54904234",
"0.54842526",
"0.5475775",
"0.5461377",
"0.5443416",
"0.5437541",
"0.5432537"
] | 0.7726957 | 0 |
Returns count of open changes per reviewer per project Fetches all open changes from gerrit, and returns a dictionary containing all projects with open changes, and for each project, all reviewers and the count of changes they are reviewing. e.g. { | def get_open_change_reviewers_per_project():
config = GerritFetchConfig()
open_changes = fetch.fetch_open_changes(
config.hostname(), config.username(), config.port())
open_change_reviewers_per_project = {}
for gerrit_change in open_changes:
project = gerrit_change.project
reviewers = gerrit_change.reviewers
if not reviewers:
continue
# Skip Jenkins
reviewers[:] = [
reviewer
for reviewer in reviewers
if reviewer.name and "Jenkins" not in reviewer.name]
if project in open_change_reviewers_per_project:
reviewer_open_count = open_change_reviewers_per_project[project]
for reviewer in reviewers:
if reviewer.name in reviewer_open_count:
reviewer_open_count[reviewer.name] += 1
else:
reviewer_open_count[reviewer.name] = 1
else:
reviewer_open_count = {}
for reviewer in reviewers:
reviewer_open_count[reviewer.name] = 1
open_change_reviewers_per_project[project] = reviewer_open_count
return open_change_reviewers_per_project | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_current_reviewers_and_counts(project_name):\n reviewer_change_count_per_project = current_load_fetcher.\\\n get_open_change_reviewers_per_project()\n\n if project_name not in reviewer_change_count_per_project and \\\n project_name != PROJECT_ALL:\n logging.warning(\"Project %s does not have any current reviewers\",\n project_name)\n return []\n\n if project_name == PROJECT_ALL:\n # go through all projects and combine open change counts for each\n # reviewer\n reviewers_changes_counts = \\\n _get_current_change_counts_across_projects(\n reviewer_change_count_per_project\n )\n else:\n reviewers_changes_counts = \\\n reviewer_change_count_per_project[project_name]\n\n return _create_reviewer_current_change_count_info(reviewers_changes_counts)",
"def _get_reviewer_change_count(reviewer, project_name, from_datetime):\n if project_name == PROJECT_ALL:\n # changes across all projects after from_datetime\n changes = reviewer.changes.filter(\n timestamp__gte=from_datetime).distinct()\n else:\n # changes in given project after from_datetime\n changes = reviewer.changes.filter(\n project_name=project_name,\n timestamp__gte=from_datetime).distinct()\n\n return changes.count()",
"def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours",
"def stat_reviews(self, year):\n stats = dict()\n reviews = list(self.get_reviews(year))\n stats['reviews'] = len(reviews)\n stats['open'] = len(\n [review['issue'] for review in reviews if not review['closed']]\n )\n (messages, comments, patchsets) = (dict(), dict(), dict())\n for review in reviews:\n id = review['issue']\n messages[id] = len(review['messages'])\n patchsets[id] = len(review['patchsets'])\n comments[id] = 0\n for patchset in review['patchsets'].values():\n comments[id] += patchset['num_comments']\n stats['messages'] = helpers.sort(messages, desc=True)\n stats['comments'] = helpers.sort(comments, desc=True)\n stats['patchsets'] = helpers.sort(patchsets, desc=True)\n return stats",
"def query_gerrit(offset=0):\n url = 'https://review.opendev.org/changes/'\n LOG.debug('fetching %s', url)\n raw = requests.get(\n url,\n params={\n 'n': '100',\n 'start': offset,\n 'q': 'project:openstack/governance is:open',\n 'o': [\n 'ALL_REVISIONS',\n 'REVIEWER_UPDATES',\n 'DETAILED_ACCOUNTS',\n 'CURRENT_COMMIT',\n 'LABELS',\n 'DETAILED_LABELS',\n 'MESSAGES',\n ],\n },\n headers={'Accept': 'application/json'},\n )\n return decode_json(raw)",
"def get_contribution_info(self, project):\n drafts = 0\n pending_personal = 0\n personal = 0\n pending_all = None\n\n user = self.context.get('user')\n if not user.is_anonymous():\n personal = project.observations.filter(creator=user).count()\n pending_personal = project.observations.filter(\n creator=user, status='pending').count()\n drafts = project.observations.filter(\n creator=user, status='draft').count()\n\n if project.can_moderate(user):\n pending_all = project.observations.filter(\n status='pending').count()\n\n return {\n 'total': self.get_num_contributions(project),\n 'personal': personal,\n 'pending_all': pending_all,\n 'pending_personal': pending_personal,\n 'drafts': drafts\n }",
"def _get_reviewer_comment_count(reviewer, project_name, from_datetime):\n if project_name == PROJECT_ALL:\n # comments in changes across all projects after from_datetime\n comments = reviewer.comments.filter(\n timestamp__gte=from_datetime).distinct()\n else:\n # comments in changes in given project after from_datetime\n comments = reviewer.comments.filter(\n change__project_name=project_name,\n timestamp__gte=from_datetime).distinct()\n\n return comments.count()",
"def _get_reviewers_and_counts(project_name, from_datetime):\n reviewers_info = []\n for reviewer in _get_reviewers(project_name, from_datetime):\n reviewer_name = reviewer.full_name\n review_count = _get_reviewer_change_count(reviewer, project_name,\n from_datetime)\n comment_count = _get_reviewer_comment_count(reviewer, project_name,\n from_datetime)\n reviewers_info.append(\n _create_reviewer_info(reviewer_name, review_count,\n comment_count))\n\n return reviewers_info",
"def get_pull_requests_count(self):\n repo_details = self.repo_url.strip().split('/')[-2:]\n pull_requests = 0\n i = 1\n while True:\n args = {'state': 'open', 'page': i, 'per_page': 100}\n api_url = \"https://api.github.com/repos/{}/{}/pulls?{}\".format(repo_details[0], repo_details[1],\n urllib.parse.urlencode(args))\n response = requests.request(\"GET\", api_url)\n response = json.loads(response.content)\n if not response:\n return pull_requests\n else:\n pull_requests += len(response)\n i += 1",
"def get_commits(): # pragma: no cover\n global commit_data\n all_commits = 0\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n while all_commits == 0:\n url = 'https://api.github.com/repos/connormlewis/idb/stats/contributors'\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n total = entry['total']\n user_name = entry['author']['login']\n if user_name in team:\n team[user_name] = total\n all_commits += total\n return team, all_commits",
"def get_open_reviews(args):\n args['status'] = 'pending'\n if 'max_results' not in args:\n args['max_results'] = 100\n\n client = RBClient(REVIEWBOARD_URL)\n\n # If we have a username and password, login\n if REVIEWBOARD_USERNAME and REVIEWBOARD_PASSWORD:\n client.login(REVIEWBOARD_USERNAME, REVIEWBOARD_PASSWORD)\n\n root = client.get_root()\n\n if not root:\n logger.error(u'Could not get RBClient root')\n return None\n\n try:\n req = root.get_review_requests(**args)\n except APIError:\n logger.exception(u'Error querying API')\n return None\n\n ret = {'total': req.total_results, 'reviews': []}\n review_fmt = u\"[{user}] {summary} ({url}/r/{id})\"\n\n for review in req:\n ret['reviews'].append(review_fmt.format(user=review.get_submitter().username,\n summary=review.summary,\n url=REVIEWBOARD_URL,\n id=review.id))\n\n return ret",
"def projects_count(args):\n session = GithubSession()\n\n print(f\"counting {args.name}\")\n\n board = session.get_project(args.name)\n\n tally = []\n\n columns = session.get_columns(board)\n for column in columns:\n print(column[\"name\"], file=sys.stderr)\n\n cards = list(session.get_cards(column))\n\n total = Decimal(0)\n unpointed = 0\n num_cards = 0\n num_walk_ins = 0\n issues = []\n walk_ins = []\n walk_in_points = 0\n\n for card_data in cards:\n issue_number = utils.get_issue_number_from_card_data(card_data)\n if not issue_number: # must be a note\n continue\n\n issue_data = session.get_issue(issue_number)\n labels = issue_data[\"labels\"]\n\n num_cards += 1\n\n points = get_points(labels)\n if points:\n total += points\n else:\n unpointed += 1\n\n issue_data = {\n \"issue_number\": issue_number,\n \"points\": str(points),\n \"unpointed\": points is None,\n \"walk_in\": False,\n }\n\n if is_walk_in(labels):\n num_walk_ins += 1\n if points:\n walk_in_points += points\n\n issue_data[\"walk_in\"] = True\n\n walk_ins.append(issue_data)\n\n issues.append(issue_data)\n\n tally.append(\n {\n \"column\": column[\"name\"],\n # 'issues': issues,\n \"num_cards\": num_cards,\n \"num_walk_ins\": num_walk_ins,\n \"walk_in_points\": str(walk_in_points),\n # 'walk_ins': walk_ins,\n \"total_points\": str(total),\n \"unpointed\": unpointed,\n }\n )\n\n print(json.dumps(tally, indent=4))",
"def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues",
"def stat_review(self, id):\n stats = dict()\n review = self.get_review(id)\n stats['status'] = 'Closed' if review['closed'] else 'Open'\n stats['created'] = review['created']\n stats['reviewers'] = len(review['reviewers'])\n stats['messages'] = len(review['messages'])\n stats['patchsets'] = len(review['patchsets'])\n return stats",
"def calc_conv_comments(self):\n for conv_comment in self.pull_request.get_issue_comments():\n self._users.add(conv_comment.user.login)\n lowercase_body = conv_comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_conv_comments += 1\n for reaction in conv_comment.get_reactions():\n self._users.add(reaction.user.login)\n self.conv_comment_reactions += 1\n if conv_comment.body is not None:\n self.len_issue_comments += len(conv_comment.body)",
"def get_issues(self, project, weeks=12):\n issues = {\n 'summary': [],\n 'assignee': [],\n 'reporter': [],\n 'description': [],\n 'created': [],\n 'updated': [],\n 'labels': [],\n 'status': []\n }\n\n jql = \"project={0} AND updated >= -{1}w\".format(project.key, weeks)\n project_issues = self.jira.search_issues(jql, maxResults=False, fields=['summary', 'description', 'comment', 'labels'])\n\n for issue in project_issues:\n issues['summary'].append(issue.fields.summary or '')\n issues['description'].append(issue.fields.description or '')\n assignee = issue.fields.assignee\n issues['assignee'].append(assignee.displayName if assignee else '')\n reporter = issue.fields.reporter\n issues['reporter'].append(reporter.displayName if reporter else '')\n issues['created'].append(issue.fields.created)\n issues['updated'].append(issue.fields.updated)\n issues['labels'].append(','.join(issue.fields.labels))\n issues['status'].append(issue.fields.status.name)\n\n return issues",
"def get_total_changes_per_user(contributors, change_type):\n total_changes_per_user = {}\n for contribution in contributors:\n login = contribution['login']\n total_changes_per_user[login] = sum(\n week[change_type] for week in contribution['weeks']\n )\n return total_changes_per_user",
"def commit_count(commit_info_dict):\n commit_counts = {}\n for release, commit_dict in commit_info_dict.items():\n commit_counts_per_release = {}\n for user_id, commit_list in commit_dict.items():\n commit_counts_per_release[user_id] = len(commit_list)\n commit_counts[release] = commit_counts_per_release\n return commit_counts",
"def num_projects(self):\n return self._num_projects",
"def getTopContributorCount(jiradb, projects, requiredProjectCommitCoverage):\n requiredContributorCount = 0\n for project in projects:\n requiredContributorCount += len(getTopContributors(jiradb.session, project, requiredProjectCommitCoverage))\n return requiredContributorCount",
"def get_num_contributions(self, project):\n return project.observations.exclude(\n status='draft').exclude(status='pending').count()",
"def _AccumulateIssueProjectsAndConfigs(\n cnxn, project_dict, config_dict, services, issues):\n new_ids = {issue.project_id for issue in issues}\n new_ids.difference_update(project_dict.iterkeys())\n new_projects_dict = services.project.GetProjects(cnxn, new_ids)\n project_dict.update(new_projects_dict)\n new_configs_dict = services.config.GetProjectConfigs(cnxn, new_ids)\n config_dict.update(new_configs_dict)",
"def get_review_status(pr_id):\n reviews = get_status_json(pr_id, 'reviews')\n requests = get_status_json(pr_id, 'reviewRequests')\n\n requested_authors = [r[\"login\"] for r in requests]\n\n review_status = {}\n for r in reviews:\n author = r['author']['login']\n date = datetime.fromisoformat(r['submittedAt'].strip('Z'))\n state = r['state']\n if author not in review_status:\n review_status[author] = ReviewComment(state, date, author)\n elif state != 'COMMENTED' and review_status[author].date < date:\n review_status[author] = ReviewComment(state, date, author)\n for a in review_status:\n if a in requested_authors:\n review_status[a] = ReviewComment('REVIEW_REQUESTED', review_status[a].date, a)\n for a in requested_authors:\n if a not in review_status:\n review_status[a] = ReviewComment('UNRESPONSIVE', None, a)\n return review_status, requested_authors",
"def project_updates(self):\n return self._tower.project_updates.filter({'project': self.id})",
"def all_commits(change_id):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project)\n commits.append((project, path, ref))\n return commits",
"def get_open_issues_and_prs(repo):\n open_issues = []\n open_pull_requests = []\n params = {\"state\": \"open\"}\n result = github.get(\"/repos/adafruit/\" + repo[\"name\"] + \"/issues\", params=params)\n if not result.ok:\n return [], []\n\n issues = result.json()\n for issue in issues:\n created = datetime.datetime.strptime(issue[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n days_open = datetime.datetime.today() - created\n if days_open.days < 0: # opened earlier today\n days_open += datetime.timedelta(days=(days_open.days * -1))\n\n issue_title = \"{0} (Open {1} days)\".format(issue[\"title\"], days_open.days)\n if \"pull_request\" not in issue: # ignore pull requests\n issue_labels = [\"None\"]\n if len(issue[\"labels\"]) != 0:\n issue_labels = [label[\"name\"] for label in issue[\"labels\"]]\n\n issue_dict = {\n \"title\": issue_title,\n \"url\": issue[\"html_url\"],\n \"labels\": issue_labels,\n }\n\n open_issues.append(issue_dict)\n else:\n open_pull_requests.append({issue[\"html_url\"]: issue_title})\n\n return open_issues, open_pull_requests",
"def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1",
"def test_project_state_counters(self):\n #build counters:\n management.call_command('rebuild_counters')\n\n self.client.force_authenticate(self.global_user_1)\n data = self.client.get(self.api_project_list_url, {'user': 'current'}).data['results']\n\n for project_data in data:\n self.assertIn('state', project_data)\n project_state_data = project_data['state']\n if project_state_data:\n project_state_obj = ProjectState.objects.get(project=project_state_data['id'], user=project_state_data['userId'])\n self.assertEqual(project_state_data['numberOfProjectLessons'], project_state_obj.project.lessons.count())\n self.assertEqual(\n project_state_data['numberOfEnrolledLessons'],\n project_state_obj.lesson_states.count()\n )\n self.assertEqual(\n project_state_data['numberOfCompletedLessons'],\n project_state_obj.lesson_states.filter(is_completed=True).count()\n )",
"def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))",
"def get_project_issues(repo_slug, max_issues_per_project=None, max_date=None):\n # type: (str, int, str) -> pd.DataFrame\n logging.info(\"Processing %s\", repo_slug)\n all_issues = pd.DataFrame(\n json_imap({\n 'reporter': 'user__login',\n 'role': 'author_association',\n 'number': 'number',\n 'title': 'title',\n 'created_at': 'created_at',\n 'body': 'body',\n 'state': 'state',\n },\n api.repo_issues(repo_slug)),\n ).sort_values('created_at')\n if max_date:\n all_issues = all_issues[all_issues['created_at'] < max_date]\n last_reported = all_issues.groupby(\n 'reporter').last().iloc[:max_issues_per_project]\n first_reported = all_issues.groupby('reporter').first()['created_at']\n # int(timedelta) is ns, times 86400 seconds in a day\n last_reported['tenure'] = (\n pd.to_datetime(last_reported['created_at'])\n - pd.to_datetime(last_reported.index.map(first_reported))\n ).astype(int) // 86400000000000\n last_reported['project'] = repo_slug\n return last_reported.reset_index().sort_values('number')"
] | [
"0.6898937",
"0.6401227",
"0.62829226",
"0.61762327",
"0.61221975",
"0.6012132",
"0.58491695",
"0.56204873",
"0.5591274",
"0.5577237",
"0.54595",
"0.54480326",
"0.54340416",
"0.53901017",
"0.5334617",
"0.52910286",
"0.52823967",
"0.5252443",
"0.5237763",
"0.5233384",
"0.52304065",
"0.51996064",
"0.5195072",
"0.51544756",
"0.513719",
"0.51249",
"0.5092335",
"0.50843734",
"0.5066681",
"0.5058846"
] | 0.87611914 | 0 |
Return an UTCaware datetime in case of USE_TZ=True. | def tz_aware(value: datetime) -> datetime:
if settings.USE_TZ:
value = value.replace(tzinfo=timezone.utc)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_freeze_with_timezone_aware_datetime_in_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None",
"def test_freeze_with_timezone_aware_datetime_in_non_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None\n assert utc_now == datetime.datetime(1970, 1, 1, 4)",
"def ensure_utc_time(ts: datetime) -> datetime:\n if ts.tzinfo is None:\n return datetime(*ts.timetuple()[:6], tzinfo=UTC_TZ)\n elif str(ts.tzinfo) != str(UTC_TZ):\n return ts.astimezone(UTC_TZ)\n return ts",
"def tz_aware(dt: datetime, default: tzinfo = tzutc()) -> datetime:\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=default)\n return dt",
"def utcnow() -> datetime.datetime:\n return datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)",
"def get_zone_aware_datetime(date):\n return datetime.combine(date, datetime.max.time(), pytz.UTC) if date else None",
"def datetime_utc_now() -> datetime:\n return datetime.now(timezone.utc)",
"def make_tz_aware(local_dt):\n aware_dt = timezone('US/Eastern').localize(local_dt)\n return aware_dt",
"def make_aware(value: datetime, timezone=None, is_dst=None) -> datetime:\n\n if timezone is None:\n timezone = get_current_timezone()\n\n if hasattr(timezone, \"localize\"):\n # This method is available for pytz time zones.\n return timezone.localize(value, is_dst=is_dst)\n else:\n # Check that we won't overwrite the timezone of an aware datetime.\n if is_aware(value):\n raise ValueError(\"make_aware expects a naive datetime, got %s\" % value)\n # This may be wrong around DST changes!\n return value.replace(tzinfo=timezone)",
"def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d",
"def now_utc() -> datetime:\n return datetime.now(timezone.utc)",
"def get_timezone_aware_datetime(datetime):\n if not datetime:\n return None\n local = timezone.get_current_timezone()\n return (local.localize(parse(datetime), is_dst=None)).astimezone(timezone.utc)",
"def utc_now():\n return datetime.now(tz=timezone.utc)",
"def enforce_timezone(self, value):\n try:\n tz = timezone._active.value\n if (self.default_timezone is not None) and not timezone.is_aware(value):\n return timezone.make_aware(value, tz)\n return value\n except AttributeError:\n return super().enforce_timezone(value)",
"def now_dt(tz='UTC'):\n if tz != 'UTC':\n raise NotImplementedError()\n return datetime.datetime.utcnow().replace(tzinfo = pytz.utc)",
"def _get_tz():\n return 'UTC'",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)",
"def date_make_timezone_aware(datetime_object, timezone_string=None):\n if timezone_string:\n # make the date timezone aware using the given timezone_string\n timezone_object = pytz_timezone_object(timezone_string)\n timezone_aware_datetime_object = timezone_object.localize(datetime_object)\n else:\n # make the date timezone aware using the timezone of the current system\n timezone_aware_datetime_object = datetime_object.astimezone()\n\n return timezone_aware_datetime_object",
"def utcnow():\n return datetime.utcnow().replace(tzinfo=UTC)",
"def get_utc_now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(\"UTC\"))",
"async def datetime(self, aware=False) -> dt.datetime:\n if aware is True:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz)\n else:\n return await self.AD.sched.get_now_naive()",
"def add_timezone(dt: datetime, tz_info: tzinfo = UTC) -> datetime:\n if dt.tzinfo is not None:\n raise ValueError(f\"{dt} is already tz-aware\")\n return dt.replace(tzinfo=tz_info)",
"def datetime_utcnow() -> datetime:\n return datetime.now(tz=pytz.timezone('UTC'))",
"def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)",
"def timestamp_aware(dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=tz.tzlocal()) # new object\n return dt",
"def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)",
"def tz_as_utc(dt: datetime) -> datetime:\n if dt.tzinfo is None:\n return dt.replace(tzinfo=tzutc())\n return dt.astimezone(tzutc())",
"def utcnow():\n if utcnow.override_time:\n try:\n return utcnow.override_time.pop(0)\n except AttributeError:\n return utcnow.override_time\n return datetime.datetime.utcnow()",
"def test_utc_in_timez(monkeypatch):\n utcoffset8_local_time_in_naive_utc = (\n datetime.datetime(\n year=2020,\n month=1,\n day=1,\n hour=1,\n minute=23,\n second=45,\n tzinfo=datetime.timezone(datetime.timedelta(hours=8)),\n )\n .astimezone(datetime.timezone.utc)\n .replace(tzinfo=None)\n )\n\n class mock_datetime:\n @classmethod\n def utcnow(cls):\n return utcoffset8_local_time_in_naive_utc\n\n monkeypatch.setattr('datetime.datetime', mock_datetime)\n rfc3339_utc_time = str(cherrypy._cplogging.LazyRfc3339UtcTime())\n expected_time = '2019-12-31T17:23:45Z'\n assert rfc3339_utc_time == expected_time",
"def get_now_utc(no_microseconds=True):\n if no_microseconds:\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n microsecond=0\n )\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())"
] | [
"0.7496584",
"0.72608817",
"0.71937054",
"0.71411216",
"0.71222836",
"0.7066944",
"0.7022482",
"0.6987124",
"0.6978549",
"0.696762",
"0.6947956",
"0.6856145",
"0.6847567",
"0.68384445",
"0.6833825",
"0.6821648",
"0.6819736",
"0.6792303",
"0.67815375",
"0.6744564",
"0.67071795",
"0.6666393",
"0.66472435",
"0.6643287",
"0.6627103",
"0.6625337",
"0.6613211",
"0.66059047",
"0.6583427",
"0.6559055"
] | 0.77953434 | 0 |
Adds a step into calculated metrics | def add_step(self):
assert self.y_real is not None and self.y_predicted is not None
# Calculates some metrics
rmse = Metrics.rmse_loss(self.y_real, self.y_predicted)
mse = Metrics.mse_loss(self.y_real, self.y_predicted)
cm = Metrics.confusion_matrix(self.y_real, self.y_predicted)
accuracy = Metrics.accuracy(cm)
# Store them
self.summary['rmse'].append(rmse)
self.summary['accuracy'].append(accuracy)
self.summary['mse'].append(mse)
self.summary['cm'].append(cm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()",
"def add_step(self, step):\n if not step:\n return\n temp = {Result.__STEP: step.get_name(),\n Result.__STATUS: step.get_status(),\n Result.__MESSAGE: step.get_message()}\n self.__run.append(temp)",
"def update(self, step, metrics):\n self.steps_arr.append(step)\n for key, val in metrics.items():\n if isinstance(val, tf.Tensor):\n try:\n self.data[key].append(val.numpy())\n except KeyError:\n self.data[key] = [val.numpy()]",
"def add_step_hook(h):\n add_hook(step, h)",
"def record(self, step):",
"def addStep(self, step):\n self.stepper.addStep(step)\n return self",
"def step(self, estim: EstimBase) -> None:\n _, results = estim.get_last_results()\n results = [self.to_metrics(res) for res in results]\n self.metrics.extend(results)\n if len(self.metrics) >= len(self.population):\n self.population = self._mating(self.population)\n self.metrics = []",
"def log_metric(self, name, val, step):\n raise NotImplementedError",
"def step(self):\n value = self.current_event[\"step\"][\"value\"]\n self.current_value.append(value)",
"def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)",
"def log_metrics(self, metrics, step=None, epoch=None, prefix=None):\n self.experiment.log_metrics(metrics, step=step, epoch=epoch, prefix=prefix)",
"def log_metrics(metrics, step=None):\n mlflow.log_metrics(metrics, step=step)",
"def addStep(self, x):\n if self.method == 'ftol':\n \n self.loss.append(x)\n \n self.nSteps += 1",
"def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)",
"def step(self, step=None):\n pass",
"def add_step(self, step, run_by_default=True):\n self.steps[step.name] = step\n if run_by_default:\n self.steps_to_run.append(step.name)",
"def summarize(self, step=None, step_metrics=()):\n summaries = []\n result = self.result()\n if step is not None:\n tf.summary.scalar(name=self.name, data=result, step=step)\n for step_metric in step_metrics:\n # Skip plotting the metrics against itself.\n if self.name == step_metric.name:\n continue\n step_tag = '{} vs {}'.format(self.name, step_metric.name)\n tf.summary.scalar(name=step_tag,\n data=result,\n step=int(step_metric.result()))",
"def step(self, **kwargs):\n pass",
"def on_step_end(self, step, logs):\n episode = logs['episode']\n self.observations[episode].append(logs['observation'])\n self.rewards[episode].append(logs['reward'])\n self.actions[episode].append(logs['action'])\n self.metrics[episode].append(logs['metrics'])\n self.step += 1",
"def on_step_end(self, step, logs):\n episode = logs['episode']\n self.observations[episode].append(logs['observation'])\n self.rewards[episode].append(logs['reward'])\n self.actions[episode].append(logs['action'])\n self.metrics[episode].append(logs['metrics'])\n self.step += 1",
"def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)",
"def addStep( self, stepNum ):\n assert isinstance( stepNum, (int, tuple) )\n\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n self._steps.append( stepNum )",
"def total_steps(self) -> global___Expression:",
"def report_step_progress(self, step):\n pass",
"def add_view_step(self, view_step):\n self._data_dict[self.KEY_VIEW_STEPS].append(view_step)",
"def log_test_step(self, test_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(test_log, step=self.e)",
"def step(step_name, extra_types=None):\n\n def decorator(func):\n # Register the step, other way return the function unchanged\n step_function = StepFunction(func, step_name, extra_types)\n # Check for similar steps, in both directions\n step_function.search_and_report_similar()\n # Register it\n data.add_step(step_function)\n return func\n\n return decorator",
"def step( self, name ):\n duration = self.summarize_step( start=self.step_start, step_name=name, level=self.level )\n now = time.time()\n self.step_start = now\n return duration",
"def update_metrics(self, state: TrainState, step: int, train_metrics: List[MetricsDict], t0):\n if not self.logflag:\n return\n\n eval_metrics: List[Any] = []\n\n # Build summary dictionary for logging\n # Include training stats\n train_metrics = common_utils.get_metrics(train_metrics)\n summary = {\n f\"train_{k}\": v\n for k, v in jax.tree_util.tree_map(lambda x: x.mean(), train_metrics).items()\n }\n epoch = step // self.steps_per_epoch\n summary[\"epoch\"] = epoch\n summary[\"time\"] = time.time() - t0\n\n # Eval over testing set\n for _ in range(self.steps_per_eval):\n eval_batch = next(self.eval_dt_iter)\n metrics = self.p_eval_step(state, eval_batch)\n eval_metrics.append(metrics)\n # Compute testing metrics\n eval_metrics = common_utils.get_metrics(eval_metrics)\n\n # Add testing stats to summary\n summary_eval = jax.tree_util.tree_map(lambda x: x.mean(), eval_metrics)\n summary.update(summary_eval)\n\n # Update iteration stats object\n assert isinstance(self.itstat_object, IterationStats) # for mypy\n self.itstat_object.insert(self.itstat_insert_func(ArgumentStruct(**summary)))",
"def do_step(self) -> None:"
] | [
"0.6825129",
"0.68105835",
"0.6705063",
"0.66030556",
"0.6589102",
"0.64661735",
"0.64505416",
"0.63579696",
"0.63276815",
"0.6287285",
"0.62792087",
"0.62393504",
"0.621419",
"0.61822873",
"0.61751336",
"0.6074285",
"0.60637534",
"0.6020474",
"0.59689176",
"0.59689176",
"0.59598833",
"0.59549046",
"0.5939177",
"0.590964",
"0.5903946",
"0.5897779",
"0.5877367",
"0.58472186",
"0.5826778",
"0.5821082"
] | 0.7380234 | 0 |
Get all Event by user_id | def get_event_by_user_id(user_id):
return Event.query.filter(Event.user_id == user_id).order_by(Event.created_at.desc()).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_queryset(self):\n return Event.objects.all().filter(user_id=self.request.user)",
"def event_get(tenant_id, user_id=None):",
"async def retrieve_user_events(self, user_id: int) -> Dict[int, BaseEvent]:\n user_events: Dict[int, BaseEvent] = {}\n event: BaseEvent\n for event_id, event in self.upcoming_events.items():\n if event.organizer.id == user_id:\n user_events[event_id] = event\n\n return user_events",
"def myevents(self, request, pk=None):\n user = request.auth.user\n myevents = user.events\n serializer = EventSerializer(\n myevents, many=True, context={'request': request})\n return Response(serializer.data)",
"def get_events_by_user_id(self, lambda_event):\n user = self.mealShareUsers.get_user_cognito_data(lambda_event)\n current_user = user['user_id']\n events = self.mealShareGroups.get_events_by_user_id(current_user)\n return {\n 'statusCode': 200,\n 'events': events,\n 'user_id': current_user\n }",
"def get_events(self, event_id=None, limit=15):\n if event_id and event_id.isdecimal():\n # event id has been provided\n query = \"SELECT users.Username AS Creator, events.* FROM events INNER JOIN users ON (events.Users_idUsers = users.idUsers) WHERE idEvents={} LIMIT {}\".format(event_id, limit)\n else:\n # fetch all events instead\n query = \"SELECT users.Username AS Creator, events.* FROM events INNER JOIN users ON (events.Users_idUsers = users.idUsers) LIMIT {}\".format(limit)\n\n cursor = DB.instance.connection.cursor()\n cursor.execute(query)\n return cursor.fetchall()",
"def get_events(user, title=None, category=None, fromdt=None, priority=None,\n status=None, place=None, id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status,\n place)\n selection = user.events.filter(**filters)\n if fromdt:\n selection = selection.filter(from_datetime__lte=fromdt)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no events with selected filters.')\n return selection",
"def get_user_events_json_list(user_events):\n events = []\n for user_event in user_events:\n events.append(user_event.json())\n return events",
"def get_events(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n\r\n query = \"\"\"\r\n SELECT DISTINCT E.eid, E1.ename, E1.description,\r\n E.category, E1.start_date, E1.end_date, E1.num_cap,\r\n E1.num_attending, L.lname, L.address_1, E.tag, L.lat, L.lon\r\n FROM {}.EventTags AS E, {}.UserTags AS U, {}.Events as E1, {}.Locations as L\r\n WHERE U.username='{}' AND\r\n E.tag = U.tag AND\r\n E1.eid = E.eid AND\r\n E1.lid = L.lid AND\r\n E1.start_date >= {}\r\n ORDER by E1.start_date\r\n \"\"\".format(\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n ENV_DB,\r\n self.user.username,\r\n str(datetime.date.today())\r\n )\r\n\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n\r\n return [i for i in data]",
"def get_user_events_v2_public(\n user_id: str,\n end_date: Optional[str] = None,\n event_name: Optional[str] = None,\n offset: Optional[int] = None,\n page_size: Optional[int] = None,\n start_date: Optional[str] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetUserEventsV2Public.create(\n user_id=user_id,\n end_date=end_date,\n event_name=event_name,\n offset=offset,\n page_size=page_size,\n start_date=start_date,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def get_event_specific_user_v2_handler(\n user_id: str,\n end_date: Optional[str] = None,\n event_name: Optional[str] = None,\n offset: Optional[int] = None,\n page_size: Optional[int] = None,\n start_date: Optional[str] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetEventSpecificUserV2Handler.create(\n user_id=user_id,\n end_date=end_date,\n event_name=event_name,\n offset=offset,\n page_size=page_size,\n start_date=start_date,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def _filter_events_for_client(self, user_id, events, is_peeking=False):\n types = (\n (EventTypes.RoomHistoryVisibility, \"\"),\n (EventTypes.Member, user_id),\n )\n event_id_to_state = yield self.store.get_state_for_events(\n frozenset(e.event_id for e in events),\n types=types\n )\n res = yield self.filter_events_for_clients(\n [(user_id, is_peeking)], events, event_id_to_state\n )\n defer.returnValue(res.get(user_id, []))",
"def populate_event(event_id):\n event = Event.query.get(event_id)\n users = User.query.filter(User.has_photos==True).limit(10)\n for user in users:\n user.events.append(event)\n db.session.commit()",
"async def get_event_specific_user_v2_handler_async(\n user_id: str,\n end_date: Optional[str] = None,\n event_name: Optional[str] = None,\n offset: Optional[int] = None,\n page_size: Optional[int] = None,\n start_date: Optional[str] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetEventSpecificUserV2Handler.create(\n user_id=user_id,\n end_date=end_date,\n event_name=event_name,\n offset=offset,\n page_size=page_size,\n start_date=start_date,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )",
"def get_group_restricted_events(user, all_events=False):\n types_allowed = get_types_allowed(user)\n\n if all_events:\n return Event.objects.filter(event_type__in=types_allowed)\n else:\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)",
"def getInvitations(self, userid):\n\n ret = []\n\n u_id = EventId()\n u_id.setHashed(userid)\n user = User.getById(u_id)\n invitations = Invitation.getAllFromUser(user)\n\n for e in invitations:\n ret.append(e.getAsDict([\"event\", \"status\"]))\n\n return {\"invitations\": ret}",
"async def get_user_events_v2_public_async(\n user_id: str,\n end_date: Optional[str] = None,\n event_name: Optional[str] = None,\n offset: Optional[int] = None,\n page_size: Optional[int] = None,\n start_date: Optional[str] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetUserEventsV2Public.create(\n user_id=user_id,\n end_date=end_date,\n event_name=event_name,\n offset=offset,\n page_size=page_size,\n start_date=start_date,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )",
"def online_user_events(current_user, user_public_id):\n result = online_user_events_helper(current_user, user_public_id, Event)\n return jsonify(result[0]), result[1]",
"def get_events(self):\n return self.s.query(Event).all()",
"def userevent_list(request):\n if request.method == 'GET':\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n full_name,\n user_id,\n id,\n title,\n description,\n date,\n time,\n name\n FROM\n EVENTS_BY_USER\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n # Take the flat data from the database, and build the\n # following data structure for each gamer.\n #\n # {\n # 1: {\n # \"id\": 1,\n # \"full_name\": \"Admina Straytor\",\n # \"games\": [\n # {\n # \"id\": 1,\n # \"title\": \"Foo\",\n # \"maker\": \"Bar Games\",\n # \"skill_level\": 3,\n # \"number_of_players\": 4,\n # \"game_type_id\": 2\n # }\n # ]\n # }\n # }\n\n events_by_user = {}\n\n for row in dataset:\n uid = row['user_id']\n if uid in events_by_user:\n events_by_user[uid]['events'].append({\n \"id\": row['id'],\n \"title\": row['title'],\n \"description\": row['description'],\n \"date\": row['date'],\n \"time\": row['time'],\n \"game_name\": row[\"name\"]\n })\n else:\n events_by_user[uid] = {\n \"gamer_id\": uid,\n \"full_name\": row['full_name'],\n \"events\": [{\n \"id\": row['id'],\n \"title\": row['title'],\n \"description\": row['description'],\n \"date\": row['date'],\n \"time\": row['time'],\n \"game_name\": row[\"name\"]\n }]\n }\n\n events = events_by_user.values() \n\n template = 'users/list_with_events.html'\n context = {\n 'userevent_list': events\n }\n\n return render(request, template, context)",
"def get_event(username, event_id=None, maxResults=None):\n token = \"tokens/\" + username + \".pkl\"\n credentials = pickle.load(open(token, \"rb\"))\n service = build('calendar', 'v3', credentials=credentials)\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n\n if event_id and maxResults:\n raise ValueError(\"event_id and maxResults cannot be set at the same time. Choose one.\")\n\n if event_id:\n return service.events().get(calendarId=CALENDAR_ID, eventId=event_id).execute()\n\n if maxResults:\n events_result = service.events().list(calendarId=CALENDAR_ID, timeMin=now,\n maxResults=maxResults, singleEvents=True,\n orderBy='startTime').execute()\n return events_result.get('items', [])",
"def get_user_who_attend_event(self,eid):\n eid = str(eid)\n if not self.cacheRedis.exists(\"user_attend_event:eid:\" + eid):\n uidList = self.db.query(\"SELECT uid FROM fs_user_event WHERE eid=%s and status=0\",eid)",
"def get_user_owned_events(self):\n data = self.eventbrite_sdk_client.get_user_owned_events(id='me')\n if 'error' in data:\n raise Exception(simplejson.dumps(data))\n assert 'page_count' in data.get('pagination', {}), simplejson.dumps(data)\n if data['pagination']['page_count'] > 1:\n raise Exception(\"There are {0} pages of data\".format(data['page_count']))\n return data",
"def EventosList(request):\n print(request.method)\n template = 'User/eventos/all.html'\n user = request.user.get_username()\n eventos = Evento.objects.all()\n\n print(\"...............................\")\n print(user)\n print(\"...............................\")\n context = {'user':user,'eventos':eventos}\n return render(request, template, context)",
"def get_all_events(cls):\n try:\n events = list(events_coll.find())\n events_list = []\n if events is not None:\n for event in events:\n one_event = cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)",
"def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event = self.session.query(Event).filter_by(id=id).scalar()\n if not event:\n raise exc.NotFound(\"No such Event {} found\".format(id))\n\n json = event.to_dict(base_uri=self.href_prefix, expand=expand)\n\n self.success(json)",
"def test_getEventsFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + events[0]['start'] + events[0]['end'])\n invuid = '00000000000000000000000'\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n rv = self.json_get('/getEventFromId/alex', {'uid': uid})\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)",
"def paginate_events(user_id, page, q, user):\n if q:\n pagination = Event.query.filter(Event.event_name.like(\"%\" + q.lower().strip() + \"%\")).filter_by(user_id=user_id) \\\n .paginate(page=page, per_page=app.config['EVENTS_AND_TICKETS_PER_PAGE'], error_out=False)\n else:\n pagination = user.events.paginate(page=page, per_page=app.config['EVENTS_AND_TICKETS_PER_PAGE'],\n error_out=False)\n previous = None\n if pagination.has_prev:\n if q:\n previous = url_for('events.eventlist', q=q, page=page - 1, _external=True)\n else:\n previous = url_for('events.eventlist', page=page - 1, _external=True)\n nex = None\n if pagination.has_next:\n if q:\n nex = url_for('events.eventlist', q=q, page=page + 1, _external=True)\n else:\n nex = url_for('events.eventlist', page=page + 1, _external=True)\n items = pagination.items\n return items, nex, pagination, previous",
"def find_by_user(cls, user_id: int):\n cls.logger.info(\"Processing user id query for %s ...\", user_id)\n return cls.query.filter(cls.user_id == user_id)",
"def get_events(query):\n pagination = EventModel.query.paginate(\n page=query['page'],\n per_page=query['per_page']\n )\n return {\n 'events': pagination.items,\n 'pagination': pagination_builder(pagination)\n }"
] | [
"0.7322746",
"0.7310857",
"0.72287333",
"0.7047232",
"0.67568713",
"0.6709224",
"0.67006856",
"0.63310444",
"0.62991863",
"0.6286739",
"0.62130445",
"0.62024057",
"0.6089561",
"0.6088284",
"0.6058859",
"0.6029098",
"0.60218024",
"0.6001406",
"0.5960545",
"0.59485763",
"0.59443116",
"0.58951086",
"0.58614147",
"0.58304447",
"0.5826618",
"0.5797041",
"0.57794726",
"0.57792413",
"0.57646936",
"0.5751514"
] | 0.8250652 | 0 |
Create and return a job detail. | def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):
    job_detail = JobDetail(company_name=company_name, job_title=job_title, application_deadline=application_deadline, job_listing_url=job_listing_url, state=state, city=city, application_listed=application_listed, salary=salary)
db.session.add(job_detail)
db.session.commit()
return job_detail | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job",
"def setup_and_get_job_details_for_sf(self):\n\n self.create_compute_environment()\n jq_response = self.create_job_queue()\n jd_response = self.register_job_definition()\n return dict(jobDefinition=jd_response[\"jobDefinitionName\"], jobQueue=jq_response)",
"def _create_job(self,\n name,\n environment_string,\n description='',\n platform='LINUX'):\n job = data_types.Job()\n job.name = name\n if environment_string.strip():\n job.environment_string = environment_string\n job.platform = platform\n job.descripton = description\n job.put()\n\n return job",
"def create_job(project, description):\n randomnames = open(os.path.join(\"Anemone\", \"templates\", \"namegen.html\")).readlines()\n jobname = (\"Quick.\" +\n random.choice(randomnames)[:-1] + # for some reason choice gives extra space\n random.choice(randomnames)[:-1]) # for some reason choice gives extra space\n\n newjob = Job.create(project=project, name=jobname, description=description)\n newjob.name = newjob.name + \".{0:0=3d}\".format(newjob.id)\n newjob.save()\n return newjob",
"def create(self, validated_data):\n return Job.objects.create(**validated_data)",
"def get_job_details():\n job = dict()\n job['dids'] = json.loads(os.getenv('DIDS', None))\n job['metadata'] = dict()\n job['files'] = dict()\n job['algo'] = dict()\n job['secret'] = os.getenv('secret', None)\n algo_did = os.getenv('TRANSFORMATION_DID', None)\n if job['dids'] is not None:\n for did in job['dids']:\n # get the ddo from disk\n filename = '/data/ddos/' + did\n print(f'Reading json from {filename}')\n with open(filename) as json_file:\n ddo = json.load(json_file)\n # search for metadata service\n for service in ddo['service']:\n if service['type'] == 'metadata':\n job['files'][did] = list()\n index = 0\n for file in service['attributes']['main']['files']:\n job['files'][did].append(\n '/data/inputs/' + did + '/' + str(index))\n index = index + 1\n if algo_did is not None:\n job['algo']['did'] = algo_did\n job['algo']['ddo_path'] = '/data/ddos/' + algo_did\n return job",
"def get_job_details():\n job = dict()\n job['dids'] = json.loads(os.getenv('DIDS', None))\n job['metadata'] = dict()\n job['files'] = dict()\n job['algo'] = dict()\n job['secret'] = os.getenv('secret', None)\n algo_did = os.getenv('TRANSFORMATION_DID', None)\n if job['dids'] is not None:\n for did in job['dids']:\n # get the ddo from disk\n filename = '/data/ddos/' + did\n print(f'Reading json from {filename}')\n with open(filename) as json_file:\n ddo = json.load(json_file)\n # search for metadata service\n for service in ddo['service']:\n if service['type'] == 'metadata':\n job['files'][did] = list()\n index = 0\n for file in service['attributes']['main']['files']:\n job['files'][did].append(\n '/data/inputs/' + did + '/' + str(index))\n index = index + 1\n if algo_did is not None:\n job['algo']['did'] = algo_did\n job['algo']['ddo_path'] = '/data/ddos/' + algo_did\n return job",
"def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:\n table = dynamodb.Table(table_name)\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n school_key = f'SCHOOL#{school_id.upper()}'\n job_query_params = {\n 'KeyConditionExpression': Key('pk').eq(school_key) & Key('sk').eq(job_id),\n 'ScanIndexForward': False,\n }\n logger.debug(f'Retrieving job details for job {job_id}.')\n job = table.query(**job_query_params)['Items'][0]\n\n # Update string timestamp to datetime.\n job.update(created_at=parse_datetime(job['created_at']))\n job.update(updated_at=parse_datetime(job['updated_at']))\n\n tasks_query_params = {\n 'KeyConditionExpression': Key('pk').eq(job_id),\n 'ScanIndexForward': False,\n }\n task_query_result = table.query(**tasks_query_params)\n tasks = task_query_result['Items']\n\n # If there are additional items to be retrieved for this job, the LastEvaluatedKey will be present\n # Use this key as the starting point for subsequent queries to build a full list\n while task_query_result.get('LastEvaluatedKey', False):\n tasks_query_params['ExclusiveStartKey'] = task_query_result.get('LastEvaluatedKey')\n task_query_result = table.query(**tasks_query_params)\n tasks.extend(task_query_result['Items'])\n\n context = {\n 'job': job,\n 'tasks': tasks,\n 'canvas_url': settings.CANVAS_URL\n }\n logger.debug(f'Retrieved job details for job {job_id}.', extra=context)\n return render(request, \"bulk_site_creator/job_detail.html\", context=context)",
"def create_job(self, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_job',\n [], self._service_ver, context)",
"def send_job(self):\n graph = self.processgraphEdit.toPlainText()\n # info(self.iface, graph)\n response = self.connection.job_create(json.loads(graph))\n if response.status_code == 201:\n info(self.iface, \"Successfully created new job, Response: {}\".format(response.status_code))\n else:\n warning(self.iface, \"Not able to created new job, Response: {}\".format(str(response.json())))",
"def _create_job_message(self):\n #TODO: Final check of source file, add xml settings, allow for user\n # to set priority, verify all job data is correct format\n\n if not hasattr(self.required_files, '_get_message'):\n self.add_file_collection()\n\n if self.pool and hasattr(self.pool, 'id'):\n pool_options = {'poolId': self.pool.id}\n\n elif self.pool:\n pool_options = {'poolId': str(self.pool)}\n\n else:\n size = max(int(self.instances), 1)\n pool_options = {'autoPoolSpecification': self._auto_pool(size)}\n\n job_message = {\n 'Name': str(self.name),\n 'Type': self._api.jobtype(),\n 'RequiredFiles': self.required_files._get_message(\"submit\"),\n 'Parameters': list(self._filter_params()),\n 'JobFile': str(self.source),\n 'Settings': str(self.settings),\n 'Priority': 'Medium'\n }\n job_message.update(pool_options)\n\n self._log.debug(\"Job message: {0}\".format(job_message))\n return job_message",
"async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job",
"def post(self):\n data, errors = JobSchema().loads(request.data)\n\n if errors:\n return Response().send(\n data=None, status=400, code=\"bad_request\", message=errors\n )\n return self.job.create(request.json)",
"def mock_create(*args, **kwargs):\n logger.info(\"create was called with rows:\")\n logger.info(\"\\n\".join(args))\n logger.info(\"\\n\".join(map(str, kwargs.items())))\n return JobInfoFactory(job_id=-1) # a mocked response",
"def getJob(workload):\n job = Job()\n job[\"task\"] = workload.getTask(\"reco\").getPathName()\n job[\"workflow\"] = workload.name()\n job[\"location\"] = \"T1_US_FNAL\"\n job[\"owner\"] = \"evansde77\"\n job[\"group\"] = \"DMWM\"\n return job",
"def get_job_detail():\n\n return JobDetail.query.all()",
"def job(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n job = dashboard.get_job_information(wf_id, job_id, job_instance_id)\n job_states = dashboard.get_job_states(wf_id, job_id, job_instance_id)\n job_instances = dashboard.get_job_instances(wf_id, job_id)\n\n previous = None\n\n for state in job_states:\n timestamp = state.timestamp\n state.timestamp = datetime.fromtimestamp(state.timestamp).strftime('%a %b %d, %Y %I:%M:%S %p')\n\n if previous is None:\n state.interval = 0.0\n else:\n state.interval = timestamp - previous\n\n previous = timestamp\n\n if not job:\n return 'Bad Request', 400\n\n return render_template('workflow/job/job_details.html', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job_id, job=job,\n job_instances=job_instances, job_states=job_states)",
"def create(self, resource, **data):\n body = ''\n if resource == 'robot/job':\n body = data['body']\n else:\n body = urllib.urlencode(data)\n\n return self.request('/' + resource, 'POST', body=body)",
"def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % str(api_response.status))\n return api_response",
"def create_job_object(job_type: int = 0,\n team_id: int = 0,\n destination_name: str = None,\n destination_lat: float = 0,\n destination_lng: float = 0,\n destination_text: str = None,\n destination_url: str = None,\n text_dispatcher: str = None,\n text_receiver: str = None,\n contact_name: str = None,\n contact_phone: str = None,\n contact_email: str = None,\n day: int = None,\n priority: int = None,\n number: int = None,\n on_site_seconds: int = None,\n window_start: int = None,\n window_end: int = None,\n order_id: int = None,\n dispatcher_uid: str = None,\n place_uid: str = None,\n worker: str = None,\n items_to_dropoff: int = None,\n items_to_pickup: int = None,\n custom_attributes: dict = None) -> dict:\n\n job = {\n \"type\": job_type,\n \"teamId\": team_id,\n \"destinationName\": destination_name,\n \"destinationLat\": destination_lat,\n \"destinationLng\": destination_lng,\n \"destinationText\": destination_text,\n \"destinationUrl\": destination_url,\n \"textDispatcher\": text_dispatcher,\n \"textReceiver\": text_receiver,\n \"contactName\": contact_name,\n \"contactPhone\": contact_phone,\n \"contactEmail\": contact_email,\n \"day\": day,\n \"priority\": priority,\n \"number\": number,\n \"onSiteSeconds\": on_site_seconds,\n \"windowStart\": window_start,\n \"windowEnd\": window_end,\n \"orderId\": order_id,\n \"dispatcherUid\": dispatcher_uid,\n \"placeUid\": place_uid,\n \"worker\": worker,\n \"itemsToDropoff\": items_to_dropoff,\n \"itemsToPickup\": items_to_pickup\n }\n job_without_none = {k: v for k, v in job.items() if v is not None}\n job.clear()\n job.update(job_without_none)\n\n if custom_attributes:\n job.update({f\"custom_{k}\": v for k, v in custom_attributes.items() if k})\n\n return job",
"def test_post_job(self):\n body = UnitTesterJobCreateReq()\n response = self.client.open(\n '/v1/job',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def create_job(self, employer_id, compensation, location, description, category_id, group_id):\n\n job = Job(employer_id=employer_id, group_id=group_id, compensation=compensation, location=location, category_id=category_id, description=description) \n db.session.add(job)\n db.session.commit()",
"def job_details(user_data, cache, job_id):\n user = cache.ensure_user(user_data)\n job = cache.get_job(user, job_id)\n\n if not job or not job.project_id:\n return result_response(JobDetailsResponseRPC(), None)\n\n try:\n job.project = cache.get_project(user, job.project_id)\n except IntermittentProjectIdError:\n pass\n\n return result_response(JobDetailsResponseRPC(), job)",
"def create_job(jobtype, server):\n name = generate_job_name(jobtype)\n job = Job.objects.create(jobtype=jobtype, server=server, name=name)\n return job",
"def createJobs():\n jobs_list = []\n for job in raw_jobs:\n cur_job = Job(int(job[0]), int(job[1]), int(job[2]))\n print(\"Created job: index:\", cur_job.number, \"Length:\", cur_job.length, \"Type\", cur_job.type, file=debug_file)\n jobs_list.append(cur_job)\n print(\"-----------------FINISHED CREATING JOB OBJECTS----------------------\\n\\n\", file=debug_file)\n return jobs_list",
"def create(cls, process, *args, **kwargs):\r\n job = cls(process=process, *args, **kwargs)\r\n job.save()\r\n ret_tasks = []\r\n if job.status != 'finished':\r\n tasks = Task.objects.filter(is_active=True, process=process)\r\n ret_tasks = [JobTask.create(job, t) for t in tasks]\r\n return job, ret_tasks",
"def create_job(self, job):\n call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.CREATE_JOB, job.name], stdin=subprocess.PIPE)\n out, err = call.communicate(input=platform_ci.jjb.get_job_as_xml(job, self.template_dir))\n call.wait()\n if call.returncode != 0:\n logging.info(out)\n logging.error(err)\n raise PlatformJenkinsException(\"Creating job failed: \" + job.name)",
"def createJob(self, joboptions, previousId=None):\n root = self.manifest.getRootResource()\n assert self.manifest.tosca\n job = Job(self, root, joboptions, previousId)\n\n if (\n self.manifest.localEnv\n and not joboptions.parentJob\n and not joboptions.startTime\n ):\n logPath = self.manifest.getJobLogPath(job.getStartTime(), \".log\")\n if not os.path.isdir(os.path.dirname(logPath)):\n os.makedirs(os.path.dirname(logPath))\n initLogging(logfile=logPath)\n path = self.manifest.path\n if joboptions.planOnly:\n logger.info(\"creating %s plan for %s\", joboptions.workflow, path)\n else:\n logger.info(\"starting %s job for %s\", joboptions.workflow, path)\n\n WorkflowPlan = Plan.getPlanClassForWorkflow(joboptions.workflow)\n if not WorkflowPlan:\n raise UnfurlError(\"unknown workflow: %s\" % joboptions.workflow)\n job.plan = WorkflowPlan(root, self.manifest.tosca, joboptions)\n return job",
"def jobs(\n ctx: typer.Context,\n op_id: str = typer.Argument(\n ...,\n autocompletion=completion_op_id,\n callback=check_for_op_id,\n help=\"A valid op-id. e.g. get_markets_prices\",\n ),\n param_string: Optional[str] = typer.Option(\n None,\n \"--param-string\",\n \"-p\",\n help=\"Optional. Full or partial parameters as a json encoded dictionary string. \"\n \"Keys must be valid parameters for selected op_id.\",\n ),\n default_params: bool = typer.Option(\n False,\n \"-d\",\n \"--default-params\",\n help=\"Include all parameters that are required, or have default values. \"\n \"Missing values will be 'NOTSET'.\",\n ),\n callback_path: Optional[Path] = typer.Option(\n None,\n \"-c\",\n \"--callbacks\",\n help=\"Optional. Path to custom callbacks to be used. \",\n ),\n file_name: str = typer.Option(\n \"created-jobs/${esi_job_op_id}-${esi_job_uid}\",\n \"-n\",\n \"--file-name\",\n help=(\n \"File name for the new job, must be unique if multiple jobs. \"\n \"Can include directories, \"\n \"and the file type suffix will be added based on --format-id.\"\n ),\n ),\n data_path: Optional[Path] = typer.Option(\n None,\n \"--data-file\",\n \"-i\",\n help=(\n \"Optional. Path to json, csv, or yaml file with full or partial parameters. \"\n \"Must result in a list of dicts.\"\n ),\n ),\n format_id: FormatChoices = typer.Option(\n FormatChoices.json,\n \"-f\",\n \"--format-id\",\n show_choices=True,\n help=\"Output file format.\",\n ),\n path_out: Path = typer.Argument(\n \"./tmp\",\n help=\"Parent path for saving the new jobs, will be prepended to --file-name.\",\n ),\n):\n operation_manifest: OperationManifest = ctx.obj[\"operation_manifest\"]\n # path_out = optional_object(path_out, Path, \".\")\n if path_out.is_file:\n typer.BadParameter(\"path_out must not be a file.\")\n file_data: Optional[List[Dict]] = get_params_from_file(data_path)\n parameters: Dict = decode_param_string(param_string)\n if callback_path is None:\n callback_collection = default_callback_collection()\n else:\n callback_collection = load_callbacks(callback_path)\n jobs_: List[EsiJob] = []\n try:\n op_info = operation_manifest.op_info(op_id)\n if not file_data:\n job = op_info.create_job(\n parameters,\n callback_collection,\n include_default_params=default_params,\n # only_required_default_params=False,\n # allow_notset=False,\n )\n jobs_.append(job)\n else:\n for params in file_data:\n params.update(parameters)\n job = op_info.create_job(\n params,\n callback_collection,\n include_default_params=default_params,\n # only_required_default_params=False,\n # allow_notset=False,\n )\n jobs_.append(job)\n except Exception as ex:\n raise typer.BadParameter(\n f\"Exception creating job. {ex.__class__.__name__}: {ex}\"\n )\n for job in jobs_:\n file_path = resolve_job_file_path(job, file_name, path_out)\n try:\n save_path = job.serialize_file(file_path, format_id)\n except Exception as ex:\n raise typer.BadParameter(\n f\"Error saving job to {save_path}. {ex.__class__.__name__}, {ex}\"\n )\n logger.info(\"Saved job %s at %s\", job.uid, file_path)\n typer.echo(f\"{len(jobs_)} jobs saved to {path_out}\")\n report_finished_task(ctx)",
"def post(self):\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})"
] | [
"0.7009168",
"0.6943577",
"0.66033685",
"0.6572269",
"0.65686125",
"0.6561279",
"0.6561279",
"0.6559106",
"0.64874846",
"0.64813113",
"0.6469618",
"0.6463148",
"0.6453288",
"0.64453775",
"0.64210135",
"0.6420657",
"0.641971",
"0.6398805",
"0.6393895",
"0.6366971",
"0.6274766",
"0.6238864",
"0.62132245",
"0.6200938",
"0.6176925",
"0.6165356",
"0.61613977",
"0.6160737",
"0.6155865",
"0.61489016"
] | 0.75051147 | 0 |
Return all job details. | def get_job_detail():
return JobDetail.query.all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos",
"def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())",
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())",
"def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)",
"async def get_jobs(): \n return mngr.getAllJobs()",
"def job_info(self):\n def _sortkey(x):\n return x['job_name']\n\n resp = self._cmd(uri = '/jenkins_jobs')\n jobs = resp.get('jobs', [])\n return sorted(jobs, key=_sortkey)",
"def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))",
"def jobs(self):\n return self.get_jobs()",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())",
"def get_jobs(self):\n return list(self._jobs.values())",
"def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()",
"def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()",
"def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]",
"def get_jobs(self, type = None):\n joblist = JobList()\n for jobs in self.sm.get_jobs(type = type):\n joblist.add_job(jobs['identifier'], jobs['phase'])\n return joblist.tostring()",
"def get_all_jobs(self) -> List[DocumentReference]:\n return self.get_all_documents(Type._JOBS)",
"def get_queryset(self):\n return Job.objects.all()",
"def get_jobs_list(self, response):\n pass",
"def get_all_jobs():\n fq = get_failed_queue(connection=conn)\n job_data = {'queued_jobs': q.job_ids,\n 'failed_jobs': fq.job_ids}\n return jsonify(job_data), 200",
"def get_job_data(jid):\n return jrd.hgetall(_generate_job_key(jid))",
"def job_info(url):\n for job in requests.get(url).json():\n yield job",
"def jobs(self):\n raise NotImplementedError()",
"def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:\n table = dynamodb.Table(table_name)\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n school_key = f'SCHOOL#{school_id.upper()}'\n job_query_params = {\n 'KeyConditionExpression': Key('pk').eq(school_key) & Key('sk').eq(job_id),\n 'ScanIndexForward': False,\n }\n logger.debug(f'Retrieving job details for job {job_id}.')\n job = table.query(**job_query_params)['Items'][0]\n\n # Update string timestamp to datetime.\n job.update(created_at=parse_datetime(job['created_at']))\n job.update(updated_at=parse_datetime(job['updated_at']))\n\n tasks_query_params = {\n 'KeyConditionExpression': Key('pk').eq(job_id),\n 'ScanIndexForward': False,\n }\n task_query_result = table.query(**tasks_query_params)\n tasks = task_query_result['Items']\n\n # If there are additional items to be retrieved for this job, the LastEvaluatedKey will be present\n # Use this key as the starting point for subsequent queries to build a full list\n while task_query_result.get('LastEvaluatedKey', False):\n tasks_query_params['ExclusiveStartKey'] = task_query_result.get('LastEvaluatedKey')\n task_query_result = table.query(**tasks_query_params)\n tasks.extend(task_query_result['Items'])\n\n context = {\n 'job': job,\n 'tasks': tasks,\n 'canvas_url': settings.CANVAS_URL\n }\n logger.debug(f'Retrieved job details for job {job_id}.', extra=context)\n return render(request, \"bulk_site_creator/job_detail.html\", context=context)",
"def jobs(self):\n return self._jobs",
"def info(self, jobid):\n return self.rpc.call(MsfRpcMethod.JobInfo, [jobid])",
"def job_output(self, job_id):\n\n url = self.base_url + \"/ml-service/phoenix-ml/output/findBy?jobId={0}\".format(job_id)\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.json()"
] | [
"0.7248316",
"0.7248316",
"0.7240961",
"0.72205126",
"0.7180172",
"0.7098997",
"0.70902854",
"0.7087385",
"0.7077205",
"0.70463014",
"0.7011621",
"0.7009996",
"0.6946961",
"0.6840078",
"0.68360656",
"0.680167",
"0.67841136",
"0.67203075",
"0.6700369",
"0.6699724",
"0.66963166",
"0.6607545",
"0.6588999",
"0.65781695",
"0.652464",
"0.6505813",
"0.64580774",
"0.6443927",
"0.64299744",
"0.64291906"
] | 0.8332733 | 0 |
Return a job detail by primary key. | def get_job_detail_by_id(job_detail_id):
return JobDetail.query.get(job_detail_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)",
"def get_job_detail():\n\n return JobDetail.query.all()",
"def jobid(self):\n return self.get_db('jobid')",
"def get_object(self, pk):\n try:\n return JobTitle.objects.get(Q(id=pk) | Q(uid=pk))\n except JobTitle.DoesNotExist:\n raise Http404",
"async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())",
"def get_object(self, pk):\n try:\n # x = category__job_title__program_id\n return JobCatalog.objects.get(Q(id=pk) | Q(uid=pk))\n except JobCatalog.DoesNotExist:\n raise Http404",
"def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:\n table = dynamodb.Table(table_name)\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n school_key = f'SCHOOL#{school_id.upper()}'\n job_query_params = {\n 'KeyConditionExpression': Key('pk').eq(school_key) & Key('sk').eq(job_id),\n 'ScanIndexForward': False,\n }\n logger.debug(f'Retrieving job details for job {job_id}.')\n job = table.query(**job_query_params)['Items'][0]\n\n # Update string timestamp to datetime.\n job.update(created_at=parse_datetime(job['created_at']))\n job.update(updated_at=parse_datetime(job['updated_at']))\n\n tasks_query_params = {\n 'KeyConditionExpression': Key('pk').eq(job_id),\n 'ScanIndexForward': False,\n }\n task_query_result = table.query(**tasks_query_params)\n tasks = task_query_result['Items']\n\n # If there are additional items to be retrieved for this job, the LastEvaluatedKey will be present\n # Use this key as the starting point for subsequent queries to build a full list\n while task_query_result.get('LastEvaluatedKey', False):\n tasks_query_params['ExclusiveStartKey'] = task_query_result.get('LastEvaluatedKey')\n task_query_result = table.query(**tasks_query_params)\n tasks.extend(task_query_result['Items'])\n\n context = {\n 'job': job,\n 'tasks': tasks,\n 'canvas_url': settings.CANVAS_URL\n }\n logger.debug(f'Retrieved job details for job {job_id}.', extra=context)\n return render(request, \"bulk_site_creator/job_detail.html\", context=context)",
"def getJob(self, name=None):\n if name == None: \n name = self.jobstable.get_selectedRecordNames()[0]\n if name == None:\n return None, name\n jobid = self.DB.meta.peatsa_jobs[name]\n try:\n job = PEATSA.WebApp.Data.Job(jobid, self.connection)\n except:\n #print 'job not in database'\n return None,name\n return job, name",
"def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details",
"def get_job(self, _id):\n data = {\n 'class': 'Job',\n 'id': _id,\n 'attrs': {},\n }\n job = self.db_client.send_request('list', json.dumps(data))\n\n return Job(\n _id=job['id'],\n _type=job['type'],\n task=job['task'],\n command=job['command'],\n input_parameters=job['inputParameters'],\n status=job['status'],\n runner_id=job['runner'],\n )",
"def retrieve_job(self, job_id):\n job = {}\n with self._lock:\n if job_id not in self._jobs:\n return None\n job = self._jobs[job_id]\n return job",
"def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()",
"def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)",
"def get_job_by_id(self, job_id):\n try:\n result = self._session.query(JobEntity).\\\n filter(JobEntity.id == job_id).\\\n all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict",
"def get_a_job(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n job_message = job_schema.dump(job, many=True)\n return custom_response(job_message, 200)",
"def get_object(self) -> Job:\n project = ProjectPermissionsMixin.get_object(self)\n return project.jobs.get(id=self.kwargs[\"job\"])",
"def get_job_id(self):\n return {'job_id': self._job_id}",
"def job(self):\n return self.batch[self.job_id]",
"def get_result_by_primary_key(self, pk):\n session = self.session_factory()\n result = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n return result",
"def get_row(self, pk):\n ans = self.execute(self.commands.get_row(\n cols=self._join_cols(self.columns),\n table=self.name,\n pk_col=self.primary_key_column,\n pk=pk\n ))\n if not ans:\n return None\n return self._dictify(self.columns, ans[0])",
"def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure",
"def get_job(jid=None):\n if not jid:\n raise CommandExecutionError(\"ID option must not be none.\")\n\n query = {\"type\": \"op\", \"cmd\": \"<show><jobs><id>{}</id></jobs></show>\".format(jid)}\n\n return __proxy__[\"panos.call\"](query)",
"def _get_job(self, uid):\n try:\n return self._jobs[uid]\n except KeyError:\n raise JobNotFoundError('job \\'%s\\' is not found' % (uid,))",
"def job_id(self):\n return self._job_id",
"def get_job(self) -> Job:\n return self.jobs_list[self.sel_idx]",
"def get_job_details(self, job_id):\n try:\n LOG.info('Getting Job Details for job_id %s ', job_id)\n job_details = self.common.get_job_by_id(job_id)\n if job_details:\n LOG.info('Successfully listed Job Details for job_id %s : %s',\n job_id, job_details)\n return job_details\n else:\n errorMsg = 'Failed to find the job with specified job_id: %s'\\\n % job_id\n self.show_error_exit(msg=errorMsg)\n except Exception as e:\n errorMsg = 'Get Job details for job_id %s failed with error %s' \\\n % (job_id, str(e))\n self.show_error_exit(msg=errorMsg)",
"def retrieve(received_job_id: str) -> Union[Job, None]:\n # todo: add error handling\n found_job = db.Jobs().get_by_id(received_job_id)\n if not found_job:\n return\n return found_job",
"def find(self, primary_key):\n sql = '{} WHERE {} = %s'.format(self._select(), self.PRIMARY_KEY)\n cursor = yield self._pool.execute(sql, [primary_key])\n result = cursor.fetchmany(1)\n return self.convert_result_to_object(result)",
"def _retrieve_job_id(job_name, res_id):\n active_jobs = celery_inspector.active()\n job_id = _retrieve_task_id(job_name, res_id, active_jobs)\n if not job_id:\n reserved_jobs = celery_inspector.reserved()\n job_id = _retrieve_task_id(job_name, res_id, reserved_jobs)\n if not job_id:\n scheduled_jobs = celery_inspector.scheduled()\n job_id = _retrieve_task_id(job_name, res_id, scheduled_jobs)\n return job_id",
"def load_job(self, job_id: Hashable) -> dict:\n data = self._redis.json().get(f\"job:{job_id}\", \".\")\n return data"
] | [
"0.72874004",
"0.7173427",
"0.71372676",
"0.6959532",
"0.6943279",
"0.692583",
"0.6852095",
"0.6842035",
"0.67910886",
"0.67829704",
"0.6779413",
"0.67499465",
"0.6748419",
"0.6660234",
"0.65764666",
"0.6566741",
"0.656614",
"0.6525582",
"0.64881426",
"0.6479494",
"0.647201",
"0.64615077",
"0.64275974",
"0.64029163",
"0.6379268",
"0.63707316",
"0.63685083",
"0.6367509",
"0.6330574",
"0.6320654"
] | 0.77013654 | 0 |
Return all job applications. | def get_job_applied():
return JobCompletedApplication.query.all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jobs(self):\n return self.get_jobs()",
"def jobs(self):\n return self._jobs",
"def get_jobs(self):\n return list(self._jobs.values())",
"def get_all_jobs(self):\n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n # for job in all_jobs:\n # job.check_exists()\n\n # get the list of jobs listed in the database as running and update them.\n dbrunning = all_jobs.filter(state__in=['in queue', 'started'])\n for runningjob in dbrunning: runningjob.update();\n\n # get the updated list \n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n\n return all_jobs",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def jobs(self) -> List[Job]:\n return self._jobs.values()",
"def get_all_jobs(self) -> List[DocumentReference]:\n return self.get_all_documents(Type._JOBS)",
"def get_executed_jobs(self):\n with self.__lock:\n return list(self.__executed_jobs)",
"def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())",
"def jobs(self):\n raise NotImplementedError()",
"def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def get_job_names(self):\n return []",
"def get_waiting_jobs(self):\n return []",
"def jobs(self):\n return JobCollection(client=self)",
"def get_registered_jobs(self):\n with self.__lock:\n return list(self.__registered_jobs)",
"def active_jobs(self):\n \n active_jobs = []\n for job in self._jobs:\n if job.active:\n job.backend.status( job )\n active_jobs.append( job )\n\n self._active_jobs = active_jobs[:]\n\n return active_jobs",
"def jobs(self):\n return self.properties.get('jobs',\n EntityCollection(self.context, SynchronizationJob,\n ResourcePath(\"jobs\", self.resource_path)))",
"def get_jobs(self, jobstore=None):\n\n return self._scheduler.get_jobs(jobstore)",
"def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))",
"async def get_jobs(): \n return mngr.getAllJobs()",
"def get_all_jobs():\n fq = get_failed_queue(connection=conn)\n job_data = {'queued_jobs': q.job_ids,\n 'failed_jobs': fq.job_ids}\n return jsonify(job_data), 200",
"def jobs(self, tags=None, tags_intersect=None):\n return list(self.all_jobs(tags=tags, tags_intersect=tags_intersect))",
"def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos",
"def get(self):\n for job in data_types.Job.query():\n if not utils.string_is_true(job.get_environment().get('CORPUS_PRUNE')):\n continue\n\n latest_revision = _get_latest_job_revision(job)\n if not latest_revision:\n continue\n\n queue = tasks.queue_for_job(job.name)\n for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job.name):\n tasks.add_task(\n 'corpus_pruning',\n '%s@%s' % (target_job.fuzz_target_name, latest_revision),\n job.name,\n queue=queue)",
"def list(self):\n\n for job_name in self.upstart.get_all_jobs():\n yield self.get_service(job_name)",
"def _get_jobs():\n return _get_bigquery_service().jobs()",
"def process_jobs_(jobs):\n out = []\n for job in jobs:\n out_ = MultiProcessingFunctions.expand_call(job)\n out.append(out_)\n return out",
"def jobs(self):\n \n def get_jobs(job_ids):\n return defer.DeferredList([Job.safe_fetch(job_id, self.connection) for job_id in job_ids], consumeErrors=True)\n \n def compact(deferred_list):\n ret = []\n for job in deferred_list:\n if isinstance(job, Job):\n ret.append(job)\n else:\n self.remove(job.job_id)\n return ret\n \n d = self.job_ids\n d.addCallback(get_jobs)\n d.addCallback(compact)\n return d"
] | [
"0.7738957",
"0.73770744",
"0.7349059",
"0.7263708",
"0.71636105",
"0.71636105",
"0.7105789",
"0.7049187",
"0.69864005",
"0.69775003",
"0.6923545",
"0.68921167",
"0.68604153",
"0.6717858",
"0.6713856",
"0.661173",
"0.6611498",
"0.65968645",
"0.6593661",
"0.65781695",
"0.65622497",
"0.6542573",
"0.6500595",
"0.6485288",
"0.6433128",
"0.64217377",
"0.6390294",
"0.6370986",
"0.63693035",
"0.63572556"
] | 0.76446235 | 1 |
Return the job application id for a given job id. | def get_job_applied_by_job_id(job_id):
return JobCompletedApplication.query.filter(JobCompletedApplication.job_id == job_id).first().job_applied_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)",
"def get_job_applied_by_id(job_applied_id):\n\n return JobCompletedApplication.query.get(job_applied_id)",
"def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure",
"def get_job(self, id, jobstore=None):\n\n return self._scheduler.get_job(id, jobstore)",
"def retrieve_job(self, job_id):\n job = {}\n with self._lock:\n if job_id not in self._jobs:\n return None\n job = self._jobs[job_id]\n return job",
"def get_job(\n self, job_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Job\":\n\n return communicator.Job(self.__requester).from_id(\n job_id=job_id, parameters=params\n )",
"def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job",
"def get_a_job(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n job_message = job_schema.dump(job, many=True)\n return custom_response(job_message, 200)",
"def retrieve_job(self, job_id) -> AzureQuantumJob:\n return self._provider.get_job(job_id)",
"def get_job_def_by_id(self, job_id):\n try:\n result = self._session.query(\n JobEntity.id,\n JobEntity.username,\n JobEntity.name,\n JobEntity.workflow_id,\n WorkflowEntity.name,\n JobEntity.output_uri,\n JobEntity.work_uri,\n JobEntity.no_output_hash,\n JobEntity.inputs,\n JobEntity.parameters,\n JobEntity.final_output,\n JobEntity.exec_context,\n JobEntity.exec_method,\n JobEntity.exec_parameters,\n JobEntity.notifications\n ).\\\n filter(JobEntity.id == job_id).\\\n filter(WorkflowEntity.id == JobEntity.workflow_id).\\\n all()\n\n result_dict = [\n {\n 'job_id': row[0],\n 'username': row[1],\n 'name': row[2],\n 'workflow_id': row[3],\n 'workflow_name': row[4],\n 'output_uri': row[5],\n 'work_uri': json.loads(row[6]),\n 'no_output_hash': row[7],\n 'inputs': json.loads(row[8]),\n 'parameters': json.loads(row[9]),\n 'final_output': json.loads(row[10]),\n 'execution': {\n 'context': json.loads(row[11]),\n 'method': json.loads(row[12]),\n 'parameters': json.loads(row[13])\n },\n 'notifications': json.loads(row[14])\n } for row in result\n ]\n\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n # should have just one record\n if not result_dict:\n return {}\n\n return result_dict[0]",
"def get_job(self, _id):\n data = {\n 'class': 'Job',\n 'id': _id,\n 'attrs': {},\n }\n job = self.db_client.send_request('list', json.dumps(data))\n\n return Job(\n _id=job['id'],\n _type=job['type'],\n task=job['task'],\n command=job['command'],\n input_parameters=job['inputParameters'],\n status=job['status'],\n runner_id=job['runner'],\n )",
"def job(self):\n return self.batch[self.job_id]",
"def job_by_id(self, job_id):\n response = self._session.get(\n path='{base_api}/jobs/{job_id}.xml'.format(\n base_api=self.base_api,\n job_id=job_id\n ),\n headers={'Accept': 'application/xml'},\n )\n\n return response.text",
"def get_job_by_id(self, job_id):\n try:\n result = self._session.query(JobEntity).\\\n filter(JobEntity.id == job_id).\\\n all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict",
"def get_job(self) -> Job:\n return self.jobs_list[self.sel_idx]",
"def get_job(self) -> CustomJob:\n return self._client.get_custom_job(name=self._job_name)",
"def get_job(self, user, job_id):\n calling_user = User.get_user_by_username(user)\n job = Job.get_job_by_id(job_id)\n for_user = job.get_user()\n self.access_handler.check_read_rights(for_user, calling_user)\n return build_job(job)",
"def get_job(self, job_id) -> AzureQuantumJob:\n azure_job = self._workspace.get_job(job_id)\n backend = self.get_backend(azure_job.details.target)\n return AzureQuantumJob(backend, azure_job)",
"def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)",
"def retrieve(received_job_id: str) -> Union[Job, None]:\n # todo: add error handling\n found_job = db.Jobs().get_by_id(received_job_id)\n if not found_job:\n return\n return found_job",
"def get_object(self) -> Job:\n project = ProjectPermissionsMixin.get_object(self)\n return project.jobs.get(id=self.kwargs[\"job\"])",
"def get_job(arn=None):\n pass",
"def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()",
"def getJob(self, name=None):\n if name == None: \n name = self.jobstable.get_selectedRecordNames()[0]\n if name == None:\n return None, name\n jobid = self.DB.meta.peatsa_jobs[name]\n try:\n job = PEATSA.WebApp.Data.Job(jobid, self.connection)\n except:\n #print 'job not in database'\n return None,name\n return job, name",
"def getJob(uniq):\n return Job(Cuebot.getStub('job').GetJob(\n job_pb2.JobGetJobRequest(id=uniq), timeout=Cuebot.Timeout).job)",
"def get_job(self, job_name):\n try:\n return self.json_dict['job definitions'][job_name]\n except KeyError:\n print('No job \"%s\" in %s' % (job_name, self.filepath))\n return None",
"def poll(self, job_id):\n return self.manage.poll_job(job_id=job_id)",
"def load_job(self, job_id: Hashable) -> dict:\n data = self._redis.json().get(f\"job:{job_id}\", \".\")\n return data",
"def _get_job(self, uid):\n try:\n return self._jobs[uid]\n except KeyError:\n raise JobNotFoundError('job \\'%s\\' is not found' % (uid,))",
"def get_job(jid=None):\n if not jid:\n raise CommandExecutionError(\"ID option must not be none.\")\n\n query = {\"type\": \"op\", \"cmd\": \"<show><jobs><id>{}</id></jobs></show>\".format(jid)}\n\n return __proxy__[\"panos.call\"](query)"
] | [
"0.77378917",
"0.7632125",
"0.7465325",
"0.7188449",
"0.715539",
"0.7043636",
"0.6927355",
"0.6892371",
"0.68905514",
"0.6852687",
"0.6848821",
"0.6789124",
"0.67766374",
"0.67155415",
"0.66451055",
"0.65906435",
"0.65832335",
"0.6499773",
"0.6458737",
"0.6456007",
"0.64170665",
"0.641198",
"0.6397107",
"0.6349358",
"0.6323448",
"0.62593496",
"0.62412107",
"0.62206507",
"0.6199669",
"0.6198126"
] | 0.7639219 | 1 |
Return all notes created. | def get_note():
return Note.query.all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listNotes() -> list:\n list_of_notes = []\n for note in Note.objects.all():\n list_of_notes.append({\n 'uuid': note.uuid, 'title': note.title,\n 'author': note.author, 'body': note.body, 'created_at': localtime(note.created_at)\n })\n return list_of_notes",
"def notes(self):\n return reapy.NoteList(self)",
"def getnotes():",
"def notes_list():\n if request.method == \"POST\":\n note = str(request.data.get(\"text\", \"\"))\n idx = max(notes.keys()) + 1\n notes[idx] = note\n return note_repr(idx), status.HTTP_201_CREATED\n\n # request.method == 'GET'\n return [note_repr(idx) for idx in sorted(notes.keys())]",
"def getNotes(self):\n return self.__notes",
"def getNotes(self, *args):\n return _libsbml.SBase_getNotes(self, *args)",
"def getNotes(self):\n logger.debug(\"Func: getNotes\")\n\n return self._currentNotes",
"def notes(self):\n return NotesTable(self.rpc, self.name)",
"def notes(self):\r\n return notes.Notes(self)",
"def notes(self):\r\n return notes.Notes(self)",
"def notes(self):\n return notes.Notes(self)",
"def ls(self, count = 200):\n return self._manager.ls_notes(self['id'], count)",
"def notes(self):\n return self._notes",
"def notes(self):\n return Notes(self)",
"def notes(self):\r\n return TicketNotes(self)",
"def get_notes(self, note_limit=200):\n return Note.get_by_person_record_id(\n self.subdomain, self.record_id, limit=note_limit)",
"def notes(self):\n return self.__notes",
"def getNotes(self):\n return self._nednotes, self._ongcnotes",
"def notes(self):\r\n return notes.UserNotes(self)",
"def view_notes(tag_list):\n final_notes = []\n for tag in tag_list:\n all_notes = Tags.objects.get(tag_text=tag)\n notes = all_notes.notes.all()\n for note in notes:\n final_notes.append(note.note_text)\n \n return final_notes",
"def add_notes(self, notes):\n if hasattr(notes, \"notes\"):\n for x in notes.notes:\n self.add_note(x)\n return self.notes\n elif hasattr(notes, \"name\"):\n self.add_note(notes)\n return self.notes\n elif isinstance(notes, six.string_types):\n self.add_note(notes)\n return self.notes\n for x in notes:\n if isinstance(x, list) and len(x) != 1:\n if len(x) == 2:\n self.add_note(x[0], x[1])\n else:\n self.add_note(x[0], x[1], x[2])\n else:\n self.add_note(x)\n return self.notes",
"def all_notes():\n \n return render_template('all_notes.html',colors=music_color,)",
"def get_note_names(self):\n res = []\n for n in self.notes:\n if n.name not in res:\n res.append(n.name)\n return res",
"async def _view_all_notes(self, ctx: Context):\n\n author = ctx.author\n\n note_infos = []\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n total = len(notes)\n for page_num, note in enumerate(notes, start=1):\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Notes\").format(author_str),\n timestamp=ctx.message.created_at\n )\n\n page.set_footer(\n text=_(\"Page {page_num}/{leng}\").format(\n page_num=page_num, leng=total\n )\n )\n else:\n page = _(\n \"**{author} TvM Notes**\"\n \"\\n\\n{note}\"\n \"\\n{footer}\"\n ).format(\n author=author_str,\n note=note_info,\n footer=_(\"*Page {page_num}/{leng}*\").format(\n page_num=page_num, leng=total\n )\n )\n\n note_infos.append(page)\n\n await menu(ctx, note_infos, DEFAULT_CONTROLS)",
"def _get_issue_notes(request, pk):\n if request.user.is_coordinator_or_better:\n note_types = IssueNote.COORDINATOR_NOTE_TYPES\n else:\n note_types = IssueNote.PARALEGAL_NOTE_TYPES\n\n return (\n IssueNote.objects.filter(issue=pk)\n .prefetch_related(\"creator__groups\")\n .filter(note_type__in=note_types)\n .order_by(\"-created_at\")\n .all()\n )",
"def note_list(request):\n user = request.user\n notes = Note.objects.filter(author=user)\n serializer = NoteSerializer(notes, many=True)\n return Response(serializer.data)",
"def all_note_by_job_applied_id(job_applied_id):\n\n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all()",
"def get_all_notes(user_id, token):\n user = User.get(User.id == user_id).username\n if not validate_token(user, token):\n return HTTPResponse(status=500, body={\"message\":\"Validation error.\"})\n res = []\n for note in Notes.select():\n if note.user.id == user_id:\n new_note = model_to_dict(note)\n res.append({\"id\":new_note['id'], \"title\":new_note['title'],\n \"content\":new_note['content']})\n new_token = generate_token(user)\n body = {\"user_id\":user_id, \"token\":new_token.decode('utf-8'), 'items':res}\n return HTTPResponse(status=200, body=body)",
"def GetNotes(self, request, global_params=None):\n config = self.GetMethodConfig('GetNotes')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def __init__(self):\n self.notes = []"
] | [
"0.77602446",
"0.7531043",
"0.70795774",
"0.7064056",
"0.70277685",
"0.7003691",
"0.6970311",
"0.69562006",
"0.6907323",
"0.6907323",
"0.6899866",
"0.6882427",
"0.6856624",
"0.6796058",
"0.678864",
"0.66967875",
"0.66869307",
"0.66006863",
"0.6593107",
"0.65787625",
"0.653431",
"0.65228",
"0.63457555",
"0.6311221",
"0.62824297",
"0.6275821",
"0.6202154",
"0.61803865",
"0.6137106",
"0.6071289"
] | 0.7753266 | 1 |
Return all notes for a job application id. | def all_note_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_jd_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Job Description' ).order_by(Note.note_date_created.desc()).first()",
"def all_recruiter_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all()",
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def all_interview_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_category).all()",
"def getNotes(self):\n logger.debug(\"Func: getNotes\")\n\n return self._currentNotes",
"def getNotes(self, *args):\n return _libsbml.SBase_getNotes(self, *args)",
"def getNotes(self):\n return self.__notes",
"def get_notes_by_id(self, ids: List[str]) -> pd.Series:\n\n return self.notes[self.notes.apply(lambda n: n.id in ids)]",
"def notes(self):\n return self._notes",
"def getnotes():",
"def notes(self):\n return reapy.NoteList(self)",
"def notes(self):\n return self.__notes",
"def get_note_alternatives(self, id):\n notes = self.session.query(models.Note).filter(\n models.Note.conflict_parent_id == id,\n ).all()\n return btype.Note.list >> notes",
"def _get_issue_notes(request, pk):\n if request.user.is_coordinator_or_better:\n note_types = IssueNote.COORDINATOR_NOTE_TYPES\n else:\n note_types = IssueNote.PARALEGAL_NOTE_TYPES\n\n return (\n IssueNote.objects.filter(issue=pk)\n .prefetch_related(\"creator__groups\")\n .filter(note_type__in=note_types)\n .order_by(\"-created_at\")\n .all()\n )",
"def getNotes(self):\n return self._nednotes, self._ongcnotes",
"def notes(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"notes\")",
"def get_note():\n\n return Note.query.all()",
"def get(self, copy_id):\n checkCopyValidity(copy_id)\n copy_notes = db.session.query(models.Notes).filter_by(copy_id=copy_id)\n return [note.serialize() for note in copy_notes], 200",
"def notes(self):\n return notes.Notes(self)",
"def all_followup_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Follow-up').all()",
"def notes(self) -> str:\n return self._notes",
"def notes(self):\r\n return notes.Notes(self)",
"def notes(self):\r\n return notes.Notes(self)",
"def ls(self, count = 200):\n return self._manager.ls_notes(self['id'], count)",
"def listNotes() -> list:\n list_of_notes = []\n for note in Note.objects.all():\n list_of_notes.append({\n 'uuid': note.uuid, 'title': note.title,\n 'author': note.author, 'body': note.body, 'created_at': localtime(note.created_at)\n })\n return list_of_notes",
"def notes(self) -> Optional[str]:\n return pulumi.get(self, \"notes\")",
"def notes(self):\n return NotesTable(self.rpc, self.name)",
"def all_notes():\n \n return render_template('all_notes.html',colors=music_color,)",
"def view_notes(tag_list):\n final_notes = []\n for tag in tag_list:\n all_notes = Tags.objects.get(tag_text=tag)\n notes = all_notes.notes.all()\n for note in notes:\n final_notes.append(note.note_text)\n \n return final_notes",
"def get_jobs(self):\n return self.my_user_cron.find_comment(CRONTAB_COMMENT)"
] | [
"0.73608685",
"0.70325047",
"0.7020432",
"0.673755",
"0.66720706",
"0.65655774",
"0.64068127",
"0.6329673",
"0.63007534",
"0.62201124",
"0.61646885",
"0.61219245",
"0.61209005",
"0.6072345",
"0.6039921",
"0.5995878",
"0.59537804",
"0.5936029",
"0.587913",
"0.5855434",
"0.58279383",
"0.5827149",
"0.5827149",
"0.5804603",
"0.57935697",
"0.57665765",
"0.5763438",
"0.56996185",
"0.5666826",
"0.56638294"
] | 0.8214903 | 0 |
Return all job description for job applied id. | def all_jd_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Job Description' ).order_by(Note.note_date_created.desc()).first() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def get_job_description(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_description',\n [job], self._service_ver, context)",
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def get_job_detail():\n\n return JobDetail.query.all()",
"def job_output(self, job_id):\n\n url = self.base_url + \"/ml-service/phoenix-ml/output/findBy?jobId={0}\".format(job_id)\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.json()",
"def describe_text_translation_job(JobId=None):\n pass",
"def get_job_details(self, job_id):\n try:\n LOG.info('Getting Job Details for job_id %s ', job_id)\n job_details = self.common.get_job_by_id(job_id)\n if job_details:\n LOG.info('Successfully listed Job Details for job_id %s : %s',\n job_id, job_details)\n return job_details\n else:\n errorMsg = 'Failed to find the job with specified job_id: %s'\\\n % job_id\n self.show_error_exit(msg=errorMsg)\n except Exception as e:\n errorMsg = 'Get Job details for job_id %s failed with error %s' \\\n % (job_id, str(e))\n self.show_error_exit(msg=errorMsg)",
"def info(self, jobid):\n return self.rpc.call(MsfRpcMethod.JobInfo, [jobid])",
"def get_job_def_by_id(self, job_id):\n try:\n result = self._session.query(\n JobEntity.id,\n JobEntity.username,\n JobEntity.name,\n JobEntity.workflow_id,\n WorkflowEntity.name,\n JobEntity.output_uri,\n JobEntity.work_uri,\n JobEntity.no_output_hash,\n JobEntity.inputs,\n JobEntity.parameters,\n JobEntity.final_output,\n JobEntity.exec_context,\n JobEntity.exec_method,\n JobEntity.exec_parameters,\n JobEntity.notifications\n ).\\\n filter(JobEntity.id == job_id).\\\n filter(WorkflowEntity.id == JobEntity.workflow_id).\\\n all()\n\n result_dict = [\n {\n 'job_id': row[0],\n 'username': row[1],\n 'name': row[2],\n 'workflow_id': row[3],\n 'workflow_name': row[4],\n 'output_uri': row[5],\n 'work_uri': json.loads(row[6]),\n 'no_output_hash': row[7],\n 'inputs': json.loads(row[8]),\n 'parameters': json.loads(row[9]),\n 'final_output': json.loads(row[10]),\n 'execution': {\n 'context': json.loads(row[11]),\n 'method': json.loads(row[12]),\n 'parameters': json.loads(row[13])\n },\n 'notifications': json.loads(row[14])\n } for row in result\n ]\n\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n # should have just one record\n if not result_dict:\n return {}\n\n return result_dict[0]",
"def all_note_by_job_applied_id(job_applied_id):\n\n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all()",
"def completed_job_info(self, jobid=None, output=False):\n\n self.check_all_jobs()\n\n if jobid is None:\n completed_jobids = [key for key in self.job_dict.keys()\n if self.job_dict[key] == 'COMPLETED']\n response_list = [\n self._request(\n 'GET',\n CosmoSim.QUERY_URL + \"/{}\".format(completed_jobids[i]),\n auth=(self.username, self.password), cache=False)\n for i in range(len(completed_jobids))]\n self.response_dict_current = {}\n for i, vals in enumerate(completed_jobids):\n self.response_dict_current[vals] = (\n self._generate_response_dict(response_list[i]))\n else:\n if self.job_dict[jobid] == 'COMPLETED':\n response_list = [\n self._request(\n 'GET', CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), cache=False)]\n self.response_dict_current = {}\n self.response_dict_current[jobid] = (\n self._generate_response_dict(response_list[0]))\n else:\n warnings.warn(\"JobID must refer to a query with a phase \"\n \"of 'COMPLETED'.\")\n return\n\n if output is True:\n dictkeys = self.response_dict_current.keys()\n if len(dictkeys) > 1:\n keys = [i for i in self.response_dict_current.keys()]\n phases = [self.job_dict[key] for key in keys]\n t = Table()\n t['JobID'] = keys\n t['Phase'] = phases\n t.pprint()\n warnings.warn(\"Use specific jobid to get more information, or \"\n \"explore `self.response_dict_current`.\")\n elif len(dictkeys) == 1:\n print(self.response_dict_current[dictkeys[0]]['content'])\n else:\n log.error('No completed jobs found.')\n return\n else:\n return",
"def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:\n table = dynamodb.Table(table_name)\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n school_key = f'SCHOOL#{school_id.upper()}'\n job_query_params = {\n 'KeyConditionExpression': Key('pk').eq(school_key) & Key('sk').eq(job_id),\n 'ScanIndexForward': False,\n }\n logger.debug(f'Retrieving job details for job {job_id}.')\n job = table.query(**job_query_params)['Items'][0]\n\n # Update string timestamp to datetime.\n job.update(created_at=parse_datetime(job['created_at']))\n job.update(updated_at=parse_datetime(job['updated_at']))\n\n tasks_query_params = {\n 'KeyConditionExpression': Key('pk').eq(job_id),\n 'ScanIndexForward': False,\n }\n task_query_result = table.query(**tasks_query_params)\n tasks = task_query_result['Items']\n\n # If there are additional items to be retrieved for this job, the LastEvaluatedKey will be present\n # Use this key as the starting point for subsequent queries to build a full list\n while task_query_result.get('LastEvaluatedKey', False):\n tasks_query_params['ExclusiveStartKey'] = task_query_result.get('LastEvaluatedKey')\n task_query_result = table.query(**tasks_query_params)\n tasks.extend(task_query_result['Items'])\n\n context = {\n 'job': job,\n 'tasks': tasks,\n 'canvas_url': settings.CANVAS_URL\n }\n logger.debug(f'Retrieved job details for job {job_id}.', extra=context)\n return render(request, \"bulk_site_creator/job_detail.html\", context=context)",
"async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())",
"def general_job_info(self, jobid=None, output=False):\n\n self.check_all_jobs()\n\n if jobid is None:\n print(\"Job Summary:\\n\"\n \"There are {0} jobs with phase: COMPLETED.\\n\"\n \"There are {1} jobs with phase: ERROR.\\n\"\n \"There are {2} jobs with phase: ABORTED.\\n\"\n \"There are {3} jobs with phase: PENDING.\\n\"\n \"There are {4} jobs with phase: EXECUTING.\\n\"\n \"There are {5} jobs with phase: QUEUED.\\n\"\n \"Try providing a jobid for the job you'd like to \"\n \"know more about.\\n To see a list of all jobs, use \"\n \"`check_all_jobs()`.\"\n .format(self.job_dict.values().count('COMPLETED'),\n self.job_dict.values().count('ERROR'),\n self.job_dict.values().count('ABORTED'),\n self.job_dict.values().count('PENDING'),\n self.job_dict.values().count('EXECUTING'),\n self.job_dict.values().count('QUEUED')))\n return\n else:\n response_list = [self._request(\n 'GET', CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), cache=False)]\n\n if response_list[0].ok is False:\n log.error('Must provide a valid jobid.')\n return\n else:\n self.response_dict_current = {}\n self.response_dict_current[jobid] = (\n self._generate_response_dict(response_list[0]))\n\n if output is True:\n dictkeys = self.response_dict_current.keys()\n print(self.response_dict_current[dictkeys[0]]['content'])\n return\n else:\n return",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def job_by_id(self, job_id):\n response = self._session.get(\n path='{base_api}/jobs/{job_id}.xml'.format(\n base_api=self.base_api,\n job_id=job_id\n ),\n headers={'Accept': 'application/xml'},\n )\n\n return response.text",
"async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))",
"def get_job_information(run_id):\n cmd = [github_cli, 'run', 'view', str(run_id), '--json', 'jobs']\n with subprocess.Popen(cmd, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n print(err)\n return json.loads(result)['jobs']",
"def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos",
"def cli(ctx, job_id):\n return ctx.gi.jobs.get_inputs(job_id)",
"def get_jobs(self):\n _job_definitions = {}\n _result = []\n try:\n for _job_description_file in self.json_dict[\"job definition files\"]:\n _JDFO = JsonJobsDefinitionsFile(self.config)\n try:\n \"\"\"\n In the job file job description files are specified relative\n to the parent folder of the job file (self.filepath).\n \"\"\"\n if self.filepath: # unit tests may not set it\n _job_description_file = os.path.join(\n os.path.dirname(\n os.path.dirname(self.filepath)),\n _job_description_file)\n _JDFO.read(_job_description_file)\n _job_definitions[_JDFO.get_filename()] = _JDFO\n except BaseException:\n raise\n for _job_definition_file_set in self.json_dict[\"jobs\"]:\n for _job_definition_file in _job_definition_file_set:\n for _job in _job_definition_file_set[_job_definition_file]:\n __j = _job_definitions[_job_definition_file].get_job(\n _job)\n if __j:\n _result.append(__j)\n else: # job not found in Job Description file\n _result.append(\n 'job \"%s\" not found in Job Description file \"%s\"' %\n (_job, _job_description_file))\n raise NoSuchJobError\n except KeyError:\n _result.append('%s has no \"job definition files\"' % self.filepath)\n return _result",
"def get_job_builds(self, job_id, started=None, finished=None,\n success=None, skipped=None, order='asc', limit=100):\n pass",
"def all_recruiter_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all()",
"def job_info(self):\n def _sortkey(x):\n return x['job_name']\n\n resp = self._cmd(uri = '/jenkins_jobs')\n jobs = resp.get('jobs', [])\n return sorted(jobs, key=_sortkey)",
"def get_a_job(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n job_message = job_schema.dump(job, many=True)\n return custom_response(job_message, 200)",
"def get_job_id(self):\n return {'job_id': self._job_id}",
"def get_job_applied():\n\n return JobCompletedApplication.query.all()",
"def get_job_data(jid):\n return jrd.hgetall(_generate_job_key(jid))",
"def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())"
] | [
"0.682056",
"0.63486063",
"0.60984653",
"0.6051572",
"0.598966",
"0.5974383",
"0.5968253",
"0.5953331",
"0.59054095",
"0.58386207",
"0.58320206",
"0.58077645",
"0.57883066",
"0.57619286",
"0.56843454",
"0.56843454",
"0.55897605",
"0.5587414",
"0.5582129",
"0.5572485",
"0.55528784",
"0.5465504",
"0.54475224",
"0.5422508",
"0.5409434",
"0.53801876",
"0.53557664",
"0.5337181",
"0.5330355",
"0.5318287"
] | 0.6539759 | 1 |
Return all recruiter details for job applied id. | def all_recruiter_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def scrape_recruitment(self):\n d = self.driver\n recruitment_page = self.guildwork_url + '/recruitment'\n d.get(recruitment_page)\n soup = BeautifulSoup(d.page_source, 'lxml')\n apps = soup.find('table', {'id': 'applications'})\n\n all_apps = []\n for row in tqdm(apps.find_all('tr')):\n if not (row.find('th', {'class':'header'})):\n name_field = row.find('a', href=True)\n app_url = self.guildwork_url + name_field.get('href')\n app_name = name_field.text\n app_status = row.find('span',{'class':'label'}).text\n\n # Note that this is only returning information on accepted applications\n if (app_status == 'Accepted'):\n d.get(app_url)\n soup = BeautifulSoup(d.page_source, 'lxml')\n timestamp = soup.find('span', attrs={'data-timestamp': True})['data-timestamp']\n\n app_data = {\n 'url' : app_url,\n 'name' : app_name,\n 'joined' : datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S'),\n 'lodestone_link' : soup.find('label',text='Lodestone Link').find_next('div').text.strip()\n\n }\n all_apps.append(app_data)\n d.close()\n return all_apps",
"def all_note_by_job_applied_id(job_applied_id):\n\n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all()",
"def get_job_detail():\n\n return JobDetail.query.all()",
"def get_job_applied_by_id(job_applied_id):\n\n return JobCompletedApplication.query.get(job_applied_id)",
"def all_jd_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Job Description' ).order_by(Note.note_date_created.desc()).first()",
"def get_job_applied():\n\n return JobCompletedApplication.query.all()",
"def all_interview_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_category).all()",
"def get_job_details(self, job_id):\n try:\n LOG.info('Getting Job Details for job_id %s ', job_id)\n job_details = self.common.get_job_by_id(job_id)\n if job_details:\n LOG.info('Successfully listed Job Details for job_id %s : %s',\n job_id, job_details)\n return job_details\n else:\n errorMsg = 'Failed to find the job with specified job_id: %s'\\\n % job_id\n self.show_error_exit(msg=errorMsg)\n except Exception as e:\n errorMsg = 'Get Job details for job_id %s failed with error %s' \\\n % (job_id, str(e))\n self.show_error_exit(msg=errorMsg)",
"def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure",
"def get_job_applied_by_job_id(job_id):\n\n return JobCompletedApplication.query.filter(JobCompletedApplication.job_id == job_id).first().job_applied_id",
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def job_profile(request, job_id):\n\n job = get_object_or_404(Jobs, pk=job_id)\n recruiter = RecruiterProfile.objects.filter(user=job.author).first()\n\n template = 'jobs/job_profile.html'\n context = {\n 'title': 'Job profile',\n 'job': job,\n 'recruiter': recruiter,\n }\n\n return render(request, template, context)",
"def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass",
"def get_results(self, job_id):\n ujs = self.__ujs_client()\n res = ujs.get_results(job_id)\n return res",
"def all_followup_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Follow-up').all()",
"def get_job_information(run_id):\n cmd = [github_cli, 'run', 'view', str(run_id), '--json', 'jobs']\n with subprocess.Popen(cmd, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n print(err)\n return json.loads(result)['jobs']",
"def job_output(self, job_id):\n\n url = self.base_url + \"/ml-service/phoenix-ml/output/findBy?jobId={0}\".format(job_id)\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.json()",
"def get_recruitment_thread_summaries(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Forum/Recruit/Summaries/\"))",
"def get_reagent_item_list(self) -> DBRecList:\n raise NotImplementedError('not implemented')",
"def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos",
"def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:\n table = dynamodb.Table(table_name)\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n school_key = f'SCHOOL#{school_id.upper()}'\n job_query_params = {\n 'KeyConditionExpression': Key('pk').eq(school_key) & Key('sk').eq(job_id),\n 'ScanIndexForward': False,\n }\n logger.debug(f'Retrieving job details for job {job_id}.')\n job = table.query(**job_query_params)['Items'][0]\n\n # Update string timestamp to datetime.\n job.update(created_at=parse_datetime(job['created_at']))\n job.update(updated_at=parse_datetime(job['updated_at']))\n\n tasks_query_params = {\n 'KeyConditionExpression': Key('pk').eq(job_id),\n 'ScanIndexForward': False,\n }\n task_query_result = table.query(**tasks_query_params)\n tasks = task_query_result['Items']\n\n # If there are additional items to be retrieved for this job, the LastEvaluatedKey will be present\n # Use this key as the starting point for subsequent queries to build a full list\n while task_query_result.get('LastEvaluatedKey', False):\n tasks_query_params['ExclusiveStartKey'] = task_query_result.get('LastEvaluatedKey')\n task_query_result = table.query(**tasks_query_params)\n tasks.extend(task_query_result['Items'])\n\n context = {\n 'job': job,\n 'tasks': tasks,\n 'canvas_url': settings.CANVAS_URL\n }\n logger.debug(f'Retrieved job details for job {job_id}.', extra=context)\n return render(request, \"bulk_site_creator/job_detail.html\", context=context)",
"async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())",
"def find_rent(self, id):\n allR=self.__loadFromFile()\n for bk in allR:\n if bk.getId()==id:\n return bk",
"def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details",
"def getMyRegiments(self, empireID):\n myRegimentsDict = {}\n myArmiesDict = {}\n otherArmiesDict = {}\n for regimentID, myRegiment in self.regiments.iteritems():\n if myRegiment.empireID == empireID:\n myRegimentsDict[regimentID] = myRegiment.getMyRegimentInfo()\n # find systemID regiment is currently \"at\", this decides if army icon required\n systemID = myRegiment.getMyCurrentSystemID()\n if not (myArmiesDict.has_key(systemID)):\n # add new army to this system\n myArmiesDict[systemID] = [myRegiment.id]\n else:\n # existing army, add to regiment id list\n myArmiesDict[systemID].append(myRegiment.id)\n else:\n # other regiment, add to other army dict\n if not (otherArmiesDict.has_key(myRegiment.fromSystem)):\n # add new army to this system\n otherArmiesDict[myRegiment.fromSystem] = [myRegiment.empireID]\n else:\n # existing army, append regiment empire owner\n if myRegiment.empireID not in otherArmiesDict[myRegiment.fromSystem]:\n otherArmiesDict[myRegiment.fromSystem].append(myRegiment.empireID)\n \n return (myRegimentsDict, myArmiesDict, otherArmiesDict)",
"def info(self, jobid):\n return self.rpc.call(MsfRpcMethod.JobInfo, [jobid])",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def get_user_job_detail(user_id):\n\n return JobDetail.query.filter(JobCompletedApplication.user_id == user_id).join(JobCompletedApplication).order_by(JobCompletedApplication.application_date_submitted.desc()).all()"
] | [
"0.6490853",
"0.602824",
"0.5831629",
"0.57851154",
"0.5604668",
"0.55976665",
"0.55698776",
"0.5514874",
"0.5397591",
"0.5368741",
"0.53621614",
"0.53491193",
"0.5341995",
"0.52119046",
"0.51713234",
"0.5159437",
"0.5067528",
"0.50574297",
"0.50168747",
"0.5014782",
"0.5014346",
"0.501182",
"0.5005716",
"0.5003794",
"0.50014377",
"0.49992955",
"0.49712157",
"0.49641427",
"0.49641427",
"0.49402875"
] | 0.76523733 | 0 |
Return all Resume for job applied id. | def all_resume_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_recruiter_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all()",
"def get_job_applied():\n\n return JobCompletedApplication.query.all()",
"def resume(self, job_id):\n job = Job.get_job_by_id(job_id)\n self.access_handler.check_resume(job)\n self.master.resume_job(job)",
"def all_note_by_job_applied_id(job_applied_id):\n\n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all()",
"def load_all_job_ids(self, search_id: Hashable) -> List[Hashable]:\n job_ids = self._redis.lrange(f\"search:{search_id}.job_id_list\", 0, -1)\n return job_ids",
"def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())",
"def job_ids(self):\n return self.get_job_ids()",
"def get_job_applied_by_id(job_applied_id):\n\n return JobCompletedApplication.query.get(job_applied_id)",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def get_job_detail():\n\n return JobDetail.query.all()",
"def get_jobs_by_process_id(self, process_id):\n\n jobs = list()\n for job in Job.objects.filter(process=process_id):\n jobs.append(job)\n return jobs",
"def resume(self):\n\t\treturn Job(SDK.PrlVm_Resume(self.handle)[0])",
"def all_interview_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_category).all()",
"def resume_workflow(self):\n self._set_execution_state(states.RUNNING)\n\n tasks = self.wf_ex.task_executions\n\n if not all([t.state == states.RUNNING for t in tasks]):\n return self._find_commands_to_resume(tasks)\n\n return []",
"def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()",
"def job_ids(self) -> List[str]:\n return self._db_data.job_ids",
"def resumeJob(_id, client):\n return tryAgainJob(_id)",
"def get_jobs(self):\n return list(self._jobs.values())",
"def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure",
"def resume_job(self, id, jobstore=None):\n self._scheduler.resume_job(id, jobstore)",
"def resume(self, scanid, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/resume/', {'scanId': scanid, 'apikey': apikey})))",
"def get_job_applied_by_job_id(job_id):\n\n return JobCompletedApplication.query.filter(JobCompletedApplication.job_id == job_id).first().job_applied_id",
"def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]",
"def load_metadata_from_all_jobs(\n self, search_id: Hashable, key: Hashable\n ) -> List[Any]:\n search_id\n jobs_ids = self.load_all_job_ids(search_id)\n values = []\n for job_id in jobs_ids:\n try:\n value = self._redis.json().get(f\"job:{job_id}\", f\".metadata.{key}\")\n except redis.exceptions.ResponseError:\n value = None\n\n if value is not None:\n values.append(value)\n return values",
"def get_all_jobs(self) -> List[DocumentReference]:\n return self.get_all_documents(Type._JOBS)",
"def get_employeeProjects(self, id):\n from Project import Project\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select project from projectpromotor where employee=%s', (id,))\n\n projectsId = list()\n for row in cursor:\n projectsId.append(row[0])\n\n projects = list()\n for projId in projectsId:\n cursor.execute('select * from project where projectID=%s',\n (projId,)) # returns exactly one row from the table\n row = cursor.fetchone()\n project = Project(row[0], row[1], row[2], row[3])\n\n cursor.execute('select year from projectYearConnection where projectID=%s', (projId,))\n\n years = list()\n for row in cursor:\n years.append(row[0])\n\n project.activeYear = years\n\n projects.append(project)\n\n return projects",
"def jobs(self):\n return self.get_jobs()",
"def fetch_many(cls, job_ids: Iterable[str], connection: 'Redis', serializer=None) -> List['Job']:\n with connection.pipeline() as pipeline:\n for job_id in job_ids:\n pipeline.hgetall(cls.key_for(job_id))\n results = pipeline.execute()\n\n jobs: List[Optional['Job']] = []\n for i, job_id in enumerate(job_ids):\n if not results[i]:\n jobs.append(None)\n continue\n\n job = cls(job_id, connection=connection, serializer=serializer)\n job.restore(results[i])\n jobs.append(job)\n\n return jobs",
"def job_ids(self):\n return self.connection.lrange(self.key, 0, -1)"
] | [
"0.5769213",
"0.56241286",
"0.55435765",
"0.54594284",
"0.5418937",
"0.53563815",
"0.53016",
"0.5262021",
"0.5253391",
"0.5253391",
"0.5225734",
"0.521197",
"0.51903236",
"0.5136629",
"0.51281595",
"0.51190066",
"0.5110543",
"0.5109377",
"0.50861925",
"0.5067618",
"0.50420934",
"0.50299114",
"0.50238186",
"0.50233763",
"0.5017011",
"0.500264",
"0.4988829",
"0.4978005",
"0.49693596",
"0.49638528"
] | 0.8092352 | 0 |
Return all Follow up Template for job applied id. | def all_followup_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Follow-up').all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def job_templates(self):\n return self._tower.job_templates.filter({'project__exact': self.id})",
"def get_templates(self):\n return [{\"id\": tmplt[\"template_id\"], \"name\": tmplt[\"name\"]}\n for tmplt in Template.objects(user_id=self.user_id, active=True)]",
"def get_template_names(self):\n if self.object.twfy_id or self.object.current_or_future_candidacies:\n return [\"people/person_detail.html\"]\n return [\"people/not_current_person_detail.html\"]",
"def get_updated_jobtemplate(self):\n return self.response_json",
"def template_list(self):\n return self.ezx.get_template_list()",
"def templatelist(cls):\n return cls._templatelist",
"def test_workflows_id_templates_get(self):\n pass",
"def get_templates(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/templates\").json()",
"def _template_ids(self):\n raise NotImplementedError()",
"def id_template_notificacao(self):\n return self._id_template_notificacao",
"def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"def get_template_names(self):\n templates = super(PartialResponseMixin, self).get_template_names()\n if self.get_use_partial():\n templates.insert(0, self.get_partial_template_name())\n return templates",
"def _existing_tables(self):\n checkalljobs = self.check_all_jobs()\n completed_jobs = [key for key in self.job_dict.keys()\n if self.job_dict[key] in ['COMPLETED', 'EXECUTING']]\n soup = BeautifulSoup(checkalljobs.content, \"lxml\")\n self.table_dict = {}\n\n for tag in soup.find_all({\"uws:jobref\"}):\n jobid = tag.get('xlink:href').split('/')[-1]\n if jobid in completed_jobs:\n self.table_dict[jobid] = str(tag.get('id'))",
"def get_queryset(self):\n return Template.objects.all()",
"def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)",
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def get(self, request, *args, **kwargs):\n my_normal_post_lists = NormalPosts.objects.filter(uploded_by=request.user.normalprofile).order_by(\"-id\")\n return render(request, self.template_name, {\n 'my_normal_post_lists': my_normal_post_lists,\n })",
"def test_workflows_id_templates_fk_get(self):\n pass",
"async def jobs(request):\n\n job_list = await get_jobs(request)\n return template('jobs.html',\n jobs=job_list)",
"def get_template_names(self): \n product = self.get_object()\n names = ['%s/detail-for-upc-%s.html' % (self.template_folder, product.upc), \n '%s/detail-for-class-%s.html' % (self.template_folder, product.item_class.name.lower()),\n '%s/detail.html' % (self.template_folder)]\n return names",
"def all_interview_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_category).all()",
"def getTemplateId(self):\n return 7",
"def all_recruiter_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all()",
"def notification_templates(self):\n # NOTE: Derived classes should implement\n from awx.main.models.notifications import NotificationTemplate\n\n return NotificationTemplate.objects.none()",
"def all_note_by_job_applied_id(job_applied_id):\n\n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all()",
"def get(self):\n for job in data_types.Job.query():\n if not utils.string_is_true(job.get_environment().get('CORPUS_PRUNE')):\n continue\n\n latest_revision = _get_latest_job_revision(job)\n if not latest_revision:\n continue\n\n queue = tasks.queue_for_job(job.name)\n for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job.name):\n tasks.add_task(\n 'corpus_pruning',\n '%s@%s' % (target_job.fuzz_target_name, latest_revision),\n job.name,\n queue=queue)",
"def _get_cr_templates(self, code, fallback) -> None:\n if not hasattr(self, '_catredirtemplates'):\n self._catredirtemplates = {}\n if code in self.category_redirect_templates:\n cr_template_tuple = self.category_redirect_templates[code]\n elif fallback and fallback in self.category_redirect_templates:\n cr_template_tuple = self.category_redirect_templates[fallback]\n else:\n self._catredirtemplates[code] = []\n return\n cr_set = set()\n site = pywikibot.Site(code, self)\n tpl_ns = site.namespaces.TEMPLATE\n for cr_template in cr_template_tuple:\n cr_page = pywikibot.Page(site, cr_template, ns=tpl_ns)\n # retrieve all redirects to primary template from API,\n # add any that are not already on the list\n for t in cr_page.backlinks(filter_redirects=True,\n namespaces=tpl_ns):\n newtitle = t.title(with_ns=False)\n if newtitle not in cr_template_tuple:\n cr_set.add(newtitle)\n self._catredirtemplates[code] = list(cr_template_tuple) + list(cr_set)",
"def all_jobs_for_client(ClientID):\n\n client = Client.get(ClientID)\n\n jobs = Job.get_all_for_client(ClientID)\n\n oneoffs = OneOff.get_from_client_id_between_dates(ClientID)\n\n invoices = MonthlyInvoice.get_from_client_id_between_dates(ClientID)\n\n job = JobView(client, jobs, oneoffs, False, Job.get_count_for_client(ClientID) > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job, invoices=invoices)",
"def list_templates(self):\n raise NotImplementedError()",
"def tr_template(self, allowed=None, **kwargs):\n template_sum = None\n for tr_jid in self.tr_jids:\n if allowed is not None and tr_jid not in allowed:\n continue\n template = Template(sender=tr_jid, **kwargs)\n if template_sum is None:\n template_sum = template\n else:\n template_sum = template_sum | template\n return template_sum"
] | [
"0.59492767",
"0.545359",
"0.5385648",
"0.52259356",
"0.5178491",
"0.51286674",
"0.5078213",
"0.5043155",
"0.49889243",
"0.4969203",
"0.49352974",
"0.49056143",
"0.48956487",
"0.48870766",
"0.487659",
"0.48737434",
"0.48275462",
"0.482688",
"0.47788817",
"0.47703582",
"0.47437435",
"0.47374752",
"0.47153893",
"0.47011027",
"0.46797612",
"0.46594286",
"0.46579194",
"0.4656691",
"0.46541184",
"0.46487582"
] | 0.64399916 | 0 |
Return all Interview question by job applied id. | def all_interview_by_job_applied_id(job_applied_id):
return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_category).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def get_questions(self, obj):\n queryset = Question.objects.filter(sheet=obj)\n questions = []\n for q in queryset:\n questions.append(q.text)\n return questions",
"def all_note_by_job_applied_id(job_applied_id):\n\n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Note' ).all()",
"def get_questions(self, question_id):\n return self._questions_by_id.get(question_id)",
"def all_recruiter_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Recruiter Contact' ).all()",
"def _retrieveQuestions(self, questID):\n all_related_quests = set([quest for doc in self._Docs\n for quest in self.doc2quest[doc]])\n random_quests = random.sample(self.questions.keys(), 40)\n\n Quests = [questID]\n Quests += [random.choice(self.doc2quest[dID]) for dID in self._Docs[1:5]]\n\n for quest in random_quests:\n\n if len(Quests) == 10:\n break\n if not quest in all_related_quests:\n Quests += quest\n\n return Quests",
"def _get_questions_from_tag_assessment(self, event_data):\n unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(\n event_data['location'])\n if unit_id is None or lesson_id is None:\n return []\n\n if event_data['type'] == self.QUESTION_GROUP:\n mc_indices = [\n i for i in xrange(len(event_data['containedTypes']))\n if event_data['containedTypes'][i] == self.MC_QUESTION]\n return [{\n 'id': 'u.%s.l.%s.c.%s.i.%s' % (\n unit_id, lesson_id, event_data['instanceid'], index),\n 'score': event_data['individualScores'][index],\n 'answers': event_data['answer'][index]\n } for index in mc_indices if event_data['answer'][index]]\n elif (event_data['type'] == self.MC_QUESTION and\n event_data['answer']):\n # This is a single multiple-choice question.\n return [{\n 'id': 'u.%s.l.%s.c.%s' % (\n unit_id, lesson_id, event_data['instanceid']),\n 'score': event_data['score'],\n 'answers': event_data['answer']\n }]\n else:\n return []",
"def getInquiriesForDisplay(self):\n return [self.context]",
"def answers(self):\n from quiz.models import Answer\n qids = self.values_list('id', flat=True)\n return Answer.objects.filter(\n question__id__in=qids).select_related('question')",
"def get_single_question(self, id):\n query = (\"SELECT * FROM tbl_questions WHERE question_id = %s;\")\n inputs = id\n user_requests = get_query(query, inputs)\n return user_requests",
"def get_questions(self, ids):\n measurables = self._query_measurables(ids)\n return [\n ForetoldQuestion(measurable[\"id\"], self, measurable) if measurable else None\n for measurable in measurables\n ]",
"def all_jd_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Job Description' ).order_by(Note.note_date_created.desc()).first()",
"def answers_all(self):\n return self.answer_set.all()",
"def available_qs(self):\n correct_qs_ids = self.tasks.filter(answer=F('question__solution')).values_list('question__id', flat=True)\n return Q.objects.exclude(id__in=correct_qs_ids)",
"def questions(self, request, pk):\n tag = self.get_object()\n questions = tag.questions.all()\n serializer = QuestionSerializer(questions, many=True, context={'request': request})\n return Response(serializer.data)",
"def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()",
"def get_queryset(self):\n judge_qs = Judge.objects.filter(judge=self.request.user)\n return Contest.objects.filter(\n pk__in=judge_qs.values('contest'),\n publish_date__gte=timezone.now(),\n )",
"def get_questions(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_GetQuestions', self.handle))",
"def _get_questions_from_submit_and_attempt_assessment(self, event_data):\n if not event_data['type'].startswith('assessment-'):\n return []\n assessment_id = event_data['type'][len('assessment-'):]\n\n values = event_data['values']\n if isinstance(values, list):\n # This is a v1.4 (or older) assessment.\n mc_indices = [i for i in xrange(len(values))\n if values[i]['type'] == 'choices']\n return [{\n 'id': 's.%s.i.%s' % (assessment_id, index),\n 'score': 1.0 if values[index]['correct'] else 0.0,\n 'answers': [values[index]['value']]\n } for index in mc_indices if values[index]['value'] is not None]\n elif isinstance(values, dict):\n # This is a v1.5 assessment.\n return self._summarize_multiple_questions(\n values, 's.%s' % assessment_id)\n else:\n return []",
"def _find_answerable(self):\n trainable = []\n for i, entry in enumerate(self.entries):\n # store the indices of anything that is answerable\n if entry['has_hint'].item(): # and self.answer_types[i] == 'number':\n trainable.append(i)\n return trainable",
"def get_all_questions(self):\n query = (\"SELECT * FROM tbl_questions;\")\n user_reqeusts = get_just_query(query)\n return user_reqeusts",
"def questions(self):\n return self._questions",
"def get_questions(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVm_GetQuestions', self.handle))",
"def question(dico):\n l = []\n for i in range(len(dico)):\n l.append(dico[i][0])\n affichage_question(dico,l)",
"def cached_get_questions(self):\n # it is important that this is called with the same params every time\n return self.get_questions([], include_triggers=True, include_groups=True)",
"def _summarize_multiple_questions(self, data, id_prefix):\n type_info_dict = data['containedTypes']\n questions_list = []\n\n for instanceid, type_info in type_info_dict.iteritems():\n if isinstance(type_info, list):\n # This is a question group.\n mc_indices = [i for i in xrange(len(type_info))\n if type_info[i] == self.MC_QUESTION]\n questions_list += [{\n 'id': '%s.c.%s.i.%s' % (id_prefix, instanceid, index),\n 'score': data['individualScores'][instanceid][index],\n 'answers': data['answers'][instanceid][index]\n } for index in mc_indices if (\n data['answers'][instanceid][index])]\n\n elif (type_info == self.MC_QUESTION and\n data['answers'][instanceid]):\n # This is an individual multiple-choice question.\n questions_list += [{\n 'id': '%s.c.%s' % (id_prefix, instanceid),\n 'score': data['individualScores'][instanceid],\n 'answers': data['answers'][instanceid]\n }]\n\n return questions_list",
"def _get_questions_from_attempt_lesson(self, event_data):\n unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(\n event_data['location'])\n if unit_id is None or lesson_id is None:\n return []\n\n return self._summarize_multiple_questions(\n event_data, 'u.%s.l.%s' % (unit_id, lesson_id))",
"def list_for_game(self, game_id):\n return (\n self.database.query(self.model, Answer.is_correct, GameAnswer)\n .join(Contest)\n .join(Game)\n .outerjoin(GameAnswer, GameAnswer.question_id == self.model.id)\n .outerjoin(Answer, GameAnswer.answer_id == Answer.id)\n .filter(Game.id == game_id)\n .order_by(self.model.category.desc())\n .order_by(self.model.created_at)\n .all()\n )",
"def findall_path_from_org_id(self, path, org_id):\n for org_question in self.merged_root.iter('OrgQuestion'):\n if org_question.attrib['ORGQ_ID'] == org_id:\n extraction = org_question.findall(path)\n if len(extraction) != 0:\n return extraction\n\n return list()",
"def get_org_questions_ids(self):\n return [q.attrib['ORGQ_ID'] for q in self.merged_root.findall('OrgQuestion')]"
] | [
"0.60519505",
"0.60223424",
"0.5880214",
"0.57323456",
"0.5701403",
"0.5560641",
"0.5559024",
"0.5552572",
"0.54993796",
"0.5482854",
"0.5401203",
"0.53886664",
"0.52820504",
"0.5280837",
"0.52447784",
"0.5219957",
"0.5178943",
"0.51773167",
"0.51404804",
"0.5137996",
"0.51078755",
"0.5083803",
"0.5056638",
"0.5029589",
"0.5023135",
"0.50202584",
"0.5012451",
"0.5009223",
"0.50068855",
"0.49879003"
] | 0.73461443 | 0 |
Return all Interview question by job user id. | def all_interview_by_user_id(user_id):
return Note.query.filter(Note.user_id == user_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_date_created.desc()).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_interview_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, ((Note.note_category == 'Interview Question Technical') | (Note.note_category == 'Interview Question Informational') | (Note.note_category == 'Interview Question Behavioral'))).order_by(Note.note_category).all()",
"def get_all_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question, user_id FROM question\")\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=q[2])) for q in questions}\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id, answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id\")\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()",
"def get_my_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question FROM question WHERE question.user_id = (%s) ORDER BY create_time DESC \", user_id)\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id,answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id WHERE question.user_id =(%s)\", user_id)\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=user_id)) for q in questions}\n\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()",
"def get_asked_questions(user_id, api_site_parameter, page = 1, body = False, comments = False, pagesize = 100, sort = 'creation'):\n path = \"users/%d/questions\" % user_id\n\n query_filter = ')(Ybxw_gbz'\n\n if body:\n query_filter = '9F)u(CSWCtKt'\n if comments:\n query_filter = ')(YbxuzQQ.'\n if body and comments:\n query_filter = ')(YbxuzQTp'\n\n results = __fetch_results(path, api_site_parameter, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results",
"def get_user_job_detail(user_id):\n\n return JobDetail.query.filter(JobCompletedApplication.user_id == user_id).join(JobCompletedApplication).order_by(JobCompletedApplication.application_date_submitted.desc()).all()",
"def get(self, user_id):\n\n user = UserModel.find_by_id(user_id)\n jobs = [job.json() for job in user.posted_jobs]\n\n return {\"jobs\": jobs}",
"def get_queryset(self):\n qs = Job.objects.filter(user=self.request.user)\n return qs",
"def get_single_question(self, id):\n query = (\"SELECT * FROM tbl_questions WHERE question_id = %s;\")\n inputs = id\n user_requests = get_query(query, inputs)\n return user_requests",
"def get_queryset(self):\n judge_qs = Judge.objects.filter(judge=self.request.user)\n return Contest.objects.filter(\n pk__in=judge_qs.values('contest'),\n publish_date__gte=timezone.now(),\n )",
"def get_user_jobs(request):\n post = request.POST.dict()\n user = post.get('user_id')\n if not user:\n response = {'status':-1, 'status_message':'No user supplied'}\n return HttpResponse(json.dumps(response))\n response = {\n 'status':1,\n 'status_message':'Success',\n 'jobs': list(job.objects.filter(user_id=user).defer('output_file'))\n }\n return HttpResponse(json.dumps(response))",
"def get_all_questions(self):\n query = (\"SELECT * FROM tbl_questions;\")\n user_reqeusts = get_just_query(query)\n return user_reqeusts",
"def get_questions(self, obj):\n queryset = Question.objects.filter(sheet=obj)\n questions = []\n for q in queryset:\n questions.append(q.text)\n return questions",
"def getSubmissionsByUser(self, i):\r\n return [(ind,sub) for ind, sub in enumerate(self.submissions) if sub.authorId == i]",
"def get_answer_list(assessment, student, current_user):\n result_set = assessment.result_sets.filter(student=student).first()\n answer_list = []\n for answer in result_set.rating_answers.all():\n if answer.evaluator == current_user:\n answer_list.append(answer)\n for answer in result_set.text_answers.all():\n if answer.evaluator == current_user:\n answer_list.append(answer)\n return answer_list",
"def get_queryset(self):\n now = timezone.localtime(timezone.now())\n\n # get exams that are currently in progress\n exams = Exam.objects.filter(begin_timestamp__lte=now, end_timestamp__gt=now)\n\n # get ExamProgress objects for this user for each exam\n progress_objects = ExamProgress.objects.filter(\n exam__in=exams, user=self.request.user, current_question__isnull=False\n )\n\n # get default queryset\n queryset = super(QuestionViewSet, self).get_queryset()\n\n # get questions that appear as `current_question` in one of the ExamProgress object\n queryset = queryset.filter(\n pk__in=list(map(lambda p: p.current_question.pk, progress_objects))\n )\n return queryset.prefetch_related(\"answers\")",
"def getAllJobsForUser(self, userId):\n params = {\n 'userId': userId,\n 'limit': 1000000\n }\n try:\n resp = self.gc.get(JobUtils.JOB_LIST_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. invalid user id:', userId)\n return {}\n raise\n\n output = {}\n for job in resp:\n if not job:\n continue\n jobId = job.get('_id')\n status = job.get('status')\n statusStr = JobUtils.getJobStatusStr(status)\n output[jobId] = statusStr\n\n return output",
"def get_user_answers(user_id):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n answer_table = dynamodb.Table(\"Answers\")\n\n filterexpression = Attr(\"UserId\").eq(user_id)\n response = answer_table.scan(FilterExpression=filterexpression)\n answers = response.get(\"Items\")\n\n return answers",
"def get_submissions(username, problem, cookies, is_clicker, sesssion):\r\n url = '/'.join([courseserver, coursepath, course, 'submission_history', username, 'i4x:/', coursepath, 'problem', problem])\r\n user_agent = {'User-agent': 'csci3202-f13/edx-tools/progress ' + requests.utils.default_user_agent()}\r\n r = sesssion.get(url, cookies=cookies, headers=user_agent)\r\n content = r.text\r\n print 'getting data for ' + username\r\n return parse(content)",
"def get_queryset(self):\n jobs = Job.objects.all()\n if not self.kwargs.get(\"pk\"):\n jobs = jobs.filter(\n status=choices.APPROVED, submission_deadline__gte=datetime.today()\n ).order_by(\"-created_at\")\n if not self.request.user.is_anonymous():\n # if user is logged in, exclude his/her applied jobs.\n # also append ignored jobs at the end of job listing.\n jobs = jobs.exclude(\n ~Q(application__state=\"ignored\"),\n application__user=self.request.user,\n ).order_by(\"-created_at\")\n\n if self.request.user.user_type == User.PERSON:\n # If user is of type \"person\",\n # show only jobs related to his/her gender along with not_specified jobs.\n if self.request.user.person.gender != \"NS\":\n jobs = jobs.filter(\n required_gender__in=[\n self.request.user.person.gender,\n choices.NOT_SPECIFIED,\n ]\n )\n return jobs",
"def get(self, user_id):\n user = UserModel.find_by_id(user_id)\n print(\"Getting volunteered jobs\")\n jobs = [job.json() for job in user.volunteered_jobs]\n\n return {\"jobs\": jobs}",
"def get_questions(self, question_id):\n return self._questions_by_id.get(question_id)",
"def all_resume_by_job_applied_id(job_applied_id): \n return Note.query.filter(Note.job_applied_id == job_applied_id, Note.note_category == 'Resume' ).all()",
"def getSelectedUserResources (userId):\n\tsearcher = UserContentTester(userId=userId)\n\t# print searcher.data[0]\n\tprint '%d results' % len(searcher)\n\treturn searcher",
"def _get_questions_from_tag_assessment(self, event_data):\n unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(\n event_data['location'])\n if unit_id is None or lesson_id is None:\n return []\n\n if event_data['type'] == self.QUESTION_GROUP:\n mc_indices = [\n i for i in xrange(len(event_data['containedTypes']))\n if event_data['containedTypes'][i] == self.MC_QUESTION]\n return [{\n 'id': 'u.%s.l.%s.c.%s.i.%s' % (\n unit_id, lesson_id, event_data['instanceid'], index),\n 'score': event_data['individualScores'][index],\n 'answers': event_data['answer'][index]\n } for index in mc_indices if event_data['answer'][index]]\n elif (event_data['type'] == self.MC_QUESTION and\n event_data['answer']):\n # This is a single multiple-choice question.\n return [{\n 'id': 'u.%s.l.%s.c.%s' % (\n unit_id, lesson_id, event_data['instanceid']),\n 'score': event_data['score'],\n 'answers': event_data['answer']\n }]\n else:\n return []",
"def get_queryset(self):\n now = timezone.localtime(timezone.now())\n\n # get exams that are currently in progress\n exams = Exam.objects.filter(begin_timestamp__lte=now, end_timestamp__gt=now)\n\n # get ExamProgress objects for this user for each exam\n progress_objects = ExamProgress.objects.filter(\n exam__in=exams, user=self.request.user, current_exercise__isnull=False\n )\n\n # get default queryset\n queryset = super(ExerciseViewSet, self).get_queryset()\n\n # get questions that appear as `current_question` in one of the ExamProgress object\n queryset = queryset.filter(\n pk__in=list(map(lambda p: p.current_exercise.pk, progress_objects))\n )\n return queryset.prefetch_related(\"testcases\")",
"def get_all_completed_exp_ids(user_id):\n completed_activities_model = (\n user_models.CompletedActivitiesModel.get(\n user_id, strict=False))\n\n if completed_activities_model:\n activities_completed = get_completed_activities_from_model(\n completed_activities_model)\n\n return activities_completed.exploration_ids\n else:\n return []",
"def get(self,request,format=None):\n answers = SingleWordQuizAnswer.objects.filter(user=request.user.info)\n serializer = SingleWordQuizAnswerSerializer(answers,many=True)\n return Response(data=serializer.data,status=status.HTTP_200_OK)",
"def getInquiriesForDisplay(self):\n return [self.context]",
"def get(self,request,format=None):\n answers = MultipleQuizAnswer.objects.filter(user=request.user.info)\n serializer = MultipleQuizAnswerSerializer(answers,many=True)\n return Response(data=serializer.data,status=status.HTTP_200_OK)",
"def answers(self):\n from quiz.models import Answer\n qids = self.values_list('id', flat=True)\n return Answer.objects.filter(\n question__id__in=qids).select_related('question')"
] | [
"0.64423734",
"0.6284777",
"0.62319624",
"0.59722936",
"0.5878626",
"0.5869722",
"0.5776599",
"0.5703998",
"0.5643334",
"0.55415744",
"0.55246925",
"0.5491793",
"0.54882145",
"0.5477339",
"0.54560447",
"0.5452791",
"0.5439329",
"0.53813064",
"0.5355762",
"0.52801436",
"0.5280046",
"0.52533776",
"0.52141505",
"0.52137995",
"0.5196718",
"0.5147635",
"0.513054",
"0.5120593",
"0.5113377",
"0.5088965"
] | 0.66224235 | 0 |
Create and return Application Progress. | def create_application_progress(application_state, job_applied_id , created_at):
app_progress = ApplicationProgress(application_state = application_state, job_applied_id = job_applied_id, created_at = created_at)
db.session.add(app_progress)
db.session.commit()
return app_progress | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getProgress(self):",
"def GetProgress(self):\n return self.new_progress",
"def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)",
"def progress(self, *args, **kwargs):\n kwargs['logger'] = self\n return Progress(*args, **kwargs)",
"def reportProgress(self):\n \n pass",
"def make_progress_bar(self):\n progress_label = Label(self.master, text=\"Progress:\")\n progress_label.grid(row=7, column=0)\n\n progress_bar = Progressbar(length=200)\n progress_bar.grid(row=7, column=1)\n\n return progress_label, progress_bar",
"def get_progress(self):\r\n return None",
"def progress_bar_new() -> str:\n pb_id = int(request.args['pb_id'])\n has_insights = request.args['has_insights'] == 'true'\n\n # Obtain progress bar details. Only show the user@host part if it doesn't equal the user@host of this process\n # (in case someone connected to this dashboard from another machine or user)\n progress_bar_details = _DASHBOARD_TQDM_DETAILS_DICT.get(pb_id)\n if progress_bar_details['user'] == '{}@{}'.format(getpass.getuser(), socket.gethostname()):\n progress_bar_details['user'] = ''\n else:\n progress_bar_details['user'] = '{}:'.format(progress_bar_details['user'])\n\n # Create table for worker insights\n insights_workers = []\n if has_insights:\n for worker_id in range(progress_bar_details['n_jobs']):\n insights_workers.append(f\"<tr><td>{worker_id}</td>\"\n f\"<td id='pb_{pb_id}_insights_worker_{worker_id}_tasks_completed'></td>\"\n f\"<td id='pb_{pb_id}_insights_worker_{worker_id}_start_up_time'></td>\"\n f\"<td id='pb_{pb_id}_insights_worker_{worker_id}_init_time'></td>\"\n f\"<td id='pb_{pb_id}_insights_worker_{worker_id}_waiting_time'></td>\"\n f\"<td id='pb_{pb_id}_insights_worker_{worker_id}_working_time'></td>\"\n f\"<td id='pb_{pb_id}_insights_worker_{worker_id}_exit_time'></td>\"\n f\"</tr>\")\n insights_workers = \"\\n\".join(insights_workers)\n\n return jsonify(result=_progress_bar_html.format(id=pb_id, insights_workers=insights_workers,\n has_insights='block' if has_insights else 'none',\n **{k: escape(v) for k, v in progress_bar_details.items()}))",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def doProgress(self,progress,message):\n pass",
"def progress(self):\n return self.runProgress",
"def progress(self):\n return self.runProgress",
"def get_task_progress():\r\n current_time = time()\r\n progress = {'action_name': action_name,\r\n 'attempted': num_attempted,\r\n 'succeeded': num_succeeded,\r\n 'skipped': num_skipped,\r\n 'failed': num_failed,\r\n 'total': num_total,\r\n 'duration_ms': int((current_time - start_time) * 1000),\r\n }\r\n return progress",
"def transfer_progress(self, stats):",
"def get_application_progress():\n\n return ApplicationProgress.query.all()",
"def make_progress_bar():\n\n if simple_tregex_mode:\n total_files = len(list(to_iterate_over.keys()))\n else:\n total_files = sum(len(x) for x in list(to_iterate_over.values()))\n\n par_args = {'printstatus': kwargs.get('printstatus', True),\n 'root': root, \n 'note': note,\n 'length': total_files,\n 'startnum': kwargs.get('startnum'),\n 'denom': kwargs.get('denominator', 1)}\n\n term = None\n if kwargs.get('paralleling', None) is not None:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = kwargs.get('paralleling')\n\n if in_notebook:\n par_args['welcome_message'] = welcome_message\n\n outn = kwargs.get('outname', '')\n if outn:\n outn = outn + ': '\n\n tstr = '%s%d/%d' % (outn, current_iter, total_files)\n p = animator(None, None, init=True, tot_string=tstr, **par_args)\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n return p, outn, total_files, par_args",
"def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()",
"def wait_progress(self):\n pass",
"def wait_progress(self):\n pass",
"def progress(self, arg, num_done, info=''):\n pass",
"def push_progress(self, status, object_id, progress):\n pass",
"def query_job_progress():\n pass",
"def __init__(self: \"InProgress\", progress: int = 0) -> None:\n self.progress = max(0, min(progress, 100))",
"def progress(self):\n if self.running:\n pass\n else:\n self._engine.progress()",
"def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step",
"def _prepare(self, progress: BaseProgressMonitor):\n self._started = True\n self._total_bytes = None\n self._downloaded_bytes = 0\n self._progress = progress\n if self.show_progress_bar:\n self._tqdm = tqdm(total=None, unit=\"bytes\", dynamic_ncols=True, file=sys.stdout)\n else:\n self._tqdm = None",
"def progress(self, id):",
"def progress(self, id):",
"def progress(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.progress",
"def get_progress(self):\n return self.cloudserver.progress"
] | [
"0.6922813",
"0.6465331",
"0.62213963",
"0.6202986",
"0.6198377",
"0.61496395",
"0.6125598",
"0.61032665",
"0.6076108",
"0.5990762",
"0.5978565",
"0.5978565",
"0.59583265",
"0.5951946",
"0.58782065",
"0.58567727",
"0.58351296",
"0.58163893",
"0.58163893",
"0.58061016",
"0.5783107",
"0.57709086",
"0.5767247",
"0.576533",
"0.5764978",
"0.57642287",
"0.57250315",
"0.57250315",
"0.5713643",
"0.5710305"
] | 0.6606787 | 1 |
Return all Application Progress created. | def get_application_progress():
return ApplicationProgress.query.all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getProgress(self):",
"def get_progress(self, asc=True):\n\n # block until system is ready\n while not self.ready.isSet():\n self.ready.wait(0.1)\n\n events = self.get_all_events()\n if not asc:\n events = reversed(list(events))\n\n return [(event, self.get_average_progress(event)) for event in events]",
"def progress(self) -> JSON:\n return {\n 'up': True,\n 'unindexed_bundles': sum(self.queues[config.notifications_queue_name()].get('messages', {}).values()),\n 'unindexed_documents': sum(chain.from_iterable(\n self.queues[config.tallies_queue_name(retry=retry)].get('messages', {}).values()\n for retry in (False, True)\n ))\n }",
"def get_task_progress():\r\n current_time = time()\r\n progress = {'action_name': action_name,\r\n 'attempted': num_attempted,\r\n 'succeeded': num_succeeded,\r\n 'skipped': num_skipped,\r\n 'failed': num_failed,\r\n 'total': num_total,\r\n 'duration_ms': int((current_time - start_time) * 1000),\r\n }\r\n return progress",
"def addAllProgressBar(self):\n all_run = sum(\n [\n self.run_dict[key][\"Progress\"]\n for key in self.run_dict\n if self.run_dict[key][\"Run\"]\n ]\n )\n logging.info(\"All run: {0}\".format(all_run))\n self.progressAllBar = QProgressBar(self) # Progress bar created\n self.progressAllBar.setMinimum(1)\n self.progressAllBar.setMaximum(all_run)\n self.ui.layout_v.addWidget(self.progressAllBar)\n self.progressAllBar.setValue(1)",
"def get_waiting_jobs(self):\n return []",
"def build_progress_report(self):\n\n report = {\n 'manifest': self._generate_manifest_section(),\n 'isos': self._generate_isos_section(),\n }\n return report",
"def GetProgress(self):\n return self.objects_finished",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def get_all_events(self):\n s = OrderedSet([self.EVENT_TOTAL_PROGRESS])\n s.update(self.get_events())\n return s",
"def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)",
"def build_progress_report(self):\n\n report = {\n 'packages' : self._packages_section(),\n 'metadata' : self._metadata_section(),\n 'publishing' : self._publishing_section(),\n }\n return report",
"def progress(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.progress",
"def get_archieve(self):\n all_tasks = self.task_controller.get_list()\n return [task for task in all_tasks if task.is_completed == Status.DONE]",
"def progress_records(self, run_idxs):\n\n return self.run_contig_records(run_idxs, PROGRESS)",
"def get_job_applied():\n\n return JobCompletedApplication.query.all()",
"def progress(self):\n return self.runProgress",
"def progress(self):\n return self.runProgress",
"def progress(self, keys=None):\n\n message = {}\n\n # show all tasks by default\n if keys == None:\n keys = self.list_task_keys()\n\n # store progress of each task in a dictionary\n for key in keys:\n progress = self.processTaskProgress(self.registry[key,\n None].tasks[key])\n message[key] = {\n 'status':progress\n }\n\n return message",
"def processTaskProgress(self, task):\n\n tasklist = []\n\n #turn the task into a tuple\n processedTask = {\n 'id':task.id,\n 'status':task.status(),\n 'progress':task.progress(),\n 'msg':task.progressMessage()\n }\n\n #add that task to the list\n tasklist.append(processedTask)\n\n #add all children if the task is a container\n if isinstance(task,TaskContainer):\n for subtask in task.subtasks:\n tasklist += self.processTaskProgress(subtask.task)\n\n return tasklist",
"def GetProgress(self):\n return self.new_progress",
"def progress(self):\n # prepare\n currently_submitted = 0\n currently_in_flight = 0\n # pylint: disable=redefined-variable-type\n if self.max_in_flight > 0:\n limit_in_flight = self.max_in_flight\n else:\n limit_in_flight = utils.PlusInfinity()\n if self.max_submitted > 0:\n limit_submitted = self.max_submitted\n else:\n limit_submitted = utils.PlusInfinity()\n\n # if no resources are enabled, there's no point in running\n # this further\n nr_enabled_resources = sum(int(rsc.enabled)\n for rsc in self._core.resources.itervalues())\n if nr_enabled_resources == 0:\n raise gc3libs.exceptions.NoResources(\n \"No resources available for running jobs.\")\n\n # update status of SUBMITTED/RUNNING tasks before launching\n # new ones, otherwise we would be checking the status of\n # some tasks twice...\n transitioned = []\n for index, task in enumerate(self._in_flight):\n try:\n old_state = task.execution.state\n self._core.update_job_state(task)\n if self._store and task.changed:\n self._store.save(task)\n state = task.execution.state\n if state != old_state:\n self.__update_task_counts(task, old_state, -1)\n self.__update_task_counts(task, state, +1)\n if state == Run.State.SUBMITTED:\n # only real applications need to be counted\n # against the limit; policy tasks are exempt\n # (this applies to all similar clause below)\n if isinstance(task, Application):\n currently_submitted += 1\n currently_in_flight += 1\n # elif state == Run.State.RUNNING or state ==\n # Run.State.UNKNOWN:\n elif state == Run.State.RUNNING:\n if isinstance(task, Application):\n currently_in_flight += 1\n if self.can_retrieve and self.retrieve_running:\n # try to get output\n try:\n self._core.fetch_output(\n task,\n overwrite=self.retrieve_overwrites,\n changed_only=self.retrieve_changed_only)\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'RUNNING',\n 'fetch_output',\n ):\n gc3libs.log.error(\n \"Ignored error in fetching output of\"\n \" RUNNING task '%s': %s: %s\",\n task, err.__class__.__name__, err)\n gc3libs.log.debug(\n \"(Original traceback follows.)\",\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n elif state == Run.State.STOPPED:\n # task changed state, mark as to remove\n transitioned.append(index)\n self._stopped.append(task)\n elif state == Run.State.TERMINATING:\n # task changed state, mark as to remove\n transitioned.append(index)\n self._terminating.append(task)\n elif state == Run.State.TERMINATED:\n # task changed state, mark as to remove\n transitioned.append(index)\n self._terminated.append(task)\n else:\n # if we got to this point, state has an invalid value\n gc3libs.log.error(\n \"Invalid state '%r' returned by task %s.\",\n state, task)\n if not gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n 'InternalError',\n # - additional keywords\n 'state',\n 'update',\n ):\n # propagate exception to caller\n raise gc3libs.exceptions.InternalError(\n \"Invalid state '{state!r}' returned by task {task}\"\n .format(state=state, task=task))\n except gc3libs.exceptions.ConfigurationError:\n # Unrecoverable; no sense in continuing -- pass\n # immediately on to client code and let it handle\n # this...\n raise\n # pylint: disable=broad-except\n except Exception as err:\n if 
gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'state',\n 'update',\n ):\n gc3libs.log.error(\n \"Ignoring error in updating state of task '%s':\"\n \" %s: %s\",\n task,\n err.__class__.__name__,\n err,\n exc_info=True)\n else:\n # propagate exception to caller\n raise\n # remove tasks that transitioned to other states\n for index in reversed(transitioned):\n del self._in_flight[index]\n\n # execute kills and update count of submitted/in-flight tasks\n transitioned = []\n for index, task in enumerate(self._to_kill):\n try:\n old_state = task.execution.state\n self._core.kill(task)\n if self._store:\n self._store.save(task)\n state = task.execution.state\n if state != old_state:\n self.__update_task_counts(task, old_state, -1)\n self.__update_task_counts(task, state, +1)\n if old_state == Run.State.SUBMITTED:\n if isinstance(task, Application):\n currently_submitted -= 1\n currently_in_flight -= 1\n elif old_state == Run.State.RUNNING:\n if isinstance(task, Application):\n currently_in_flight -= 1\n self._terminated.append(task)\n transitioned.append(index)\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'kill'\n ):\n gc3libs.log.error(\n \"Ignored error in killing task '%s': %s: %s\",\n task, err.__class__.__name__, err)\n # print again with traceback info at a higher log level\n gc3libs.log.debug(\n \"(Original traceback follows.)\",\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n # remove tasks that transitioned to other states\n for index in reversed(transitioned):\n del self._to_kill[index]\n\n # update state of STOPPED tasks; again need to make before new\n # submissions, because it can alter the count of in-flight\n # tasks.\n transitioned = []\n for index, task in enumerate(self._stopped):\n try:\n old_state = task.execution.state\n self._core.update_job_state(task)\n if self._store and task.changed:\n self._store.save(task)\n state = task.execution.state\n if state != old_state:\n self.__update_task_counts(task, old_state, -1)\n self.__update_task_counts(task, state, +1)\n if state in [Run.State.SUBMITTED, Run.State.RUNNING]:\n if isinstance(task, Application):\n currently_in_flight += 1\n if task.execution.state == Run.State.SUBMITTED:\n currently_submitted += 1\n self._in_flight.append(task)\n # task changed state, mark as to remove\n transitioned.append(index)\n elif state == Run.State.TERMINATING:\n self._terminating.append(task)\n # task changed state, mark as to remove\n transitioned.append(index)\n elif state == Run.State.TERMINATED:\n self._terminated.append(task)\n # task changed state, mark as to remove\n transitioned.append(index)\n # pylint: disable=broad-except\n except Exception as err:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err.__class__.__name__,\n # - additional keywords\n 'state',\n 'update',\n 'STOPPED',\n ):\n gc3libs.log.error(\n \"Ignoring error in updating state of\"\n \" STOPPED task '%s': %s: %s\",\n task, err.__class__.__name__, err,\n exc_info=True)\n else:\n # propagate exception to caller\n raise\n # remove tasks that transitioned to other states\n for index in 
reversed(transitioned):\n del self._stopped[index]\n\n # now try to submit NEW tasks\n # gc3libs.log.debug(\"Engine.progress: submitting new tasks [%s]\"\n # % str.join(', ', [str(task) for task in self._new]))\n transitioned = []\n if (self.can_submit and\n currently_submitted < limit_submitted and\n currently_in_flight < limit_in_flight):\n # update state of all enabled resources, to give a chance to\n # all to get a new job; for a complete discussion, see:\n # https://github.com/uzh/gc3pie/issues/485\n self._core.update_resources()\n # now try to submit\n with self.scheduler(self._new,\n self._core.resources.values()) as _sched:\n # wrap the original generator object so that `send`\n # and `throw` do not yield a value -- we only get new\n # stuff from the call to the `next` method in the `for\n # ... in schedule` line.\n sched = gc3libs.utils.YieldAtNext(_sched)\n for task_index, resource_name in sched:\n task = self._new[task_index]\n resource = self._core.resources[resource_name]\n # try to submit; go to SUBMITTED if successful,\n # FAILED if not\n try:\n self._core.submit(task, targets=[resource])\n if self._store:\n self._store.save(task)\n # XXX: can remove the following assert when\n # we're sure Issue 419 is fixed\n assert task_index not in transitioned\n self._in_flight.append(task)\n transitioned.append(task_index)\n if isinstance(task, Application):\n currently_submitted += 1\n currently_in_flight += 1\n # if we get to this point, we know state is not NEW anymore\n state = task.execution.state\n self.__update_task_counts(task, Run.State.NEW, -1)\n self.__update_task_counts(task, state, +1)\n\n sched.send(task.execution.state)\n # pylint: disable=broad-except\n except Exception as err1:\n # record the error in the task's history\n task.execution.history(\n \"Submission to resource '%s' failed: %s: %s\" %\n (resource.name,\n err1.__class__.__name__,\n str(err1)))\n gc3libs.log.error(\n \"Got error in submitting task '%s', informing\"\n \" scheduler: %s: %s\",\n task,\n err1.__class__.__name__,\n str(err1))\n # inform scheduler and let it handle it\n try:\n sched.throw(* sys.exc_info())\n # pylint: disable=broad-except\n except Exception as err2:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n err2.__class__.__name__,\n # - additional keywords\n 'scheduler',\n 'submit',\n ):\n gc3libs.log.debug(\n \"Ignored error in submitting task '%s':\"\n \" %s: %s\",\n task,\n err2.__class__.__name__,\n err2,\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n # enforce Engine limits\n if (currently_submitted >= limit_submitted\n or currently_in_flight >= limit_in_flight):\n break\n # remove tasks that transitioned to SUBMITTED state\n for index in reversed(transitioned):\n del self._new[index]\n\n # finally, retrieve output of finished tasks\n if self.can_retrieve:\n transitioned = []\n for index, task in enumerate(self._terminating):\n # try to get output\n try:\n self._core.fetch_output(\n task,\n overwrite=self.retrieve_overwrites,\n changed_only=self.retrieve_changed_only)\n except gc3libs.exceptions.UnrecoverableDataStagingError as ex:\n gc3libs.log.error(\n \"Error in fetching output of task '%s',\"\n \" will mark it as TERMINATED\"\n \" (with error exit code %d): %s: %s\",\n task, posix.EX_IOERR,\n ex.__class__.__name__, str(ex), exc_info=True)\n task.execution.returncode = (\n Run.Signals.DataStagingFailure,\n posix.EX_IOERR)\n task.execution.state = 
Run.State.TERMINATED\n task.changed = True\n # pylint: disable=broad-except\n except Exception as ex:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Engine',\n # - method\n 'progress',\n # - actual error class\n ex.__class__.__name__,\n # - additional keywords\n 'fetch_output',\n ):\n gc3libs.log.debug(\n \"Ignored error in fetching output of task '%s':\"\n \" %s: %s\",\n task,\n ex.__class__.__name__,\n ex)\n gc3libs.log.debug(\n \"(Original traceback follows.)\",\n exc_info=True)\n else:\n # propagate exceptions for debugging purposes\n raise\n\n for index, task in enumerate(self._terminating):\n if task.execution.state == Run.State.TERMINATED:\n transitioned.append(index)\n try:\n self._core.free(task)\n # update counts\n self.__update_task_counts(task, Run.State.TERMINATING, -1)\n self.__update_task_counts(task, Run.State.TERMINATED, +1)\n # pylint: disable=broad-except\n except Exception as err:\n gc3libs.log.error(\n \"Got error freeing up resources used by task '%s': %s: %s.\"\n \" (For cloud-based resources, it's possible that the VM\"\n \" has been destroyed already.)\",\n task, err.__class__.__name__, err)\n if self.forget_terminated:\n try:\n self.remove(task)\n except Exception as err:\n gc3libs.log.debug(\n \"Could not remove task '%s': %s: %s\",\n task, err.__class__.__name__, err)\n else:\n self._terminated.append(task)\n\n if self._store and task.changed:\n self._store.save(task)\n # remove tasks for which final output has been retrieved\n for index in reversed(transitioned):\n del self._terminating[index]",
"async def running(self) -> list[dict[str, Any]]:\n data = await self.controller.request(\"get\", \"watering/program\")\n return cast(list[dict[str, Any]], data[\"programs\"])",
"def jobs(self):\n return self.get_jobs()",
"def list(self):\n self.background_scheduler.print_jobs()",
"def getProgress(self):\n return self._progress",
"def get(self, request, format = None):\n goalProgress = GoalProgress.objects.all()\n serializer = GoalProgressSerializer(goalProgress, many=True)\n return Response(serializer.data)",
"def get_progress(self):\r\n return None",
"def get_pending_tasks(cls) -> list[ImageVirtualPathEventTask]:\n return cls._pending_tasks",
"def create_application_progress(application_state, job_applied_id , created_at):\n app_progress = ApplicationProgress(application_state = application_state, job_applied_id = job_applied_id, created_at = created_at)\n db.session.add(app_progress)\n db.session.commit()\n\n return app_progress"
] | [
"0.6268866",
"0.6009856",
"0.6005044",
"0.5949115",
"0.591462",
"0.58448476",
"0.57968134",
"0.5741962",
"0.57029724",
"0.57012784",
"0.56873035",
"0.56828934",
"0.5674864",
"0.56605846",
"0.55666447",
"0.55653167",
"0.55082387",
"0.55082387",
"0.5501211",
"0.54991305",
"0.5455066",
"0.5419794",
"0.54053956",
"0.53557986",
"0.5334594",
"0.5322795",
"0.5297641",
"0.5278548",
"0.5277703",
"0.5258375"
] | 0.801629 | 0 |
Return an Application Progress by primary key. | def get_application_progress_by_id(app_progress_id):
return ApplicationProgress.query.get(app_progress_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_application_progress():\n\n return ApplicationProgress.query.all()",
"def get_result_by_primary_key(self, pk):\n session = self.session_factory()\n result = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n return result",
"def find(self, primary_key):\n sql = '{} WHERE {} = %s'.format(self._select(), self.PRIMARY_KEY)\n cursor = yield self._pool.execute(sql, [primary_key])\n result = cursor.fetchmany(1)\n return self.convert_result_to_object(result)",
"def get(cls, pk):\n return DBSession().query(cls).get(pk)",
"async def get_one(self, pk):\n\n return await self._expand(await self.db.get_one(pk=pk))",
"def getByID(self, pid):\r\n i = self.pids.index(pid)\r\n return self.getByInd(i)",
"def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)",
"def primary(self):\n primary_k = self.__class__.get_primary()\n return getattr(self, primary_k)",
"def load_by_pk(cls, _pk):\n try:\n return cls.q.get(_pk)\n except SQLAlchemyError:\n cls.s.rollback()\n raise",
"def get_primary_for(userid):",
"def get_primary_for(userid):",
"async def get_pk_value(self, selection):\n\n record = await self.get_one(selection)\n return record[self.primary_key]",
"def get_by_pk(cls, async=True, **kwargs):\n\n result = cls.get(\n condition=QueryBuilder.build_pk_clause(cls.__table__, **kwargs),\n async=async)\n\n # return async result\n if async:\n return result\n\n # check if sync result is None\n if not result:\n return None\n\n # check query did not return multiple values (in case of incorrect PK clause)\n expected_result_size = 1\n if len(result) != expected_result_size:\n raise IncorrectResultSizeException(len(result), expected_result_size)\n\n # return the first and only element\n return result[0]",
"def get_by_id(self, pkId: int):\n if not self.model:\n raise NameError('database model has not been set.')\n if not pkId:\n raise ValueError('invalid primary key value.')\n\n with self.session() as session:\n query = self.get_query(session)\n rec = query.get(pkId)\n return rec",
"async def find_by_id(self, _id: int) -> Record:\n conn: Connection\n async with self.db_pool.acquire() as conn:\n return await conn.fetchrow(\n f\"SELECT * FROM {self.table_name} WHERE {self.primary_key}=$1\",\n _id,\n )",
"def get(cls, task_id):\n return ProgressBar(task_id, _getter=True)",
"def find_by_application_id(cls, application_id: int):\n where_condition = \"\"\n where_condition += f\"\"\" app.id = {str(application_id)} \"\"\"\n\n result_proxy = db.session.execute(\n f\"\"\"select\n mapper.id,mapper.process_key,mapper.process_name\n from application app, form_process_mapper mapper\n where app.form_process_mapper_id=mapper.id and\n {where_condition}\n \"\"\"\n )\n try:\n result = []\n for row in result_proxy:\n info = dict(row)\n result.append(info)\n\n return result[0]\n except IndexError as err:\n return (\n \"List index out of range\",\n HTTPStatus.BAD_REQUEST,\n )\n except BusinessException as err:\n return err.error, err.status_code",
"def get_parcel(self, id):\n for p in self.db:\n if p['id'] == id:\n return p, 200\n else:\n return {\"Error\": \"No delivery exists with that id.\"}, 404",
"def get_progress_indicator(self):\n return self.__aceQLHttpApi.get_progress_indicator()",
"def get_object(self, pk):\n try:\n # x = category__job_title__program_id\n return JobCatalog.objects.get(Q(id=pk) | Q(uid=pk))\n except JobCatalog.DoesNotExist:\n raise Http404",
"def get_by_pk(cls, request, pk):\n session = get_session(request)\n\n return session.query(cls).filter(cls.pk == pk).first()",
"def select_one(cls, pk):\n with sqlite3.connect(cls.dbpath) as conn:\n conn.row_factory = sqlite3.Row\n curs = conn.cursor()\n sql = f\"\"\"SELECT * FROM {cls.tablename} WHERE pk =?;\"\"\"\n curs.execute(sql, (pk,)) #don't forget to put a comma after single value inputs\n row = curs.fetchone()\n return cls(**row)",
"def retrieve(self, request, pk=None): #Equals to -> GET/{primarykey}\n return Response({'http_method': 'GET'})",
"def create_application_progress(application_state, job_applied_id , created_at):\n app_progress = ApplicationProgress(application_state = application_state, job_applied_id = job_applied_id, created_at = created_at)\n db.session.add(app_progress)\n db.session.commit()\n\n return app_progress",
"def min_pk(self):\n start = self.execute(self.commands.min_pk(self.name, self.primary_key_column))\n return start[0][0]",
"def get(self, pk):\n return self.model.query.options(self.from_cache(pk=pk)).get(pk)",
"def find_one_byprimaryid(cls, primaryid, defaultval = None):\n return cls.dbm().modelclass_find_one_byprimaryid(cls, primaryid, defaultval)",
"def get_application_state_by_applied(job_applied_id):\n\n return ApplicationProgress.query.filter(JobCompletedApplication.job_applied_id == job_applied_id).join(JobCompletedApplication).order_by(ApplicationProgress.app_progress_id.desc()).first()",
"def get_primary_id(self):",
"def get(self, ident):\n\n query = self.bq.steps[0](self.session)\n return query._get_impl(ident, self._load_on_pk_identity)"
] | [
"0.63571316",
"0.584929",
"0.5738413",
"0.56607336",
"0.5591691",
"0.5382415",
"0.52425617",
"0.5224627",
"0.519701",
"0.5172489",
"0.5172489",
"0.5164485",
"0.5137614",
"0.51188457",
"0.5116484",
"0.51071393",
"0.51062864",
"0.5100811",
"0.50962",
"0.50886536",
"0.508539",
"0.5065782",
"0.50653076",
"0.5028714",
"0.50037485",
"0.49655053",
"0.49566993",
"0.49319485",
"0.491136",
"0.4871343"
] | 0.7809704 | 0 |
Get the last job_id record | def get_last_job_id():
return JobDetail.query.with_entities(JobDetail.job_id).order_by(JobDetail.job_id.desc()).first()[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_last_job_applied_id():\n\n return JobCompletedApplication.query.with_entities(JobCompletedApplication.job_applied_id).order_by(JobCompletedApplication.job_applied_id.desc()).first()[0]",
"def jobid(self):\n return self.get_db('jobid')",
"def last_job(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_job')",
"def get_id(self):\n\n self.redis.setnx('job_id', '-1')\n return self.redis.incr('job_id')",
"def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")",
"def job_id(self):\n return self._job_id",
"def latest_job(self):\n return self.jobmanagers[self.current_network].latest_job",
"def job_id(self) -> JobId:\r\n return self._job_id",
"def job(self):\n return self.batch[self.job_id]",
"def getJobID(self):\n return self.__nupicJobID",
"def latest_id(self):\n return self.checkpoints[-1]",
"def _get_job_id(self) -> str:\n return self.split_name[2][3:]",
"def get_latest_job_tick(self, job_origin_id):",
"def job_id(self):\n return self._properties.get(\"jobReference\", {}).get(\"jobId\")",
"def get_job_id(self):\n return {'job_id': self._job_id}",
"def id(self):\n return self.job_proto.id",
"def get_current_id(self) -> int:\n try:\n return self.cursor.execute(f\"SELECT MAX(id) FROM {table_locations};\").fetchone()\n except Exception as e:\n msg = f'We faced some problems with the getting last id value. Mistake: {e}'\n self.proceed_error(msg)\n return -1",
"def get_jobs_id(self, ti) -> None:\n return self.get_hook().get_jobs_id(ti)",
"def pop_job_id(self):\n ret = yield self.connection.lpop(self.key)\n defer.returnValue(ret)",
"def getLastWorker(self):\n return self.entries[-1]",
"def __get_last_id(cls):\n db = database.db_connection()\n cursor = db.cursor()\n sql_query = \"SELECT max(id_user) FROM user\"\n cursor.execute(sql_query)\n row = cursor.fetchone()\n cursor.close()\n return int(row[0])",
"def getLastId(self,table):\n\tif self.dbType==\"sqlite\":\n\t query = \"SELECT LAST_INSERT_ROWID() FROM %s LIMIT 1\"%table\n\telse:\n\t query = \"SELECT LAST_INSERT_ID() FROM %s\"%table\n\tlocaltime= \"%s \"%time.strftime(\"%H:%M:%S\",time.localtime())\n\tpid = \"%s \"%os.getpid()\n self.log.write(pid+localtime+query+'\\n')\n\t# since SQLite locks a whole table we use separate cursor to get\n\t# information while transaction still in progress\n\tcur = self.db.cursor()\n\tcur.execute(query)\n\ttup = cur.fetchone()\n\tid = tup[0]\n\tcur.close()\n# tup = self.fetchOne(query)\n\tid = tup[0]\n return id",
"def getLastObjectId(self):\n return self.objId",
"def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id",
"def get_last_task(self):\n return self.get_task_by_index(-1)",
"def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']",
"def latest_job():\n try:\n return models.SyncJob.objects.latest('start')\n except models.SyncJob.DoesNotExist:\n return False",
"def get_job_id(self, filename):\n return Jobs.get_job_id(filename)",
"def _get_job_id(self):\n return uuid.uuid4().hex",
"def most_recent_id(q):\n since_id = None\n last_archive_file = last_archive(q)\n if last_archive_file:\n line = open(last_archive_file).readline()\n if line:\n since_id = json.loads(line)[\"id_str\"]\n return since_id"
] | [
"0.768667",
"0.75634575",
"0.75450337",
"0.7345317",
"0.7155515",
"0.71417",
"0.7064087",
"0.69970345",
"0.69467616",
"0.693553",
"0.6832919",
"0.68281484",
"0.6805035",
"0.67877",
"0.67750955",
"0.67263293",
"0.6711748",
"0.6658601",
"0.6635541",
"0.661187",
"0.65753484",
"0.6573753",
"0.65671736",
"0.6564948",
"0.65472835",
"0.6518686",
"0.6509422",
"0.6470163",
"0.6464802",
"0.6462149"
] | 0.8820396 | 0 |
Get the last job applied id record | def get_last_job_applied_id():
return JobCompletedApplication.query.with_entities(JobCompletedApplication.job_applied_id).order_by(JobCompletedApplication.job_applied_id.desc()).first()[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_last_job_id():\n\n return JobDetail.query.with_entities(JobDetail.job_id).order_by(JobDetail.job_id.desc()).first()[0]",
"def last_job(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_job')",
"def latest_id(self):\n return self.checkpoints[-1]",
"def get_job_applied_by_job_id(job_id):\n\n return JobCompletedApplication.query.filter(JobCompletedApplication.job_id == job_id).first().job_applied_id",
"def jobid(self):\n return self.get_db('jobid')",
"def get_id(self):\n\n self.redis.setnx('job_id', '-1')\n return self.redis.incr('job_id')",
"def latest_job(self):\n return self.jobmanagers[self.current_network].latest_job",
"def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id",
"def get_latest_job_tick(self, job_origin_id):",
"def get_current_id(self) -> int:\n try:\n return self.cursor.execute(f\"SELECT MAX(id) FROM {table_locations};\").fetchone()\n except Exception as e:\n msg = f'We faced some problems with the getting last id value. Mistake: {e}'\n self.proceed_error(msg)\n return -1",
"def getLastWorker(self):\n return self.entries[-1]",
"def getJobID(self):\n return self.__nupicJobID",
"def job(self):\n return self.batch[self.job_id]",
"def getLastObjectId(self):\n return self.objId",
"def job_id(self):\n return self._job_id",
"def get_last_worked_on_step_id(self):\n logger.debug(\"Searching for ID of the step last worked on.\")\n last_id = None\n for step in self.steps:\n if any((task for task in step.tasks if task.status == \"DONE\")) and (not last_id or step.id > last_id):\n last_id = step.id\n if not last_id:\n raise ValueError(\"No ID is found for last worked on step for ticket {}\".format(self.id))\n return last_id",
"def get_job_applied_by_id(job_applied_id):\n\n return JobCompletedApplication.query.get(job_applied_id)",
"def get_last_task(self):\n return self.get_task_by_index(-1)",
"def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")",
"def __get_last_id(cls):\n db = database.db_connection()\n cursor = db.cursor()\n sql_query = \"SELECT max(id_user) FROM user\"\n cursor.execute(sql_query)\n row = cursor.fetchone()\n cursor.close()\n return int(row[0])",
"def get_last_activity(self):\n return Activity.objects.filter(campaign=self.campaign, status=\"P\", contact=self.contact).latest(\"id\")",
"def get_jobs_id(self, ti) -> None:\n return self.get_hook().get_jobs_id(ti)",
"def latest_job():\n try:\n return models.SyncJob.objects.latest('start')\n except models.SyncJob.DoesNotExist:\n return False",
"def latest_report_id(self) -> str:\n return pulumi.get(self, \"latest_report_id\")",
"def get_dimCustomer_last_id(db_engine):\n\n query = \"SELECT max(customer_id) AS last_id FROM dimCustomer\"\n tdf = pd.read_sql(query, db_engine)\n return tdf.iloc[0]['last_id']",
"def _get_job_id(self) -> str:\n return self.split_name[2][3:]",
"def GetCommandId(self):\r\n \r\n return self._last_id",
"def get_last_tab_id():\n return list(get_tabs())[-1]",
"def job_id(self) -> JobId:\r\n return self._job_id",
"def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']"
] | [
"0.7888452",
"0.7109309",
"0.6963653",
"0.68734276",
"0.6839119",
"0.6704593",
"0.6665769",
"0.6628784",
"0.65945107",
"0.65694714",
"0.6464214",
"0.64519495",
"0.644685",
"0.64032155",
"0.63841957",
"0.6373546",
"0.6372956",
"0.6369404",
"0.63560134",
"0.63174295",
"0.6311606",
"0.62412566",
"0.6223984",
"0.6209615",
"0.61852765",
"0.6181939",
"0.6178066",
"0.6175718",
"0.6158713",
"0.6143939"
] | 0.8291623 | 0 |
Calculates the cost given the target. This method must be called after `forward` has been called. | def cost(self, cost_object, target):
return cost_object.f(self.a[-1], target).mean(axis=0).sum() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cost(self) -> float:",
"def __compute_cost(self, x, y):\n\n predictions = self.__compute_prediction(x)\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\n\n return cost",
"def calculate_total_cost(state):\r\n return state.cost()",
"def cost(self):\n\t\treturn self.g + self.h",
"def calc_cost(self):\n \n correct_pred = tf.equal(self.predictions, tf.argmax(self.y,1))\n batchaccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return self.cost, batchaccuracy, self.predictions",
"def calculate_total_cost(state):\n pass",
"def total_cost(self, X, Y, thetas = None):\n \n if thetas == None:\n thetas = self.thetas\n \n J = 0.0\n m = X.shape[0]\n for x, true_indx in zip(X, Y):\n y = np.zeros(self.noutputs)\n y[true_indx] = 1.\n h_theta = self._forward_prop(x, thetas)[-1]\n J += self.cost(h_theta, y)\n \n return np.sum(J)/m",
"def get_cost(self, action: Action) -> N:\n pass",
"def cost(self):\n return self._cost",
"def cost(self):\n return self._cost",
"def get_cost(self) -> float:\n return math.e / self.fitness",
"def calc_cost(self):\n cost = 0\n for i,[source, sinks] in enumerate(self.nets):\n self.costs[i] = self.calc_half_perimeter(source, sinks)\n cost += self.costs[i]\n self.cost = cost\n return True",
"def cost(self):\n\n return self._cost",
"def compute_cost(self,X, y):\n \n num_samples = len(X)\n # Do Forward propagation to calculate our predictions\n z1 = X.dot(self.W1) + self.b1\n a1 = np.tanh(z1)\n z2 = a1.dot(self.W2) + self.b2\n exp_z = np.exp(z2)\n a2 = exp_z / np.sum(exp_z, axis=1, keepdims=True)\n softmax_scores = a2\n # Calculate the cross-entropy loss\n cross_ent_err = -np.log(softmax_scores[range(num_samples), y])\n data_loss = np.sum(cross_ent_err)\n return 1./num_samples * data_loss",
"def cost_total(X, cost_weights=(1.0, 1.0, 1.0)):\n return cost_weights[0] * cost_distance(X) + \\\n cost_weights[1] * cost_same_team_by_distance(X) + \\\n cost_weights[2] * cost_previous_neighbour_by_distance(X, normalize=True)",
"def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost",
"def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost",
"def get_cost(self, Y, T):\n return - np.multiply(T, np.log(Y)).sum() / Y.shape[0]",
"def compute_cost(AL, Y):\n pass",
"def get_cost_updates(self):\n\n y = self.get_hidden_values()\n z = self.get_reconstructed_input(y)\n\n L = T.sum((self.x-z)**2, axis=1)\n\n cost = T.mean(L)\n\n return cost",
"def calculate_cost(self):\n costs = {}\n if np.abs(self.agent.get_position()[1]) > self.y_lim:\n costs['cost_outside_bounds'] = 1.\n if self.agent.velocity_violation:\n costs['cost_velocity_violation'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n return costs",
"def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0",
"def calc_cost(self, dx, dy):\n self.distance+=np.sqrt(dx**2+dy**2)",
"def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')",
"def total_cost(self):\n return np.einsum('i->', self.c[self.s])",
"def _cost(self, action):\n raise NotImplementedError",
"def getCost(self):\n return self._cost",
"def _local_cost(self, p, q):\n diagnol = q[0] == p[0] or q[1] == p[1]\n \n # c0, c1 and c2 are costs from Canny operator, gradient magnitude and gradient direction respectively\n if diagnol:\n c0 = self.cost_edges[p[0]][p[1]]-SQRT_0_5*(self.cost_edges[p[0]][p[1]]-self.cost_edges[q[0]][q[1]])\n c1 = self.cost_grad_mag[p[0]][p[1]]-SQRT_0_5*(self.cost_grad_mag[p[0]][p[1]]-self.cost_grad_mag[q[0]][q[1]])\n c2 = SQRT_0_5 * self._get_grad_direction_cost(p, q)\n else:\n c0 = self.cost_edges[q[0]][q[1]]\n c1 = self.cost_grad_mag[q[0]][q[1]]\n c2 = self._get_grad_direction_cost(p, q)\n \n if np.isnan(c2):\n c2 = 0.0\n \n w0, w1, w2 = self.weight\n cost_pq = w0*c0 + w1*c1 + w2*c2\n \n return cost_pq * cost_pq",
"def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction",
"def get_cost(self):\n if self.distance == 0:\n for i in range(1, len(self.cities) + 1):\n point1 = self.cities[i - 1]\n point2 = self.cities[i % len(self.cities)]\n self.distance += self.distance_to(point1, point2)\n return self.distance"
] | [
"0.7035611",
"0.6710912",
"0.67076313",
"0.670511",
"0.66524404",
"0.66274863",
"0.6589824",
"0.6588693",
"0.6582186",
"0.6582186",
"0.6580311",
"0.6567873",
"0.6526451",
"0.6509514",
"0.6483877",
"0.6477303",
"0.64738995",
"0.64710313",
"0.6433443",
"0.6429381",
"0.6400443",
"0.6399265",
"0.6398854",
"0.6394998",
"0.6389678",
"0.6372984",
"0.6364957",
"0.6361852",
"0.6356001",
"0.6354087"
] | 0.7270355 | 0 |
Get Enrollment Dataframe (enrollment_.csv) | def get_enrollment_df(ftype):
assert ftype=='train' or ftype=='test'
enroll_df = pd.read_csv('data/%s/enrollment_%s.csv' % (ftype, ftype))
return enroll_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_education() -> pd.DataFrame:\n\n school_df = pd.read_csv(\"data/Expected years of schooling (years).csv\", header=2, usecols=[1, 32], names=[\"Country\", \"Education\"])\n\n index = school_df[school_df[\"Country\"]==\"Iran (Islamic Republic of)\"].index.values[0]\n school_df.loc[index, \"Country\"] = \"Iran\"\n index = school_df[school_df[\"Country\"] == \"United States\"].index.values[0]\n school_df.loc[index, \"Country\"] = \"US\"\n index = school_df[school_df[\"Country\"] == \"Russian Federation\"].index.values[0]\n school_df.loc[index, \"Country\"] = \"Russia\"\n\n school_df = school_df.dropna()\n\n return school_df",
"def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df",
"def read_dataset():\n\n df = pd.read_csv('fake_job_postings.csv', index_col='job_id')\n return df",
"def employment():\n return pd.read_csv(csv_path(\"CIA_Unemployment.csv\"), index_col=0, usecols=[1, 2])",
"def get_mist_eep_table():\n fp = Path(DATA_PATH, \"mist_eep_table.csv\")\n return pd.read_csv(fp, comment=\"#\")",
"def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()",
"def loan_data():\n return pd.read_csv(data_path / \"credit_data.csv\")",
"def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df",
"def get_study_results():\n study_results_path = \"data/Study_results.csv\"\n df = pd.read_csv(study_results_path)\n return df",
"def open_csv(filename=\"NOTEEVENTS.csv\", index=['SUBJECT_ID', 'HADM_ID']):\n df = pd.read_csv(DATA_DIR / filename,\n index_col=index,\n # nrows=1000,\n infer_datetime_format=True)\n logger.info(f\"opening {filename}\")\n logger.info(f\"Dataframe columns: {df.columns}\")\n # logger.info(f\"Clinical note types: {df['CATEGORY'].unique()}\")\n return df",
"def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)",
"def extract_data():\n logging.info(f'Reading data from {impftermine.agg_export_file_name()}...')\n df_wl = pd.read_csv(impftermine.agg_export_file_name())\n vacc_report_file = os.path.join(credentials.vmdl_path, 'vaccination_report_bs_age_group_long.csv')\n logging.info(f'Reading data from {vacc_report_file}...')\n df_impf = pd.read_csv(vacc_report_file)\n return df_wl, df_impf",
"def open_csv_as_df(account_name):\n try:\n base_dir = os.path.dirname(os.path.abspath(__file__))\n file_dir = 'data_collection/match_datasets'\n data_file = os.path.join(base_dir, file_dir, account_name + '.csv')\n data = pd.read_csv(data_file)\n return data\n except FileNotFoundError as e:\n print(e)\n print('Could not find', account_name + '.csv')\n return None",
"def get_data(filename):\r\n return pd.read_csv(filename)",
"def prepare_data(file_path: str):\n movie_industry_df = pd.read_csv(file_path, encoding='latin-1')\n return movie_industry_df",
"def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n columns = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator', 'year_quarter'\n ]\n dtypes = {\n 'loan_id': np.int64,\n 'orig_channel': CategoricalDtype(['B', 'C', 'R']),\n 'seller_name': str,\n 'orig_interest_rate': np.float64,\n 'orig_upb': np.int64,\n 'orig_loan_term': np.int64,\n 'orig_date': str,\n 'first_pay_date': str,\n 'orig_ltv': np.float64,\n 'orig_cltv': np.float64,\n 'num_borrowers': np.float64,\n 'dti': np.float64,\n 'borrower_credit_score': np.float64,\n 'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),\n 'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),\n 'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),\n 'num_units': np.int64,\n 'occupancy_status': CategoricalDtype(['I', 'P', 'S']),\n 'property_state': CategoricalDtype(\n ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',\n 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',\n 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',\n 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',\n 'VT', 'WA', 'WI', 'WV', 'WY']),\n 'zip': np.int64,\n 'mortgage_insurance_percent': np.float64,\n 'product_type': CategoricalDtype(['FRM']),\n 'coborrow_credit_score': np.float64,\n 'mortgage_insurance_type': np.float64,\n 'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),\n 'year_quarter': np.int64\n }\n\n a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)\n return a",
"def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data",
"def import_year_attn_data(filename):\n\tytd_attn_df = pd.read_csv(filename, usecols=['Student ID',\n\t\t'Current School', 'Attendance Pct'], index_col='Student ID')\n\t# keep only active studenst and drop inactive students\n\tactive = ytd_attn_df['Current School'] == \"HYDE PARK HS\"\n\tytd_attn_df = ytd_attn_df[active]\n\t# drop Current School column\n\tytd_attn_df = ytd_attn_df.drop(labels = \"Current School\", axis=1)\n\tytd_attn_df = ytd_attn_df.rename(index=int, columns={\"Attendance Pct\"\n\t\t: \"ytd_attn\"})\n\tytd_attn_df.index.names = ['ID']\t\t\t\t\n\t\n\treturn ytd_attn_df",
"def load_hr_data(self, subject_id:str) -> pd.DataFrame:\n hr_fp = os.path.join(self.hr_dir, subject_id+self.hr_file_suffix)\n df_hr = pd.read_csv(hr_fp,sep=\",\",header=None,names=[\"sec\",\"hr\"])\n df_hr = df_hr.sort_values(by=\"sec\")\n df_hr = df_hr.drop_duplicates(subset=\"sec\")\n df_hr = df_hr.reset_index(drop=True)\n return df_hr",
"def convert_to_csv(self, branch):\n names = [\"CSE_results.csv\", \"IT_results.csv\"]\n self.results = {\"ROLL_NO\": self.roll_nos, \"Name\": self.names, \"SGPA\": self.sgpa}\n print(self.results)\n df = DataFrame.from_dict(self.results)\n df.to_csv(names[branch], index=False)",
"def get_csv(request, cur_course_user, assessment_id):\n assessment = shortcuts.get_object_or_404(models.Assessment, pk=assessment_id)\n\n # Create the HttpResponse object with the appropriate CSV header.\n response = http.HttpResponse(content_type='text/csv')\n\n filename = \"%s-scores.csv\" % assessment.name\n # Replace spaces in the assessment name with dashes and convert to lower case\n filename = filename.replace(' ', '-').lower()\n\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n question_parts = assessment.get_prefetched_question_parts().order_by('-question_number')\n num_questions = assessment.get_num_questions()\n\n fieldnames=['Last Name', 'First Name', 'ID', 'Email', 'Total Score']\n if hasattr(assessment, 'homework'):\n fieldnames.append('Finalized?')\n fieldnames.append('Submission time')\n fieldnames.append('Late days')\n\n for i in range(num_questions):\n fieldnames.append('Question %d' % (i + 1))\n\n writer = csv.DictWriter(response, fieldnames=fieldnames)\n\n submissions = assessment.get_prefetched_submissions().order_by('course_user__user__last_name',\n 'course_user__user__first_name')\n\n writer.writeheader()\n\n for submission in submissions:\n for course_user in submission.group_members.all():\n user = course_user.user\n score = submission.points if submission.graded else 'ungraded'\n\n row = {\n 'Last Name': user.last_name,\n 'First Name': user.first_name,\n 'ID': user.student_id,\n 'Email': user.email,\n 'Total Score': score\n }\n\n if hasattr(assessment, 'homework'):\n cur_timezone = pytz.timezone(assessment.course.get_timezone_string())\n local_time = timezone.localtime(submission.time, timezone=cur_timezone)\n row['Submission time'] = local_time.strftime('%m/%d/%Y %I:%M %p')\n\n diff = submission.time - submission.assessment.homework.soft_deadline\n late_days = diff.total_seconds() / 24.0 / 60.0 / 60.0\n late_days = max(0, math.ceil(late_days))\n row['Late days'] = late_days\n\n row['Finalized?'] = 'Yes' if submission.is_finalized() else 'No'\n\n for i in range(num_questions):\n if submission.is_question_graded(i + 1):\n row['Question %d' % (i + 1)] = submission.get_question_points(i + 1)\n else:\n row['Question %d' % (i + 1)] = 'ungraded'\n writer.writerow(row)\n\n return response",
"def import_experiments_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)",
"def load_to_dataframe(self) -> DataFrame:\n return read_csv(self._csv_path, converters={\n # Check if embedding size is the empty string,\n # as it would be for Count models\n \"Embedding size\": lambda v: int(float(v)) if len(v) > 0 else nan\n })",
"def load_utlization(path):\n df = pd.read_csv(f\"{raw_data}\\\\{path}\", parse_dates=[\"AdmissionDate\"])\n\n df.rename(\n columns={\"MemberID\": \"member_id\", \"LOSDays\": \"los\", \"FacilityName\": \"facility\"},\n inplace=True,\n )\n\n df.columns = clean_table_columns(df.columns)\n\n facility_col = [col for col in df.columns if \"facility\" in col][0]\n\n df = cognify_facility_changes(df, facility_col)\n\n df = df[df.member_id != 1003]\n return df",
"def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n cols = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator'\n ]\n\n dtypes = {\n \"loan_id\": np.int64,\n \"monthly_reporting_period\": str,\n \"servicer\": str,\n \"interest_rate\": np.float64,\n \"current_actual_upb\": np.float64,\n \"loan_age\": np.float64,\n \"remaining_months_to_legal_maturity\": np.float64,\n \"adj_remaining_months_to_maturity\": np.float64,\n \"maturity_date\": str,\n \"msa\": np.float64,\n \"current_loan_delinquency_status\": np.int32,\n \"mod_flag\": CategoricalDtype(['N', 'Y']),\n \"zero_balance_code\": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),\n \"zero_balance_effective_date\": str,\n \"last_paid_installment_date\": str,\n \"foreclosed_after\": str,\n \"disposition_date\": str,\n \"foreclosure_costs\": np.float64,\n \"prop_preservation_and_repair_costs\": np.float64,\n \"asset_recovery_costs\": np.float64,\n \"misc_holding_expenses\": np.float64,\n \"holding_taxes\": np.float64,\n \"net_sale_proceeds\": np.float64,\n \"credit_enhancement_proceeds\": np.float64,\n \"repurchase_make_whole_proceeds\": np.float64,\n \"other_foreclosure_proceeds\": np.float64,\n \"non_interest_bearing_upb\": np.float64,\n \"principal_forgiveness_upb\": np.float64,\n \"repurchase_make_whole_proceeds_flag\": CategoricalDtype(['N', 'Y']),\n \"foreclosure_principal_write_off_amount\": np.float64,\n \"servicing_activity_indicator\": CategoricalDtype(['N', 'Y']),\n }\n print(acquisition_path)\n\n #return pd.read_csv(acquisition_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])\n return pd.read_csv('acq.csv', names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])",
"def reader(self):\n df = pd.read_csv(self.path)\n return df",
"def get_df(csv_file):\n\n if csv_file is None:\n csv_file = \"default_input.csv\"\n print(\"hi there csv: \", csv_file)\n df = pd.read_csv(csv_file) # df should have cols: name, slots, slot_type, cap\n row_nums = len(df[\"name\"])\n students = list(df[\"name\"])\n slots = helpers.get_slots(df)\n\n # add availability col (sum of prefs)\n availability_col = []\n for student in df[\"name\"]:\n student_id = df.loc[df[\"name\"] == student].index[0]\n stud_avail = 0\n for slot in slots:\n stud_avail += df.at[student_id, slot]\n availability_col.append(stud_avail)\n df[\"availability\"] = availability_col\n\n # add hours and happiness col (initialized to all 0's)\n hours_col = [0] * row_nums\n df[\"hours\"] = hours_col\n happiness_col = [0] * row_nums\n df[\"happiness\"] = happiness_col\n\n return df",
"def separate_file(self):\n df = pd.read_csv(\"nfl_drafts.csv\", names = ['Pick', 'Team', 'Player_name', 'POS', \n 'Age', 'Last_played', 'AP1', 'PB', 'ST', 'CarAV', 'DrAV', 'G_perS', 'PaCmp', 'PaAtt', \n 'PaYds', 'PaTD', 'Int', 'Att', 'Yds', 'RuTD', 'Rec', 'ReYds', 'ReTD', 'Solo', 'DeInt', \n 'Sk', 'Coll/Univ', 'Stat'], error_bad_lines = False)\n return df",
"def read_csv():",
"def get_training_data(db_conn):\n return pd.read_sql('''select * from churn_model.churn_data;''', db_conn)"
] | [
"0.62597656",
"0.6190843",
"0.60569084",
"0.60142833",
"0.59021455",
"0.5870558",
"0.58594114",
"0.58429956",
"0.58184737",
"0.5782017",
"0.57763255",
"0.5766798",
"0.57097375",
"0.5647704",
"0.55870324",
"0.5583758",
"0.5578448",
"0.55618584",
"0.55554485",
"0.5553673",
"0.55329543",
"0.5530439",
"0.55287856",
"0.5528711",
"0.55215687",
"0.55058026",
"0.5504042",
"0.54846627",
"0.54775375",
"0.5476868"
] | 0.79547787 | 0 |
Get Log Dataframe (log_.csv) | def get_log_df(ftype):
assert ftype=='train' or ftype=='test'
log_df = pd.read_csv('data/%s/log_%s.csv' % (ftype, ftype))
log_df['time'] = pd.to_datetime(log_df['time'])
log_df['action_date'] = log_df.time.apply(lambda x: x.date())
log_df['action_dow'] = log_df['time'].apply(lambda x: x.weekday())
return log_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_log(dir_):\n df = pandas.read_csv(os.path.join(dir_, 'log.csv'),\n error_bad_lines=False,\n warn_bad_lines=True)\n if not len(df):\n print(\"empty df at {}\".format(dir_))\n return\n df['model'] = dir_\n return df",
"def hoomdlog(filename):\r\n\r\n data = pd.read_csv(filename, sep = '\\s+')\r\n return data",
"def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print (\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df",
"def generate_log_df(log_columns, log_values):\n return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])",
"def log_to_dataframe(log_file, regex, headers):\n log_messages = []\n linecount = 0\n\n with open(log_file, 'r') as fin:\n logs = fin.readlines()\n logs = [j.strip() for j in logs]\n\n for line in logs:\n try:\n line = line.strip()\n match = regex.search(line.strip())\n message = [match.group(header) for header in headers]\n log_messages.append(message)\n linecount += 1\n except Exception as e:\n print(e)\n pass\n logdf = pd.DataFrame(log_messages, columns=headers)\n logdf.insert(0, 'LineId', None)\n\n logdf['LineId'] = [i + 1 for i in range(linecount)]\n return logdf",
"def create_dataframe_from_log(log_path, log_sheet, log_type):\n print(\"Created DataFrame using {} log with sheet {} located at {}\".format(log_type, log_sheet, log_path))\n df = pd.read_excel(log_path)\n return df",
"def load_log(log_dir, log_file, img_dir):\n f = os.path.join(log_dir, log_file)\n df = pd.read_csv(f, header=None, names=['center','left','right', 'angle', 'throttle', 'break', 'speed'])\n i = os.path.join(log_dir, img_dir)\n fix_logs_paths(i, df)\n return df",
"def get_data(path: str = \"\") -> List[pd.DataFrame]:\r\n X = pd.read_csv(\"log2.csv\")\r\n y = X[[\"Action\"]]\r\n X = X.drop(\"Action\", axis=1)\r\n return [X, y]",
"def exportcsvsumdata(self, log):\r\n csvdata= None\r\n\r\n if (log):\r\n csvdata = ('%s\\t'%(log['CALLSIGN']))\r\n csvdata += ('%s\\t'%(log['OPERATORS']))\r\n csvdata += ('%s\\t'%(log['LOCATION']))\r\n csvdata += ('%d\\t'%(log['COUNT']))\r\n csvdata += ('%s\\t'%(log['NAMES']))\r\n if(log['LASTWORKED']): \r\n csvdata += ('%s/%s UTC'%(log['LASTWORKED'],\r\n log['LWTIME'])) \r\n\r\n return csvdata",
"def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df",
"def read_csv(filename, take_log):\r\n dataset = {}\r\n df = pd.read_csv(filename, header=None)\r\n dat = df[df.columns[1:]].values\r\n dataset['sample_labels'] = dat[0, :].astype(int)\r\n dataset['cell_labels'] = dat[1, :].astype(int)\r\n dataset['cluster_labels'] = dat[2, :].astype(int)\r\n gene_sym = df[df.columns[0]].tolist()[3:]\r\n gene_exp = dat[3:, :]\r\n\r\n\r\n if take_log:\r\n gene_exp = np.log2(gene_exp + 1)\r\n dataset['gene_exp'] = gene_exp\r\n dataset['gene_sym'] = gene_sym\r\n return dataset",
"def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results",
"def tflog2pandas(path: str) -> pd.DataFrame:\n DEFAULT_SIZE_GUIDANCE = {\n \"compressedHistograms\": 1,\n \"images\": 1,\n \"scalars\": 0, # 0 means load all\n \"histograms\": 1,\n }\n runlog_data = pd.DataFrame({\"metric\": [], \"value\": [], \"step\": []})\n try:\n event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)\n event_acc.Reload()\n tags = event_acc.Tags()[\"scalars\"]\n # tags = event_acc.Tags()[\"images\"]\n for tag in tags:\n event_list = event_acc.Scalars(tag)\n values = list(map(lambda x: x.value, event_list))\n step = list(map(lambda x: x.step, event_list))\n r = {\"metric\": [tag] * len(step), \"value\": values, \"step\": step}\n r = pd.DataFrame(r)\n runlog_data = pd.concat([runlog_data, r])\n # Dirty catch of DataLossError\n except Exception:\n print(\"Event file possibly corrupt: {}\".format(path))\n traceback.print_exc()\n return runlog_data",
"def open_csv(filename=\"NOTEEVENTS.csv\", index=['SUBJECT_ID', 'HADM_ID']):\n df = pd.read_csv(DATA_DIR / filename,\n index_col=index,\n # nrows=1000,\n infer_datetime_format=True)\n logger.info(f\"opening {filename}\")\n logger.info(f\"Dataframe columns: {df.columns}\")\n # logger.info(f\"Clinical note types: {df['CATEGORY'].unique()}\")\n return df",
"def generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname):\n if datestring == datetime.date.today().isoformat():\n logging.error(\"todays Logs are actually written and cannot used in datalogger\")\n return\n headers = [ts_keyname, ] + list(keys) + list(values)\n linebuffer = []\n linebuffer.append(\"\\t\".join(headers)) \n filename = os.path.join(logdir, \"haproxylog_%s.gz\" % datestring)\n logging.info(\"parsing file %s\", filename)\n try:\n parser = parser_generator(keys, values, gzip.open(filename, \"rb\"))\n for line in aggregator(keys, values, ts_keyname, parser):\n linebuffer.append(line)\n except IOError as exc:\n logging.exception(exc)\n return StringIO.StringIO(\"\\n\".join(linebuffer))",
"def read_test_rf_csv():\n if os.path.exists(\"test_rf.csv\"):\n #print (\"--testing CSV imported\\n\")\n results = pd.read_csv(\"test_rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results",
"def get_monitor_df(self):\n self.monitor_df = pd.read_csv(self.monitor_file, delimiter=\"\\t\")",
"def logs(self, train=True):\n def f(record):\n df = recorddf(record)\n epoch = list(df.epoch)[0]\n path = self.logsdir / (\n \"%s_%s.%s.csv\" % (self.task, self.train.id if train else \"val.%s\" % (self.train.id), epoch))\n path = str(path)\n df.to_csv(path, index=False)\n self.log_record(path)\n\n return f",
"def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df",
"def csv_to_df(self, path=None):\n # reads the csv file and puts it to the dataframe\n df = pd.read_csv(path)\n return df",
"def pandas_handler(store_handler, hit):\n nid = hit.nid\n sname = hit.source_name\n\n path = store_handler.get_path_of(nid) + sname\n df = __obtain_dataframe(path)\n return df",
"def read_history(self):\n if path.isfile(self.HISTORY_FILE_PATH):\n return pd.read_csv(self.HISTORY_FILE_PATH)\n\n df = pd.DataFrame({}, columns=self.HISTORY_COLS)\n df.to_csv(self.HISTORY_FILE_PATH, index=False)\n return df",
"def load_logfile(filename):\n with open(filename) as source:\n header = {}\n for item in itertools.takewhile(lambda x: not x.startswith('---'), source):\n if not item.strip(): # Don't care about whitespace-only lines\n continue\n try:\n key = item.split(':')[0].strip()\n value = item.split(':', maxsplit=1)[1].strip()\n header[key] = value\n except Exception:\n print('Error trying to parse header line \"{}\"'.format(item))\n raise\n dataframe = pandas.read_csv(source, sep='[ \\t]*,[ \\t]*', engine='python')\n unnamed = [col for col in dataframe.keys() if col.startswith('Unnamed: ')]\n if unnamed:\n dataframe.drop(unnamed, axis=1, inplace=True)\n return header, dataframe",
"def response_to_df_csv():\n results = api.call_api()\n df = t.get_dataframe(results)\n t.save_csv(df)\n return df",
"def create_data_frame(logs_feed, drop_elements=15):\n\n # Drop the first n elements from the feed, which are the header\n # Also remove the last element, which is empty\n logs_feed = logs_feed[drop_elements:]\n logs_feed.remove('')\n\n df = pd.DataFrame({'col': logs_feed})\n df = pd.DataFrame(df.col.str.split(',', -1).tolist(),\n columns=['domain', 'domain_ip', 'domain_registrar', 'domain_registrar_ip', 'malware', 'url_feed'])\n return df",
"def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")",
"def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df",
"def _get_liwc_df(self) -> pd.DataFrame:\n data = pd.read_csv(self.path)\n data.index = pd.to_numeric(data['Filename'].str.rstrip('.txt'))\n return data",
"def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:\n\n df = pd.read_csv(path)\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.set_index(\"measuredTime\", inplace=True)\n return df",
"def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)"
] | [
"0.7406182",
"0.7205137",
"0.7000645",
"0.6835169",
"0.68185526",
"0.67984",
"0.66380084",
"0.65209377",
"0.63542926",
"0.6274999",
"0.6213813",
"0.61862767",
"0.61492324",
"0.6118362",
"0.60789824",
"0.6058416",
"0.60430205",
"0.5994365",
"0.59824306",
"0.59578943",
"0.5953274",
"0.5949398",
"0.59386337",
"0.5934912",
"0.59116405",
"0.5893747",
"0.58910525",
"0.5872713",
"0.58679473",
"0.58580387"
] | 0.77047133 | 0 |
Get Training Labels Dataframe (truth_train.csv) | def get_labels_df():
labels_df = pd.read_csv('data/train/truth_train.csv', header=None)
return labels_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)",
"def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y",
"def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? (0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self",
"def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)",
"def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train",
"def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset",
"def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values",
"def train_build(df):\n print(\"Constructing training set...\")\n recent_labels = pr.labels.get_last_keypresses() #List of strings\n labeled_df = pr.labels.apply_labels_all(df, recent_labels)\n X, y = pr.build_model.make_training_set(labeled_df)\n\n return X, y",
"def get_labels_docs(self):\n df_train = pd.read_csv(self.train_file, names=['label', 'title', 'doc'])\n df_test = pd.read_csv(self.test_file, names=['label', 'title', 'doc'])\n train_labels = df_train['label'].values\n train_docs = df_train['doc'].values\n test_labels = df_test['label'].values\n test_docs = df_test['doc'].values\n return train_labels, train_docs, test_labels, test_docs",
"def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')",
"def get_train_test(df):\n train = df[[\"Sex\", \"Race/Ethnicity\", \"AgeGroup\"]\n ].to_numpy()\n test = df[col_names].to_numpy()\n return (train, test)",
"def truth():\n frame = pd.read_csv(PATH + 'truth.csv', decimal=',')\n return frame",
"def load_labels(label_file) :\n df = pd.read_csv(label_file, index_col=\"p_index\",\n dtype=str, na_values=['nan', 'NaN', '']).dropna()\n\n return df",
"def import_training_data(target_col = 'label'):\n dir = os.path.dirname(os.path.dirname(__file__)) # go up one level to get root of this experiment\n path = os.path.join(dir, 'data','train.csv')\n utils_logr.info('Loading data from {} as pandas df'.format(path))\n df = pd.read_csv(path)\n y = df[target_col]\n df = df.drop(target_col, axis=1)\n return df, y",
"def create_train_test(dataframe_all):\n label_encoder=LabelEncoder()\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)\n for train_index, test_index in split.split(dataframe_all['word_values'], dataframe_all['document_label']):\n strat_train_set = dataframe_all.loc[train_index]\n strat_test_set = dataframe_all.loc[test_index]\n\n strat_train_set = strat_train_set.dropna(subset=['word_values'])\n strat_test_set = strat_test_set.dropna(subset=['word_values'])\n pipe=su.pipe()\n x_train, y_train = pipe.fit_transform(strat_train_set), label_encoder.fit_transform(\n strat_train_set['document_label'])\n x_test, y_test = pipe.transform(strat_test_set), label_encoder.fit_transform(\n strat_test_set['document_label'])\n\n return x_train,x_test,y_train,y_test",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def load_labels(self, subject_id:str) -> pd.DataFrame:\n fp = os.path.join(self.lb_dir, subject_id+self.lb_file_suffix)\n df_lb = pd.read_csv(fp,sep=\" \",header=None,names=[\"sec\",\"sleep_stage\"])\n df_lb[\"sleep_stage\"] = \\\n df_lb[\"sleep_stage\"].apply(lambda ss: self.to_conventional_lables[ss])\n return df_lb",
"def load_data_and_labels(filename, dataset_name,is_train):\n label_count={}\n parameter_file = \"./parameters.json\"\n params = json.loads(open(parameter_file).read())\n if dataset_name == 'ag_news' or dataset_name == 'dbpedia' or dataset_name == 'sogou_news' or dataset_name == 'amazon_review_full' or dataset_name == 'amazon_review_polarity' :\n df = pd.read_csv(filename, names=['label', 'title', 'text'], dtype={'title': object,'text': object})\n selected = ['label', 'title','text','too_short','to_drop']\n\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[2]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df[selected[2]].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df[selected[2]].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n \n elif dataset_name == 'yelp_review_full' or dataset_name == 'yelp_review_polarity':\n df = pd.read_csv(filename, names=['label','text'], dtype={'text': object})\n selected = ['label','text','too_short','to_drop']\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[1]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['text'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['text'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n elif dataset_name == 'yahoo_answers':\n df = pd.read_csv(filename, names=['label', 'title', 'content','answer'], dtype={'title': object,'answer': object,'content': object})\n selected = ['label', 'title','content','answer','too_short','to_drop'] \n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected 
columns \n df['temp'] = df[['content','answer']].apply(lambda x: ' '.join(str(v) for v in x), axis=1)\n df['too_short']= df['temp'].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['temp'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['temp'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n return x_raw, y_raw, df, labels",
"def get_train_labels(self):\n raise NotImplementedError",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T",
"def training_features(training_data: pd.DataFrame):\n return pd.get_dummies(\n training_data.drop(columns=[\"outstanding_balance\", \"status\", \"account_no\"])\n )",
"def dataframe_train():\n df = pd.DataFrame(columns=[\"Id\", \"y_true\", \"y_pred\"])\n counter = 0\n for filename in os.listdir('data/train/'):\n Id = os.path.basename(filename).split('.')[0] # name of file\n res1 = extract_xml(fileId=Id)*1.0\n res2 = contours_model(fileDir='data/train/', fileId=Id)*1.0\n data = [Id, res1, res2]\n df.loc[len(df)] = data\n counter += 1\n '''if counter == 251:\n break'''\n confusion_matrix = metrics.confusion_matrix(df['y_true'], df['y_pred'])\n print(confusion_matrix)\n return df",
"def _predict_label(self, df_train, df_test, label=None):\n #train k-nearest neighbors classifier \n neigh = KNeighborsClassifier(n_neighbors=5)\n X, y = df_train[['longitude', 'latitude']], df_train[label]\n neigh.fit(X, y)\n #predict the label for wildfire incidents\n pred_label = neigh.predict(df_test[['longitude', 'latitude']])\n return pred_label",
"def load_label_data(config):\n label_data = pd.read_csv(config.LabelDataConfig.data_path)\n ids = list(label_data['Training cases final'])\n labels = config.build_labels(label_data)\n\n train_ids, val_ids, train_labels, val_labels = train_test_split(\n ids,\n labels,\n stratify=labels,\n train_size=config.ImageDataConfig.train_percent)\n\n train_label_data = {image_id.upper(): label\n for image_id, label in izip(train_ids, train_labels)}\n val_label_data = {image_id.upper(): label\n for image_id, label in izip(val_ids, val_labels)}\n\n return train_label_data, val_label_data",
"def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features",
"def create_res_labels_df(test_generator, test_history):\n \n df_test_results = pd.DataFrame()\n test_len = test_history.shape[0]\n df_test_results['y_true'] = test_generator.labels[:test_len]\n df_test_results['y_pred'] = tf.math.argmax(test_history, axis=1).numpy().ravel()\n df_test_results['image_path'] = test_generator.filepaths[:test_len]\n \n return df_test_results",
"def prepare_data(train_csv, test_csv):\n\n train = pd.read_csv(train_csv)\n test = pd.read_csv(test_csv)\n train = train[test.shape[0]:]\n valid = train[0:test.shape[0]]\n\n x_train = train.drop(columns=\"label\") / 255\n y_train = train.label\n x_valid = valid.drop(columns=\"label\") / 255\n y_valid = valid.label\n x_test = test.drop(columns=\"label\") / 255\n y_test = test.label\n\n y_train = tf.keras.utils.to_categorical(y_train)\n y_valid = tf.keras.utils.to_categorical(y_valid)\n y_test = tf.keras.utils.to_categorical(y_test)\n x_train = x_train.values\n x_valid = x_valid.values\n x_test = x_test.values\n\n return x_train, y_train, x_valid, y_valid, x_test, y_test",
"def create_train_test_df(target_path):\n df_train = pd.read_csv(os.path.join(target_path, 'xray', 'train.txt'), delimiter=' ',\n header = 0 )\n df_test = pd.read_csv(os.path.join(target_path, 'xray', 'test.txt'), delimiter=' ', header = 0)\n df_train.columns=['patient_id', 'filename', 'class', 'data_source']\n df_test.columns=['patient_id', 'filename', 'class', 'data_source']\n\n return df_train, df_test",
"def train_data():\n raw = datasets.load_iris()\n iris = pd.DataFrame(raw.data, columns=raw.feature_names)\n iris = iris.join(pd.DataFrame(raw.target))\n iris.columns = [\"SepalLength\", \"SepalWidth\", \"PetalLength\", \"PetalWidth\", \"Species\"]\n iris[\"Species\"] = iris[\"Species\"].astype(\"category\")\n iris.Species.cat.categories = raw.target_names\n return iris.iloc[:, 0:4], iris[\"Species\"]"
] | [
"0.6975254",
"0.67474806",
"0.66706246",
"0.655183",
"0.64902073",
"0.64812607",
"0.64470786",
"0.64456475",
"0.64414805",
"0.6425497",
"0.6413294",
"0.6360745",
"0.63333935",
"0.6317695",
"0.6304125",
"0.622412",
"0.62197214",
"0.6218117",
"0.621556",
"0.6214037",
"0.62124664",
"0.6208008",
"0.61999595",
"0.61843836",
"0.61745507",
"0.61714315",
"0.6154802",
"0.6150933",
"0.611389",
"0.61105484"
] | 0.875425 | 0 |
Get Object Dataframe (object.csv) | def get_obj_df():
obj_df = pd.read_csv('data/object.csv')
obj_df = obj_df.drop_duplicates()[['course_id', 'module_id', 'category', 'start']]
obj_df['start'] = pd.to_datetime(obj_df[obj_df['start'] != 'null']['start'])
return obj_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df",
"def get_obj_df(self) -> pd.DataFrame:\n df = pd.DataFrame(self.obj, columns=[\"x\", \"y\", \"m\", \"dx\", \"dy\"])\n df['iter'] = self.current_iteration\n return df",
"def cif_df(cif_object) -> DataFrame:\n if cif_object is None:\n return DataFrame()\n row_list = cif_object.row_list\n attr_list = cif_object.attribute_list\n return DataFrame(data=row_list, columns=attr_list)",
"def obj_df(df):\n mask = np.array(df.dtypes == 'object')\n df_obj = df.iloc[:, mask]\n return df_obj",
"def as_dataframe(self, force=False):\n _, content = self._data_export_helper('csv', force)\n return self._as_dataframe(content)",
"def test_from_object_df(self):\n df_test = make_simple_dataframe()\n df_read = BaseDataClass.from_object(df_test).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )",
"def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df",
"def get_pybossa_df(obj):\n progress = tqdm.tqdm(desc='Downloading', unit=obj)\n r = get_objects(obj)\n last_fetched = r.json()\n data = last_fetched\n progress.update(len(last_fetched))\n respect_rate_limits(r, progress)\n while _not_exhausted(last_fetched):\n r = get_objects(obj, len(data))\n last_fetched = r.json()\n data += last_fetched\n progress.update(len(last_fetched))\n respect_rate_limits(r, progress)\n progress.close()\n df = pandas.DataFrame(data)\n df.set_index('id', inplace=True, verify_integrity=True)\n return df",
"def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)",
"def load_object_tag(self, path_file):\n if '.csv' not in path_file:\n raise FileNotFoundError('Only CSV format is supported currently')\n\n t0 = time()\n df = pd.read_csv(path_file, sep=',', header=None)\n\n if df.shape[1] != 2:\n raise RuntimeError('Object tag data should only consist of object ID and its tags (separated by ;)')\n\n df.columns = ['object_id', 'object_tags']\n df['object_id'] = df['object_id'].map(lambda x: '{}_{}'.format(self.id_prefix, x))\n df['object_tags'] = df['object_tags'].map(lambda tags: [t.strip() for t in tags.split(';')])\n\n logging.info('Loading object tag data with {} rows from {} takes {} secs'.format(df.shape[0],\n path_file, time() - t0))\n return df",
"def test_read_object(self):\n\n # the expected dataframe\n result_expected = pd.read_csv(\n StringIO(self.test_csv_content), usecols=[\"col1\"])\n # mock upload csv to s3\n self.bucket.put_object(\n Body=self.test_csv_content, Key=self.test_csv_key)\n result = self.src_bucket_connector.read_object(\n self.test_csv_key, columns=[\"col1\"])\n self.assertTrue(result.equals(result_expected))",
"def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df",
"def from_pandas(self, obj, index=True):\n return Reader(_from_pandas(obj, index=index))",
"def load_user_object(self, path_file):\n if '.csv' not in path_file:\n raise FileNotFoundError('Only CSV format is supported currently')\n\n t0 = time()\n df = pd.read_csv(path_file, sep=',', header=None)\n\n if df.shape[1] != 2:\n raise RuntimeError('User object data should only consist of user ID and object ID')\n\n df.columns = ['user_id', 'object_id']\n df['user_id'] = df['user_id'].map(lambda x: '{}_{}'.format(self.id_prefix, x))\n df['object_id'] = df['object_id'].map(lambda x: '{}_{}'.format(self.id_prefix, x))\n\n logging.info('Loading user object data with {} rows from {} takes {} secs'.format(df.shape[0],\n path_file, time() - t0))\n return df",
"def test_from_object_class(self):\n df_test = make_simple_dataframe()\n Base_object = BaseDataClass.from_object(df_test)\n df_read = BaseDataClass.from_object(Base_object).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )",
"def show(obj):\n if isinstance(obj, pd.Series):\n df = pd.DataFrame(obj)\n return df\n elif hasattr(obj, '__dict__'):\n return pd.DataFrame(pd.Series(obj.__dict__),\n columns=['value'])\n else:\n return obj",
"def dataframe(self):\n return self.generator.dataframe",
"def dataframe(self):\n return self.get_target().dataframe()",
"def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")",
"def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df",
"def get_dataframe(project, bucket, blob):\n try:\n logging.info(f'Creating the pandas dataframe for the blob {blob}')\n\n fs = gcsfs.GCSFileSystem(project=project)\n file = bucket + '/' + blob\n\n with fs.open(file) as f:\n df = pd.read_csv(f)\n\n except:\n logging.fatal(f'Error when try to create the dataframe')\n raise\n\n return df",
"def reader(self):\n df = pd.read_csv(self.path)\n return df",
"def get_decopath_df() -> pd.DataFrame:\n return pd.read_csv(DECOPATH_PATH, sep='\\t')",
"def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df",
"def read_csv():",
"def makeDF(csv_path):\n DF = pd.read_csv(csv_path)\n\n DF['height'] = DF.apply(lambda DF: abs(DF['ymax'] - DF['ymin']), axis=1)\n DF['width'] = DF.apply(lambda DF: abs(DF['xmax'] - DF['xmin']), axis=1)\n DF['objArea'] = DF.apply(lambda DF: (DF['width'] * DF['height']), axis=1)\n imageArea = 2704 * 1524\n DF['objPortion'] = DF.apply(lambda DF: (DF['objArea'] / imageArea), axis=1)\n\n # DF.to_csv('/NewDF.csv')\n DF.to_json('json_annot_all.json')\n\n # Looking at the first 5 rows to get the insigt on the data.\n print(DF.head(5))\n print(DF.label.unique())\n return DF",
"def dataframe():\n headers = get_headers()\n headers = {'headers': headers}\n headers = pd.DataFrame.from_dict(headers, orient='index')\n headers = headers.replace(r'\\n', ' ', regex=True)\n headers = headers.replace(r'\\r', ' ', regex=True)\n headers = headers.replace(r'\\t', ' ', regex=True)\n headers = headers.replace(r'\\\\t', ' ', regex=True)\n headers = headers.replace(r' ', ' ', regex=True)\n headers = headers.replace(r' ', ' ', regex=True)\n\n paragraphs = get_paragraphs()\n paragraphs = {'paragraphs': paragraphs}\n paragraphs = pd.DataFrame.from_dict(paragraphs, orient='index')\n paragraphs = paragraphs.replace(r'\\n', ' ', regex=True)\n paragraphs = paragraphs.replace(r'\\r', ' ', regex=True)\n paragraphs = paragraphs.replace(r'\\t', ' ', regex=True)\n paragraphs = paragraphs.replace(r'\\\\t', ' ', regex=True)\n paragraphs = paragraphs.replace(r' ', ' ', regex=True)\n paragraphs = paragraphs.replace(r' ', ' ', regex=True)\n\n return headers.to_csv('headers.csv', index=False), paragraphs.to_csv('paragraphs.csv', index=False)",
"def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())",
"def object_export(request, simulation, object_name):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n seed = np.random.randint(10000)\n filename = '{0}/website_files/exports/{1}.tsv'.format(settings.BASE_DIR,\n seed)\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n with codecs.open(filename, 'r', encoding='utf8') as f:\n # Build a response to send a file.\n response = HttpResponse(f.read())\n response['content_type'] = 'text/tab-separated-values'\n response['Content-Disposition'] = \\\n 'attachement; filename={}.tsv'.format(metro_to_user(object_name))\n # We delete the export file to save disk space.\n os.remove(filename)\n return response",
"def test_from_file_csv(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.csv')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )"
] | [
"0.6555708",
"0.64185214",
"0.6270231",
"0.62557626",
"0.62464267",
"0.61960536",
"0.6175627",
"0.61419046",
"0.6131568",
"0.6105312",
"0.609715",
"0.6067034",
"0.6055361",
"0.5941004",
"0.5922465",
"0.58321035",
"0.5826193",
"0.58045596",
"0.5778079",
"0.57685983",
"0.57650083",
"0.5754343",
"0.5746826",
"0.57395947",
"0.5732833",
"0.5718886",
"0.56832826",
"0.5671344",
"0.56654924",
"0.56572324"
] | 0.6709269 | 0 |
Replaces the given province's tradegood with the new one defined in the tradegoods.bmp map. | def replace_tradegood(prov_num, new_tradegood):
directory = os.getcwd()+"\\shatterednippon\\history\\provinces\\"
for file in os.listdir(directory):
if file.startswith(str(prov_num)):
old_tradegood = find_tradegood(directory+file)
if old_tradegood is None:
print("Province: %s has no \"trade_goods\" variable" % file)
return
elif new_tradegood == old_tradegood:
return
for line in fileinput.input(directory+file, inplace=True):
line = line.rstrip().replace(old_tradegood, new_tradegood)
print(line)
print("Province %d: changed tradegood from %s to %s" % (prov_num, old_tradegood, new_tradegood))
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_city(self, code, key, val):\r\n if key == \"code\":\r\n self.vertices[val] = self.vertices.pop(code)\r\n setattr(self.vertices[val], key, val)\r\n else:\r\n setattr(self.vertices[code], key, val)",
"def update_province_info(self, data):\n\n prov = {data[i][0]: data[i][1] for i in range(len(data))}\n\n data = transpose(data)\n names = data[0]\n ind = self.get_date_index(names)\n if ind == -1:\n return prov\n names = names[ind:]\n data = data[1][ind:]\n\n for i in range(len(names)):\n if names[i][0] == \"1\":\n date = convert_to_date(names[i])\n if date > START_DATE:\n break\n for key in data[i]:\n if key == \"add_core\" and \"add_core\" in prov:\n prov[key] = prov[key] + \"|\" + data[i][key]\n else:\n prov[key] = data[i][key]\n else:\n prov[names[i]] = data[i]\n return prov",
"def power_polygon_old(osm_path): \n df = retrieve(osm_path,'multipolygons',['other_tags']) \n\n for row in df.itertuples():\n if df.loc[row.Index, \"other_tags\"] == None:\n df = df.drop(row.Index)\n elif not 'power' in df.loc[row.Index, \"other_tags\"]:\n df = df.drop(row.Index)\n \n df = df.reset_index(drop=True).rename(columns={'other_tags': 'asset'}) \n \n for row in range(len(df.index)):\n if '\"power\"=>\"substation\"' in df[\"asset\"][row]:\n df[\"asset\"][row] = 'substation' \n elif '\"power\"=>\"plant\"' in df[\"asset\"][row]:\n df[\"asset\"][row] = 'plant'\n else:\n df = df.drop(index=row)\n \n return df.reset_index(drop=True)",
"def new_occupancies(map_new, map_old, occupancy_threshold):\n # Find the index of the old map origin in the new map\n origin_new = np.array((map_new.info.origin.position.x, map_new.info.origin.position.y))\n origin_old = np.array((map_old.info.origin.position.x, map_old.info.origin.position.y))\n origin_offset = origin_old - origin_new\n origin_indices = np.rint(origin_offset / map_new.info.resolution).astype(int)\n\n if np.any(origin_indices != 0) or \\\n map_new.info.height != map_old.info.height or \\\n map_new.info.width != map_old.info.width:\n # Pad the old map\n x_before = origin_indices[0]\n x_after = map_new.info.width - map_old.info.width - x_before\n y_before = origin_indices[1]\n y_after = map_new.info.height - map_old.info.height - y_before\n paddings = ((np.maximum(0, y_before),\n np.maximum(0, y_after)),\n (np.maximum(0, x_before),\n np.maximum(0, x_after)))\n map_old.data = np.pad(map_old.data, paddings, 'constant', constant_values=-1)\n\n # Clip the old map\n x_clip_before = np.maximum(0, -x_before)\n x_clip_after = map_new.info.width + x_clip_before\n y_clip_before = np.maximum(0, -y_before)\n y_clip_after = map_new.info.height + y_clip_before\n map_old.data = map_old.data[y_clip_before:y_clip_after, x_clip_before:x_clip_after]\n\n # Find points that have changed to occupied\n points = np.argwhere(np.logical_and(\n map_new.data >= occupancy_threshold, \n map_old.data < occupancy_threshold))\n points = np.fliplr(points)\n points = points * map_new.info.resolution\n points[:,0] += map_new.info.origin.position.x\n points[:,1] += map_new.info.origin.position.y\n\n return points",
"def province():\r\n return _random.choice(\r\n [\r\n [\"Ontario\", \"ON\"],\r\n [\"Quebec\", \"QC\"],\r\n [\"Nova Scotia\", \"NS\"],\r\n [\"New Brunswick\", \"NB\"],\r\n [\"Manitoba\", \"MB\"],\r\n [\"British Columbia\", \"BC\"],\r\n [\"Prince Edward Island\", \"PE\"],\r\n [\"Saskatchewan\", \"SK\"],\r\n [\"Alberta\", \"AB\"],\r\n [\"Newfoundland and Labrador\", \"NL\"]\r\n ]\r\n )",
"def dealWithHouses(house):\r\n house = house.split(\"लिंग\")[0]\r\n ophouse = transliterate(house,sanscript.DEVANAGARI,sanscript.ITRANS) \r\n for i in mapping:\r\n ophouse = ophouse.replace(i[0],i[1])\r\n ophouse = saneHouse(ophouse.strip())\r\n return ophouse",
"def filterToSat( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n sat = int(255*HSL[1]) # convert to 0-255 range\n bmp.pixels[h][w] = (sat,sat,sat)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp",
"def cleanBadPix(redux_science, bad_pixel_map, method = 'median', replacement_box = 5, replace_constant = -99):\n #add negative pixels to the bad pixel map\n bad_pixel_map = np.logical_or(bad_pixel_map, redux_science <= 0)\n # im = np.copy(redux_science)\n # im[np.where(bad_pixel_map)[1]] = 0.\n if method == 'median':\n med_fil = median_filter(redux_science, size = replacement_box)\n\n cleaned = redux_science*~bad_pixel_map + med_fil*bad_pixel_map\n\n #elif method == 'interpolate':\n\n # print('so clean')\n\n return cleaned",
"def replace_punnet_with_collapsed_cases(punnet,map_caseX_to_punnet,caseX_collapsed):\n\n for case_coord in map_caseX_to_punnet.keys():\n\n punnet_coords = map_caseX_to_punnet[case_coord]\n\n for punnet_coord in punnet_coords:\n\n punnet[punnet_coord] = caseX_collapsed[case_coord]\n\n return punnet",
"def replaceOp(image, op, box):\r\n\r\n small = op(image.crop(box))\r\n replace(image, small, box)",
"def cloud_map(sky):\n cloud_map = {\n 'NSC': 0,\n 'NCD': 0,\n 'CLR': 0,\n 'FEW': 2,\n 'SCT': 6,\n 'BKN': 8,\n 'OVC': 10\n }\n return list(map(lambda s: (cloud_map[s[0]], s[1].value() if s[1] else 0), sky))",
"def add_LonghurstProvince2table(df, LatVar='Latitude', LonVar='Longitude'):\n # Get xml data for provinces\n provinces, tree = ParseLonghurstProvinceFile()\n # Get the observational data\n if isinstance(df, type(None)):\n df = get_processed_df_obs_mod() # NOTE this df contains values >400nM\n Rnum2prov = RosieLonghurstProvinceFileNum2Province(\n None, invert=True, rtn_dict=True)\n # - Work with the provinces\n # Add a single variable for the coordinate\n CoordVar = 'Coord'\n\n def f(x):\n return (x[LonVar], x[LatVar])\n df[CoordVar] = df.apply(f, axis=1)\n # map the calculation of provinces\n\n def GetProv(x):\n return Get_LonghurstProvince4coord(x[CoordVar], provinces=provinces,\n num2prov=Rnum2prov, tree=tree, verbose=False)\n df['MIT Province'] = df.apply(GetProv, axis=1)\n # Provence name\n df['PName (R)'] = df['Province'].map(\n RosieLonghurstProvinceFileNum2Province)\n df['PName (MIT)'] = df['MIT Province'].map(\n RosieLonghurstProvinceFileNum2Province)\n\n # - Check the assignment\n # How many are just the same?\n bool = df['MIT Province'] == df['Province']\n PrtStr = '#={} ({:.2f}%) are the calculated to be the same thing '\n Ns = float(df.loc[bool, :].shape[0])\n N = float(df.shape[0])\n print(PrtStr.format(N, Ns / N * 100))\n # Which of these are just missing assignments in the input files?\n Nnan = float(df['Province'].dropna().shape[0])\n PrtStr = 'The % non matching, observations without provinces #={} ({:.2f}%)'\n print(PrtStr.format(N-Nnan, (N-Nnan)/N*100))\n # The locations where both assignments have been made?\n dfT = df.loc[np.isfinite(df['Province']), :]\n # For certain points the new approach failed.\n tmp = dfT.loc[~np.isfinite(dfT['MIT Province']), :]\n print('The following provinces were not assigned (# of times) by MIT method:')\n PrtStr = 'This is a {} observations ({:.2f}%)'\n print(PrtStr.format(tmp.shape[0], tmp.shape[0]/N * 100))\n print(tmp['PName (R)'].value_counts())\n # What are the locations of these points?\n PrtStr = 'Full name of {} is {}'\n for prov in tmp.value_counts().index:\n print(PrtStr.format(prov, Get_LonghurstProvinceName4Num(prov)))\n # What data sets contribute to this\n PrtStr = 'Datasets contributing to these numbers: {}'\n print(PrtStr.format(', '.join(set(tmp['Data_Key']))))\n # For others, the assigned provinces differed\n bool = dfT['MIT Province'] != dfT['Province']\n vars2use = [u'Data_Key', 'MIT Province',\n 'Province', 'PName (MIT)', 'PName (R)']\n tmp = dfT.loc[bool, :][vars2use].dropna()\n # Print the differences to screen\n print(\"When assignment differs - The MIT method gives:\")\n PrtStr = \"MIT:'{}' ({}), but R gives '{}' ({})\"\n for prov in list(set(tmp['PName (R)'])):\n tmp_ = tmp.loc[tmp['PName (R)'] == prov, :]\n for idx in tmp_.index:\n MITp_ = tmp_.loc[tmp_.index == idx, :]['PName (MIT)'].values[0]\n print(PrtStr.format(MITp_, Get_LonghurstProvinceName4Num(MITp_),\n prov, Get_LonghurstProvinceName4Num(prov)))\n # What data sets contribute to this\n PrtStr = 'Datasets contributing to these numbers: {}'\n print(PrtStr.format(', '.join(set(tmp['Data_Key']))))",
"def update_ground_crew_belgrade():\r\n bg_airport = Airport_place.objects.get(town=\"Belgrade\")\r\n\r\n if bg_airport and Airplains.objects.filter(Q(status=1) | Q(status=4)).exists():\r\n Groundcrew.objects.get_or_create(airport=bg_airport, runway_clear=False)\r\n else:\r\n Groundcrew.objects.get_or_create(airport=bg_airport, runway_clear=True)",
"def add_to_set(\n map_crop: PIL.Image,\n gt_map_crop: PIL.Image,\n skip_black: bool = True,\n skip_water: bool = True,\n skip_no_class: bool = True,\n) -> bool:\n\n if skip_black and contains_black(map_crop):\n return False\n\n if skip_water and only_water_bodies(gt_map_crop):\n return False\n\n if skip_no_class and no_classes(gt_map_crop):\n return False\n\n return True",
"def group_rare_level_test(test, group_dict):\n for key, value in group_dict.items():\n test[\"BIN_\" + key] = test[key].copy()\n test[\"BIN_\" + key].replace(value, \"_OTHER_\", inplace=True)",
"def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None",
"def buy_map():\r\n \r\n global world_map\r\n\r\n if world_map:\r\n raw_input('You already have one!')\r\n elif player.get_money() >= 10:\r\n raw_input('You bought a map for $10! Woohoo!\\n')\r\n player.lose_money(10)\r\n world_map = True\r\n else:\r\n raw_input('\"You seem to be a bit low in the money department...\"\\n')",
"def power_point_old(osm_path): \n df = retrieve(osm_path,'points',['other_tags']) \n\n for row in df.itertuples():\n if df.loc[row.Index, \"other_tags\"] == None:\n df = df.drop(row.Index)\n elif not 'power' in df.loc[row.Index, \"other_tags\"]:\n df = df.drop(row.Index)\n \n df = df.reset_index(drop=True).rename(columns={'other_tags': 'asset'}) \n \n for row in range(len(df.index)):\n if '\"power\"=>\"tower\"' in df[\"asset\"][row]:\n df[\"asset\"][row] = 'power_tower' \n elif '\"power\"=>\"pole\"' in df[\"asset\"][row]:\n df[\"asset\"][row] = 'power_pole'\n else:\n df = df.drop(index=row)\n \n return df.reset_index(drop=True)",
"def test_degrade_map_int_prod(self):\n nside_coverage = 32\n nside_map = 1024\n nside_new = 512\n full_map = np.full(hpg.nside_to_npixel(nside_map), 2, dtype=np.int64)\n # Generate sparse map\n\n sparse_map = healsparse.HealSparseMap(healpix_map=full_map, nside_coverage=nside_coverage,\n nside_sparse=nside_map, sentinel=0)\n\n # Degrade original HEALPix map\n\n deg_map = np.full(hpg.nside_to_npixel(nside_new), 2**4, dtype=np.int64)\n # Degrade sparse map and compare to original\n\n new_map = sparse_map.degrade(nside_out=nside_new, reduction='prod')\n # Test the coverage map generation and lookup\n\n testing.assert_almost_equal(deg_map, new_map.generate_healpix_map())",
"def from_enmap(emap):\n\n new_map = so_map()\n hdulist = emap.wcs.to_fits()\n header = hdulist[0].header\n new_map.pixel = header[\"CTYPE1\"][-3:]\n try:\n new_map.ncomp = header[\"NAXIS3\"]\n except:\n new_map.ncomp = 1\n new_map.data = emap.copy()\n new_map.nside = None\n new_map.geometry = new_map.data.geometry[1:]\n new_map.coordinate = header[\"RADESYS\"]\n if new_map.coordinate == \"ICRS\":\n new_map.coordinate = \"equ\"\n\n return new_map",
"def update_map(self, boundaries):\n image = Image.open(self.image_file)\n update_pixels = ImageDraw.Draw(image)\n for i in range(len(boundaries) - 1):\n update_pixels.point(boundaries[i], fill=self.path_color)\n if self.season ==\"winter\":\n image.save(\"temp_winter.png\")\n elif self.season ==\"spring\":\n image.save(\"temp_spring.png\")\n else:\n image.save(\"temp_fall.png\")",
"def replace_instruction(bet_id, new_price):\n args = locals()\n return {\n to_camel_case(k): v for k, v in args.items() if v is not None\n }",
"def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region",
"def change_oakhaven_pswap_trades(status):\n print(\"Processing pswaps for OAKHAVEN\")\n portfolio = acm.FPhysicalPortfolio[\"PB_PSWAP_OAKHAVEN_CR\"]\n trades = [t for t in portfolio.Trades() if \"OLD\" in t.Instrument().Name()]\n set_status(trades, status)",
"def przeksztMapy(mapObj):\n\n # Kopia mapy - by nie zmieniac oryginalnej\n mapObjCopy = copy.deepcopy(mapObj)\n\n # Usuniecie z mapy obiektow ktore nie są murem (zmiana na obiekt typu podloga)\n for x in range(len(mapObjCopy)):\n for y in range(len(mapObjCopy[0])):\n if mapObjCopy[x][y] in ('$', '.', '@', '+', '*'):\n mapObjCopy[x][y] = ' '\n\n return mapObjCopy",
"def bkg_subtract(self, analyte, bkg, ind=None):\n\n if 'bkgsub' not in self.data.keys():\n self.data['bkgsub'] = {}\n\n self.data['bkgsub'][analyte] = self.focus[analyte] - bkg\n\n if ind is not None:\n self.data['bkgsub'][analyte][ind] = np.nan\n\n return",
"def add_Longhurst_Province_raster_to_array(ds):\n import geopandas\n from rasterio import features\n from affine import Affine\n # Get the shape files\n provinces = geopandas.read_file('/work/home/ts551/data/longhurst_v4_2010')\n shapes = [(shape, n) for n, shape in enumerate(provinces.geometry)]\n # Now add the existing array\n ds_tmp = ds[list(ds.data_vars)[0]].copy().mean(dim='time')\n # Add raster the provinces onto this\n ds_tmp['LonghurstProvince'] = rasterize(shapes, ds_tmp.coords)\n # Then update the variable\n ds['LonghurstProvince'] = ds_tmp['LonghurstProvince']\n # Add Some attributes\n attrs = {\n 'Long name': 'Longhurst Provinces',\n 'data downloaded from': 'http://www.marineregions.org/downloads.php#longhurst',\n 'version': 'Version 4 - March 2010',\n 'Citations': \"Longhurst, A.R et al. (1995). An estimate of global primary production in the ocean from satellite radiometer data. J. Plankton Res. 17, 1245-1271 ; Longhurst, A.R. (1995). Seasonal cycles of pelagic production and consumption. Prog. Oceanogr. 36, 77-167 ; Longhurst, A.R. (1998). Ecological Geography of the Sea. Academic Press, San Diego. 397p. (IMIS) ; Longhurst, A.R. (2006). Ecological Geography of the Sea. 2nd Edition. Academic Press, San Diego, 560p.\",\n }\n ds['LonghurstProvince'].attrs = attrs\n return ds",
"def test_invert(self):\n map_ = mapreader.get_data(self.map_file, inverted=False)\n self.assertFalse(map_._inverted)\n map_.invert()\n self.assertTrue(map_._inverted)\n map_ = mapreader.get_data(self.map_file, inverted=True)\n self.assertTrue(map_._inverted)\n # check the inversion is complete and that we add a new label\n with open('rm.map', 'w') as f:\n map_.write(f)\n map__ = mapreader.get_data('rm.map')\n self.assertEqual(map__._nlabl, 2)\n os.remove('rm.map')",
"def deduct_from_rack(self, old_word, new_word):\r\n chrs_used = new_word.replace(old_word, \"\")\r\n print(\"Rack previous size = {}\".format(len(self.rack)))\r\n for chr_val in chrs_used:\r\n used_tile = Tile(chr_val, self.game_bag.letter_freq_and_val[chr_val][1])\r\n self.rack.remove(used_tile)\r\n\r\n if len(new_word) - len(old_word) >= 7: #used all 7 tiles\r\n self.cur_score += 50\r\n \r\n print(\"Rack new size = {}\".format(len(self.rack)))",
"def change_map290_safex_pswap_trades(status):\n print(\"Processing pswaps for XFM_MAP290\")\n pswap = acm.FPortfolioSwap[\"PB_XFM_MAP290_SAFEX2\"]\n trades = [t for t in pswap.Trades()]\n set_status(trades, status)"
] | [
"0.49456048",
"0.4688628",
"0.46635583",
"0.465411",
"0.4610733",
"0.46057832",
"0.4569795",
"0.4563333",
"0.45383635",
"0.4537916",
"0.45238334",
"0.4523624",
"0.45051134",
"0.4464374",
"0.44389847",
"0.44224742",
"0.44107935",
"0.44045115",
"0.43949524",
"0.438784",
"0.4384053",
"0.43742272",
"0.4359033",
"0.43545058",
"0.4348892",
"0.43025556",
"0.4286253",
"0.42843696",
"0.42804632",
"0.4280314"
] | 0.67790896 | 0 |
Finds the given province file's tradegood and returns it, else returns None. | def find_tradegood(filepath):
with open(filepath) as f:
for line in f:
if "trade_good" in line:
return line.replace("trade_goods = ", "").strip()
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_purity_from_filename(fn):\n # type: (str) -> float\n for k in PURITY_DICT.keys():\n if fn.find(k) != -1:\n return PURITY_DICT[k]\n return None",
"def replace_tradegood(prov_num, new_tradegood):\n\tdirectory = os.getcwd()+\"\\\\shatterednippon\\\\history\\\\provinces\\\\\"\n\tfor file in os.listdir(directory):\n\t\tif file.startswith(str(prov_num)):\n\t\t\told_tradegood = find_tradegood(directory+file)\n\t\t\tif old_tradegood is None:\n\t\t\t\tprint(\"Province: %s has no \\\"trade_goods\\\" variable\" % file)\n\t\t\t\treturn\n\t\t\telif new_tradegood == old_tradegood:\n\t\t\t\treturn\n\t\t\t\n\t\t\tfor line in fileinput.input(directory+file, inplace=True):\n\t\t\t\tline = line.rstrip().replace(old_tradegood, new_tradegood)\n\t\t\t\tprint(line)\n\t\t\tprint(\"Province %d: changed tradegood from %s to %s\" % (prov_num, old_tradegood, new_tradegood))\n\t\t\treturn",
"def find_sample_from_filename(fn):\n # type: (str) -> str\n for k in PURITY_DICT.keys():\n if fn.find(k) != -1:\n return k\n return None",
"def checkFile_and_return(adistro):\n try:\n if os.path.isfile(adistro.releaseFile):\n return adistro\n except IOError:\n return None",
"def existing_village_file(kovetz):\n try:\n cat77 = nbt.NBTFile(kovetz)\n except IOError:\n raise Exception(\"Hmm. Unfortunately, the file requested does not exist :(\")\n tick4 = cat77['data']['Tick'].value\n return cat77, tick4",
"def get_province_info(self, data, filename):\n\n number, name = self.split_file_name(filename)\n number = int(number)\n\n if \"owner\" in data:\n tag = data[\"owner\"]\n if data[\"owner\"] not in self.country_dict:\n self.add_tag(tag)\n self.country_dict[tag][\"province_count\"] += 1\n self.country_dict[tag][\"dev_tax\"] += int(data[\"base_tax\"])\n self.country_dict[tag][\"dev_production\"] += int(data[\"base_production\"])\n self.country_dict[tag][\"dev_manpower\"] += int(data[\"base_manpower\"])\n \n if \"hre\" in data and data[\"hre\"] == \"yes\":\n self.hre_dict[number] = True\n else:\n self.hre_dict[number] = False\n self.name_dict[number] = name",
"def ParseLonghurstProvinceFile():\n from xml.dom.minidom import parse, parseString\n provinces = {}\n tree = parse('longhurst.xml')\n for node in tree.getElementsByTagName('MarineRegions:longhurst'):\n # 1. Get province code, name and bounding box from file\n provCode = node.getElementsByTagName('MarineRegions:provcode')[\n 0].firstChild.data\n provName = node.getElementsByTagName('MarineRegions:provdescr')[\n 0].firstChild.data\n fid = node.getAttribute(\"fid\")\n b = node.getElementsByTagName('gml:coordinates')[0].firstChild.data\n # 2. Parse bounding box coordinates\n b = b.split(' ')\n x1, y1 = b[0].split(',')\n x2, y2 = b[1].split(',')\n x1 = float(x1)\n y1 = float(y1)\n x2 = float(x2)\n y2 = float(y2)\n # Add province to dictionary\n provinces[fid] = {'provName': provName, 'provCode': provCode,\n 'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}\n return provinces, tree",
"def get_province(self, station_id, time):\n # Make sure the stations have been collected\n if not hasattr(self, 'stations'):\n self.collect_stations()\n\n keys = list(self.stations.keys())\n\n index = numpy.where(\n [any([True for id in self.stations[prov][time] if id == station_id]) for prov in keys]\n )[0]\n\n if index.size == 0:\n raise Exception('Cannot find the station \"{}\" with {} data'.format(station_id, time))\n\n return keys[int(index)]",
"def get_province_number(corr_pixel):\n\tcorr_pixel = str(corr_pixel).strip(\"()\").replace(\", \", \";\") #Reformats the pixel to ensure it can be compared.\n\twith open(os.getcwd()+\"\\\\shatterednippon\\\\map\\\\definition.csv\", \"r\") as definitions:\n\t\tprov_num = 1\n\t\tfor line in definitions:\n\t\t\tif corr_pixel in line:\n\t\t\t\treturn prov_num\n\t\t\tprov_num += 1\n\treturn None",
"def search(word):\n try:\n words = list_every_word(file_name)\n if len(words) > 20000:\n print(\"This might take a while.\")\n except IOError:\n print(\"This file doesn't exist... Are you sure you defined a valid filename? Use 'file <your filename>'\")\n except:\n print(\"An undefined error occured\")\n if dictionnary == False: \n print(\"You forgot to switch to dictionnary mode. Just use 'dictionnary'\")\n return\n else:\n try:\n ld = smallest_ld(word,words) \n print(\"The closest word found in the file is: {0}\".format(ld[0][1]))\n return ld[0][1]\n except:\n print(\"An unexpected error occured, be sure to have valid input in your file\")\n return",
"def detect_type(self,seatmap_file):\n try:\n root = ET.parse(seatmap_file).getroot()\n print(\"File accepted: \" + seatmap_file)\n if root.tag.endswith('Envelope'):\n return self.flight_parse1(root)\n if root.tag.endswith('SeatAvailabilityRS'):\n return self.flight_parse2(root)\n raise ValueError(\"Unsupported XML Format\")\n # TODO: aditional validations\n except FileNotFoundError as error:\n print(\"The name of the file does not exist\")",
"def fetch_prudence():\n fname = retrieve(\n path=cache_url,\n url=\"https://raw.githubusercontent.com/euro-cordex/tables/master/regions/prudence.csv\",\n known_hash=\"d87691a873110c9e3e4460a0ed35cd15f11f2a42aa86aced76feae9e87e8bed2\",\n )\n return fname",
"def identify_ess(path: str) -> Optional[str]:\n software = None\n with open(path, 'r') as f:\n for _ in range(25):\n line = f.readline()\n if 'x T B' in line:\n software = 'xtb'\n break\n return software",
"def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region",
"def find_vcs_country_ept(self, country_dict, marketplace_id, log_line_vals, line_no):\n\n log_line_obj = self.env['common.log.lines.ept']\n res_country_obj = self.env['res.country']\n\n country = res_country_obj.browse( \\\n country_dict.get(marketplace_id, False))\n if not country:\n country = res_country_obj.search( \\\n [('amazon_marketplace_code', '=', marketplace_id)], limit=1)\n if not country:\n country = res_country_obj.search( \\\n [('code', '=', marketplace_id)], limit=1)\n if country:\n country_dict.update({marketplace_id: country.id})\n if not country:\n message = 'Country with code %s not found in line %d' % (\n marketplace_id, line_no)\n self.create_log(log_line_obj, log_line_vals, message)\n return country",
"def find_layer_from_fullpath(self, full_path):\n try:\n _layer, *_ = filter(lambda x: x.FullPath == full_path, self._file3dm.Layers)\n return _layer\n except ValueError:\n return None",
"def _get_EF_potfile(self, potfile):\n f = open_general(potfile)\n tmptxt = f.readlines()\n f.close()\n EF = float(tmptxt[3].split()[1])\n return EF",
"def get_address(address_file):\n if not path.exists(address_file) :\n print(\"file not found :\", address_file)\n return None\n addr_file = open(address_file,'r')\n address = addr_file.readlines()\n return address[0]",
"def test_find_file_zipped_no_allow(self):\n\n this_file_name = satellite_io.find_file(\n top_directory_name=TOP_DIRECTORY_NAME,\n valid_date_string=VALID_DATE_STRING,\n prefer_zipped=True, allow_other_format=False,\n raise_error_if_missing=False\n )\n\n self.assertTrue(this_file_name == FILE_NAME_ZIPPED)",
"def process_file(self, data, filename):\n\n if data:\n data = self.update_province_info(data)\n self.get_province_info(data, filename)",
"def open_and_read_file():\n file_path = sys.argv[1]\n #print file_path\n file_data = open(file_path, 'r')\n gettysburg = file_data.read()\n\n return gettysburg",
"def getwellid(infile, wellinfo):\r\n m = re.search(\"\\d\", getfilename(infile))\r\n s = re.search(\"\\s\", getfilename(infile))\r\n if m.start() > 3:\r\n wellname = getfilename(infile)[0:m.start()].strip().lower()\r\n else:\r\n wellname = getfilename(infile)[0:s.start()].strip().lower()\r\n wellid = wellinfo[wellinfo['Well'] == wellname]['WellID'].values[0]\r\n return wellname, wellid",
"def get_demo_file(fname):\n\n d = download_demo_files()\n if fname in d:\n return d[fname]\n else:\n return None",
"def retrieve_iso(site, branch):\n url = URL(site + branch)\n html = url.download()\n dom = DOM(html)\n infobox = dom.by_tag('table.vcard')[0]('tr')\n\n for row in infobox:\n\n # The ISO code can be stored in 2 different ways in general.\n try:\n if row('a')[0].content == 'ISO 3166 code':\n try:\n return row('a')[1].content\n except:\n return row('td')[0].content\n\n except:\n pass\n return 'ERROR'",
"def extract_show(filename):\n try:\n f = open(\"recap_data.csv\", mode='r', encoding=\"utf-8\")\n content = f.read()\n f.close()\n lines = content.split('\\n')\n for line in lines:\n cols = line.split(';')\n if cols[0] == filename:\n return cols[3]\n return None\n\n except Exception as e:\n print(\"Exception du try extract_show\")\n print(e)\n return None",
"def test_find_file_zipped_allow(self):\n\n this_file_name = satellite_io.find_file(\n top_directory_name=TOP_DIRECTORY_NAME,\n valid_date_string=VALID_DATE_STRING,\n prefer_zipped=True, allow_other_format=True,\n raise_error_if_missing=False\n )\n\n self.assertTrue(this_file_name == FILE_NAME_UNZIPPED)",
"def parse_tb_file(path, module):\n with open(path, 'r') as f:\n try:\n tb_coverage_data = json.load(f)\n except Exception:\n print('WARN: Failed to parse translation block JSON file %s' % path)\n return None\n\n if not tb_coverage_data:\n print('WARN: Translation block JSON file %s is empty' % path)\n return None\n\n if module not in tb_coverage_data:\n print('WARN: Target %s not found in translation block JSON file %s' %\n (module, path))\n return None\n\n return tb_coverage_data[module]",
"def findFirstHigh(thisStFile):\n with open(thisStFile) as f:\n reader = csv.DictReader(f, delimiter='\\t')\n for row in reader:\n return datetime.datetime.strptime(row['time'], fmt)",
"def RosieLonghurstProvinceFileNum2Province(input, invert=False, rtn_dict=False):\n Rnum2prov = {\n 1: 'BPLR', 2: 'ARCT', 3: 'SARC', 4: 'NADR', 5: 'GFST', 6: 'NASW', 7: 'NATR',\n 8: 'WTRA', 9: 'ETRA', 10: 'SATL', 11: 'NECS', 12: 'CNRY', 13: 'GUIN', 14: 'GUIA',\n 15: 'NWCS', 16: 'MEDI', 17: 'CARB', 18: 'NASE', 19: 'CHSB', 20: 'BRAZ',\n 21: 'FKLD',\n 22: 'BENG', 30: 'MONS', 31: 'ISSG', 32: 'EAFR', 33: 'REDS', 34: 'ARAB',\n 35: 'INDE',\n 36: 'INDW', 37: 'AUSW', 50: 'BERS', 51: 'PSAE', 52: 'PSAW', 53: 'KURO',\n 54: 'NPPF',\n 55: 'NPSE', 56: 'NPSW', 57: 'OCAL', 58: 'TASM', 59: 'SPSG', 60: 'NPTG',\n 61: 'PNEC',\n 62: 'PEQD', 63: 'WARM', 64: 'ARCH', 65: 'ALSK', 66: 'CCAL', 67: 'CAMR',\n 68: 'CHIL',\n 69: 'CHIN', 70: 'SUND', 71: 'AUSE', 72: 'NEWZ', 80: 'SSTC', 81: 'SANT',\n 82: 'ANTA',\n 83: 'APLR', 99: 'LAKE'\n }\n # Invert?\n if invert:\n Rnum2prov = {v: k for k, v in list(Rnum2prov.items())}\n # Return the dictionary\n if rtn_dict:\n return Rnum2prov\n else:\n try:\n return Rnum2prov[input]\n except KeyError:\n if not np.isfinite(input):\n return np.NaN\n else:\n print(input, type(input), np.isfinite(input))\n vstr = \"'KeyError for dictionary not for NaN '{}' (type:{})\"\n raise ValueError(vstr.format(input, type(input)))",
"def province():\r\n return _random.choice(\r\n [\r\n [\"Ontario\", \"ON\"],\r\n [\"Quebec\", \"QC\"],\r\n [\"Nova Scotia\", \"NS\"],\r\n [\"New Brunswick\", \"NB\"],\r\n [\"Manitoba\", \"MB\"],\r\n [\"British Columbia\", \"BC\"],\r\n [\"Prince Edward Island\", \"PE\"],\r\n [\"Saskatchewan\", \"SK\"],\r\n [\"Alberta\", \"AB\"],\r\n [\"Newfoundland and Labrador\", \"NL\"]\r\n ]\r\n )"
] | [
"0.61429954",
"0.5806887",
"0.5357879",
"0.5357617",
"0.53102237",
"0.52028406",
"0.51881367",
"0.51809806",
"0.49570414",
"0.49463305",
"0.4909227",
"0.4857346",
"0.4817479",
"0.4806995",
"0.47705704",
"0.47592556",
"0.4758116",
"0.47489792",
"0.47389686",
"0.47359687",
"0.47320902",
"0.47311231",
"0.47125202",
"0.47108182",
"0.46937993",
"0.46935466",
"0.46565115",
"0.46499577",
"0.46436167",
"0.46009108"
] | 0.737722 | 0 |
Checks definition.csv to see whether provinces.bmp's corresponding pixel's RGB value is in the definition list. Returns the province number if it finds the pixel in the list, returns None otherwise. | def get_province_number(corr_pixel):
corr_pixel = str(corr_pixel).strip("()").replace(", ", ";") #Reformats the pixel to ensure it can be compared.
with open(os.getcwd()+"\\shatterednippon\\map\\definition.csv", "r") as definitions:
prov_num = 1
for line in definitions:
if corr_pixel in line:
return prov_num
prov_num += 1
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region",
"def get_layers(filen, flist):\n lay_lim =()\n if (filen in flist[0]) or (filen in flist[1]) or \\\n (filen in flist[2]) or (filen in flist[3]):\n lay_lim = (24,45)\n elif (filen in flist[4]):\n lay_lim = (29,50)\n return lay_lim",
"def detect_colour(field):\n # create list of BGR tuples and count them\n pixels = Counter(map(tuple, np.reshape(field, (-1, 3)).tolist()))\n # filter out the colours which just have a few occurrences\n pixels = dict(filter(lambda pixel: pixel[1] > 100, dict(pixels).items()))\n # and merge the nearby colours\n pixels = merge_colours(pixels)\n\n # the background color should be the one with the most pixels present\n return Counter(pixels).most_common(1)[0][0]",
"def get_detector_number_from_pixel_mapping(\n mapping: PixelMapping,\n) -> List[int]:\n detector_numbers = [id for id in mapping.pixel_ids if id is not None]\n return detector_numbers",
"def whichElementIsInTheLevel(self):\n\n listElements = list()\n\n #We read each line\n for row in self._get_grille_csv():\n \n #We read each cell of each line\n for cell in row:\n if cell in listElements or cell == \"\":\n pass\n \n else:\n listElements.append(cell)\n\n self.loadingLevelElements(listElements)",
"def locate_board_callback(self, data):\n self.home_count = 0\n self.is_home = True\n while self.home_count < self.home_count_thresh or not self.board_position:\n try:\n for d in self.tag_vals.detections:\n if d.id == (0,):\n self.board_position = self.tag_vals.detections[0].pose.pose.pose\n self.board_present = True\n except:\n pass\n\n self.is_home = False\n\n return self.board_position",
"def get_province_info(self, data, filename):\n\n number, name = self.split_file_name(filename)\n number = int(number)\n\n if \"owner\" in data:\n tag = data[\"owner\"]\n if data[\"owner\"] not in self.country_dict:\n self.add_tag(tag)\n self.country_dict[tag][\"province_count\"] += 1\n self.country_dict[tag][\"dev_tax\"] += int(data[\"base_tax\"])\n self.country_dict[tag][\"dev_production\"] += int(data[\"base_production\"])\n self.country_dict[tag][\"dev_manpower\"] += int(data[\"base_manpower\"])\n \n if \"hre\" in data and data[\"hre\"] == \"yes\":\n self.hre_dict[number] = True\n else:\n self.hre_dict[number] = False\n self.name_dict[number] = name",
"def check_exist(name, map):\r\n f = open(PATH,mode='r')\r\n file = yaml.load(f)\r\n f.close()\r\n if file is None:\r\n return (False, -1, -9, -9, [])\r\n elif name in file:\r\n if \"CSV\" in file[name]:\r\n return (True, file[name][\"id\"], file[name][\"hash\"], file[name][\"csv_hash\"], file[name][\"children\"])\r\n else:\r\n return (True, file[name][\"id\"], file[name][\"hash\"], -9, file[name][\"children\"])\r\n elif name+\"_\"+map in file:\r\n n = name+\"_\"+map\r\n if \"CSV\" in file[n]:\r\n return (True, file[n][\"id\"], file[n][\"hash\"], file[n][\"csv_hash\"], file[n][\"children\"])\r\n else:\r\n return (True, file[n][\"id\"], file[n][\"hash\"], -9, file[n][\"children\"])\r\n return (False, -1, -9, -9, [])",
"def load():\n global WHITE_CARDS\n global BLACK_CARDS\n\n with open('./cards.csv', 'r') as f:\n reader = csv.reader(f)\n for r in reader:\n black = r[0]\n white = r[1]\n if white:\n WHITE_CARDS.append(white)\n if black:\n BLACK_CARDS.append(black)",
"def get_cie1931_color_matching_function():\n\n filename = os.path.dirname(os.path.abspath(__file__))\\\n + os.path.normpath(\"/data/cie_1931_color_matching_function.csv\")\n data = np.loadtxt(filename, delimiter=',', skiprows=1).T\n\n return np.uint16(data[0]), data[1:]",
"def calcPixelsAddress(svIDList, pixIDList, dimX, dimY):\n ini = True\n for svIDs in svIDList:\n for svID in svIDs:\n pixIDs = pixIDList[svID]\n pixs = np.zeros((pixIDs.shape[0], 3))\n szFrame = dimX*dimY\n pixs[:,2] = pixIDs // szFrame\n pixs[:,1] = (pixIDs % szFrame) // dimX\n pixs[:,0] = (pixIDs % szFrame) % dimX\n\n if ini:\n pixPoints = pixs\n ini = False\n else:\n pixPoints = np.vstack((pixPoints, pixs))\n\n if ini:\n return None\n else:\n return pixPoints",
"def isColourInArea(r, g, b, x, y):\n pic = pyautogui.screenshot(region=(x-30, y-30, 30, 30))\n\n #convert screenshot to numpy array for opencv to use\n #cvImg = cv.cvtColor(np.array(pic), cv.COLOR_RGB2BGR)\n\n #cv.imshow(\"Search Area\", cvImg)\n #cv.waitKey(0)\n width, height = pic.size\n start_time = time.time()\n for w in range(0, width, 5):\n for h in range(0, height, 5):\n pr, pg, pb = pic.getpixel((w, h))\n #print(\"{} at {},{}. Colour = ({}, {}, {})\".format(pixel,(x+1)-w,(y+1)-h,r,g,b))\n if(isCloseEnough(pr, r) and isCloseEnough(pg, g) and isCloseEnough(pb, b)):\n print(\"FOUND AFTER {} TRIES - elapsed: {}\".format((w+1)*(h+1), time.time()-start_time))\n return True\n\n \n print(\"NOT FOUND - elapsed: {}\".format(time.time()-start_time))\n return False",
"def readColourMap (self):\r\n \r\n num = self.bih_vals [bih_ColorsUsed]\r\n\r\n if num > 0:\r\n self.colourmap = [BLACK_FOUR] * num\r\n \r\n for i in range (0, num):\r\n self.colourmap [i] = self.the_file.read (4)",
"def getColorsFromCsv(filename):\n csvreader=csv.reader(file(filename))\n\n csvcolors=[]\n i=0\n for row in csvreader:\n name=row[0]\n name=name.strip()\n c=int(row[1] )* 2.55\n c=int(c)\n m=int(row[2] )* 2.55\n m=int(m)\n y=int(row[3] )* 2.55\n y=int(y)\n k=int(row[4] )* 2.55\n k=int(k) \n if checkValue(c, m, y, k) ==False:\n scribus.messageBox(\"csv2color\", \"At least one CMYK value in your csv file is not correct \\n(must be between 0 and 100)\\nAborting script - nothing imported.\", icon=scribus.ICON_WARNING)\n sys.exit()\n else:\n pass\n color=(name, c, m, y, k)\n csvcolors.append(color)\n i=i+1\n return csvcolors",
"def identify_habarea(indiv_xy_position, habarea_map): \n \n row=int(indiv_xy_position[0])\n col=int(indiv_xy_position[1])\n habarea=habarea_map[row][col]\n \n return habarea",
"def is_in(m):\n\tf=open('places.dat','r')\n\tr = f.read()\n\tf.close()\n\tif str(m) in r:\n\t\tj = r.find(m)/7\n\t\treturn j\n\telse:\n\t\treturn -1",
"def detect_colors(self):\n dirname = 'temp'\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n filepath = dirname + '\\\\' + self.id + '.jpg'\n urllib.urlretrieve(self.img_url, filepath)\n\n palette = colorific.extract_colors(filepath)\n self.palette = palette\n self.rgb_colors = colors_to_score_string(palette.colors)",
"def check_for_draw(self, current_board):\r\n init_board = [0, 1, 2, 3, 4, 5, 6, 7, 8]\r\n if any(i in current_board for i in init_board):\r\n return False\r\n else:\r\n return True",
"def findMap(obc, nWin):\n try:\n return config.mapLoockUp[(obc,nWin)]\n except KeyError:\n return None",
"def get_label_coords(csv_file, name):\n labels = [] # np.zeros((50, 8), dtype=float)\n for row in csv_file:\n if row[0] == name:\n labels.append(row)\n else:\n pass\n\n return labels",
"def detect_column_with_places(header: str,\n col_values: List[str]) -> Optional[TypeProperty]:\n types_found: Dict[str, TypeProperty] = {}\n for det in PLACE_DETECTORS:\n found = det.detect_column(col_values)\n if found is not None:\n types_found.update({found.dc_type.dcid: found})\n\n # If country was detected and the header has a country in the name, return\n # country. If not, we have to do more work to disambiguate country vs state.\n if c.T_COUNTRY in types_found and c.T_COUNTRY.lower(\n ) in utils.to_alphanumeric_and_lower(header):\n return types_found[c.T_COUNTRY]\n\n # If state was detected and the header has a state in the name, return\n # state.\n if c.T_STATE in types_found and c.T_STATE.lower(\n ) in utils.to_alphanumeric_and_lower(header):\n return types_found[c.T_STATE]\n\n # Finally, if none of the headers match, give preference to country\n # detection over state detection.\n if c.T_COUNTRY in types_found:\n return types_found[c.T_COUNTRY]\n\n if c.T_STATE in types_found:\n return types_found[c.T_STATE]\n\n # At this point, there was no detection possible. Return None.\n return None",
"def ejscreen_areas_of_concern_data_exists(cls):\n return cls.EJSCREEN_AREAS_OF_CONCERN_SOURCE.is_file()",
"def fregion_id_by_name(name=None):\n f_region_types = FilteredElementCollector(doc).OfClass(FilledRegionType)\n for fregion_type in f_region_types:\n fregion_name = Element.Name.GetValue(fregion_type)\n if not name or name.lower() == fregion_name.lower():\n return fregion_type.Id\n # Loops through all, not found: use last\n else:\n print('Color not specified or not found.')\n return fregion_type.Id",
"def search_reference(dic_values, dic_mask, row, col, band_name):\n key_images = [key for key, value in dic_mask.items()]\n\n value_pixel = [value[row, col] for key, value in dic_values[band_name].items() if key in key_images]\n mask_pixel = [value[row, col] for key, value in dic_mask.items() if key in key_images]\n\n indices_not_cloud = [index for index, value in enumerate(mask_pixel) if value == 1][-1]\n\n reference_date = key_images[indices_not_cloud]\n reference_value = value_pixel[indices_not_cloud]\n\n return reference_date, reference_value",
"def load_shapefile_neighborhood(area):\n if os.path.isfile(\"data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('data/shp/Inzameling_huisvuil_080520.shp')\n elif os.path.isfile(\"../data/shp/Inzameling_huisvuil_080520.shp\"):\n source = gpd.read_file('../data/shp/Inzameling_huisvuil_080520.shp')\n if area:\n source = source[source['sdcode'].isin(list(area))]\n return list(source.geometry)",
"def province():\r\n return _random.choice(\r\n [\r\n [\"Ontario\", \"ON\"],\r\n [\"Quebec\", \"QC\"],\r\n [\"Nova Scotia\", \"NS\"],\r\n [\"New Brunswick\", \"NB\"],\r\n [\"Manitoba\", \"MB\"],\r\n [\"British Columbia\", \"BC\"],\r\n [\"Prince Edward Island\", \"PE\"],\r\n [\"Saskatchewan\", \"SK\"],\r\n [\"Alberta\", \"AB\"],\r\n [\"Newfoundland and Labrador\", \"NL\"]\r\n ]\r\n )",
"def find(self, value):\n for row in range(self.getHeight()):\n for column in range(self.getWidth()):\n if self[row][column] == value:\n return (row, column)\n return None",
"def provinces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"provinces\")",
"def provinces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"provinces\")",
"def is_country_column_present_in_vendor_profile_dialed_digits_page(self):\n return self.is_specific_column_present(self.dialed_digits_grid_div_id, self.column_name_country)"
] | [
"0.5004101",
"0.48853442",
"0.48022938",
"0.47766182",
"0.4772715",
"0.46768475",
"0.46313435",
"0.4620217",
"0.46167162",
"0.4558334",
"0.45415226",
"0.45321065",
"0.45184773",
"0.4518011",
"0.44653592",
"0.44647416",
"0.44464013",
"0.44451034",
"0.44309643",
"0.44243097",
"0.4421356",
"0.4414055",
"0.44112232",
"0.44048867",
"0.43999255",
"0.4372948",
"0.43694365",
"0.43692905",
"0.43692905",
"0.4356562"
] | 0.6695489 | 0 |
Returns the names of the tradegoods and the RGB color values for each defined tradegood in 00_tradegoods.txt as two separate lists. | def get_defined_tradegoods():
names = []
colors = []
with open(os.getcwd()+"\\shatterednippon\\common\\tradegoods\\00_tradegoods.txt", "r") as f:
for line in f:
if line[0].isalpha():
names.append(line.strip("={} \n"))
elif "color" in line:
numbers = tuple(map(int, re.sub("[^\d. ]\s*", "", line).split()))
colors.append(tuple(round(i * 255) for i in numbers))
return names, colors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_player_colors() -> List[Tuple[float, float, float]]:\n return PLAYER_COLORS",
"def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]",
"def materials_list_from_file(filename):\n color_data = []\n with open(filename, 'r', newline='') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n image_info = (row[POS_NAME], int(row[POS_RED]),\n int(row[POS_GREEN]), int(row[POS_BLUE]))\n color_data.append(image_info)\n return color_data",
"def _read_lick_list(cls, fname=__default__, comment='#'):\n with open(fname, 'r') as f:\n data = {}\n hdr = []\n for line in f:\n if line[0] != comment:\n l = line.split()\n attr = dict(\n band=(float(l[1]), float(l[2])),\n blue=(float(l[3]), float(l[4])),\n red=(float(l[5]), float(l[6])),\n unit='mag' if int(l[7]) > 0 else 'ew',\n )\n name = l[8]\n data[name] = attr\n else:\n hdr.append(line[1:-1])\n return data, hdr",
"def ordered_colors():\n\n return [(\"yellow\",0.263) ,(\"orange\", 0.047), (\"red\",0.0),(\"green\", 0.444), (\"purple\", 0.972)]",
"def _read_lick_list(cls, fname=__default_lick__, comment='#'):\n with open(fname, 'r') as f:\n data = {}\n hdr = []\n for line in f:\n if line[0] != comment:\n _line = line.split()\n attr = dict(\n band=(float(_line[1]), float(_line[2])),\n blue=(float(_line[3]), float(_line[4])),\n red=(float(_line[5]), float(_line[6])),\n unit='mag' if int(_line[7]) > 0 else 'ew',\n )\n name = _line[8]\n data[name] = attr\n else:\n hdr.append(line[1:-1])\n return data, hdr",
"def readColourMap (self):\r\n \r\n num = self.bih_vals [bih_ColorsUsed]\r\n\r\n if num > 0:\r\n self.colourmap = [BLACK_FOUR] * num\r\n \r\n for i in range (0, num):\r\n self.colourmap [i] = self.the_file.read (4)",
"def getColors():\n return ['#8c99fc', '#cacefd', '#fff1d7', '#feda98', '#fda85a', '#fc6647']",
"def load_file(file_name):\n file = open(file_name, 'r')#open the file\n colors = file.read() #reads entire contents of the file and assigns it to names. This is the processing of the file\n file.close() #always close the file\n\n return colors",
"def get_colors(lines):\n\n patt = re.compile('\\#\\w+')\n\n return [\n patt.search(line).group(0)\n for line in lines\n if patt.search(line)\n ]",
"def get_color_range(self):\n color_range = []\n\n try:\n # Open the file and load the data into an array\n saved_file = open(self.file_name_color)\n try:\n data = json.load(saved_file)\n for p in data:\n color_range.append(Color(p[0], p[1], p[2]))\n\n except json.decoder.JSONDecodeError:\n color_range = self.back_up_color_range\n\n saved_file.close()\n\n except FileNotFoundError:\n color_range = self.back_up_color_range\n\n return color_range",
"def get_rgb_light():\n return list(light.rgb())",
"def _build_color_table() -> list[tuple[int, int, int, int, int]]:\n FG = FOREGROUND_COLOR\n BG = BACKGROUND_COLOR\n\n return [\n (0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),\n (0x00, 0x00, 0xAA, FG.BLUE, BG.BLUE),\n (0x00, 0xAA, 0x00, FG.GREEN, BG.GREEN),\n (0x00, 0xAA, 0xAA, FG.CYAN, BG.CYAN),\n (0xAA, 0x00, 0x00, FG.RED, BG.RED),\n (0xAA, 0x00, 0xAA, FG.MAGENTA, BG.MAGENTA),\n (0xAA, 0xAA, 0x00, FG.YELLOW, BG.YELLOW),\n (0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),\n (0x44, 0x44, 0xFF, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),\n (0x44, 0xFF, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),\n (0x44, 0xFF, 0xFF, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),\n (0xFF, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),\n (0xFF, 0x44, 0xFF, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),\n (0xFF, 0xFF, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),\n (0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),\n (0xFF, 0xFF, 0xFF, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),\n ]",
"def get_color_data(self):\n color = []\n data = self.read_byte_data(APDS_9960.CLEAR_DATA_LOW_BYTE_REG_ADDRESS, 8)\n for i in range(4):\n channel_low = data[2 * i]\n channel_high = data[2 * i + 1]\n color.append((channel_high << 8) | channel_low)\n return color",
"def parser(self):\n hold = [i for i, val in enumerate(self.board) if val != self.empty and val.colour == BLACK]\n hold2 = [i for i, val in enumerate(self.board) if val != self.empty and val.colour == WHITE]\n \n #This is why dictionaries are better\n black_coords = []\n white_coords = []\n \n for i in hold:\n black_coords.append(self.coords[i])\n\n for i in hold2:\n white_coords.append(self.coords[i])\n \n return black_coords, white_coords",
"def colors(self):\n return self[\"colors\"]",
"def colors(self):\n return self[\"colors\"]",
"def getColorDict():\n scribus.statusMessage(\"Reading existing colors...\")\n colornames = scribus.getColorNames()\n scribus.progressTotal(len(colornames))\n i=0\n colordict={}\n for name in colornames:\n colordict[name]=None\n i=i+1\n scribus.progressSet(i)\n return colordict #we can ask this dict if the color already exists",
"def getColors():\n colors = ['#d53e4f',\n '#fc8d59',\n '#fee08b',\n '#ffffbf',\n '#e6f598',\n '#99d594',\n '#3288bd',\n ]\n return colors",
"def colors(self):\r\n\t\treturn self._colors",
"def linearTosRGB3(c):\n return [linearTosRGB(c[0]), linearTosRGB(c[1]), linearTosRGB(c[2])]",
"def get_colors(self, url):\n fd = urlopen(url)\n f = io.BytesIO(fd.read())\n im = Image.open(f)\n palette = im.quantize(colors=len(self.lights)).getpalette()\n return self.extract_colors(palette, len(self.lights))",
"def load_colors():\r\n with open(\"colors.p\", \"rb\") as f:\r\n colors = pickle.load(f)\r\n with open(\"colors_hex.p\", \"rb\") as f:\r\n colors_hex = pickle.load(f)\r\n return colors,colors_hex",
"def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]",
"def pretty_colours(how_many):\r\n golden_ratio_conjugate = (1 + math.sqrt(5)) / 2\r\n hue = random.random() # use random start value\r\n final_colours = []\r\n for tmp in range(how_many):\r\n hue += golden_ratio_conjugate * (tmp / (5 * random.random()))\r\n hue = hue % 1\r\n temp_c = [x for x in hsv_to_rgb(hue, 0.5, 0.95)]\r\n final_colours.append(temp_c)\r\n # originally returned ['rgb(123,123,123)', 'rgb(123,123,123)']\r\n # now [[0.123,0.123,0.123],[0.123,0.123,0.123]]\r\n return final_colours",
"def get_colors(self):\n x = np.linspace(0, 1, self.length)\n y = x**self.gamma\n\n value = np.linspace(0, 1, len(self.colors))\n r = np.interp(y, value, self.colors[:,0])\n g = np.interp(y, value, self.colors[:,1])\n b = np.interp(y, value, self.colors[:,2])\n\n return np.dstack((r, g, b)).reshape(len(r), 3).astype(np.uint8)",
"def get_color(self):\n colors = []\n color_specs = [self._red_spec, self._green_spec,\n self._blue_spec, self._white_spec]\n for spec in color_specs:\n driver = DRIVERS[spec.addr]\n colors.append(driver.get_duty_cycle(spec.pin))\n \n return colors",
"def _get_goal_colours() -> List[Tuple[int, int, int]]:\n colour_lst = COLOUR_LIST[:]\n random.shuffle(colour_lst)\n return colour_lst",
"def get_color_list(self):\n lst = []\n\n _lib.caca_get_dither_color_list.argtypes = [_Dither]\n _lib.caca_get_dither_color_list.restype = ctypes.POINTER(ctypes.c_char_p)\n\n for item in _lib.caca_get_dither_color_list(self):\n if item is not None and item != \"\":\n lst.append(item)\n else:\n #memory occurs otherwise\n break\n\n return lst",
"def some_colors(number = 5):\n import colorsys\n N = number\n HSV_tuples = [(x*1.0/N, 1.0, 1.0) for x in range(N)]\n RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)\n\n # if only one color is required don't put in in the list\n if number == 1:\n RGB_tuples = RGB_tuples\n return RGB_tuples"
] | [
"0.5856119",
"0.57543385",
"0.5730062",
"0.566281",
"0.5610903",
"0.5603703",
"0.55331224",
"0.5490069",
"0.5385997",
"0.53710496",
"0.52969",
"0.52936643",
"0.5287496",
"0.52840865",
"0.5262353",
"0.5247332",
"0.5247332",
"0.5245796",
"0.5244358",
"0.5224747",
"0.5222103",
"0.521661",
"0.5182193",
"0.5176997",
"0.5154798",
"0.5153068",
"0.5144124",
"0.5125026",
"0.5114957",
"0.51090944"
] | 0.86052763 | 0 |
Load an internal yaml node by parsing it, defaulting to a scalar value. | def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "YamlModifier":
value = loader.construct_scalar(typing.cast(yaml.ScalarNode, node))
return cls(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"InjectString\":\n raw = loader.construct_scalar(typing.cast(yaml.ScalarNode, node))\n value = json.loads(typing.cast(str, raw).strip(\"\\\"'\"))\n return cls(value)",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"BotoError\":\n value = loader.construct_mapping(node, deep=True)\n return cls(value)",
"def yaml_loads(value):\n return yaml.load(value)",
"def parse_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"YamlModifier\":\n return cls._from_yaml(loader, node)",
"def container_constructor_handler(loader, node):\n filename = loader.construct_scalar(node)\n if os.path.dirname(filename):\n raise AssertionError('Referenced file \"{}\" must be in the same '\n 'directory with YAML file.'.format(filename))\n with open(filename) as fp:\n content = (yaml.load(fp) if filename[-5:] == '.yaml'\n else fp.read(-1))\n return content",
"def __init__(self, node: yaml.Node) -> None:\n self.yaml_node = node",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"ToJson\":\n try:\n value = loader.construct_mapping(node, deep=True)\n except yaml.constructor.ConstructorError:\n value = loader.construct_sequence(node, deep=True)\n return cls(value)",
"def get_value(self) -> ScalarType:\n if self.yaml_node.tag == 'tag:yaml.org,2002:str':\n return str(self.yaml_node.value)\n if self.yaml_node.tag == 'tag:yaml.org,2002:int':\n return int(self.yaml_node.value)\n if self.yaml_node.tag == 'tag:yaml.org,2002:float':\n return float(self.yaml_node.value)\n if self.yaml_node.tag == 'tag:yaml.org,2002:bool':\n return self.yaml_node.value in ['TRUE', 'True', 'true']\n if self.yaml_node.tag == 'tag:yaml.org,2002:null':\n return None\n raise RuntimeError('This node with tag \"{}\" is not of the right type'\n ' for get_value()'.format(self.yaml_node.tag))",
"def include(self, node):\n filename = os.path.join(self._root, self.construct_scalar(node))\n with open(filename, 'r') as f:\n return yaml.load(f, AttrLoader)",
"def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n fname = os.path.join(os.path.dirname(loader.name), node.value)\n try:\n return _add_reference(load_yaml(fname), loader, node)\n except FileNotFoundError as exc:\n raise XKNXException(f\"{node.start_mark}: Unable to read file {fname}.\") from exc",
"def set_value(self, value: ScalarType) -> None:\n if isinstance(value, bool):\n value_str = 'true' if value else 'false'\n else:\n value_str = str(value)\n start_mark = self.yaml_node.start_mark\n end_mark = self.yaml_node.end_mark\n # If we're of a class type, then we want to keep that tag so that the\n # correct Constructor is called. If we're a built-in type, set the tag\n # to the appropriate YAML tag.\n tag = self.yaml_node.tag\n if tag.startswith('tag:yaml.org,2002:'):\n tag = scalar_type_to_tag[type(value)]\n new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)\n self.yaml_node = new_node",
"def construct_include(loader: Loader, node: yaml.Node) -> Any:\n\n filename = os.path.abspath(\n os.path.join(loader._root, loader.construct_scalar(node))\n )\n extension = os.path.splitext(filename)[1].lstrip(\".\")\n\n with open(filename, \"r\") as f:\n if extension in (\"yaml\", \"yml\"):\n return yaml.load(f, Loader)\n elif extension in (\"json\",):\n return json.load(f)\n else:\n return \"\".join(f.readlines())",
"def from_yaml(cls, loader, node):\n m = loader.construct_mapping(node)\n ret = cls()\n for k in cls._yaml_keys:\n setattr(ret, k, m[k])\n return ret",
"def FromYAML(cls, source):\n\n # Late import to avoid a circular dependency.\n try:\n import bulletml.bulletyaml\n import yaml\n except ImportError:\n raise ParseError(\"PyYAML is not available\")\n else:\n try:\n return yaml.load(source)\n except Exception as exc:\n raise ParseError(str(exc))",
"def from_yaml(cls, loader, node):\n instance = cls._yamlMakeInstance()\n yield instance\n mapping = loader.construct_mapping(node)\n instance._yamlSetAttributes(mapping)",
"def load_attribute(self,\n node_data: dict[str],\n node: awe.data.graph.dom.Node,\n snake_case: str,\n parser: Callable[[Any, dict[str, Any]], Any] = lambda x: x,\n default: Callable[[awe.data.graph.dom.Node], Any] = lambda _: None\n ):\n\n camel_case = awe.utils.to_camel_case(snake_case)\n val = node_data.get(camel_case) or default(node)\n if val is not None:\n try:\n result = parser(val, node_data)\n except ValueError as e:\n d = default(node)\n warnings.warn(f'Cannot parse {snake_case}={val!r} ' +\n f'using default={d!r} in {self.path!r}: {str(e)}')\n node.dom.page.valid = False\n result = parser(d, node_data)\n return result\n return None",
"def load(self, data):\n\t\tif 'value' in data:\n\t\t\tself.value = data['value']",
"def from_yaml(self, yaml):\n self.hwAddress = yaml.get('hwAddress')\n if self.hwAddress:\n self.hwAddress = self.hwAddress.lower()\n self.ip = yaml.get('IP')\n self.formulas = {}\n for f in yaml:\n if isinstance(yaml[f], dict):\n self.formulas[f] = yaml[f]\n\n self.hwtype = yaml.get('hwtype')",
"def NastyYamlLoad(yamlStr):\n import paperDoll as PD\n sys.modules[PD.__name__] = PD\n instance = None\n try:\n blue.statistics.EnterZone('yaml.load')\n instance = yaml.load(yamlStr, Loader=yaml.CLoader)\n except Exception:\n log.LogError('PaperDoll: Yaml parsing failed for data', yamlStr)\n finally:\n blue.statistics.LeaveZone()\n del sys.modules[PD.__name__]\n\n return instance",
"def _yaml_load(src):\n if not isinstance(src, str):\n try:\n src_name = src.name\n except AttributeError:\n src_name = '<yaml stringio>'\n # Force-load file streams as that allows the parser to print\n # much more context when it encounters an error\n src = src.read()\n else:\n src_name = '<yaml string>'\n try:\n return yaml.safe_load(src)\n except yaml.YAMLError:\n logging.error('Parser error when reading YAML from {}.'.format(src_name))\n raise",
"def construct_yaml_binary(loader, node):\n return Binary(loader.construct_yaml_binary(node))",
"def from_yaml(cls, y):\n return cls(yaml.load(y, AttrLoader))",
"def from_yaml(cls, b):\n return cls.from_dict(yaml.safe_load(b))",
"def load(cls, yaml_or_json):\n try:\n result = yaml.safe_load_all(yaml_or_json)\n except:\n try:\n result = json.loads(yaml_or_json)\n if isinstance(result, dict):\n result = (result for _ in range(1))\n except:\n result = None\n\n return result",
"def _dump_yaml(cls, dumper: yaml.Dumper, source: \"YamlModifier\") -> typing.Any:\n return dumper.represent_scalar(source.label(), source.value)",
"def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, Loader=yaml.FullLoader)\n \n return traj_data",
"def load_yaml(fname):\n with open(fname) as f:\n val = yaml.safe_load(os.path.expandvars(f.read()))\n return val",
"def _add_reference(obj, loader: SafeLineLoader, node: yaml.nodes.Node): # type: ignore\n if isinstance(obj, list):\n obj = NodeListClass(obj)\n if isinstance(obj, str):\n obj = NodeStrClass(obj)\n setattr(obj, \"__config_file__\", loader.name)\n setattr(obj, \"__line__\", node.start_mark.line)\n return obj",
"def from_yaml(self, content):\r\n if yaml is None:\r\n raise UnsupportedDeserializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.load(content, Loader=DeliciousCakeLoader)"
] | [
"0.73322433",
"0.63728184",
"0.61908674",
"0.61466956",
"0.6134764",
"0.6104053",
"0.6015713",
"0.59930307",
"0.598406",
"0.59112525",
"0.5903671",
"0.5833151",
"0.5768858",
"0.5761115",
"0.57025176",
"0.5649191",
"0.5563314",
"0.55608124",
"0.5558609",
"0.5475866",
"0.54293907",
"0.53970295",
"0.5382753",
"0.5381201",
"0.53559405",
"0.5354073",
"0.53345495",
"0.53211427",
"0.5298712",
"0.5277455"
] | 0.7018517 | 1 |
Parse yaml node into this class object for Lobotomy processing. | def parse_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> "YamlModifier":
return cls._from_yaml(loader, node) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, node: yaml.Node) -> None:\n self.yaml_node = node",
"def from_yaml(cls, y):\n return cls(yaml.load(y, AttrLoader))",
"def FromYAML(cls, source):\n\n # Late import to avoid a circular dependency.\n try:\n import bulletml.bulletyaml\n import yaml\n except ImportError:\n raise ParseError(\"PyYAML is not available\")\n else:\n try:\n return yaml.load(source)\n except Exception as exc:\n raise ParseError(str(exc))",
"def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)",
"def from_yaml(cls, yml: str):\n\n return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))",
"def from_yaml(cls, loader, node):\n instance = cls._yamlMakeInstance()\n yield instance\n mapping = loader.construct_mapping(node)\n instance._yamlSetAttributes(mapping)",
"def from_yaml(self, content):\r\n if yaml is None:\r\n raise UnsupportedDeserializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.load(content, Loader=DeliciousCakeLoader)",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"BotoError\":\n value = loader.construct_mapping(node, deep=True)\n return cls(value)",
"def from_yaml(cls, loader, node):\n m = loader.construct_mapping(node)\n ret = cls()\n for k in cls._yaml_keys:\n setattr(ret, k, m[k])\n return ret",
"def _deserialize(self):\n try:\n self._as_dict = yaml.load(self.path)\n except ScannerError as e:\n raise exc.ContentSerializeError(self, self.path, e.problem)",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"ToJson\":\n try:\n value = loader.construct_mapping(node, deep=True)\n except yaml.constructor.ConstructorError:\n value = loader.construct_sequence(node, deep=True)\n return cls(value)",
"def from_yaml(cls, model: nn.Module, yaml_path: str) -> pl.LightningModule:\n with open(yaml_path, \"r\") as stream:\n kwargs = yaml.full_load(stream)\n\n return cls(model, **kwargs)",
"def from_yaml(cls, b):\n return cls.from_dict(yaml.safe_load(b))",
"def __init__(self, yaml_file_path: Path) -> None:\n with yaml_file_path.open(\"r\") as yaml_file:\n self._yaml = YAML().load(yaml_file.read())",
"def __init__(self, recognizer: IRecognizer, node: yaml.Node) -> None:\n self.__recognizer = recognizer\n self.yaml_node = node",
"def __attrs_post_init__(self):\n if not self.path:\n self.path = Path.cwd() / CONFIG['meta_yaml_path']\n if not self.path.exists():\n raise AttributeError(f'Path {self.path} doesn\\'t exist.')\n self.update()\n try:\n validators.SMetaYaml(strict=True).load(self.get_content())\n except ValidationError as err:\n inform.error('meta.yaml has incorrect content.')\n inform.error('Invalid value for following params:')\n for key, value in err.messages.items():\n inform.error(f'{key}: {value}')\n inform.critical()",
"def from_content(cls, content: str) -> Any:\n cls._check_yaml()\n return yaml.safe_load(content)",
"def from_yaml(self, yaml):\n self.hwAddress = yaml.get('hwAddress')\n if self.hwAddress:\n self.hwAddress = self.hwAddress.lower()\n self.ip = yaml.get('IP')\n self.formulas = {}\n for f in yaml:\n if isinstance(yaml[f], dict):\n self.formulas[f] = yaml[f]\n\n self.hwtype = yaml.get('hwtype')",
"def from_yaml(\n cls,\n yml: str,\n defaults: Optional[bool]=False,\n path: Optional[str]=None,\n keys: Optional[str]=None) -> 'Parser':\n fname = Path(yml)\n if defaults:\n # load from 'ctwrap/defaults' database\n fname = Path(__file__).parents[0] / 'defaults' / fname\n elif path is not None:\n fname = Path(path) / fname\n\n try:\n _ = fname.is_file() # will raise error\n with open(fname) as stream:\n out = yaml.load(stream, Loader=yaml.SafeLoader)\n except OSError:\n out = yaml.load(yml, Loader=yaml.SafeLoader)\n\n if keys is None:\n return cls(out)\n\n return cls({k: out[k] for k in keys})",
"def yaml_operation_parse(self, path_to_yaml, schema_name):\n\n # TODO: Add validation logic for YAML\n\n with open(path_to_yaml, 'r') as f:\n api_doc = yaml.load(f)\n\n self.tags = []\n self.summary = api_doc['summary']\n self.description = api_doc['description']\n if self.valid_content_type(api_doc['consumes']):\n self.consumes = api_doc['consumes']\n if self.valid_content_type(api_doc['produces']):\n self.produces = api_doc['produces']\n self.parameters = api_doc['parameters']\n self.responses = api_doc['responses']\n\n # TODO: Make sure all operation parameters have been filled with valid values\n\n self.yaml_operation_update(schema_name)",
"def load_yaml(content):\n from yaml import load, FullLoader\n return load(content, Loader=FullLoader)",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"YamlModifier\":\n value = loader.construct_scalar(typing.cast(yaml.ScalarNode, node))\n return cls(value)",
"def from_yaml(cls, yaml_string=None, filename=None, encoding='utf-8', errors='strict', loader=yaml.SafeLoader, **kwargs):\n bx_args = {}\n for arg in kwargs.copy():\n if arg in BOX_PARAMETERS:\n bx_args[arg] = kwargs.pop(arg)\n data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, Loader=loader, **kwargs)\n if not isinstance(data, dict):\n raise BoxError('yaml data not returned as a dictionarybut rather a {0}'.format(type(data).__name__))\n return cls(data, **bx_args)",
"def load_yaml(self):\n env = self.state.document.settings.env\n relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))\n\n env.note_dependency(relpath)\n\n encoding = self.options.get('encoding', env.config.source_encoding)\n with io.open(abspath, 'rt', encoding=encoding) as stream:\n spec = yaml.load(stream, _YamlOrderedLoader) # nosec\n self.spec = spec\n self.paths = spec[self.path_path]\n self.definitions = spec[self.models_path]\n self.openapi_version = spec.get('swagger', None) or spec['openapi']\n self.options.setdefault('uri', 'file://%s' % abspath)",
"def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n fname = os.path.join(os.path.dirname(loader.name), node.value)\n try:\n return _add_reference(load_yaml(fname), loader, node)\n except FileNotFoundError as exc:\n raise XKNXException(f\"{node.start_mark}: Unable to read file {fname}.\") from exc",
"def parse(self, config_file):\n\t\tself.options = yaml.load(open(config_file))",
"def yaml_parse(yamlstr):\n try:\n # PyYAML doesn't support json as well as it should, so if the input\n # is actually just json it is better to parse it with the standard\n # json parser.\n return json.loads(yamlstr, object_pairs_hook=OrderedDict)\n except ValueError:\n loader = SafeLoaderWrapper\n loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, \n _dict_constructor)\n loader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n return yaml.load(yamlstr, loader)",
"def _read(self, text):\n return yaml.safe_load(text)",
"def get_cfg_from_yaml(self):\n try:\n with open(self.parsed_cfg_path, 'r') as cfg_yaml:\n self.from_yaml_cfg_dict = yaml.load(cfg_yaml)\n except Exception as exc:\n print(exc)\n traceback.print_exc()\n self.from_yaml_cfg_dict = {}",
"def from_yaml(cls, path: str) -> \"EtlSettings\":\n with fsspec.open(path) as f:\n yaml_file = yaml.safe_load(f)\n return cls.parse_obj(yaml_file)"
] | [
"0.7190225",
"0.68135875",
"0.6604197",
"0.6597981",
"0.65922415",
"0.65773606",
"0.6518172",
"0.65123314",
"0.6395062",
"0.6377166",
"0.6372749",
"0.6344428",
"0.6335441",
"0.6274441",
"0.6237253",
"0.60822874",
"0.60467535",
"0.6043854",
"0.6027036",
"0.5990031",
"0.5938824",
"0.5909672",
"0.5869253",
"0.5859342",
"0.5839954",
"0.58166397",
"0.58023036",
"0.5789427",
"0.5780999",
"0.57684815"
] | 0.73717535 | 0 |
Register the comparator with the PyYaml loader. | def register(cls):
yaml.add_constructor(cls.label(), cls.parse_yaml)
yaml.add_representer(cls, cls.dump_yaml) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setComparator(self, comparator: dict):\n self._comparator = comparator",
"def set_result_comparator(self, comparator):\n\n self._comparator = comparator",
"def register_loader(key, module):\n register(key, module, loader_dict)",
"def _yaml_ordering_support():\n _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n def dict_representer(dumper, data):\n return dumper.represent_dict(data.iteritems())\n\n def dict_constructor(loader, node):\n return OrderedDict(loader.construct_pairs(node))\n\n yaml.add_representer(OrderedDict, dict_representer)\n yaml.add_constructor(_mapping_tag, dict_constructor)",
"def register_from_yaml(self, path_to_yaml: str) -> None:\n self._manifests.append(path_to_yaml)\n self._sync = False",
"def register_yaml(self, yaml_text):\n\n defs = yaml.load_all(yaml_text)\n for def_set in defs:\n for name,_def in def_set.iteritems():\n # TODO: Hook into pyyaml's event emitting stuff to try to get the canonical form without re-dumping\n def_text = yaml.dump(_def, canonical=True, allow_unicode=True)\n self.register_def(name, _def, def_text)",
"def __ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n lambda loader, node: object_pairs_hook(loader.construct_pairs(node)))\n return yaml.load(stream, OrderedLoader)",
"def test_event_pre_yaml_parse(self) -> None:\n\n @Event.PreYAMLParse.subscribe\n def hook(string: str) -> Optional[str]:\n return self.EXAMPLE_YAML_FILE\n\n assert Event.PreYAMLParse.validate()\n\n reference = self.EXAMPLE_ENTRY_DICT.copy()\n entries = YAMLParser().parse(\"Hello world!\")\n entry = list(entries.values())[0]\n assert entry.data == reference",
"def register(linter):\n linter.register_checker(SimilarChecker(linter))",
"def test_register_with_another_extension(self):\n DummyLoader.register()\n DummyLoader.register(extensions=('.example2',))\n self.assertIs(getattr(sys, HOOK_NAME)['.example2'], DummyLoader)",
"def test_event_pre_yaml_dump(self) -> None:\n\n @Event.PreYAMLDump.subscribe\n def hook(entry: Entry) -> None:\n entry.label = \"Cao2019\"\n\n assert Event.PreYAMLDump.validate()\n\n entry = Entry(\"Cao_2019\", self.EXAMPLE_ENTRY_DICT.copy())\n entry_str = YAMLParser().dump(entry)\n assert cast(str, entry_str).split(\"\\n\")[1] == \"Cao2019:\"",
"def test_cmp_to_key(self):\n def compare_pokemon(a, b):\n # ``a`` and ``b`` are tuples of ``(key, class)``.\n return (\n (a[1].popularity < b[1].popularity)\n - (a[1].popularity > b[1].popularity)\n )\n\n registry =\\\n SortedClassRegistry(\n attr_name = 'element',\n sort_key = cmp_to_key(compare_pokemon),\n )\n\n @registry.register\n class Onix(Pokemon):\n element = 'rock'\n popularity = 50\n\n @registry.register\n class Cubone(Pokemon):\n element = 'water'\n popularity = 100\n\n @registry.register\n class Exeggcute(Pokemon):\n element = 'grass'\n popularity = 10\n\n # The registry iterates over registered classes in descending\n # order by ``popularity``.\n self.assertListEqual(\n list(registry.values()),\n [Cubone, Onix, Exeggcute],\n )",
"def comparator_converter(self, val):\r\n return val",
"def container_constructor_handler(loader, node):\n filename = loader.construct_scalar(node)\n if os.path.dirname(filename):\n raise AssertionError('Referenced file \"{}\" must be in the same '\n 'directory with YAML file.'.format(filename))\n with open(filename) as fp:\n content = (yaml.load(fp) if filename[-5:] == '.yaml'\n else fp.read(-1))\n return content",
"def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)",
"def from_yaml(cls, loader, node):\n m = loader.construct_mapping(node)\n ret = cls()\n for k in cls._yaml_keys:\n setattr(ret, k, m[k])\n return ret",
"def __init__(self, ItemComparer):\n self.item_comparer = ItemComparer",
"def _initialize_protocols(self):\n with open(str(pathlib.Path(__file__).parent / 'protocol.yml'), encoding='UTF-8') as handle:\n self._protocols = yaml.safe_load(handle)",
"def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=AttrDict):\n class Ordered_Loader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n Ordered_Loader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, Ordered_Loader)",
"def test_cli_with_comparator_config_from_config(runner):\n\n @click.command()\n @common_options\n @options_from_config(ComparatorConfig, ComparatorConfigCli)\n def my_cmd_fun(**kwargs):\n config = ComparatorConfig.from_dict(kwargs)\n print(config)\n\n with tempfile.TemporaryDirectory() as temp_dir:\n config_path = Path(temp_dir) / \"config.yaml\"\n with YamlConfigFile(config_path) as config_file:\n config_file.save_config(COMPARATOR_CONFIG1)\n\n expected_config_str = str(COMPARATOR_CONFIG1)\n result = runner.invoke(my_cmd_fun, [\"--config-path\", config_path.resolve().as_posix()])\n assert result.output.splitlines() == [expected_config_str]\n assert not result.exception\n assert result.exit_code == 0",
"def test_load(self):\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_utilisation: 1.1\n - !MockPool\n \"\"\"\n )\n with load(config.name):\n assert True\n assert True",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"YamlModifier\":\n value = loader.construct_scalar(typing.cast(yaml.ScalarNode, node))\n return cls(value)",
"def register(linter):\n linter.register_checker(McCabe(linter))",
"def test_auto_register(self):\n\n class TestConverter(BaseConverter):\n pass\n\n class TestConverterWithMeta(BaseConverter):\n class Meta:\n name = 'test'\n\n self.assertEquals(TestConverter, ConverterRegistry.get('TestConverter'))\n self.assertEquals(TestConverterWithMeta, ConverterRegistry.get('test'))",
"def comparator(self) -> Operator:\n return self.__comparator",
"def comparer(self, comparer):\n\n self._comparer = comparer",
"def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )",
"def ordered_load(stream, Loader=yaml_Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)",
"def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)",
"def test_loaders():\n\n tempdir = tempfile.mkdtemp()\n\n loader = \"\"\"\nfrom mindbender import api\n\nclass DemoLoader(api.Loader):\n def process(self, asset, subset, version, representation):\n pass\n\n\"\"\"\n\n with open(os.path.join(tempdir, \"my_loader.py\"), \"w\") as f:\n f.write(loader)\n\n try:\n pipeline.register_loaders_path(tempdir)\n loaders = pipeline.discover_loaders()\n\n assert \"DemoLoader\" in list(\n L.__name__ for L in loaders\n ), \"Loader not found in %s\" % \", \".join(\n l.__name__ for l in loaders)\n\n finally:\n shutil.rmtree(tempdir)"
] | [
"0.58455926",
"0.5207403",
"0.51714724",
"0.5046912",
"0.49322683",
"0.48709297",
"0.47904545",
"0.4760413",
"0.46908015",
"0.46855125",
"0.4660275",
"0.4592575",
"0.45680568",
"0.44943762",
"0.44933996",
"0.44847608",
"0.44837308",
"0.4481726",
"0.4451131",
"0.44410124",
"0.44403684",
"0.4439997",
"0.44317654",
"0.44092354",
"0.44045362",
"0.438236",
"0.43820322",
"0.43733403",
"0.43654606",
"0.43555"
] | 0.5855883 | 0 |
Returns the closed form of a '_{side}_inline.nii.gz' mask as a numpy array, along with the clipped array. | def close_mask_in(im_slice_2d, side):
new_slice = im_slice_2d.copy()
x_no_0, y_no_0 = np.nonzero(im_slice_2d)
if len(x_no_0) == 0: return new_slice, new_slice
#breakpoint()
x1 = x_no_0.min()
x2 = x_no_0.max()
if side == "l":
x_mid = x2; x_aux1 = x_mid - 9 + 1; x_aux2 = x2 + 1
elif side == "r":
x_mid = x1; x_aux2 = x_mid + 9; x_aux1 = x1
y_mid = y_no_0[np.where(x_no_0==x_mid)[0]].min()
y_min = y_no_0.min()
# inferior line
new_slice[x1:x2+1, y_min] = 1
# medial line
new_slice[x_mid, y_min:y_mid+1] = 1
new_slice = binary_fill_holes(new_slice)
# in_short array:
other_slice = new_slice.copy()
other_slice[x_aux1:x_aux2, :] = 0
return new_slice, other_slice | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)",
"def crop_to_nonzero(arrayin, mask=None):\r\n\r\n if type(arrayin) == np.ndarray :\r\n array = arrayin\r\n elif type(arrayin) == list :\r\n array = arrayin[0]\r\n\r\n if mask==None :\r\n mask = array\r\n #most left point \r\n for i in range(mask.shape[1]):\r\n tot = np.sum(np.abs(mask[:,i]))\r\n if tot > 0.0 :\r\n break\r\n left = i\r\n #most right point \r\n for i in range(mask.shape[1]-1,-1,-1):\r\n tot = np.sum(np.abs(mask[:,i]))\r\n if tot > 0.0 :\r\n break\r\n right = i\r\n #most up point \r\n for i in range(mask.shape[0]):\r\n tot = np.sum(np.abs(mask[i,:]))\r\n if tot > 0.0 :\r\n break\r\n top = i\r\n #most down point\r\n for i in range(mask.shape[0]-1,-1,-1):\r\n tot = np.sum(np.abs(mask[i,:]))\r\n if tot > 0.0 :\r\n break\r\n bottom = i\r\n if type(arrayin) == np.ndarray :\r\n arrayout = array[top:bottom+1,left:right+1]\r\n elif type(arrayin) == list :\r\n arrayout = []\r\n for i in arrayin :\r\n arrayout.append(i[top:bottom+1,left:right+1])\r\n return arrayout",
"def embed(im, mask, clipfloor=0., randomfloor=False):\n j=0\n out=np.zeros(len(mask))\n for i in range(len(mask)):\n if mask[i]:\n out[i] = im[j]\n j += 1\n else:\n # prevent total variation gradient singularities\n if randomfloor: out[i] = clipfloor * np.random.normal()\n else: out[i] = clipfloor\n\n return out",
"def _maskedCollapse(array_in, method): \n import numpy.ma as ma\n \n # Perform an numpy.ma array collapse along the z-axis\n if method == 'sum':\n print('(3d_collapse): Masked sum collapse of extracted slices ...')\n collapsed_array = ma.sum(array_in, axis=0)\n \n elif method == 'mean':\n print('(3d_collapse): Masked mean of extracted slices:')\n collapsed_array = ma.mean(array_in, axis=0)\n \n elif method == 'median':\n print('(3d_collapse): Masked median of extracted slices:')\n collapsed_array = ma.extras.median(array_in, axis=0)\n \n # Returns an array of type numpy.array \n return collapsed_array.data",
"def ice_unmasked(res='4x5', debug=False):\n # Create a np.ma mask\n m = np.logical_not((land_unmasked(res)*ocean_unmasked(res)))\n if debug:\n print((mask, mask.shape))\n return m",
"def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5",
"def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask",
"def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)",
"def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask",
"def filter_isolated_pixels(array):\n filtered_array = np.copy(array)\n id_regions, num_ids = ndimage.label(filtered_array,\n structure=np.ones((3, 3)))\n id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids+1)))\n area_mask = (id_sizes == 1)\n filtered_array[area_mask[id_regions]] = 0\n return filtered_array",
"def cube_fix_badpix_isolated(array, bpm_mask=None, sigma_clip=3, num_neig=5, \n size=5, frame_by_frame=False, protect_mask=0, \n cxy=None, mad=False, ignore_nan=True, verbose=True, \n full_output=False):\n if array.ndim != 3:\n raise TypeError('Array is not a 3d array or cube')\n if size % 2 == 0:\n raise TypeError('Size of the median blur kernel must be an odd integer')\n \n if bpm_mask is not None:\n bpm_mask = bpm_mask.astype('bool')\n \n if verbose: start = time_ini()\n \n if num_neig > 0:\n neigh = True\n else:\n neigh = False\n \n nz = array.shape[0]\n \n if cxy is None:\n cy, cx = frame_center(array[0])\n elif isinstance(cxy, tuple):\n cx, cy = cxy\n elif isinstance(cxy, np.ndarray):\n if cxy.shape[0] != nz or cxy.shape[1] != 2 or cxy.ndim != 2:\n raise ValueError(\"cxy does not have right shape\")\n elif not frame_by_frame:\n msg = \"cxy must be a tuple or None if not in frame_by_frame mode\"\n raise ValueError(msg)\n else:\n cx = cxy[:,0]\n cy = cxy[:,1]\n\n \n array_out = array.copy()\n final_bpm = np.zeros_like(array_out, dtype=bool)\n n_frames = array.shape[0]\n count_bp = 0\n if frame_by_frame:\n if np.isscalar(cx):\n cx = [cx]*nz\n cy = [cy]*nz\n if bpm_mask is not None:\n if bpm_mask.ndim == 2:\n bpm_mask = [bpm_mask]*n_frames\n bpm_mask = np.array(bpm_mask)\n for i in Progressbar(range(n_frames), desc=\"processing frames\"):\n if bpm_mask is not None:\n bpm_mask_tmp = bpm_mask[i]\n else:\n bpm_mask_tmp = None\n res = frame_fix_badpix_isolated(array[i], bpm_mask=bpm_mask_tmp, \n sigma_clip=sigma_clip,\n num_neig=num_neig, size=size, \n protect_mask=protect_mask, \n verbose=False, cxy=(cx[i],cy[i]), \n ignore_nan=ignore_nan,\n full_output=True)\n array_out[i] = res[0]\n final_bpm[i] = res[1] \n count_bp = np.sum(final_bpm) \n else: \n if bpm_mask is None:\n ori_nan_mask = np.where(np.isnan(np.nanmean(array, axis=0)))\n ind = clip_array(np.nanmean(array, axis=0), sigma_clip, sigma_clip,\n neighbor=neigh, num_neighbor=num_neig, mad=mad)\n final_bpm = np.zeros_like(array[0], dtype=bool)\n final_bpm[ind] = 1\n if ignore_nan:\n final_bpm[ori_nan_mask] = 0\n if protect_mask:\n cir = disk((cy, cx), protect_mask, shape=final_bpm.shape)\n final_bpm[cir] = 0\n final_bpm = final_bpm.astype('bool')\n else:\n if bpm_mask.ndim == 3:\n final_bpm = np.median(bpm_mask, axis=0)\n else:\n final_bpm = bpm_mask.copy()\n \n for i in Progressbar(range(n_frames), desc=\"processing frames\"):\n frame = array_out[i]\n smoothed = median_filter(frame, size, mode='mirror')\n frame[np.where(final_bpm)] = smoothed[np.where(final_bpm)]\n if verbose: \n count_bp += np.sum(final_bpm)\n \n if verbose: \n msg = \"Done replacing {:.0f} bad pixels using the median of neighbors\"\n print(msg.format(count_bp))\n if not frame_by_frame:\n msg = \"(i.e. {:.0f} static bad pixels per channel))\"\n print(msg.format(count_bp/n_frames)) \n timing(start)\n \n if full_output:\n return array_out, final_bpm\n else:\n return array_out",
"def get_sample_mask(self):",
"def get_regions_mask(self, input):",
"def mask(self):",
"def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3",
"def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3",
"def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")",
"def get_sub_image(data, mask, mask_false_value=False):\n # TODO: ask about error handling!!! Also this method might be useless!\n assert data.shape == mask.shape\n return np.where(mask, mask_false_value, data)",
"def frame_fix_badpix_isolated(array, bpm_mask=None, sigma_clip=3, num_neig=5,\n size=5, protect_mask=0, cxy=None, mad=False, \n ignore_nan=True, verbose=True, full_output=False):\n if array.ndim != 2:\n raise TypeError('Array is not a 2d array or single frame')\n if size % 2 == 0:\n raise TypeError('Size of the median blur kernel must be an odd integer')\n\n if bpm_mask is not None:\n bpm_mask = bpm_mask.astype('bool')\n\n if verbose: start = time_ini()\n\n if num_neig > 0:\n neigh = True\n else:\n neigh = False\n\n frame = array.copy()\n if cxy is None:\n cy, cx = frame_center(frame)\n else:\n cx, cy = cxy\n \n if bpm_mask is None:\n ori_nan_mask = np.where(np.isnan(frame))\n ind = clip_array(frame, sigma_clip, sigma_clip, neighbor=neigh,\n num_neighbor=num_neig, mad=mad)\n bpm_mask = np.zeros_like(frame)\n bpm_mask[ind] = 1\n if ignore_nan:\n bpm_mask[ori_nan_mask] = 0\n if protect_mask:\n cir = disk((cy, cx), protect_mask, shape=bpm_mask.shape)\n bpm_mask[cir] = 0\n bpm_mask = bpm_mask.astype('bool')\n\n smoothed = median_filter(frame, size, mode='mirror')\n frame[np.where(bpm_mask)] = smoothed[np.where(bpm_mask)]\n array_out = frame\n count_bp = np.sum(bpm_mask)\n \n if verbose:\n msg = \"/nDone replacing {} bad pixels using the median of neighbors\"\n print(msg.format(count_bp))\n timing(start)\n \n if full_output:\n return array_out, bpm_mask\n else:\n return array_out",
"def clip_extrema(self, nlow=0, nhigh=0):\n\n if nlow is None:\n nlow = 0\n if nhigh is None:\n nhigh = 0\n\n argsorted = np.argsort(self.data_arr.data, axis=0)\n mg = np.mgrid[[slice(ndim)\n for i, ndim in enumerate(self.data_arr.shape) if i > 0]]\n for i in range(-1*nhigh, nlow):\n # create a tuple with the indices\n where = tuple([argsorted[i, :, :].ravel()] +\n [i.ravel() for i in mg])\n self.data_arr.mask[where] = True",
"def _compute_masked_hidden(self, hidden, mask):\r\n mask = mask.unsqueeze(-1).expand_as(hidden)\r\n hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1))\r\n return hidden_masked",
"def extratropics_unmasked(res='4x5', mask2D=False):\n\n # Create a mask of 1s for chosen area and or 0s elsewhere\n m = np.zeros(get_dims4res(res))\n lats = np.concatenate((np.arange(-89, -26, 1), np.arange(26, 90, 1)))\n lats = [get_gc_lat(i, res=res) for i in lats]\n for i in lats:\n m[:, i, :] = 1\n # Create a np.ma mask\n m = np.ma.masked_not_equal(m, 1)\n\n # Return 2D or 3D?\n if mask2D:\n return m[..., 0].mask\n else:\n return m.mask",
"def eo_filter(source):\n nodata_bools = source.apply(lambda array: array == array.nodata).to_array(dim='band')\n\n nothingness = nodata_bools.all(dim='band')\n noncontiguous = nodata_bools.any(dim='band')\n\n return np.uint8(NO_DATA) * nothingness | np.uint8(MASKED_NO_CONTIGUITY) * noncontiguous",
"def generate_mask(self):\n\n polymer_length = len(self.sequence)\n protein_length = len(self.particle_order) - polymer_length\n\n if self.filter_specification == 'type':\n mask = np.in1d(self.particle_order, self.monomer_id)\n elif self.filter_specification == 'id':\n if self.molecule == 'polymer':\n offset = protein_length\n else:\n offset = 0\n mask = np.array([False] * (polymer_length + protein_length))\n absolute_id = [x+offset for x in self.monomer_id]\n mask[absolute_id] = True\n else:\n raise NotImplementedError(\"Filter is unknown. Use 'type' or 'id'!\")\n\n # if molecule == 'full', nothing needs to be done\n if self.molecule == 'polymer':\n mask[:protein_length] = [False] * protein_length\n elif self.molecule == 'protein':\n mask[protein_length:] = [False] * polymer_length\n\n return mask",
"def get_mask(self, img):\n raise NotImplementedError()",
"def _prepare_mask_file(mask):\n result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n\n if mask[i][j] > 0:\n result[i][j] = 1\n else:\n result[i][j] = 0\n \n return result",
"def offset_mask(mask):\n def axis_data(axis):\n \"\"\"Gets the bounds of a masked area along a certain axis\"\"\"\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size\n\n xo,xs = axis_data(0)\n yo,ys = axis_data(1)\n\n array = mask[yo:yo+ys,xo:xo+xs]\n offset = (yo,xo)\n return offset, array",
"def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)",
"def unsharp_mask(img, size=3):\n\n # apply the averaging filter\n avg = average(img, size)\n\n # subtract the average from the image, for a \"diference\" mask\n int_img = np.asarray(img, np.int)\n diff_mask = int_img - avg\n\n # Finally add the mask to the original image\n sharp = int_img + diff_mask\n\n return np.asarray(np.clip(sharp, 0, 255), dtype=np.uint8)",
"def get_mask(self):\n\t\treturn pygame.mask.from_surface(self.img)"
] | [
"0.5911587",
"0.5905847",
"0.5815674",
"0.5776319",
"0.56993324",
"0.56690925",
"0.5630956",
"0.5623893",
"0.56130314",
"0.55582917",
"0.55565006",
"0.5489081",
"0.548841",
"0.5459489",
"0.5441693",
"0.5441693",
"0.54369754",
"0.543186",
"0.541298",
"0.5378559",
"0.5376243",
"0.53635854",
"0.5359357",
"0.5355069",
"0.53324145",
"0.5320719",
"0.5317439",
"0.531533",
"0.5313271",
"0.53107893"
] | 0.64788425 | 0 |
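For context on how the `close_mask_in` document above would typically be driven, here is a hedged sketch that applies it slice by slice to a NIfTI mask. The file name is a placeholder and `nibabel` is an assumed loader; the function itself only implies NumPy and `scipy.ndimage.binary_fill_holes`.

```python
# Hedged usage sketch for close_mask_in above. The mask path is a placeholder and
# nibabel is an assumed loader; the function only needs numpy and
# scipy.ndimage.binary_fill_holes.
import numpy as np
import nibabel as nib

img = nib.load("subject01_l_inline.nii.gz")   # hypothetical left-side inline mask
data = img.get_fdata()

closed = np.zeros_like(data)
clipped = np.zeros_like(data)
for z in range(data.shape[2]):                # process each 2-D slice independently
    closed[:, :, z], clipped[:, :, z] = close_mask_in(data[:, :, z], side="l")
```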
This method is used for both 'xcworkspace' and 'xcodeproj' classes. It returns a list of schemes that are labeled as 'user' or 'shared'. | def schemes(self):
schemes = [];
# shared schemes
if XCSchemeHasSharedSchemes(self.path.obj_path) == True:
shared_path = XCSchemeGetSharedPath(self.path.obj_path);
shared_schemes = XCSchemeParseDirectory(shared_path);
for scheme in shared_schemes:
scheme.shared = True;
scheme.container = self.path;
schemes.append(scheme);
# user schemes
if XCSchemeHasUserSchemes(self.path.obj_path) == True:
user_path = XCSchemeGetUserPath(self.path.obj_path);
user_schemes = XCSchemeParseDirectory(user_path);
for scheme in user_schemes:
scheme.container = self.path;
schemes.append(scheme);
return schemes; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_known_schemes_for_multi_store():\n return location.SCHEME_TO_CLS_BACKEND_MAP.keys()",
"def getSchemes(clazz):\n return [\"sftp\"]",
"def get_uri_schemes(self):\n return list(sorted(self.backends.with_playlists.keys()))",
"def get_uri_schemes(self) -> list[backend.UriScheme]:\n futures = [b.uri_schemes for b in self.backends]\n results = pykka.get_all(futures)\n uri_schemes = itertools.chain(*results)\n return sorted(uri_schemes)",
"def getSchemes():\n return [\"dav\", \"davs\"]",
"def get_uri_schemes(self) -> list[UriScheme]:\n return sorted(self.backends.with_playlists.keys())",
"def get_palette_names(scheme: ColorScheme | ColorSchemeShort) -> list[str]:\n mod = get_palette_module(scheme)\n names = mod.__all__\n return names.copy()",
"def getScheme(self):\n return self._scheme",
"def scheme(self):\n return self._scheme",
"def list_themes():\n themes = [*os.scandir(os.path.join(CONF_DIR, \"colorschemes\")),\n *os.scandir(os.path.join(MODULE_DIR, \"colorschemes\"))]\n\n return [t for t in themes if os.path.isfile(t.path)]",
"def getScheme(self):\n return _libsbml.SBMLUri_getScheme(self)",
"def detect_identifier_schemes(val):\n schemes = []\n for scheme, test in PID_SCHEMES:\n if test(val):\n schemes.append(scheme)\n\n # GNDs and ISBNs numbers can clash...\n if \"gnd\" in schemes and \"isbn\" in schemes:\n # ...in which case check explicitly if it's clearly a GND\n if val.lower().startswith(\"gnd:\"):\n schemes.remove(\"isbn\")\n\n if \"viaf\" in schemes and \"url\" in schemes:\n # check explicitly if it's a viaf\n for viaf_url in viaf_urls:\n if val.startswith(viaf_url):\n schemes.remove(\"url\")\n if \"viaf\" in schemes and \"handle\" in schemes:\n # check explicitly if it's a viaf\n for viaf_url in viaf_urls:\n if val.startswith(viaf_url):\n schemes.remove(\"handle\")\n\n for first, remove_schemes in SCHEME_FILTER:\n if first in schemes:\n schemes = list(filter(lambda x: x not in remove_schemes, schemes))\n\n if (\n \"handle\" in schemes\n and \"url\" in schemes\n and not val.startswith(\"http://hdl.handle.net/\")\n and not val.startswith(\"https://hdl.handle.net/\")\n ):\n schemes = list(filter(lambda x: x != \"handle\", schemes))\n elif \"handle\" in schemes and (\"ark\" in schemes or \"arxiv\" in schemes):\n schemes = list(filter(lambda x: x != \"handle\", schemes))\n\n return schemes",
"def scheme(self):\n ret = libxml2mod.xmlURIGetScheme(self._o)\n return ret",
"def get_all_target_namespaces():\n setup_roots = get_all_setups_roots()\n techanim_ns = [x.split(\":\")[0] for x in setup_roots]\n namespaces = get_all_namespaces()\n filtered_ns = []\n for ns in namespaces:\n if ns in [\"UI\", \"ui\", \"shared\", \"Shared\"] + techanim_ns:\n continue\n filtered_ns.append(ns)\n return filtered_ns",
"def schemas(self):\n if not self._schemas:\n self._schemas = get_schema(self.attributes.workspace.namespace, self.attributes.workspace.name)\n return self._schemas",
"def available_modules(self, user):\n return [sitecomp for sitecomp in self.enabled_modules() if sitecomp.has_perm(user)]",
"def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames",
"def get_categories(self, scheme):\n for category in self.category:\n if category.scheme == scheme:\n yield category",
"def scm_types(self):\n return self._scm_types",
"def scheme(self) -> Optional[pulumi.Input[Union[str, 'HTTPSchemeType']]]:\n return pulumi.get(self, \"scheme\")",
"def describe_analysis_schemes(DomainName=None, AnalysisSchemeNames=None, Deployed=None):\n pass",
"def get_registered_themes():\n return get_registered_plugins(theme_registry)",
"def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]",
"def scheme(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scheme\")",
"def available_protocols(self):\n return [\"ssh://\", \"sftp://\"]",
"def libs(self):\n\n return LibraryList(\"/usr/lib/libSystem.dylib\")",
"def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer",
"def get_platforms(self):\n if self.platform == 'All':\n return PLATFORMS\n else:\n return self.platform.split(':')",
"def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval",
"def _gen_freeze_scheme():\n freeze_scheme = {}\n\n for key in SCHEME_KEYS:\n paths = []\n for scheme_name, install_scheme in INSTALL_SCHEMES.iteritems():\n val = install_scheme[key]\n if scheme_name == 'unix_home':\n val = val.replace('$base', '$home', 1)\n else:\n val = val.replace('$base', '$prefix', 1)\n val = val.replace('$platbase', '$exec_prefix', 1)\n paths.append(val)\n freeze_scheme[key] = paths\n\n return freeze_scheme"
] | [
"0.644051",
"0.6145727",
"0.60370487",
"0.60277617",
"0.59321177",
"0.5927999",
"0.58466095",
"0.5695372",
"0.55060184",
"0.549612",
"0.5412193",
"0.53077865",
"0.52500373",
"0.52307934",
"0.5067795",
"0.50209284",
"0.5003262",
"0.49941736",
"0.49602288",
"0.49374494",
"0.49007604",
"0.48840493",
"0.4854249",
"0.4839906",
"0.48021588",
"0.47990078",
"0.47962034",
"0.4790576",
"0.47819754",
"0.4774791"
] | 0.7739919 | 0 |
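The `schemes` property above depends on `XCScheme*` helpers from the surrounding project, which are not shown in the record. As a rough orientation, the on-disk layout those helpers walk is the standard Xcode one, which can be sketched with nothing but `glob`:

```python
# Illustrative only: standard locations of shared and per-user .xcscheme files inside an
# .xcodeproj or .xcworkspace bundle. The XCScheme* helpers used above are not reproduced here.
import glob
import os

def list_scheme_files(container_path):
    shared = glob.glob(os.path.join(
        container_path, "xcshareddata", "xcschemes", "*.xcscheme"))
    user = glob.glob(os.path.join(
        container_path, "xcuserdata", "*.xcuserdatad", "xcschemes", "*.xcscheme"))
    return {"shared": shared, "user": user}
```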
returns x and y derivatives of a 2D gauss kernel array for convolutions | def gauss_derivative_kernels(size, size_y=None):
size = int(size)
if not size_y:
size_y = size
else:
size_y = int(size_y)
y, x = mgrid[-size: size + 1, -size_y: size_y + 1]
# x and y derivatives of a 2D gaussian with standard dev half of size
# (ignore scale factor)
gx = - x * exp(-(x ** 2 / float((0.5 * size) ** 2) + y ** 2 / float((0.5 * size_y) ** 2)))
gy = - y * exp(-(x ** 2 / float((0.5 * size) ** 2) + y ** 2 / float((0.5 * size_y) ** 2)))
return gx, gy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gauss_derivatives(im, n, ny=None):\n\n gx, gy = gauss_derivative_kernels(n, size_y=ny)\n\n imx = signal.convolve(im, gx, mode='same')\n imy = signal.convolve(im, gy, mode='same')\n\n return imx, imy",
"def test_gauss_kernel():\n\n gauss = gauss_kernel(2, 5)\n\n assert gauss.shape == (5, 5)\n assert gauss[2, 2] == 0.039788735772973836",
"def kde_2d_multiple_times(plume_x, plume_y, X, Y):\n Z = []\n positions = np.vstack([X.ravel(), Y.ravel()])\n for i in range(len(plume_x)):\n values = np.vstack([plume_x[i], plume_y[i]])\n kernel = stats.gaussian_kde(values)\n Z.append(np.reshape(kernel(positions).T, X.shape))\n return Z",
"def difference_of_gauss_kernel(radius, scale_step, n_sigmas=8):\n sizex = int(n_sigmas * scale_step * radius)\n sizey = int(n_sigmas * scale_step * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x1 = x / radius\n y1 = y / radius\n g1 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))\n g1 = g1 / (2 * np.pi * radius ** 2) # g1.sum()\n x1 = x1 / scale_step\n y1 = y1 / scale_step\n g2 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))\n g2 = g2 / (2 * np.pi * radius ** 2 * scale_step ** 2) # g2.sum()\n return g1 - g2",
"def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B",
"def gaussian_1yDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dy_kernel = gaussianFirstDerivative(Y, 0, sigma) * gaussianNormalised(X, 0, sigma)\n gSum = np.sum(np.abs(g_dy_kernel))\n \n if gSum == 0:\n print \"Warning dy_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dy_kernel)\n else:\n return (g_dy_kernel / gSum)",
"def _compute_spatial_gradient_kernel(ndim=1):\n\n # 1D differential element\n dq = np.array([-1., 1.]) / 2.\n\n # replicate dq ndim times\n while len(dq.shape) != ndim:\n dq = np.array([dq, ] * ndim)\n\n # return gradient kernel\n return np.array([dq.swapaxes(ndim - 1 - q, -1)\n for q in xrange(ndim)])",
"def gaussian_1xDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dx_kernel = gaussianFirstDerivative(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(g_dx_kernel))\n \n if gSum == 0:\n print \"Warning dx_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dx_kernel)\n else:\n return (g_dx_kernel / gSum)",
"def kde_gauss(events_x, events_y, xout=None, yout=None):\n valid_combi = ((xout is None and yout is None) or\n (xout is not None and yout is not None)\n )\n if not valid_combi:\n raise ValueError(\"Both `xout` and `yout` must be (un)set.\")\n\n if yout is None and yout is None:\n xout = events_x\n yout = events_y\n\n try:\n estimator = gaussian_kde([events_x.flatten(), events_y.flatten()])\n density = estimator.evaluate([xout.flatten(), yout.flatten()])\n except np.linalg.LinAlgError:\n # LinAlgError occurs when matrix to solve is singular (issue #117)\n density = np.zeros(xout.shape)*np.nan\n return density.reshape(xout.shape)",
"def gaussian2d(x, y, A, sigma, x0):\n Z = A * np.exp(-( (x-x0[0])**2/(2*sigma[0]**2) + (y-x0[1])**2/(2*sigma[1]**2)))\n return Z",
"def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val",
"def gauss2d(param, x, y):\n #2010-01-11 22:46 IJC: Created\n from numpy import array, abs, concatenate, exp\n x = array(x, dtype=float).copy()\n y = array(y, dtype=float).copy()\n p = array(param).copy()\n\n r = abs((x-p[2]) + 1j*(y-p[3]))\n\n if len(p)==4:\n p = concatenate((p, [0]))\n\n z = p[4] + p[0]/(p[1]*4*pi) * exp(-r**2 / (2*p[1]**2))\n \n return z",
"def make_gaussian2D( x, y, weights = None ):\n if weights != None:\n ddof = len(weights)-1\n total_weight = np.sum(weights)\n total_weight_sqr = np.sum(weights**2)\n average = np.average([x,y], weights=weights, axis = 1)\n shifted_xy = [x,y] - average[:,None]\n cov = np.dot( shifted_xy * weights, shifted_xy.T )*total_weight/(total_weight**2 - ddof*total_weight_sqr )\n integral = total_weight\n else:\n average = np.average([x,y], axis = 1)\n cov = np.cov( [x,y], shifted_xy.T )\n integral = len(x)\n \n #average = np.average([x,y], weights=weights, axis = 1)\n #shifted_xy = [x,y] - average[:,None]\n #cov = np.dot( shifted_xy * weights, shifted_xy.T )*total_weight/(total_weight**2 - ddof*total_weight_sqr )\n eigenvalues, eigenvectors = np.linalg.eig( cov )\n #print shifted_xy.shape, eigenvectors.shape\n projected_xy = project( shifted_xy, v = eigenvectors )\n variance = np.average( projected_xy**2, weights=weights, axis=1)\n #print \"projected\", projected_xy[:,0]\n #print \"innerSingle\", np.inner( eigenvectors[:,0], np.array([x[0],y[0]]) - average ), np.inner( eigenvectors[:,1], np.array([x[0],y[0]]) - average )\n #print \"var\", np.sqrt(variance), np.sqrt(np.abs(eigenvalues)), std( projected_xy[0,:], e ), np.std( projected_xy[0,:] )\n return lambda xy: gaussian(xy, average, np.sqrt(variance), eigenvectors, integral = integral ), average, np.sqrt(variance), eigenvectors",
"def guess_2D_gauss(data):\n total = data.sum()\n Y, X = np.indices(data.shape)\n yCenter = (Y*data).sum()/total\n xCenter = (X*data).sum()/total\n col = data[int(yCenter),:]\n xWidth = np.sqrt(((X[0]-xCenter)**2*col).sum()/col.sum())\n row = data[:,int(xCenter)]\n yWidth = np.sqrt(((Y[:,0]-yCenter)**2*row).sum()/row.sum())\n\n offset = np.min(data)\n amplitude = np.max(data)-offset\n\n return [amplitude, xCenter, yCenter, xWidth, yWidth, offset, 0.]",
"def gradX_Y(self, X, Y, dim):\n sigma2 = self.sigma2\n K = self.eval(X, Y)\n Diff = X[:, [dim]] - Y[:, [dim]].T\n #Diff = np.reshape(X[:, dim], (-1, 1)) - np.reshape(Y[:, dim], (1, -1))\n G = -K*Diff/sigma2\n return G",
"def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result",
"def gaussian2d(p, x, y):\n #2010-06-08 20:00 IJC: Created\n #2013-04-19 23:49 IJMC: Improved documentation, per BACM's request.\n \n x = array(x, dtype=float).copy()\n y = array(y, dtype=float).copy()\n p = array(p).copy()\n\n if len(p)==4:\n p = concatenate((p, [0]))\n\n z = p[4] + p[0]/(2*pi*p[1]**2) * exp(-((x-p[2])**2 + (y-p[3])**2) / (2*p[1]**2))\n \n return z",
"def gkern2d(kernlen=21, nsig=3):\n x = np.linspace(-nsig, nsig, kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kern2d = np.outer(kern1d, kern1d)\n return kern2d/kern2d.max()",
"def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)",
"def convDerivative(inImage: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\r\n kernel_x = np.array([[0, 0, 0], [1, 0, -1], [0, 0, 0]])\r\n kernel_y = np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\r\n\r\n # derivative by rows:\r\n Ix = cv2.filter2D(inImage, -1, kernel_x) # , borderType=cv2.BORDER_REPLICATE\r\n\r\n # derivative by columns:\r\n Iy = cv2.filter2D(inImage, -1, kernel_y)\r\n\r\n eps = 0.0000000001\r\n magnitude = pow(Ix ** 2 + Iy ** 2, 0.5)\r\n direction = np.arctan(Iy / (Ix + eps))\r\n\r\n return direction, magnitude, Ix, Iy",
"def Gauss2D(self, x, center_x, width_x, y, center_y, width_y, height=1.0):\n g = math.exp(-0.5*((center_x-x)/width_x)**2)/(width_x*(2.0*math.pi)**0.5)\n g *= math.exp(-0.5*((center_y-y)/width_y)**2)/(width_y*(2.0*math.pi)**0.5)\n g *= height\n return g",
"def g(x):\n return 5. - x[:, 1] - .5 * x[:, 0] ** 2.",
"def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05",
"def kernel_rbf(x, y,gamma):\r\n return np.exp(- gamma * np.linalg.norm(x- y)**2)",
"def gauss_kernel(radius, n_sigmas=8):\n sizex = int(n_sigmas * radius)\n sizey = int(n_sigmas * radius)\n radius = float(radius)\n xc = 0.5 * sizex\n yc = 0.5 * sizey\n y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]\n x = x - xc\n y = y - yc\n x = x / radius\n y = y / radius\n g = np.exp(-0.5 * (x ** 2 + y ** 2))\n return g / (2 * np.pi * radius ** 2) # g.sum()",
"def get_grads(img):\n dx = ndimage.sobel(img, 0) # horizontal derivative\n dy = ndimage.sobel(img, 1) # vertical derivative\n return dx, dy",
"def kl_gauss(x, y, sig2=1.):\n return (x - y) ** 2 / (2 * sig2)",
"def gradient(self, x, Y):\n if self.is_sparse:\n x = x.todense()\n Y = Y.todense()\n assert(len(shape(x))==1)\n assert(len(shape(Y))==2)\n assert(len(x)==shape(Y)[1])\n \n x_2d=reshape(x, (1, len(x)))\n k = self.kernel(x_2d, Y)\n differences = Y - x\n G = (1.0 / self.width ** 2) * (k.T * differences)\n return G",
"def compute_derivatives(im1, im2):\n assert im1.shape == im2.shape\n \n Ix = np.empty_like(im1)\n Iy = np.empty_like(im1)\n It = np.empty_like(im1)\n\n #\n # Your code here\n #\n \n # Taken from: Lecture 3 (filtering continued) - Slide 39\n # print(\"Calculating convolutions for derivatives. This might take a while.\")\n # D_x = 1/6 * np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n # D_y = 1/6 * np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n\n # Vereinfachte Kernel. Haben kein smoothing, nur die Ableitung\n D_x = 1/2 * np.array([1, 0, -1]).reshape((1,3))\n D_y = 1/2 * np.array([1, 0, -1]).reshape((3,1))\n\n \n Ix = convolve2d(im1, D_x, mode=\"same\", boundary=\"symm\")\n Iy = convolve2d(im1, D_y, mode=\"same\", boundary=\"symm\")\n It = im2 - im1\n\n # Debugging\n ## print(\"Following prints should all have the same shape: \")\n ## print(\"shape Im: \", im1.shape)\n ## print(\"shape Ix: \", Ix.shape)\n ## print(\"shape Iy: \", Iy.shape)\n ## print(\"shape It: \", It.shape)\n ## print(\"\\n\")\n\n assert Ix.shape == im1.shape and \\\n Iy.shape == im1.shape and \\\n It.shape == im1.shape\n\n return Ix, Iy, It",
"def _convolve_2d(kernel, image):\n\n nx = image.shape[0]\n ny = image.shape[1]\n nkx = kernel.shape[0]\n nky = kernel.shape[1]\n wkx = nkx // 2\n wky = nky // 2\n\n result = np.zeros(image.shape, dtype=float32)\n\n for i in prange(0, nx, 1):\n iimin = max(i - wkx, 0)\n iimax = min(i + wkx + 1, nx)\n for j in prange(0, ny, 1):\n jjmin = max(j - wky, 0)\n jjmax = min(j + wky + 1, ny)\n num = 0.0\n for ii in range(iimin, iimax, 1):\n iii = wkx + ii - i\n for jj in range(jjmin, jjmax, 1):\n jjj = wky + jj - j\n num += kernel[iii, jjj] * image[ii, jj]\n result[i, j] = num\n\n return result"
] | [
"0.6464445",
"0.6400031",
"0.6292199",
"0.62704843",
"0.622417",
"0.6203519",
"0.6186446",
"0.6149553",
"0.6084668",
"0.59506327",
"0.5876299",
"0.586569",
"0.58355033",
"0.58145",
"0.58079106",
"0.5799519",
"0.57833916",
"0.5765829",
"0.57556885",
"0.57517755",
"0.5751735",
"0.5738141",
"0.56953245",
"0.5683558",
"0.5671559",
"0.5648753",
"0.5648751",
"0.5623392",
"0.5616549",
"0.557949"
] | 0.6867467 | 0 |
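A quick sanity check of `gauss_derivative_kernels` above (the snippet uses bare `mgrid` and `exp`, so a star-style NumPy import is assumed): for a square kernel the x- and y-derivative kernels are transposes of each other, and `size=3` yields a 7x7 array because the grid runs from -size to +size inclusive.

```python
# Sanity check for gauss_derivative_kernels above; assumes the function is importable and
# that mgrid/exp come from numpy, as the bare names in the snippet imply.
import numpy as np

gx, gy = gauss_derivative_kernels(3)
print(gx.shape)                 # (7, 7): indices run from -3 to +3 inclusive
print(np.allclose(gx, gy.T))    # True for a square kernel: gx and gy are transposes
```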
returns x and y derivatives of an image using gaussian derivative filters of size n. The optional argument ny allows for a different size in the y direction. | def gauss_derivatives(im, n, ny=None):
gx, gy = gauss_derivative_kernels(n, size_y=ny)
imx = signal.convolve(im, gx, mode='same')
imy = signal.convolve(im, gy, mode='same')
return imx, imy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gauss_derivative_kernels(size, size_y=None):\n size = int(size)\n if not size_y:\n size_y = size\n else:\n size_y = int(size_y)\n y, x = mgrid[-size: size + 1, -size_y: size_y + 1]\n\n # x and y derivatives of a 2D gaussian with standard dev half of size\n # (ignore scale factor)\n gx = - x * exp(-(x ** 2 / float((0.5 * size) ** 2) + y ** 2 / float((0.5 * size_y) ** 2)))\n gy = - y * exp(-(x ** 2 / float((0.5 * size) ** 2) + y ** 2 / float((0.5 * size_y) ** 2)))\n\n return gx, gy",
"def grad_n(f: FlowFieldVal, dim: int, h: float) -> FlowFieldVal:\n if dim == 0:\n df = kernel_op.apply_kernel_op_x(f, 'kDx')\n elif dim == 1:\n df = kernel_op.apply_kernel_op_y(f, 'kDy')\n elif dim == 2:\n df = kernel_op.apply_kernel_op_z(f, 'kDz', 'kDzsh')\n else:\n raise ValueError('Unsupport dimension: {}'.format(dim))\n\n return [df_i / (2.0 * h) for df_i in df]",
"def DDG(self, n, e, r, f):\n pre = (-e[:, None] + np.divide.outer((n - 1), r))**2\n pre -= np.divide.outer((n - 1), r**2)\n return pre*f",
"def ddx(n, dx, f):\n fx = np.zeros(n)\n for j in range(n):\n fx[j] = (f[get_index(j+1, n)]-f[get_index(j-1, n)])/(2*dx)\n return fx",
"def nth_derivative(f, x, n):\n h = 10e-2\n out_h = 1/(h**n)\n out = 0\n for k in range(0, n+1):\n out += (-1)**(k+n)*choose(n,k)*f(x +k*h)\n return out_h*out",
"def DG(self, n, e, r, f):\n\n pre = -e[:, None] + np.divide.outer((n - 1), r)\n return pre*f",
"def grad(f,dx,dy,dz,x=[],y=[],z=[],param=[],dim=[]):\n if (f.ndim != 3):\n print(\"grad: must have scalar 3-D array f[mz,my,mx] for gradient\")\n raise ValueError\n\n if not param:\n param=read_param(quiet=True)\n if not dim:\n dim=read_dim()\n if len(x) < 1:\n gd = read_grid(quiet=True)\n x = gd.x\n y = gd.y\n z = gd.z\n\n grad = N.empty((3,)+f.shape)\n grad[0,...] = xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)\n grad[1,...] = yder(f,dy,x=x,y=y,z=z,param=param,dim=dim)\n grad[2,...] = zder(f,dz,x=x,y=y,z=z,param=param,dim=dim)\n\n return grad",
"def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):\n input = numpy.asarray(input)\n output = _get_output_fourier(output, input)\n axis = normalize_axis_index(axis, input.ndim)\n sigmas = _ni_support._normalize_sequence(sigma, input.ndim)\n sigmas = numpy.asarray(sigmas, dtype=numpy.float64)\n if not sigmas.flags.contiguous:\n sigmas = sigmas.copy()\n\n _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)\n return output",
"def _compute_derivatives(image, mode=\"constant\", cval=0):\n\n derivatives = [\n ndi.sobel(image, axis=i, mode=mode, cval=cval)\n for i in range(image.ndim)\n ]\n\n return derivatives",
"def ndgrad(f, delta=DELTA):\n def grad_f(*args, **kwargs):\n x = args[0]\n grad_val = numpy.zeros(x.shape)\n it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])\n for xi in it:\n i = it.multi_index\n xi += delta/2\n fp = f(*args, **kwargs)\n xi -= delta\n fm = f(*args, **kwargs)\n xi += delta/2\n grad_val[i] = (fp - fm)/delta\n return grad_val\n return grad_f",
"def create_derivative_graph(f, xrange, n):\n plot_points = []\n for x in xrange:\n plot_points.append(nth_derivative(f, x, n))\n return plot_points",
"def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered",
"def blur_image(im, n, ny=None) :\n g = gauss_kern(n, sizey=ny)\n improc = signal.convolve(im,g, mode='same')\n return(improc)",
"def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)",
"def lie_derivative(h, f, x, n):\n if n == 0:\n return h\n elif n == 1:\n return h.jacobian(x) * f\n else:\n return lie_derivative(lie_derivative(h, f, x, 1), f, x, n - 1)",
"def calc_gamma_nd(x, fx, dfx, y, fpy, dfpy):\n n, m, d = x.shape[0], y.shape[0], x.shape[1]\n # calculate derivative from fx\n fpx, dfpx = zeros(n), zeros(n)\n for i in range(n):\n others = range(i) + range(i+1,n)\n d = sqrt((x - x[i,:])**2)\n fpxi = abs(fx[others,:] - fx[i,:]) / d[others]\n dfpxi = sqrt(dfx[others,:]**2 + dfx[i,:]**2) / d[others]\n imax = (fpxi / dfpxi).argmax()\n fpx[i], dfpx[i] = fpxi[imax], dfpxi[imax]\n fpy = sqrt((fpy**2).sum(1))\n dfpy = sqrt((dfpy**2).sum(1))\n # estimate beta*gamma\n ratio = ((dfpx**2).sum() + (dfpy**2).sum()) / \\\n ((fpx**2).sum() + (fpy**2).sum())\n betagamma = (((fpx**2).sum() + (fpy**2).sum()) / (n+m) * exp(-ratio)) ** 0.5\n # detect discontinuity, raise gamma if needed\n if m > 0:\n dfmax = max(abs(fpx).max(), abs(fpy).max())\n else:\n dfmax = abs(fpx).max()\n betagamma = max(betagamma, 0.16 * dfmax)\n gamma = betagamma / calc_beta(fx, dfx) * 4\n print ' using gamma = ', gamma\n return gamma",
"def get_derivative(self,var,g=None):\n if (g==None):g=self.g\n A=np.zeros([self.n+1,self.n])\n B=np.zeros([self.n+1])\n for i in range(self.n):\n B[i]=self.gamma*2.*g*self.N*(self.n-self.N)+np.sum([self.XXZ.Z(k,i)*(var[k]-var[i]) for k in range(self.n) if k!=i])\n A[self.n][i]=1\n for j in range(self.n):\n if(i==j): A[i][j]=2.*var[i]+2.+g*np.sum([self.XXZ.Z(k,i) for k in range(self.n) if k!=i])\n else: A[i][j]=-g*self.XXZ.Z(j,i)\n Ainv=np.linalg.pinv(A)\n der=np.dot(Ainv,B)\n return der",
"def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot",
"def gauss_2d(N, sigma = 0.25):\r\n x, y = make_xy(N)\r\n sigma_pixel = sigma * np.float(N)\r\n arrayout = np.exp(-(x**2 + y**2) / sigma_pixel**2) / (np.pi * sigma_pixel**2)\r\n return arrayout",
"def get_grads(img):\n dx = ndimage.sobel(img, 0) # horizontal derivative\n dy = ndimage.sobel(img, 1) # vertical derivative\n return dx, dy",
"def model_gauss_noise(sigma, nx, ny=1, nz=1):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.process_inplace(\"testimage.noise.gauss\", {\"sigma\":sigma})\n\treturn e",
"def fit_ndgaussian(xdata, fdata):\n m, n = xdata.shape\n n2 = 2 * n\n fsuminv = 1 / numpy.sum(fdata)\n\n # Estimate initial parameters\n mean = fsuminv * numpy.sum(fdata * xdata.transpose(), axis=1)\n dx = (xdata - mean).transpose()\n cov = fsuminv * (fdata * dx).dot(dx.transpose())\n\n evals, evecs = linalg.eigh(cov)\n covdet = numpy.prod(evals)\n\n scale = fdata.max() * numpy.sqrt(covdet * (2 * numpy.pi) ** n)\n\n # Make sure the matrix of eigenvectors is orthogonal and proper (det +1)\n if linalg.det(evecs) < 0:\n evecs[:, 0] = -evecs[:, 0]\n\n ## Use the Cayley transform to extract n(n - 1) / 2 independent parameters\n ## from the orthogonal eigenvector matrix\n #eye = numpy.eye(n)\n #evecs_c = (eye - evecs).dot(linalg.inv(eye + evecs))\n #upper = numpy.triu_indices(n, k=1)\n\n # Use the parametrization in orthogonal_matrix()\n angles = angles_from_orthogonal_matrix(evecs)\n\n # Make a list with the minimal number of parameters to specify a Gaussian\n #params = numpy.hstack((scale, mean, numpy.sqrt(evals), evecs_c[upper]))\n params = numpy.hstack((scale, mean, numpy.sqrt(evals), angles))\n #params = numpy.hstack((numpy.sqrt(scale), mean, numpy.sqrt(evals),\n # angles))\n #params = numpy.hstack((scale, mean, evals, angles))\n\n def params_to_scale_mean_cov(params_):\n \"\"\"\n Extract the scale, mean and covariance matrix from the minimal\n parameter array\n\n \"\"\"\n # Extract scale and mean\n #scale_sqrt_ = params_[0]\n #scale_ = scale_sqrt_ * scale_sqrt_\n scale_ = params_[0]\n\n mean_ = params_[1:n + 1]\n\n # Get eigenvalues\n evals_sqrt_ = numpy.array(params_[n + 1:n2 + 1])\n evals_ = evals_sqrt_ * evals_sqrt_\n #evals_ = numpy.array(params_[n + 1:n2 + 1])\n\n ## Reconstruct the transformed eigenvector matrix\n #cov_c_ = numpy.zeros((n, n))\n #cov_c_[upper] = params_[n2 + 1:]\n #cov_c_.transpose()[upper] = -cov_c_[upper]\n #\n ## Use an inverse Cayley transform to get the true eigenvector matrix\n #evecs_ = (eye - cov_c_).dot(linalg.inv(eye + cov_c_))\n\n # Get eigenvector matrix from orthogonal_matrix()\n evecs_ = orthogonal_matrix_from_angles(n, params_[n2 + 1:])\n\n # Get the covariance matrix from the eigenvectors and eigenvalues\n cov_ = evecs_.dot(numpy.diag(evals_).dot(evecs_.transpose()))\n\n return scale_, mean_, cov_\n\n def param_gauss(xdata_, *params_):\n \"\"\"\n Define a Gaussian function specified by a minimal number of parameters\n\n \"\"\"\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, mean=mean_, cov=cov_)\n\n def error(params_):\n eps = fdata - param_gauss(xdata, *params_)\n return numpy.sum(eps * eps)\n\n # Find the parameter array that solves the least-squares fitting problem\n #params, __ = optimize.curve_fit(param_gauss, xdata, fdata, p0=params)\n l = n * (n - 1) // 2\n bounds = ([(0.0, None)] + # Scale must be positive\n [(None, None)] * n + # Means for each axis -- any value\n [(None, None)] * n + # Square roots of evals -- any value\n [(0.0, 2 * numpy.pi)] * l) # Angles constrained to one cycle\n params = optimize.minimize(error, params, bounds=bounds).x\n\n scale, mean, cov = params_to_scale_mean_cov(params)\n\n return scale, mean, cov",
"def dE_mdn(self, x, y, t, w1 = None, w2 = None):\n if w2 == None:\n w2 = self.w2\n M = int(self.M)\n # avoid underrun\n \n alpha, sigma, mu = self.getMixtureParams(y.T)\n #import pdb; pdb.set_trace()\n \n #T = t.T[None, None, :] # note: np.tile is slower than this notation\n T = t.T[None, :]\n \n phi = self._phi(T, mu, sigma)\n aphi = alpha*phi\n pi = aphi / np.sum(aphi, 0)\n \n # derivatives of E with respect to the output variables (s. Bishop 1995, chp. 6.4)\n dE_dy_alpha = alpha - pi\n dE_dy_sigma = - 0.5 * pi * ((np.sum((T-mu)**2 , 1) / sigma) - self.c)\n dE_dy_mu = pi[:,np.newaxis,:] * (mu - T) / sigma[:,np.newaxis,:]\n\n dk = np.zeros([self.ny, x.shape[0]])\n dk[0:M,:] = dE_dy_alpha\n dk[M:2*M,:] = dE_dy_sigma\n \n dk[2*M:] = np.reshape(dE_dy_mu, [M*self.c, x.shape[0]])\n \n # back-propagate the dks\n #t0=datetime.now()\n dEnw1, dEnw2 = self.backward(x, dk, None, w2)\n #print 'eval of dE_mdn:' + str((datetime.now()-t0))\n #dj = (1 - self.z[1:]**2) * np.dot(w2[:,1:].T, dk)\n # evaluate derivatives with respect to the weights\n #dEnw1 = (dj[:,:,np.newaxis]*x[np.newaxis,:,:]).transpose(1,0,2)\n #dEnw2 = (dk[:,:,np.newaxis]*self.z.T[np.newaxis,:,:]).transpose(1,0,2)\n return dEnw1, dEnw2",
"def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot",
"def derivative(func, x0, dx=1.0, n=1, args=(), order=3):\n if order < n + 1:\n raise ValueError(\"'order' (the number of points used to compute the derivative), \"\n \"must be at least the derivative order 'n' + 1.\")\n if order % 2 == 0:\n raise ValueError(\"'order' (the number of points used to compute the derivative) \"\n \"must be odd.\")\n # pre-computed for n=1 and 2 and low-order for speed.\n if n == 1:\n if order == 3:\n weights = array([-1,0,1])/2.0\n elif order == 5:\n weights = array([1,-8,0,8,-1])/12.0\n elif order == 7:\n weights = array([-1,9,-45,0,45,-9,1])/60.0\n elif order == 9:\n weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0\n else:\n weights = central_diff_weights(order,1)\n elif n == 2:\n if order == 3:\n weights = array([1,-2.0,1])\n elif order == 5:\n weights = array([-1,16,-30,16,-1])/12.0\n elif order == 7:\n weights = array([2,-27,270,-490,270,-27,2])/180.0\n elif order == 9:\n weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0\n else:\n weights = central_diff_weights(order,2)\n else:\n weights = central_diff_weights(order, n)\n val = 0.0\n ho = order >> 1\n for k in range(order):\n val += weights[k]*func(x0+(k-ho)*dx,*args)\n return val / product((dx,)*n,axis=0)",
"def Derivate2D(xdata, zdata, k=3, sigma=None, s=None, n=1):\r\n der = np.zeros_like(zdata)\r\n for u, i in enumerate(zdata):\r\n der[u] = Derivate(xdata, i, k=k, sigma=sigma, s=s, n=n)\r\n return der",
"def get_densities(\n x: np.ndarray,\n y: np.ndarray,\n nx: int,\n ny: int,\n x_range: Tuple = (0, 100),\n y_range: Tuple = (0, 100),\n n: int = 30,\n) -> np.ndarray:\n\n x_values = np.linspace(x_range[0], x_range[1], nx)\n y_values = np.linspace(y_range[0], y_range[1], ny)\n\n density = np.empty((nx, ny))\n tree = get_kdtree(x, y)\n\n for x in tqdm(range(nx)):\n for y in range(ny):\n density[x, y] = get_density_from_neighbours(\n x_values[x], y_values[y], tree, n\n )\n\n return density, tree",
"def get_derivative(self, model, params, n):\n params1 = np.array(params)\n params2 = np.array(params)\n\n params1[n] += self.eps\n params2[n] -= self.eps\n\n res1 = model.run(params1)\n res2 = model.run(params2)\n\n d = (res1 - res2) / (2 * self.eps)\n\n return d.ravel()",
"def gradient(f, *varargs):\n N = len(f.shape) # number of dimensions\n n = len(varargs)\n if n == 0:\n dx = [1.0]*N\n elif n == 1:\n dx = [varargs[0]]*N\n elif n == N:\n dx = list(varargs)\n else:\n raise SyntaxError, \"invalid number of arguments\"\n\n # use central differences on interior and first differences on endpoints\n\n outvals = []\n\n # create slice objects --- initially all are [:, :, ..., :]\n slice1 = [slice(None)]*N\n slice2 = [slice(None)]*N\n slice3 = [slice(None)]*N\n\n otype = f.dtype.char\n if otype not in ['f', 'd', 'F', 'D']:\n otype = 'd'\n\n for axis in range(N):\n # select out appropriate parts for this dimension\n out = zeros(f.shape, f.dtype.char)\n slice1[axis] = slice(1, -1)\n slice2[axis] = slice(2, None)\n slice3[axis] = slice(None, -2)\n # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0\n out[slice1] = (f[slice2] - f[slice3])/2.0\n slice1[axis] = 0\n slice2[axis] = 1\n slice3[axis] = 0\n # 1D equivalent -- out[0] = (f[1] - f[0])\n out[slice1] = (f[slice2] - f[slice3])\n slice1[axis] = -1\n slice2[axis] = -1\n slice3[axis] = -2\n # 1D equivalent -- out[-1] = (f[-1] - f[-2])\n out[slice1] = (f[slice2] - f[slice3])\n\n # divide by step size\n outvals.append(out / dx[axis])\n\n # reset the slice object in this dimension to \":\"\n slice1[axis] = slice(None)\n slice2[axis] = slice(None)\n slice3[axis] = slice(None)\n\n if N == 1:\n return outvals[0]\n else:\n return outvals",
"def _get_sum_gaussian_image(s_gen, xs, ys, sdev, n=50):\n m1, m2 = xs.min(), xs.max()\n xx = np.linspace(m1, m2, n)\n XX, YY = np.meshgrid(xx, xx)\n XX, YY = [u.ravel()[np.newaxis, :] for u in [XX, YY]]\n xs, ys, S_gen = [u[:, np.newaxis] for u in [xs, ys, s_gen]]\n res = np.sum(\n S_gen * np.exp(((xs - XX) ** 2 + (ys - YY) ** 2) /\n (-2 * sdev ** 2)), axis=0)\n return res.reshape(n, n)"
] | [
"0.61548144",
"0.5981589",
"0.5959965",
"0.58838403",
"0.58367634",
"0.57715976",
"0.5677056",
"0.5670111",
"0.56048477",
"0.56043017",
"0.55708915",
"0.5549236",
"0.5539912",
"0.5506071",
"0.549919",
"0.54970866",
"0.53928995",
"0.5380421",
"0.53599995",
"0.5349",
"0.5330634",
"0.5312637",
"0.5283824",
"0.52770364",
"0.5267364",
"0.5252262",
"0.5249008",
"0.52425003",
"0.5235052",
"0.52331513"
] | 0.8174752 | 0 |
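Finally, a small synthetic example of `gauss_derivatives` above, e.g. as the gradient step of a Harris-style detector; it only needs NumPy besides the `scipy.signal.convolve` call already used inside the function, and no external image files are assumed.

```python
# Hedged sketch: image gradients from gauss_derivatives above on a synthetic square,
# combined into a gradient-magnitude map.
import numpy as np

im = np.zeros((64, 64))
im[16:48, 16:48] = 1.0               # white square on black background

imx, imy = gauss_derivatives(im, 3)  # x/y derivatives via Gaussian-derivative filters
magnitude = np.hypot(imx, imy)       # strong response along the square's edges
```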
Decorator that can be used to cache ReusableBytesIO objects intended for reading. The decorator makes sure the objects are immutable and reset to position 0. The decorated function can either return pure ReusableBytesIO objects or dicts. | def buffer_object_cacher(key=None, maxsize=None):
if not config.enable_caching:
return lambda x: x
def decorator(fun):
# Cache the results.
cached_fun = cachetools.cached(cachetools.LRUCache(maxsize=maxsize),
key=lambda *x,**y: cachetools.keys.hashkey(key(*x,**y)))(fun)
# Reset the buffer(s) on every cache-hit so it's readable again.
def rewind_wrapper(*args, **kwargs):
results = cached_fun(*args, **kwargs)
if isinstance(results, dict):
for buffer in results.values():
buffer.seek(0)
else:
results.seek(0)
return results
return rewind_wrapper
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_buffer():\n return BytesIO()",
"def cached_load(filepath: str) -> io.BytesIO:\n with open(filepath, 'rb') as f:\n return io.BytesIO(f.read())",
"def disk_memoize(path):\n def decorator(f):\n @functools.wraps(f)\n def g(*a, **kw):\n kwargs = to_kwargs(f, *a, **kw)\n args = to_args(f, *a, **kw)\n filepath = path.format(*args, **kwargs) \n disk = Disk()\n content = disk.read(filepath)\n if content:\n return content\n else:\n content = f(*a, **kw)\n content = disk.write(filepath, content)\n return disk.read(filepath)\n return g\n return decorator",
"def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper",
"def cache(func):\n storage = {}\n\n def wrapper(*args, **kwargs):\n key = str(*args, **kwargs)\n if storage.get(key):\n return storage[key]\n else:\n result = func(*args, **kwargs)\n storage[key] = result\n return result\n\n return wrapper",
"def cached():\n def decorator(fn): # define a decorator for a function \"fn\"\n cache_name = fn.func_name\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments\n if os.path.exists(cache_name):\n with gzip.GzipFile(cache_name, 'rb') as cachehandle:\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with gzip.GzipFile(cache_name, 'wb') as cachehandle:\n pickle.dump(res, cachehandle, pickle.HIGHEST_PROTOCOL)\n return res\n return wrapped\n return decorator # return this \"customized\" decorator that uses \"cachefile\"",
"def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data",
"def cache(cache_path):\n def cache_decorator(generator):\n def wrapper():\n return cached(cache_path, generator)\n return wrapper\n return cache_decorator",
"def stay(read_func, *args, **kwargs):\n def wrapper(mm, *args, **kwargs):\n idx = mm.tell()\n ret = read_func(mm, *args, **kwargs)\n mm.seek(idx)\n return ret\n return wrapper",
"def pickle_cache(cache_path: Path):\n\n def decorator(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n if cache_path.is_file():\n with cache_path.open(\"rb\") as f:\n return pickle.load(f)\n res = func(*args, **kwargs)\n _logger.info(\"Caching object type %s to %s\", type(res), cache_path)\n cache_path.parent.mkdir(parents=True, exist_ok=True)\n with cache_path.open(\"wb\") as f:\n pickle.dump(res, f)\n return res\n\n return inner\n\n return decorator",
"def memoize_rc(func):\n # shelf_file = '../resources/rccache'\n logger.debug(f\"Opening shelf file: {SHELF_FILE}\")\n\n def wrapper(*args, **kwargs):\n\n key = '_'.join([func.__name__, *args[1:], *kwargs.values()])\n today = datetime.date.today()\n\n with shelve.open(str(SHELF_FILE)) as shelf:\n if key in shelf:\n # if key in shelf and it's not too old\n logger.debug(f\"Retrieving <{func.__name__}, {args}, {kwargs}> from Shelf\")\n entry = shelf[key]\n cachedate = entry['cachedate']\n if today - cachedate < expire:\n # cached data has not expired\n data = entry['data']\n return data\n\n else:\n # call the function and cache\n logger.debug(f\"Caching <{func.__name__}, {args}, {kwargs}> to Shelf\")\n data = func(*args, **kwargs)\n entry = {\n 'cachedate': datetime.date.today(),\n 'data': data\n }\n shelf[key] = entry\n return data\n\n return wrapper",
"def unbound_cache(func):\n\n cache = {}\n\n @functools.wraps(func)\n def caching_wrapper(*args):\n try:\n return cache[args]\n except KeyError:\n result = func(*args)\n cache[args] = result\n return result\n\n return caching_wrapper",
"def cached(func):\n return _lru_cache(None)(func)",
"def memorized(f):\n cache = {}\n @wraps(f)\n def wrapped(*args):\n try:\n result = cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n return wrapped",
"def _wrapper(self, *args, **kwargs):\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val",
"def cached(func):\n cache_dct = {}\n\n @wraps(func)\n def _lru_cache_decorator(*args):\n key = args\n if key in cache_dct:\n return cache_dct[key]\n else:\n cache_dct[key] = func(*args)\n return cache_dct[key]\n return _lru_cache_decorator",
"def instance_cache(func):\n def _wrapper(self, *args, **kwargs):\n key = (func.__name__,) + args\n for pair in sorted(kwargs.items()):\n key += pair\n if key in self._cache:\n return self._cache[key]\n data = func(self, *args, **kwargs)\n self._cache[key] = data\n return data\n return _wrapper",
"def cached(cache_path, generator):\n if path.exists(cache_path):\n with open(cache_path, 'rb') as f:\n return pickle.load(f)\n output = generator()\n with open(cache_path, 'wb+') as f:\n pickle.dump(output, f)\n return output",
"def memoized(func, *, size_limit=10**8, eviction_policy='least-recently-used', cache_dir=CACHE_DIR,\n typed=False, round_digits=15, ignore_args=None):\n func_hash = hashlib.md5(func.__code__.co_code).hexdigest()\n func_id = \"{}.{:0>4s}\".format(func.__qualname__, func_hash[-4:])\n cache_dir = os.path.join(cache_dir, func_id)\n func.cache = diskcache.Cache(cache_dir, size_limit=size_limit, eviction_policy=eviction_policy)\n func.async_results = {}\n\n atexit.register(func.cache.close)\n\n @atexit.register\n def consolidate_async():\n for key, result in func.async_results.items():\n try:\n if result.successful():\n func.cache[dict(sorted(key))] = result.get()\n # Exception class changed in Python 3.7:\n # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.AsyncResult.successful\n except (AssertionError, ValueError):\n pass\n\n arg_names = inspect.getfullargspec(func).args\n if ignore_args is not None:\n ignore_args = frozenset([ignore_args] if isinstance(ignore_args, str) else ignore_args)\n assert all(arg in arg_names for arg in ignore_args), \"Unknown argument name passed to 'ignore_args' option.\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n key = kwargs.copy()\n key.update(zip(arg_names, args))\n if ignore_args is not None:\n key = {k: v for k, v in key.items() if k not in ignore_args}\n if not typed:\n key = {k: _normalize_type(v, round_digits) for k, v in key.items()}\n key = dict(sorted(key.items()))\n\n try:\n return func.cache[key]\n except KeyError:\n try:\n return func.async_results[tuple(key.items())]\n except KeyError:\n logging.debug(\"%s: cache miss on key %s\", wrapper.__qualname__, repr(key))\n value = func(*args, **kwargs)\n if isinstance(value, pool.AsyncResult):\n func.async_results[tuple(key.items())] = value\n else:\n func.cache[key] = value\n return value\n\n return wrapper",
"def cache(self, func=None, dependencies=(), ignore=None, save_hashes_only=False):\n if func is None:\n return partial(\n self.cache,\n dependencies=dependencies,\n ignore=ignore,\n save_hashes_only=save_hashes_only,\n )\n\n assert callable(func)\n\n # Update dependencies to enable chaining of dependencies.\n dependencies = (\n *dependencies,\n *reduce(\n add,\n map(\n tuple,\n map(\n default_attrgetter(\"_orig_func._dependencies\", default=()),\n dependencies,\n ),\n ),\n (),\n ),\n )\n\n def _inner_placeholder(hashed, args, kwargs):\n if save_hashes_only:\n return func(*args, **kwargs), HASHES_ONLY\n return func(*args, **kwargs)\n\n _inner_placeholder.__name__ = func.__name__\n\n cached_inner = self.memory.cache(ignore=[\"args\", \"kwargs\"])(_inner_placeholder)\n\n def bound_get_hashed(*orig_args, **orig_kwargs):\n return _get_hashed(\n func,\n *orig_args,\n dependencies=dependencies,\n hash_func=self.get_hash,\n ignore=ignore,\n **orig_kwargs,\n )\n\n @wraps(func)\n def cached_func(*orig_args, **orig_kwargs):\n hashed = bound_get_hashed(*orig_args, **orig_kwargs)\n\n if save_hashes_only:\n if cached_inner.store_backend.contains_item(\n cached_inner._get_output_identifiers(hashed, orig_args, orig_kwargs)\n ):\n # Do not use the original factory functions since these will reference\n # data that has never been saved. Only extract the saved hash values.\n cache_proxies = cached_inner(hashed, orig_args, orig_kwargs)\n if isinstance(cache_proxies, HashProxy):\n cached_hash_values = (cache_proxies.hashed_value,)\n else:\n cached_hash_values = tuple(\n proxy.hashed_value for proxy in cache_proxies\n )\n # Return a lazy proxy that contains the cached hash values along with\n # lazy references to the output of the cached function (the\n # HASHES_ONLY return value is ignored by the backend).\n\n def process_func():\n if hasattr(process_func, \"stored\"):\n logger.debug(\"Returning previously computed data.\")\n return process_func.stored\n\n logger.debug(\"Processing data.\")\n\n # Call the uncached function here since we have already cached the\n # hash values. 
Ignore the additional HASHES_ONLY return value.\n process_func.stored = _inner_placeholder(\n hashed, orig_args, orig_kwargs\n )[0]\n return process_func.stored\n\n if len(cached_hash_values) == 1:\n return HashProxy(\n Factory(process_func),\n hash_func=self.get_hash,\n hash_value=cached_hash_values[0],\n )\n\n # Otherwise create a lazy proxy for each individual object to associate each\n # stored object with its individual hash value.\n\n def get_factory_func(i):\n def factory_func():\n return process_func()[i]\n\n return factory_func\n\n return tuple(\n HashProxy(\n Factory(get_factory_func(i)),\n hash_func=self.get_hash,\n hash_value=hash_value,\n )\n for i, hash_value in enumerate(cached_hash_values)\n )\n\n # If this is the first time the function is called, call it normally and\n # ignore the additional HASHES_ONLY return value.\n\n return cached_inner(hashed, orig_args, orig_kwargs)[0]\n\n return cached_inner(hashed, orig_args, orig_kwargs)\n\n def check_in_store(*args, **kwargs):\n \"\"\"Check whether a given call to the cached function is already cached.\n\n Args:\n args, kwargs: Arguments to check.\n\n Returns:\n IN_STORE: If the given call was found in the cache.\n\n Raises:\n NotCachedError: If the given call was not found in the cache.\n\n \"\"\"\n output_ids = cached_inner._get_output_identifiers(\n bound_get_hashed(*args, **kwargs), args, kwargs\n )\n if not cached_inner.store_backend.contains_item(output_ids):\n raise NotCachedError(f\"The given call is not cached: {output_ids}\")\n return IN_STORE\n\n cached_func.check_in_store = check_in_store\n cached_func._orig_func = func\n cached_func._orig_func._dependencies = dependencies\n\n return cached_func",
"def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper",
"def write_cache(self) -> SmartSsdReadLookahead:\n return self._write_cache",
"def pickle_cache(fname, overwrite=False):\n def decorator(fn):\n def decorated(*args, **kwargs):\n if (not overwrite) and os.path.exists(fname):\n with open(fname, 'rb') as f:\n return pickle.load(f)\n else:\n result = fn(*args, **kwargs)\n with open(fname, 'wb') as f:\n pickle.dump(result, f)\n return result\n return decorated\n\n return decorator",
"def cache(self, name: str = None) -> B[B, E]:",
"def cache(file_name, load_func, *func_args, **func_kwargs):\n if path.exists(file_name):\n with open(file_name, 'rb') as f:\n return pickle.load(f)\n else:\n data = load_func(*func_args, **func_kwargs)\n with open(file_name, 'wb') as f:\n pickle.dump(data, f)\n return data",
"def sized_cache(size=10):\n def out_wrapper(func):\n\n storage = {}\n keys_queue = []\n\n def wrapper(*args, **kwargs):\n key = str(*args, **kwargs)\n if not storage.get(key):\n result = func(*args, **kwargs)\n storage[key] = result\n keys_queue.append(key)\n if len(keys_queue) > size:\n del(storage[keys_queue.pop(0)])\n return storage[key]\n\n return wrapper\n\n return out_wrapper",
"def lru_cache(maxsize=128, typed=False):\n\n cache = OrderedDict()\n\n def decorate(func):\n\n def cached_func(*args, **kwargs):\n\n signature = tuple(args), tuple(kwargs)\n rslt = cache.pop(signature, UNKNOWN)\n if rslt is UNKNOWN:\n rslt = func(*args, **kwargs)\n\n cache[signature] = rslt\n\n if len(cache) > maxsize:\n key = next(cache.iterkeys())\n cache.pop(key)\n\n return rslt\n\n return cached_func\n return decorate",
"def _proxy_cache(from_func, to_func):\n to_func.cache_info = from_func.cache_info\n to_func.cache_clear = from_func.cache_clear",
"def cache(self, func=None, ignore=None, verbose=None,\r\n mmap_mode=False):\r\n if func is None:\r\n # Partial application, to be able to specify extra keyword\r\n # arguments in decorators\r\n return functools.partial(self.cache, ignore=ignore,\r\n verbose=verbose, mmap_mode=mmap_mode)\r\n if self.cachedir is None:\r\n return NotMemorizedFunc(func)\r\n if verbose is None:\r\n verbose = self._verbose\r\n if mmap_mode is False:\r\n mmap_mode = self.mmap_mode\r\n if isinstance(func, MemorizedFunc):\r\n func = func.func\r\n return MemorizedFunc(func, cachedir=self.cachedir,\r\n mmap_mode=mmap_mode,\r\n ignore=ignore,\r\n compress=self.compress,\r\n verbose=verbose,\r\n timestamp=self.timestamp)",
"def _pickle_cache(self, cache_key, func):\n cache_path = os.path.join(self.CACHE_BASE_DIR, \"%s.pickle\" % cache_key)\n try:\n with open(cache_path, \"rb\") as cache:\n try:\n return pickle.load(cache)\n except Exception:\n # If we have a deserialization error, remove the corrupt\n # cache.\n os.unlink(cache_path)\n except Exception:\n # Ignore failures to read the cache (file may not exist, etc).\n pass\n ret = func()\n # Try to write back to the cache.\n try:\n if not os.path.exists(self.CACHE_BASE_DIR):\n os.makedirs(self.CACHE_BASE_DIR)\n tmp_path = cache_path + \".tmp\"\n with open(tmp_path, \"wb\") as cache:\n pickle.dump(ret, cache)\n os.rename(tmp_path, cache_path)\n except Exception:\n # Failure to write the cache isn't fatal (maybe we are on a read-only\n # filesystem, etc).\n pass\n return ret"
] | [
"0.6126323",
"0.59865415",
"0.5825444",
"0.58154047",
"0.57380855",
"0.5698068",
"0.5697662",
"0.55716634",
"0.55687535",
"0.55656105",
"0.5557927",
"0.5528658",
"0.54908526",
"0.5479148",
"0.54592925",
"0.54238856",
"0.54096276",
"0.5385697",
"0.53558767",
"0.5354645",
"0.53503454",
"0.53494173",
"0.5307073",
"0.5305388",
"0.529467",
"0.5290891",
"0.5277992",
"0.52587605",
"0.52528906",
"0.52432877"
] | 0.63631254 | 0 |
Returns a tuple representing the hardware specs. | def getHardware(self):
    return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]",
"def get_specs():\n from fsm.fsmspec import FSMSpecification\n spec = FSMSpecification(\n name='help',\n hideTabs=True,\n title='Take the courselet core lessons',\n pluginNodes=[START, START_MESSAGE, HELP_RESOLVE, END],\n )\n return (spec,)",
"def hardware(self):\n return self._hardware",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def read_chip_info(self):\n return [self.read_chip_type(), self.read_chip_revision()]",
"def machine_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceMachineSpec']]:\n return pulumi.get(self, \"machine_specs\")",
"def describe_operating_systems():\n pass",
"def hardware_info(self, mask=0xFFFFFFFF):\n buf = (ctypes.c_uint32 * 32)()\n res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))\n if res != 0:\n raise errors.JLinkException(res)\n return list(buf)",
"def info_hardware():\n\n print(\"\\nHARDWARE:\")\n\n # CPU INFO\n try:\n import cpuinfo # pip py-cpuinfo\n\n cpu = cpuinfo.get_cpu_info().get(\"brand_raw\")\n print(f\"CPU:\\t{cpu}\")\n except ImportError:\n print(\"cpuinfo not found. (pip/conda: py-cpuinfo)\")\n\n # RAM INFO\n try:\n import psutil # pip py-cpuinfo\n\n ram = round(psutil.virtual_memory().total / (1024.0**3))\n print(f\"RAM:\\t{ram} GB\")\n except ImportError:\n print(\"psutil not found. (pip/conda psutil)\")\n\n # GPU INFO\n if not tf.test.gpu_device_name():\n print(\"-- No GPU --\")\n else:\n gpu_devices = tf.config.list_physical_devices(\"GPU\")\n details = tf.config.experimental.get_device_details(gpu_devices[0])\n gpu_name = details.get(\"device_name\", \"CUDA-GPU found\")\n print(f\"GPU:\\t{gpu_name}\")\n # print(f\"{tf.test.gpu_device_name()[1:]}\")",
"def serials(self) -> dict[str, int | lcn_defs.HardwareType]:\n return {\n \"hardware_serial\": self.hardware_serial,\n \"manu\": self.manu,\n \"software_serial\": self.software_serial,\n \"hardware_type\": self.hardware_type,\n }",
"def getDevices(self):\n\n devices = None\n\n for i in range(3):\n devices = subprocess.check_output(\"adb devices -l\", creationflags=self.createNoWindow)\n\n devices = devices.decode()\n deviceModel = re.findall(\"model:(.*) device\", devices)\n deviceID = re.findall(r\"(\\S+) {2}\", devices, flags=re.IGNORECASE)\n\n return deviceModel, deviceID",
"def state_info_specs(self):\n return list()",
"def extract_specs(self):\n vDeflection_unit = \"lcd-info.{}.conversion-set.conversion.force.scaling.unit.unit\".format(\n self.channel_numbers[\"vDeflection\"])\n self.units[\"vDeflection\"] = self.general[vDeflection_unit]\n\n height_unit = \"lcd-info.{}.conversion-set.conversion.nominal.scaling.unit.unit\".format(\n self.channel_numbers[\"height\"])\n self.units[\"height\"] = self.general[height_unit]",
"def device_info(self):\n return {\n \"name\": get_device_name(self._data, 0),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, 0))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.system.product_type,\n \"sw_version\": self._data.wiserhub.system.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }",
"def test_hardware_info(get_touchmat):\n touchmat = get_touchmat\n touchmat_model = check_device_types.get_device_model(touchmat)\n\n hw_info = touchmat.hardware_info()\n if touchmat_model == Devices.touchmat_g1:\n assert hw_info['size'] == {'width' : 16.0, 'height' : 12.0}\n else:\n assert hw_info['size'] == {'width' : 17.7, 'height' : 11.8}",
"def get_monitoring_data_specs(self):\n space = [self.get_input_space()]\n space += self.get_output_space()\n space = CompositeSpace(space)\n source = (self.get_input_source(), self.get_target_source(), 'second_targets')\n return (space, source)",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }",
"def get_lamp_specs(self):\n return {lamp: self._LAMP_SPECS[lamp] for lamp in self._available_lamps}",
"def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._device_id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._device_id))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.devices.get_by_id(self._device_id).model,\n \"sw_version\": self._device.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }",
"def get_testbench_specs(self, tb_type: str) -> Dict[str, Any]:\n return self._specs['testbenches'][tb_type]",
"def hardware_version(self):\n return self.data.get('hw_ver')",
"def device_info(self):\n model = self.data.wiserSmart.getWiserDeviceInfo(self.appliance_id).get(\"modelId\")\n\n return {\n \"name\": self.appliance_name,\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": MANUFACTURER,\n \"model\": model,\n }",
"def computer_info():\n return {\n 'system': platform.system(),\n 'architecture': platform.architecture(),\n 'name': platform.node(),\n 'release': platform.release(),\n 'version': platform.version(),\n 'machine': platform.machine(),\n 'processor': platform.processor(),\n 'virtual CPUs': mproc.cpu_count(),\n 'total RAM': _get_ram(),\n }",
"def get_model_specs(self):\n raise NotImplementedError()",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Somfy\",\n \"name\": self.name,\n \"model\": self.tahoma_device.widget,\n \"sw_version\": self.tahoma_device.type,\n }",
"def get_hardware(hardware_name: str) -> str:\n fixed_name = \"-\".join(hardware_name.lower().split())\n output = _get_content(fixed_name, \"hardware\")\n\n return output",
"def detect_supported_caps():\n result = []\n # generate list of supported capabilities\n\n # Intel RDT L3 CAT\n if common.PQOS_API.is_l3_cat_supported():\n result.append(common.CAT_L3_CAP)\n\n # Intel RDT L2 CAT\n if common.PQOS_API.is_l2_cat_supported():\n result.append(common.CAT_L2_CAP)\n\n # Intel RDT MBA\n if common.PQOS_API.is_mba_supported():\n result.append(common.MBA_CAP)\n\n if sstbf.is_sstbf_enabled():\n result.append(common.SSTBF_CAP)\n\n if power.is_sstcp_enabled():\n result.append(common.POWER_CAP)\n\n return result",
"def _get_spec_info(self):\n raise NotImplementedError()",
"def find_hardware(self, device_info=None):\n if os.name is not 'nt': # If not on a Windows system, just set up soundcard\n self.setup_soundcard()\n self.hardware.append('Soundcard')\n self.out_samplefreq = 44100\n else:\n if 'NIDAQ' in self.required_hardware and self.setup_nidaq(device_info):\n self.hardware.append('NIDAQ')\n if 'RP21' in self.required_hardware and self.setup_RP21('c:\\pystartle\\startle.rco'):\n self.hardware.append('RP21')\n if 'PA5' in self.required_hardware and self.setup_PA5():\n self.hardware.append('PA5')\n if 'RZ5D' in self.required_hardware and self.setup_RZ5D():\n self.hardware.append('RZ5D')"
] | [
"0.7078182",
"0.6386408",
"0.60863703",
"0.60038155",
"0.5982613",
"0.59652555",
"0.59633815",
"0.594874",
"0.59447217",
"0.5936363",
"0.58740854",
"0.5844497",
"0.5777375",
"0.5739162",
"0.5714293",
"0.56568074",
"0.5644492",
"0.56364363",
"0.5634876",
"0.563088",
"0.56237",
"0.5622581",
"0.5606268",
"0.5603563",
"0.5600595",
"0.5565106",
"0.5556801",
"0.55563504",
"0.5546505",
"0.5545424"
] | 0.7476962 | 0 |
Returns true if the other session or sample has the same hardware specs as this one, false otherwise. | def sameHardware(self, other):
    return (self.vendorId == other.vendorId and \
            self.deviceId == other.deviceId and \
            self.physicalMemory == other.physicalMemory and \
            self.osInfo == other.osInfo and \
            self.cpuSpeed[0] == other.cpuSpeed[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _on_same_device(self, other: \"PArray\") -> bool:\n this_device = self._current_device_index\n return this_device in other._array",
"def match(uspec1, uspec2):\n \n if uspec1.is_power_onoff() and uspec2.is_power_onoff():\n return True\n \n if uspec1.number_windows() != uspec2.number_windows():\n return False\n \n if uspec1['speed'] != uspec2['speed'] or \\\n uspec1['x_bin'] != uspec2['x_bin'] or \\\n uspec1['y_bin'] != uspec2['y_bin']:\n return False\n \n if uspec1.number_window_pairs() > 0:\n \n if uspec1['x1_start'] != uspec2['x1_start'] or \\\n uspec1['x1_size'] != uspec2['x1_size'] or \\\n uspec1['y1_start'] != uspec2['y1_start'] or \\\n uspec1['y1_size'] != uspec2['y1_size']:\n return False\n \n if uspec1.number_window_pairs() > 1:\n\n if uspec1['x2_start'] != uspec2['x2_start'] or \\\n uspec1['x2_size'] != uspec2['x2_size'] or \\\n uspec1['y2_start'] != uspec2['y2_start'] or \\\n uspec1['y2_size'] != uspec2['y2_size']:\n return False\n \n return True",
"def __eq__(self, other):\n if not isinstance(other, WritableDeviceType):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if type(other) is not type(self):\n return False\n if self._sample_rate != other._sample_rate:\n return False\n if self._samples.shape != other._samples.shape:\n return False\n if np.any(self.samples != other._samples):\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, Software):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other: Any) -> bool:\n if isinstance(other, Device):\n return self.device_type == other.device_type and self.device_id == other.device_id\n elif isinstance(other, torch.device):\n return self.device_type == other.type and self.device_id == other.index\n else:\n return NotImplemented",
"def __eq__(self, other):\n if not isinstance(other, PoliciesPeripheralsUsbDeviceCommon):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, WSEquipmentDetails):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return (self.name == other.name) and (self.wavelength_control == other.wavelength_control) \\\n and (self.gonio_angles == other.gonio_angles) and (self.wl_angles == other.wl_angles) \\\n and (self.wavelength_minimum == other.wavelength_minimum) \\\n and (self.wavelength_maximum == other.wavelength_maximum) \\\n and (self.wavelength_bandwidth == other.wavelength_bandwidth)",
"def __eq__(self, other):\n return isinstance(other, type(self)) and set(self.channels) == set(other.channels)",
"def __eq__(self, other):\n return np.array_equal(self.hp, other.hp) and np.array_equal(self.hc, other.hc)",
"def __eq__(self, other: object) -> bool:\n\n if not isinstance(other, self.__class__):\n return False\n\n if not self.simctl_type == other.simctl_type:\n return False\n\n return self.raw_info == other.raw_info",
"def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)",
"def __eq__(self, other):\n if not isinstance(other, QosSwitchingProfile):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return isinstance(other, Context) and \\\n self.device_typeid == other.device_typeid and \\\n self.device_id == other.device_id",
"def __eq__(self, other):\n if not isinstance(other, FlavorExtraSpec):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, Context):\n return False\n if self.device_typeid == other.device_typeid and \\\n self.device_id == other.device_id:\n return True\n return False",
"def __eq__(self, other: Any) -> bool:\n if isinstance(other, OutputSpec):\n return type_utils.get_canonical_name_for_outer_generic(\n self.type) == type_utils.get_canonical_name_for_outer_generic(\n other.type)\n else:\n return False",
"def __eq__(self, other):\n if not isinstance(other, ThermostatScheduleCapabilities):\n return False\n\n return self.__dict__ == other.__dict__",
"def _cmp_(self, other):\n if(not isinstance(other, VVHarmonicWeakMaassForms)):\n return False\n eq = (self.multiplier() == other.WR) and (self._weight_rat == other._weight_rat)\n eq = eq and (self.prec == other.prec) and (self._sym_type == other._sym_type)\n eq = eq and (self._is_dual_rep == other._is_dual_rep)\n return eq",
"def __eq__(self, other):\n if not isinstance(other, AssetManagedDeviceStatus):\n return False\n\n return self.__dict__ == other.__dict__",
"def are_equal(self, sp1, sp2):\n return True",
"def __eq__(self, other):\n if not isinstance(other, DeviceInnerDeviceInfoPnpProfileList):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, SpecificationForGeneratingNewJWT):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return np.array_equal(\n self.np_floats(),\n other.np_floats()) and np.array_equal(\n self.np_ints(),\n other.np_ints()) and np.array_equal(\n self.freqs,\n other.freqs)",
"def __eq__(self, other):\n return (isinstance(other, type(self)) and (self.get_all_features() == other.get_all_features()))",
"def equals(self, cmp: MCUSettings):\n return (\n self.start == cmp.start and\n self.peep == cmp.peep and\n self.ratio == cmp.ratio and\n self.freq == cmp.freq and\n self.pressure == cmp.pressure and\n self.oxygen == cmp.oxygen)",
"def __eq__(self, other):\n if not isinstance(other, DeviceRequest):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n \n if not tools.data_are_equal(self.attrs, other.attrs):\n print('here')\n return False\n \n return tools.data_are_equal(self.components, other.components)",
"def __eq__(self, other):\n if not isinstance(other, HandwrittenSignature):\n return False\n\n return self.__dict__ == other.__dict__"
] | [
"0.6838293",
"0.666635",
"0.65126216",
"0.64928484",
"0.63411206",
"0.6300568",
"0.62982696",
"0.61403143",
"0.6127172",
"0.6109003",
"0.6074839",
"0.60358554",
"0.59964126",
"0.59953624",
"0.5989709",
"0.59614843",
"0.59588736",
"0.59384286",
"0.59161776",
"0.5901873",
"0.5877742",
"0.58584964",
"0.58447313",
"0.5834773",
"0.5827097",
"0.58056957",
"0.5788481",
"0.57799375",
"0.57720655",
"0.5770506"
] | 0.7975851 | 0 |
Calculates the average FPS for this player, over all of the player's different sessions. | def calcFrameRate(self):
    tot = 0
    count = 0
    for session in self.sessions:
        for sample in session.samples:
            if not sample.isLoading:
                tot += sample.fps
                count += 1
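    # With every session tallied, compute the overall average fps and flag unusually low (< 10) or high (> 25) players.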
    if count:
        self.avgFps = tot / count
        self.lowFps = (self.avgFps < 10)
        self.highFps = (self.avgFps > 25) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_fps(self):\n \n return self.fps, self.average_fps",
"def get_fps(self):\n return self._num_frames / (datetime.now() - self._start).total_seconds()",
"def update_fps(self, fps):\n self.fps_history.append(fps)\n if len(self.fps_history) > FPS_AVERAGES:\n self.fps_history.pop(0)\n\n self.fps_estimate = np.mean(self.fps_history)\n return self.fps_estimate",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate",
"def get_avg_duration(persons, fps):\r\n if len(persons) > 0:\r\n total_nb_frames = 0\r\n for person in persons:\r\n total_nb_frames = total_nb_frames + person[5] - person[4] \r\n # return the average number of frames by person, divided by the FPS rate to get a value in seconds \r\n return (total_nb_frames / len(persons)) / fps \r\n else:\r\n return 0",
"def get_session_mean():\n try:\n float_times, len_times = convert_to_float(times, 'average')\n return add_zero(round(sum(float_times) / len_times, 2))\n except ZeroDivisionError:\n return \"\"",
"def fps(self):\n\t\treturn float(len(self.buf)) / (self.buf[-1][0] - self.buf[0][0])",
"def average_performance(self):\n\n print(f\"Average performance: {self.performance / 10}\")",
"def get_fps(self):\n # Take difference.\n interframe_intervals = np.diff(self.data[\"t\"])\n\n # Inter-frame interval in milliseconds.\n mean_interval = np.mean(interframe_intervals)\n fps = round(1 / (mean_interval / 1000))\n\n return int(fps)",
"def calculate_fps(self):\n time_difference = self.time_array[-1] - self.time_array[0]\n time_difference_in_seconds = time_difference.to_sec()\n if time_difference_in_seconds == 0:\n pass\n self.fps = self.buffer_size / time_difference_in_seconds\n rospy.loginfo(\"[EulerianMotionMagnification] Estimated FPS: \" + str(self.fps) + \" (Measured timespan: \" + str(time_difference_in_seconds) + \"s)\")\n rospy.loginfo(\"[EulerianMotionMagnification] Video array length: \" + str(len(self.video_array)))",
"def get_fps(self):\n if len(self.times) >= 2:\n dif = np.diff(self.times)\n fps = 1. / dif.min()\n # if the FPS crosses 500, do not update it\n if fps <= 500:\n self.fps = fps\n return self.fps\n else:\n return 0.",
"def win_ratio_avg(self):\n win_ratio = 0\n # Adds all the win ratios of team in this conference which will be\n # used to compute the win ratio average.\n for team_obj in self._conf_teams:\n ### INVARIANT: team_obj is a Team class object and\n ### self._conf_teams is a list of Team class objects.\n win_ratio += team_obj._win_ratio\n return win_ratio/len(self._conf_teams)",
"def avg_page_views_per_session(self, *args, **kwargs):\r\n return self._get('AvgPageViewsPerSession', *args, **kwargs)",
"def display_stats(self):\n print(\"Simulation took: {:.2f} seconds to execute\".format(time.time() - self.start_time))\n for i, win in enumerate(self.wins):\n average = 0\n if win:\n average = float(self.tries[i]) / win\n print(\"Player {} wins: {} with (average number of rounds: {:.2f})\".format(i+1, win, average))",
"def update(self, max_updates = 0):\n \n assert self.started, \"You must call 'start' before using a GameClock.\" \n\n real_time_now = self.get_real_time()\n \n self.real_time_passed = real_time_now - self.real_time\n self.real_time = real_time_now\n \n self.clock_time += self.real_time_passed\n \n if not self.paused:\n self.virtual_time += self.real_time_passed * self.speed\n \n update_count = 0\n while self.game_time + self.game_tick < self.virtual_time:\n \n self.game_frame_count += 1\n self.game_time = self.game_frame_count * self.game_tick\n yield (self.game_frame_count, self.game_time)\n \n if max_updates and update_count == max_updates:\n break\n \n self.between_frame = ( self.virtual_time - self.game_time ) / self.game_tick\n \n if self.real_time_passed != 0:\n self.fps = 1.0 / self.real_time_passed\n else:\n self.fps = 0.0\n \n self.fps_sample_count += 1\n \n if self.real_time - self.fps_sample_start_time > 1.0:\n \n self.average_fps = self.fps_sample_count / (self.real_time - self.fps_sample_start_time)\n self.fps_sample_start_time = self.real_time\n self.fps_sample_count = 0",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def collectPlayers(self):\n \n playerDict = {}\n self.hardware = {}\n for session in self.sessions:\n player = playerDict.get(session.avId, None)\n if not player:\n player = Player(session.avId)\n playerDict[session.avId] = player\n player.addSession(session)\n\n self.hardware.setdefault((player, session.getHardware()), []).append(session)\n\n self.players = playerDict.values()\n for player in self.players:\n player.calcFrameRate()",
"def get_fps(self):\n return self.fps",
"def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)",
"def averageTime(self):\n \n pass",
"def average_speed(self):\n return self._average_speed",
"def avg_session_length(self, *args, **kwargs):\r\n return self._get('AvgSessionLength', *args, **kwargs)",
"def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2",
"def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2",
"def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2",
"def average_speed(self):\n return self.total_distance * 3600 / self.total_time",
"def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0",
"def _cache_average_attempts():\n games = Game.query(Game.game_over == False).fetch()\n if games:\n count = len(games)\n total_attempts_remaining = sum([game.attempts_remaining\n for game in games])\n average = float(total_attempts_remaining)/count\n memcache.set(MEMCACHE_MOVES_REMAINING,\n 'The average moves remaining is {:.2f}'.format(average))"
] | [
"0.635384",
"0.6108307",
"0.6086766",
"0.59754205",
"0.59748524",
"0.5926207",
"0.5866448",
"0.5856975",
"0.58385223",
"0.5776392",
"0.57533216",
"0.57494795",
"0.5631522",
"0.55880743",
"0.5585366",
"0.5568593",
"0.5568593",
"0.5568593",
"0.55296254",
"0.5512434",
"0.5473437",
"0.54618603",
"0.5442835",
"0.54427016",
"0.543393",
"0.543393",
"0.543393",
"0.54123926",
"0.5391276",
"0.5379492"
] | 0.72921765 | 0 |
Reads the client-fps lines from the indicated logfile and writes card_performance.csv, without building up large tables. | def quickAnalyzeCards(self, filename):
    assert filename.endswith('.txt')
    file = open(filename, 'r')
    quickCards = {}
    for line in file:
        line = line.strip()
        if not line:
            continue
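        # Only client-fps records are of interest; skip every other log line.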
        columns = line.split('|')
        if columns[1] != 'client-fps':
            continue
        sample = Sample(line, columns)
        if sample.isLoading:
            continue
        if sample.vendorId == None or sample.deviceId == None:
            continue
        # Now accumulate this sample into the cards table.
        options = quickCards.setdefault((sample.vendorId, sample.deviceId), {})
        totFps, count = options.get(sample.gameOptionsCode, (0, 0))
        totFps += sample.fps
        count += 1
        options[sample.gameOptionsCode] = (totFps, count)
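    # Write one CSV row per (device, game-options) combination: device, options code, average fps, sample count.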
    file = open('card_performance.csv', 'w')
    deviceList = quickCards.keys()
    deviceList.sort()
    for deviceTuple in deviceList:
        options = quickCards[deviceTuple]
        codes = options.keys()
        codes.sort()
        for gameOptionsCode in codes:
            totFps, count = options[gameOptionsCode]
            avgFps = totFps / count
            print >> file, '%s, %s, %s, %s' % (
                self.__formatDevice(deviceTuple),
                gameOptionsCode, avgFps, count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grep_log_files_put_to_csv_output(path):\n log_process = LogProcess()\n log_process.log_path = path\n log_process.output_path = os.path.join(path, \"temp\")\n obj_file.cleanup_dir(log_process.output_path)\n csv_alarm, no_contact = log_process.altc()\n csv_alarm_history = log_process.lgjc()\n csv_audit = log_process.lgoc()\n csv_crash = log_process.lggc()\n csv_scg = log_process.scg()\n csv_st_disable = log_process.st_disable()\n csv_st_cell = log_process.st_cell()\n csv_st_cn = log_process.st_term_point_to_cn()\n csv_st_x2 = log_process.st_term_point_to_x2()\n csv_up_record = log_process.lguc()\n csv_license = log_process.invlrc()\n license_invalid = func_license_invalid.license_valid_to_csv(csv_license)\n csv_pci = log_process.hgetc_pci()\n csv_active_plmn = log_process.hgetc_active_plmn_list()\n csv_endc_plmn = log_process.hgetc_endc_allowed_plmn_list()\n csv_plmn_list = log_process.hgetc_plmn_id_list()\n csv_invxc_files = log_process.invxc()\n csv_sw = csv_invxc_files[0]\n csv_board = csv_invxc_files[1]\n # create a list to save log_grep_file\n grep_file_list = [csv_alarm, csv_alarm_history, csv_audit,\n csv_crash, csv_scg, csv_sw, csv_board,\n csv_st_cell, csv_st_cn, csv_st_x2, csv_st_disable,\n csv_license, license_invalid, csv_up_record, csv_pci,\n csv_active_plmn, csv_endc_plmn, csv_plmn_list]\n\n output_path = os.path.join(config.output_path, config.date_time)\n obj_file.mkdir(output_path)\n out_file_list = []\n for each_file in grep_file_list:\n obj_file.copy_file(each_file, output_path)\n out_file_list.append(os.path.join(output_path, os.path.split(each_file)[1]))\n return out_file_list",
"def Decode_And_Save_As_CSV():\n\n rolloverTimestamp = 0\n with open(os.path.join(os.path.dirname(__file__), 'log_file.bin'), 'rb') as inFile:\n arbID, timeStamp, flags, length, data = Read_CAN_Message(inFile)\n lastTimestamp = int(timeStamp, 16)\n with open(os.path.join(os.path.dirname(__file__), 'log_file.csv'), 'w') as outFile:\n outFile.write('Device_Type,Frame_Type,Device_ID,Timestamp(ms),Flags,Length,Data\\n')\n while arbID:\n deviceType, frameType, deviceID = Decode_Arb_ID(arbID)\n timeStamp = int(timeStamp, 16)\n if timeStamp - lastTimestamp < -1000:\n rolloverTimestamp += 1<<16\n lastTimestamp = timeStamp\n timeStamp+=rolloverTimestamp\n outFile.write('%s,%s,%i,%.6f,%s,%s,%s\\n' % (deviceType, frameType, deviceID, timeStamp/1e3, flags, length, data))\n arbID, timeStamp, flags, length, data = Read_CAN_Message(inFile)",
"def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath,lines=True)\n\n # filter by NextSong action - i.e. get only listening music events from the logs\n df = df[(df.page == \"NextSong\")]\n\n # insert time records\n __insert_time_data(cur, df)\n \n # insert user records\n __insert_user_data(cur, df)\n \n # insert songplay records\n __insert_songplay_data(cur, df)\n \n # erase dataframe\n df = df.iloc[0:0]",
"def main():\n # Load and parse json object from file with specific\n file_name = \"./benchmark.log\"\n doc = re.sub(\"[\\n|\\t]\", \"\", \"\".join(benchmark.read_text_file(file_name)))\n json_object = json.loads(\"\".join(doc))\n\n intervals = json_object[\"intervals\"]\n\n socket_keys = benchmark.get_socket_keys(intervals)\n\n result = benchmark.get_result_dictionary(intervals, socket_keys)\n\n print_to_csv(result, socket_keys)",
"def write_stats(self):\n with open(self.log_file,'a') as output:\n writer = csv.writer(output)\n n_comps,comp_size = self.connected_component() # Calculate number of connected components (sub-colonies)\n writer.writerow([self.pop_size,\n self.get_average_age(),\n self.get_average_survival(),\n # Nearest neighbor logging disabled for speed\n # Use c++ tool to calculate nearest neighbors after runs\n # or uncomment line below to calculate in python (slower)\n # self.get_average_repro()] + [self.get_average_neighbors(r) for r in range(0,16)] +\n self.get_average_repro()] +\n [n_comps,\",\".join(map(str,comp_size))])",
"def processMachineLog(file_name):\n\n # labelling of data lists\n date = []\n timeDay = []\n iteration = []\n iterationTime = [] # time it takes to complete a single cycle\n timeElapsed = [] # time elapsed since start of test for the day\n \n \"\"\" sample of important lines from log\n ##### Begin Iteration # 1 at Wed Jan 27 16:32:27 EST 2016\n ##### Completed Iteration # 1 time: 13.643001secs Total time: 13.648001secs at Wed Jan 27 16:32:41 EST 2016\n \"\"\"\n\n # opens csv file and sorts the data into the lists\n # with open(file_name, 'rb') as csvfile:\n with open(file_name, 'rb') as logFile:\n data = logFile.readlines()\n for line in data:\n if line.startswith(\"##### Completed\"):\n iteration.append(line.split(\" \")[4])\n iterationTime.append(line.split(\" \")[7][:-4]) # remove \"secs\" from the value\n # Get the Day of week, month, and date to put into date list\n dayMoDate = line.split(\" \")[14] + \" \" + line.split(\" \")[15] + \" \" + line.split(\" \")[16] + \" \" + line.split(\" \")[19].rstrip()\n date.append(dayMoDate)\n # get the time in AM/PM format (because Jess can't read 24HR format to save her life)\n convertTime = time.strftime(\"%I:%M:%S %p\", (time.strptime(line.split(\" \")[17] , \"%H:%M:%S\")))\n timeDay.append(convertTime)\n # get the total time (H:M:S format) that has elapsed since the beginning of the test\n timeSecs = float(line.split(\" \")[11][:-4]) # remove \"secs\" from the value\n timeElapsed.append(datetime.timedelta(seconds=timeSecs))\n \n # form the dataframe with all the lists as columns\n df = pd.DataFrame({'Date': date, 'Time': timeDay, 'Cycle': iteration, 'Cycle Duration': iterationTime, 'Time Elapsed': timeElapsed})\n return df",
"def writecsv(obsid, date, output='obslog.csv', rawpath=None):\n logger = log.getLogger('obslog.writecsv')\n progid = obsid[:obsid.rfind('-')]\n logger.debug('Program ID: %s', progid)\n obslog = date + '_' + progid + '_obslog.txt'\n if rawpath:\n obslog = rawpath + '/' + obslog\n data = readtxt(obslog)\n\n output_data = {}\n first_spectrum = None\n files = sorted(data.keys(), reverse=True) # Note the reverse sort here\n for f in files: # Go through the whole list in case there were interruptions for re-aqcuisitions\n if data[f]['Observation ID'] == obsid:\n output_data[f] = data[f]\n logger.debug('Including %s', f)\n first_spectrum = f\n logger.debug('First spectrum: %s', first_spectrum)\n\n last_acq = None\n for i in range(files.index(first_spectrum)+1, len(files)): # again, files is reverse sorted\n if data[files[i]]['ACQ'] == 'Y':\n last_acq = files[i]\n logger.info('Last acqusition file: %s', last_acq)\n break\n\n # Get the header info for the requested images plus the last acquisition image:\n if rawpath:\n fitsfiles = [rawpath + '/' + f for f in ([last_acq] + sorted(output_data.keys()))]\n else:\n fitsfiles = [last_acq] + sorted(output_data.keys())\n headerinfo = header.info(fitsfiles)\n\n for f in output_data.keys(): # Add new keys for the absolute P and Q offsets:\n headerinfo[f]['P'] = headerinfo[f]['POFFSET'] - headerinfo[last_acq]['POFFSET']\n headerinfo[f]['Q'] = headerinfo[f]['QOFFSET'] - headerinfo[last_acq]['QOFFSET']\n logger.debug('Updated Info: %s', headerinfo)\n\n def mergedict(a, b):\n a.update(b)\n return a\n\n logger.info('Writing %s...', output) # Write the info for the requested Obs-ID into a csv file:\n with open(output, mode='w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=['FITSFILE'] + headerinfo[f].keys())\n writer.writeheader()\n for k, d in sorted(headerinfo.items()):\n writer.writerow(mergedict({'FITSFILE': k}, d))\n\n return",
"def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')",
"def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")",
"def loadFromLogFile(filename, maxRows = 0):\n\tultracam = False\n\tultraspec = True\n\tinputFile = open(filename, 'r')\n\t\n\txValues = []\n\tyValues = []\n\tframeList = []\n\theaderBlock = \"\"\n\trunName = \"--unknown--\"\n\ttelescope = \"--unknown--\"\n\ttargetName = \"--unknown--\"\n\tfilterName = \"--unknown--\"\n\tPI = \"--unknown--\"\n\tcolumnCount = 0\n\tuniqueCCDs = []\n\tfor line in inputFile:\n\t\tif line[0] == '#':\n\t\t\theaderBlock+=line\n\t\t\tif (\"target\" in line) and (\"estimated\" not in line):\n\t\t\t\ttargetName = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"Target: %s\"%targetName\n\t\t\tif (\"filters\" in line):\n\t\t\t\tfilterName = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"Filters: %s\"%filterName\n\t\t\tif (\"Telescope\" in line) and (\"observing\" not in line):\n\t\t\t\ttelescopeName = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"Telescope name: %s\"%telescopeName\n\t\t\tif (\" pi \" in line):\n\t\t\t\tPI = generalUtils.getBetweenChars(line, '=', '/').strip()\n\t\t\t\tprint \"PI: %s\"%PI\n\t\t\tif (\" Data file name \" in line):\n\t\t\t\trunName = generalUtils.getBetweenChars(line, '=', '\\n').strip()\n\t\t\t\tprint \"run data file: %s\"%runName\n\t\t\tif (\" Server file name \" in line):\n\t\t\t\trunName = generalUtils.getBetweenChars(line, '=', '\\n').strip()\n\t\t\t\tprint \"run data file: %s\"%runName\n\t\t\t\t\n\t\tif line[0] != '#':\n\t\t\tparams = line.split()\n\t\t\t# print params\n\t\t\tframeIndex = int(params[0])\n\t\t\tCCD = int(params[4])\n\t\t\tif CCD not in uniqueCCDs: uniqueCCDs.append(CCD)\n\t\t\tframeList.append(frameIndex)\n\t\t\tcolumnCount = len(params)\n\tfirstFrame = frameList[0]\n\t\n\tnumApertures = int( ((columnCount-7)/14) )\n\tprint \"ColumnCount: \", columnCount, \"which means %d apertures.\"%numApertures\n\t# frameList = generalUtils.removeDuplicatesFromList(frameList)\n\tprint \"The run in file %s contains %d frames. Start frame: %d End frame: %d\"%(filename, len(frameList), min(frameList), max(frameList))\n\tif len(uniqueCCDs) == 3:\n\t\tprint \"This file has 3 CCDs. It is an ULTRACAM file.\"\n\t\tultracam = True\n\t\tultraspec = False\n\tif len(uniqueCCDs) == 1: \n\t\tprint \"This file has 1 CCD. 
It is an ULTRASPEC file.\"\n\t\tultracam = False\n\t\tultraspec = True\n\n\tif (ultracam): CCDs = [1, 2, 3]\n\telse: CCDs = [1]\n\tfor CCD in CCDs: \n\t\tfor aperture in range(1, numApertures+1):\n\t\t\tapertureIndex = 14*(aperture-1) + 7\n\t\t\tprint \"Reading data for aperture %d, CCD %d\"%(aperture, CCD)\n\t\t\tinputFile.seek(0)\n\t\t\tMJDs = []\n\t\t\tcounts = []\n\t\t\tskys = []\n\t\t\tsigmas = []\n\t\t\terrors = []\n\t\t\ttimeFlags = []\n\t\t\texposures = []\n\t\t\tFWHMs = []\n\t\t\tbetas = []\n\t\t\txs = []\n\t\t\tys = []\n\t\t\tlineCounter = 0\n\t\t\tfor line in inputFile:\n\t\t\t\tlineCounter+= 1\n\t\t\t\tsys.stdout.write(\"\\rLine number: %d \"%(lineCounter))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tif line[0] != '#':\n\t\t\t\t\tparams = line.split()\n\t\t\t\t\t# print params\n\t\t\t\t\tCCDValue = int(params[4])\n\t\t\t\t\tapertureValue = int(params[apertureIndex])\n\t\t\t\t\tif CCDValue == CCD: \n\t\t\t\t\t\tframeIndex = int(params[0])\n\t\t\t\t\t\tMJDs.append(float(params[1]))\n\t\t\t\t\t\ttimeFlags.append(int(params[2]))\n\t\t\t\t\t\texposures.append(float(params[3]))\n\t\t\t\t\t\tFWHMs.append(float(params[5]))\n\t\t\t\t\t\tbetas.append(float(params[6]))\n\t\t\t\t\t\txs.append(float(params[apertureIndex + 1]))\n\t\t\t\t\t\tys.append(float(params[apertureIndex + 2]))\n\t\t\t\t\t\tcounts.append(float(params[apertureIndex + 7]))\n\t\t\t\t\t\tsigmas.append(float(params[apertureIndex + 8]))\n\t\t\t\t\t\tskys.append(float(params[apertureIndex + 9]))\n\t\t\t\t\t\terrors.append(int(params[apertureIndex + 13]))\n\t\t\t\t\t\n\t\t\tphotometry = {}\n\t\t\t\n\t\t\tphotometry['MJD'] = numpy.array(MJDs)\n\t\t\tphotometry['exposure'] = numpy.array(exposures)\n\t\t\tphotometry['FWHM'] = numpy.array(FWHMs)\n\t\t\tphotometry['beta'] = numpy.array(betas)\n\t\t\tphotometry['x'] = numpy.array(xs)\n\t\t\tphotometry['y'] = numpy.array(ys)\n\t\t\tphotometry['counts'] = numpy.array(counts)\n\t\t\tphotometry['sigma'] = numpy.array(sigmas)\n\t\t\tphotometry['sky'] = numpy.array(skys)\n\t\t\tphotometry['error'] = numpy.array(errors)\t\n\t\t\n\t\t\tid = slots.getNextSlotID()\n\t\t\tprint \"new ID:\", id\n\t\t\tslot = photometryClasses.slotObject(id)\n\t\t\tslot.setPhotometry(photometry)\n\t\t\tslot.setTimeColumn('MJD')\n\t\t\tslot.setYColumn('counts')\n\t\t\tslot.setYError('sigma')\n\t\t\tslot.target = targetName\n\t\t\tslot.filter = filterName\n\t\t\tslot.aperture = aperture\n\t\t\tslot.headers = headerBlock\n\t\t\tslot.runName = runName\n\t\t\tslot.telescope = findTelescope(telescopeName)\n\t\t\tslot.CCD = \"CCD %d\"%CCD\n\t\t\tnumSlots = slots.addSlot(slot)\n\t\t\tprint \"Added the data to a new slot. Total number of slots is now: %d\"%(numSlots)\n\t\t\tprint slot\n\t\n\tinputFile.close()\n\treturn",
"def process_log_file(cur, filepath):\r\n df=pd.read_json(filepath,lines=True)\r\n df2=df\r\n df=df[df['page']=='NextSong']\r\n ser=pd.to_datetime(df['ts'],unit='ms')\r\n times=[]\r\n for i in ser:\r\n times.append([i,i.hour,i.day,i.week,i.month,i.year,i.day_name()])\r\n for i in times:\r\n cur.execute(time_table_insert,i)\r\n df=df[['userId','firstName','lastName','gender','level']]\r\n for i,row in df.iterrows():\r\n cur.execute(users_table_insert,list(row))\r\n for i, row in df2.iterrows():\r\n cur.execute(song_select, (row.song, row.artist, row.length))\r\n res = cur.fetchone()\r\n if res:\r\n song_id, artist_id = res\r\n else:\r\n song_id, artist_id = None, None\r\n\r\n songplay_data = (\r\n i, pd.to_datetime(row.ts, unit='ms'),int(row.userId), row.level, song_id, artist_id, row.sessionId,\r\n row.location, row.userAgent)\r\n cur.execute(songplays_table_insert, songplay_data)",
"def process_spike_sim_log(spike_log, csv):\n logging.info(\"Processing spike log : %s\" % spike_log)\n instr_cnt = 0\n spike_instr = \"\"\n\n RD_RE = re.compile(r\"(?P<pri>\\d) 0x(?P<addr>[a-f0-9]+?) \" \\\n \"\\((?P<bin>.*?)\\) x\\s*(?P<reg>\\d*?) 0x(?P<val>[a-f0-9]+)\")\n CORE_RE = re.compile(r\"core.*0x(?P<addr>[a-f0-9]+?) \\(0x(?P<bin>.*?)\\) (?P<instr>.*?)$\")\n INSTR_RE = re.compile(r\"(?P<instr>[a-z\\.]+?)(\\s+?)(?P<operand>.*)\")\n GPR_RE = re.compile(r\"^[a-z][0-9a-z]$\")\n CSR_RE = re.compile(r\"csr\")\n ILLE_RE = re.compile(r\"trap_illegal_instruction\")\n\n # Remove all the init spike boot instructions\n cmd = (\"sed -i '/core.*0x0000000000001010/,$!d' %s\" % spike_log)\n os.system(cmd)\n # Remove all instructions after ecall (end of program excecution)\n cmd = (\"sed -i '/ecall/q' %s\" % spike_log)\n os.system(cmd)\n\n gpr = {}\n gpr[\"zero\"] = 0\n\n with open(spike_log, \"r\") as f, open(csv, \"w\") as csv_fd:\n trace_csv = RiscvInstructiontTraceCsv(csv_fd)\n trace_csv.start_new_trace()\n for line in f:\n # Extract instruction infromation\n m = CORE_RE.search(line)\n if m:\n spike_instr = m.group(\"instr\")\n rv_instr_trace = RiscvInstructiontTraceEntry()\n rv_instr_trace.instr_str = spike_instr\n rv_instr_trace.addr = m.group(\"addr\")\n rv_instr_trace.binary = m.group(\"bin\")\n if spike_instr == \"wfi\":\n trace_csv.write_trace_entry(rv_instr_trace)\n continue\n nextline = f.readline()\n if nextline != \"\":\n if ILLE_RE.search(nextline):\n continue\n m = RD_RE.search(nextline)\n if m:\n # Extract RD information\n instr_cnt += 1\n rv_instr_trace.rd = gpr_to_abi(\"x%0s\" % m.group(\"reg\"))\n rv_instr_trace.rd_val = m.group(\"val\")\n rv_instr_trace.privileged_mode = m.group(\"pri\")\n gpr[rv_instr_trace.rd] = rv_instr_trace.rd_val\n s = INSTR_RE.search(spike_instr)\n if s:\n rv_instr_trace.instr = s.group(\"instr\")\n operand_str = s.group(\"operand\").replace(\" \", \"\")\n if operand_str != \"\" :\n operands = operand_str.split(\",\")\n if CSR_RE.search(s.group(\"instr\")):\n # CSR instruction\n operand = operands[-1]\n if GPR_RE.search(operand) or operand == \"zero\":\n rv_instr_trace.rs1 = operand\n rv_instr_trace.rs1_val = gpr[operand]\n else:\n rv_instr_trace.imm = operand\n else:\n # Non CSR instruction\n for i in range(1, len(operands)):\n operand = operands[i]\n if GPR_RE.search(operand) or operand == \"zero\":\n if i == 1:\n rv_instr_trace.rs1 = operand\n rv_instr_trace.rs1_val = gpr[operand]\n else:\n rv_instr_trace.rs2 = operands[i]\n rv_instr_trace.rs2_val = gpr[operand]\n else:\n rv_instr_trace.imm = operand\n trace_csv.write_trace_entry(rv_instr_trace)\n logging.info(\"Processed instruction count : %d\" % instr_cnt)",
"def read_linelog():",
"def log_parser(logline, http_re_match):\n global log_stats_df\n match = re.search(http_re_match, logline)\n if match is not None:\n # Need to format HTTP log time into timestamp for comparisons\n date_time = time.mktime(datetime.datetime.strptime(match.group('time'), '%d/%b/%Y:%H:%M:%S +%f').timetuple())\n\n # Insert every group in case we want to show some more stats one day\n log_stat_df = pd.DataFrame({\"date_time\": [date_time],\n \"host\": [match.group('host')],\n \"user_id\": [match.group('user')],\n \"method\": [match.group('method')],\n \"section\": [match.group('section')],\n \"subsection\": [match.group('subsection')],\n \"protocol\": [match.group('protocol')],\n \"response_code\": [match.group('status')],\n \"content_size\": [match.group('size')]\n }, index=[date_time]\n )\n log_stats_df = pd.concat([log_stats_df,log_stat_df])\n else:\n print(f\"WARNING: Unmatched log line '{logline}'\")",
"def print_log_summary(self, device_id, log_file, df_phys):\n if self.verbose:\n print(\n \"\\n---------------\",\n f\"\\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\\n\",\n )",
"def debug_file(self, pkt_count, attack_count, data_list, ds_calc_time, ds_vals, metric_means, distances):\n # Current frame no. //\n # Current frame metric data //\n # Current sliding window data\n # Distances for each metric\n # DS probabilities, BPA's, time to calculate\n # Fusion results for each metric\n # Averages for each metric\n # Final result for frame\n # Current number of malicious frames detected\n metric_list = ['RSSI', 'Rate', 'NAV', 'Seq', 'TTL']\n x = [1, 2, 3, 4, 5]\n with open('debug.txt', 'a') as debug_file:\n debug_file.write('\\nFrame number: %d\\n' % pkt_count)\n debug_file.write('Current frame data. \\n')\n debug_file.writelines('%s : %d \\n ' % (metric, value) for metric, value in zip(self._features_to_analyse,\n data_list))\n debug_file.write('\\nCurrent sliding window data: \\n')\n debug_file.writelines('\\n%s:\\n %s \\nMean value = %f \\n' % (str(metric_array[0]), str(metric_array[1]), mean) for metric_array, mean in zip(self._sw_dict.items(), metric_means))\n debug_file.write('\\nDempster Shafer calculation times: \\n')\n\n if self._ds_timer is True:\n debug_file.writelines('Iteration %d time (s) = %f\\n' % (count, ds_time) for count, ds_time in zip(x, ds_calc_time))\n debug_file.write('Total time to calculate DS = %f (s)\\n' % sum(ds_calc_time))\n\n debug_file.write('Number of malicious frames detected: %d \\n' % attack_count)\n\n\n debug_file.close()",
"def write_report(self):\r\n self.end_time = time.strftime('%Y-%m-%d_%H:%M:%S')\r\n server_log.info('')\r\n server_log.info('=========================================================')\r\n server_log.info('All test clients completed!')\r\n server_log.info(' Start time: {}'.format(self.start_time))\r\n server_log.info(' End time: {}'.format(self.end_time))\r\n server_log.info('')\r\n server_log.info('Total of {} client(s) ran. Data for each client:'.format(len(self.client_list)))\r\n for client in self.client_list.values():\r\n server_log.info('---------------------------------------------------------')\r\n server_log.info(' Client {}'.format(client.client_id))\r\n server_log.info(' Test status: {}'.format(client.status))\r\n server_log.info(' Time ran: {:.2f} sec'.format(client.time_ran)) \r\n server_log.info(' Avg CPU usage: {:.2f}%'.format(client.cpu_avg))\r\n server_log.info(' Avg MEM usage: {:.2f}%'.format(client.mem_avg))\r\n server_log.info(' Files written: {}'.format(client.files_written))\r\n server_log.info(' File size: {}'.format(client.file_size))\r\n server_log.info(' Chunk size: {}'.format(client.chunk_size))\r\n server_log.info('=========================================================')\r\n server_log.info('')",
"def log_display_to_file(pressure_msl, presstrend_str, presstrendval,\n cumulus_forecast, pressure_forecast,\n temp_c, dew_point, humidity,\n rrate, beaufort, wdir, winddeg, gust,\n is_lightning_possible_str,\n fog_str,\n line_pressure,\n line_metrics1, line_metrics2, line_metrics3, line_metrics4, line_metrics5, line_metrics6,\n line_moon,\n alert_str):\n log_filename = definitions.DISPLAY_ROOT + '/' + 'ptendency.tsv'\n log_rec = time.ctime() + '\\t' + \\\n pressure_msl.__str__() + '\\t' + \\\n presstrend_str + presstrendval.__str__() + '\\t' + \\\n pressure_forecast + '\\t' + \\\n temp_c.__str__() + '\\t' + \\\n dew_point.__str__() + '\\t' + \\\n humidity.__str__() + '\\t' + \\\n rrate.__str__() + '\\t' + \\\n beaufort.__str__() + '\\t' + \\\n gust.__str__() + '\\t' + \\\n wdir + '\\t' + \\\n winddeg.__str__() + '\\t' + \\\n is_lightning_possible_str.__str__() + '\\t' + \\\n fog_str.__str__() + '\\t' + \\\n '\"' + line_pressure + '\"' + '\\t' + \\\n '\"' + line_metrics1 + '\"' + '\\t' + \\\n '\"' + line_metrics2 + '\"' + '\\t' + \\\n '\"' + line_metrics3 + '\"' + '\\t' + \\\n '\"' + line_metrics4 + '\"' + '\\t' + \\\n '\"' + line_metrics5 + '\"' + '\\t' + \\\n '\"' + line_metrics6 + '\"' + '\\t' + \\\n alert_str + '\\t' + \\\n cumulus_forecast + '\\t' + \\\n '\\n'\n\n fp_out = open(log_filename, 'a')\n fp_out.write(log_rec)\n fp_out.close()\n\n print(log_rec.rstrip())\n\n return",
"async def logs(self, ctx):\r\n openfile = open(\"logs.csv\", \"rb\")\r\n logfile = discord.File(fp=openfile, filename='logs')\r\n await ctx.send(file=logfile)",
"def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')",
"def process_log_file(cur, filepath):\n\n df = pd.read_json(filepath, lines=True)\n\n df = df[df[\"page\"] == \"NextSong\"]\n\n t = df['ts'] = pd.to_datetime(df['ts'], unit='ms')\n\n accessor = t.dt\n time_data = (t, accessor.hour, accessor.day, accessor.week,\n accessor.month, accessor.year, accessor.weekday)\n\n time_df = pd.DataFrame.from_dict({\n \"timestamp\": t,\n \"hour\": accessor.hour,\n \"day\": accessor.day,\n \"week\": accessor.week,\n \"month\": accessor.month,\n \"year\": accessor.year,\n \"weekday\": accessor.weekday\n })\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']]\n\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n for index, row in df.iterrows():\n\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n start_time = row[\"ts\"]\n user_id = row[\"userId\"]\n level = row[\"level\"]\n song_id = songid\n artist_id = artistid\n session_id = row['sessionId']\n location = row['location']\n user_agent = row['userAgent']\n\n songplay_data = (start_time, user_id, level, song_id, artist_id, session_id,\n location, user_agent)\n cur.execute(songplay_table_insert, songplay_data)",
"def process_log_file(cur, filepath):\n # open log file\n datalog = pd.read_json(filepath, lines=True)\n\n df = pd.DataFrame(data=datalog)\n df.head()\n\n # convert timestamp column to datetime\n t = pd.to_datetime(df['ts'], unit='ms')\n \n # insert time data records\n time_log = []\n column_labels = ('start time','hour','day','week of year','month','year','weekday')\n index = 0\n for timestamp in t:\n time_data = (t[index],t.dt.hour[index],t.dt.day[index],t.dt.week[index],t.dt.month[index],t.dt.year[index],t.dt.weekday[index])\n time_log.append(time_data)\n index = index + 1\n \n time_df = pd.DataFrame.from_dict(time_log)\n #print(time_df)\n time_df.head()\n\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n \n\n # load user table\n user_df_data = df[['userId', 'firstName', 'lastName','gender','level']].values\n user_df = pd.DataFrame.from_dict(user_df_data)\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n \n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (pd.to_datetime(row.ts, unit='ms'),row.userId,row.level,songid,artistid,row.sessionId,row.location,row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)",
"def __init__(self, perf_location=\"../performances/\", verbose=True):\n self.verbose = verbose\n performance_dict = {}\n self.logs = []\n self.log_arrays = []\n if self.verbose:\n print(\"Loading tiny performances...\")\n for local_file in os.listdir(perf_location):\n if local_file.endswith(\".csv\"):\n perf_data = {\n \"title\": local_file[:-4],\n \"performer\": local_file.split(\"-\")[1],\n \"instrument\": local_file.split(\"-\")[2],\n \"filename\": perf_location + local_file}\n # parse performance\n perf_log = pd.DataFrame.from_csv(perf_data[\"filename\"], parse_dates=False)\n # calculate velocity\n perf_log[\"velocity\"] = self.create_velocity_column(perf_log)\n # arrange time\n perf_log['time'] = perf_log.index\n perf_log['delta_t'] = perf_log.time.diff()\n perf_log.delta_t = perf_log.delta_t.fillna(0)\n # add to archive\n self.logs.append(perf_log)\n self.log_arrays.append(np.array(perf_log[['x', 'y', 'delta_t', 'moving']]))\n\n # Centroid\n perf_data[\"centroid_X\"] = perf_log[\"x\"].mean()\n perf_data[\"centroid_Y\"] = perf_log[\"y\"].mean()\n\n # Centroid S.D.\n perf_data[\"centroid_X_SD\"] = perf_log[\"x\"].std()\n perf_data[\"centroid_Y_SD\"] = perf_log[\"y\"].std()\n\n # Starting/Ending coordinate\n perf_data[\"first_X\"] = perf_log[\"x\"].iloc[0]\n perf_data[\"first_Y\"] = perf_log[\"y\"].iloc[0]\n perf_data[\"last_X\"] = perf_log[\"x\"].iloc[-1]\n perf_data[\"last_Y\"] = perf_log[\"y\"].iloc[-1]\n\n # Length\n perf_data[\"duration\"] = perf_log.index[-1] - perf_log.index[0]\n\n # Percentage of moving touches\n perf_data[\"percent_moving\"] = perf_log[\"moving\"].mean()\n\n # Mean Velocity\n perf_data[\"mean_velocity\"] = perf_log[\"velocity\"].mean()\n performance_dict.update({perf_data[\"title\"]: perf_data})\n\n # Number of records\n perf_data[\"total\"] = perf_log[\"x\"].count()\n\n self.performances = pd.DataFrame.from_dict(performance_dict, orient='index')\n # self.performances.instrument = performances.instrument.astype('category')\n if self.verbose:\n print(\"Finished loading performances:\")\n print(self.performances.describe())",
"def csvOutput(cycle, fctimes, beachdata, offshoredata, surfdata, fname='isurf_output.csv', outdir='.'):\n\n datestr = cycle.strftime('%Y%m%d00')\n\n with open(outdir+'/%s' %fname,'w') as outp:\n outp.write(datestr+'\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('\\r\\n')\n outp.write('%s' %beachdata['name'][isite] + '\\r\\n')\n outp.write('%d' %beachdata['type'][isite] + '\\r\\n')\n #outp.write('TI Hsmo Tpmo Dmo Hseq Tpeq DmEq Hsbr Dpbr\\r\\n')\n #outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Tide,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n\n\t # write out to file\n for itime in range(len(fctimes)):\n\n # write out the data values to file\n\t #outp.write ('%02d' %fctimes[lp] + ' %4.2f %4.1f %3d' %tuple([hm0[lp,isite], tp[lp,isite], dirn[lp,isite]]) + \\\n # ' %4.2f %4.1f %3d' %tuple([hsshwd[lp,isite], tpshwd[lp,isite], reldir[lp,isite]]) + ' %4.2f %4.2f' %tuple([hsbkinit[lp,isite], dpsat[lp,isite]]) + '\\r\\n')\n\t outp.write('%02d' %fctimes[itime] + \\\n ',%4.1f' %offshoredata['wspd'][itime,isite] + \\\n #',%3d' %offshoredata['wdir'][itime,isite] + \\\n ',%4.2f' %offshoredata['hm0'][itime,isite] + \\\n ',%4.1f' %offshoredata['tp'][itime,isite] + \\\n ',%3d' %offshoredata['dirn'][itime,isite] + \\\n ',%4.2f' %surfdata['shorewardHs'][itime,isite] + \\\n ',%4.1f' %surfdata['shorewardT'][itime,isite] + \\\n ',%3d' %surfdata['relativeDirn'][itime,isite] + \\\n ',%4.2f' %surfdata['breakerHs'][itime,isite] + \\\n ',%4.2f' %surfdata['saturatedDepth'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in3'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in10'][itime,isite] + \\\n ',%1d' %surfdata['breakerType'][itime,isite] + '\\r\\n')\n outp.close()",
"def log_frame(frame, logfile=PCAP_LOG):\n global frame_count\n frame_count += 1\n pcap_logger = PcapWriter(logfile, append=True)\n pcap_logger.write(frame)\n pcap_logger.close()",
"def process_log_file(cur, filepath):\n \n # open log file\n \n df = pd.read_json(filepath, lines = True)\n \n # filter by NextSong action\n df = df[df['page']=='NextSong']\n # convert timestamp column to datetime\n t = pd.to_datetime(df.ts, unit='ms')\n df.ts = t\n \n # insert time data records\n time_data = [t, t.dt.hour, t.dt.day, t.dt.weekofyear,\n t.dt.month, t.dt.year, t.dt.weekday]\n \n # column_labels = ['timestamp','Hour', \n # 'Day','Month','Year''Weekday']'\n column_labels = ['timestamp','hour','day','weekofyear','month','year','weekday']\n time_df = pd.DataFrame(dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n \n # load user table\n user_df = df[['userId','firstName', \n 'lastName','gender','level']]\n\n # insert user records\n for i, row in user_df.iterrows(): \n cur.execute(user_table_insert, row)\n \n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist,\n row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (index, row.ts, row.userId, row.level,\n songid, artistid, row.sessionId, \n row.location, row.userAgent)\n \n \n cur.execute(songplay_table_insert, songplay_data)",
"def process_log_file(cur, filepath):\n df = pd.read_json(filepath, lines=True)\n\n df = df[df['page'] == 'NextSong'].astype({'ts':'datetime64[ms]'})\n\n t = pd.Series(df['ts'], index=df.index)\n\n time_data = [[d, d.hour, d.day, d.week, d.month, d.year, d.weekday()] for d in t]\n column_labels= ['ts', 'hour', 'day', 'week', 'month', 'year', 'weekday']\n time_df = pd.DataFrame(data=time_data, columns=column_labels)\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n columns = ['userId', 'firstName', 'lastName', 'gender', 'level']\n user_df = df[[*columns]]\n user_df = user_df[user_df.firstName.notnull()]\n\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n for index, row in df.iterrows():\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)",
"def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values",
"def write_log(logfile, log_dict):\n with open(logfile, 'a') as f:\n c = csv.writer(f)\n if log_dict['epoch'] == 0: # write header for first epoch (dubbed as 0th epoch)\n c.writerow(log_dict.keys())\n\n c.writerow(log_dict.values())",
"def make_obslog(path):\n name_pattern = '^HI\\.\\d{8}\\.\\d{5}\\.fits$'\n\n # scan the raw files\n fname_lst = sorted(os.listdir(path))\n\n # prepare logtable\n logtable = Table(dtype=[\n ('frameid', 'i2'), ('fileid', 'S17'), ('imgtype', 'S3'),\n ('object', 'S20'), ('i2cell', 'bool'), ('exptime', 'f4'),\n ('obsdate', Time),\n ('deckname', 'S2'), ('filter1', 'S5'), ('filter2', 'S5'),\n ('nsat_1', 'i4'), ('nsat_2', 'i4'), ('nsat_3', 'i4'),\n ('q95_1', 'i4'), ('q95_2', 'i4'), ('q95_3', 'i4'),\n ])\n\n # prepare infomation to print\n pinfo = FormattedInfo(all_columns,\n ['frameid', 'fileid', 'imgtype', 'object', 'i2cell', 'exptime',\n 'obsdate', 'deckname', 'nsat_2', 'q95_2'])\n\n # print header of logtable\n print(pinfo.get_separator())\n print(pinfo.get_title())\n print(pinfo.get_separator())\n\n # start scanning the raw files\n prev_frameid = -1\n for fname in fname_lst:\n if not re.match(name_pattern, fname):\n continue\n fileid = fname[0:17]\n filename = os.path.join(path, fname)\n hdu_lst = fits.open(filename)\n # parse images\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n\n head0 = hdu_lst[0].header\n\n frameid = prev_frameid + 1\n\n # get obsdate in 'YYYY-MM-DDTHH:MM:SS' format\n date = head0.get('DATE-OBS')\n utc = head0.get('UTC', head0.get('UT'))\n obsdate = Time('%sT%s'%(date, utc))\n\n exptime = head0.get('ELAPTIME')\n i2in = head0.get('IODIN', False)\n i2out = head0.get('IODOUT', True)\n i2cell = i2in\n imagetyp = head0.get('IMAGETYP')\n targname = head0.get('TARGNAME', '')\n lampname = head0.get('LAMPNAME', '')\n\n if imagetyp == 'object':\n # science frame\n imgtype = 'sci'\n objectname = targname\n elif imagetyp == 'flatlamp':\n # flat\n imgtype = 'cal'\n objectname = '{} ({})'.format(imagetyp, lampname)\n elif imagetyp == 'arclamp':\n # arc lamp\n imgtype = 'cal'\n objectname = '{} ({})'.format(imagetyp, lampname)\n elif imagetyp == 'bias':\n imgtype = 'cal'\n objectname = 'bias'\n else:\n print('Unknown IMAGETYP:', imagetyp)\n\n # get deck and filter information\n deckname = head0.get('DECKNAME', '')\n filter1 = head0.get('FIL1NAME', '')\n filter2 = head0.get('FIL2NAME', '')\n\n # determine the numbers of saturated pixels for 3 CCDs\n mask_sat1 = (mask_lst[0] & 4)>0\n mask_sat2 = (mask_lst[1] & 4)>0\n mask_sat3 = (mask_lst[2] & 4)>0\n nsat_1 = mask_sat1.sum()\n nsat_2 = mask_sat2.sum()\n nsat_3 = mask_sat3.sum()\n\n # find the 95% quantile\n q95_lst = [np.sort(data.flatten())[int(data.size*0.95)]\n for data in data_lst]\n q95_1, q95_2, q95_3 = q95_lst\n\n # close the fits file\n hdu_lst.close()\n\n item = [frameid, fileid, imgtype, objectname, i2cell, exptime, obsdate,\n deckname, filter1, filter2,\n nsat_1, nsat_2, nsat_3, q95_1, q95_2, q95_3]\n\n logtable.add_row(item)\n # get table Row object. 
(not elegant!)\n item = logtable[-1]\n\n # print log item with colors\n string = pinfo.get_format(has_esc=False).format(item)\n print(print_wrapper(string, item))\n\n prev_frameid = frameid\n\n print(pinfo.get_separator())\n\n # sort by obsdate\n #logtable.sort('obsdate')\n\n # determine filename of logtable.\n # use the obsdate of the LAST frame.\n obsdate = logtable[-1]['obsdate'].iso[0:10]\n outname = '{}.obslog'.format(obsdate)\n if os.path.exists(outname):\n i = 0\n while(True):\n i += 1\n outname = '{}.{}.obslog'.format(obsdate, i)\n if not os.path.exists(outname):\n outfilename = outname\n break\n else:\n outfilename = outname\n\n # save the logtable\n\n # loginfo is not pinfo because not all columns need to be printed in the\n # screen, but all columns should be written in logfile.\n loginfo = FormattedInfo(all_columns)\n outfile = open(outfilename, 'w')\n outfile.write(loginfo.get_title()+os.linesep)\n outfile.write(loginfo.get_dtype()+os.linesep)\n outfile.write(loginfo.get_separator()+os.linesep)\n for row in logtable:\n outfile.write(loginfo.get_format(has_esc=False).format(row)+os.linesep)\n outfile.close()"
] | [
"0.55250233",
"0.54603183",
"0.5450724",
"0.5413982",
"0.5379444",
"0.53514105",
"0.52793115",
"0.52764195",
"0.5269211",
"0.5259841",
"0.5198763",
"0.5174568",
"0.5164108",
"0.5136736",
"0.5116632",
"0.5114831",
"0.51126087",
"0.5108278",
"0.5103108",
"0.50868624",
"0.5073313",
"0.5066198",
"0.50599337",
"0.5052141",
"0.50475615",
"0.50365233",
"0.5028293",
"0.5018458",
"0.50169945",
"0.5014293"
] | 0.5976573 | 0 |
Write the samples for all players with less than 10 fps average frame rate to the indicated text file. This generates a new log file that may be analyzed independently. | def writeLowPlayers(self, filename):
assert filename.endswith('.txt')
file = open(filename, 'w')
samples = []
for player in self.players:
if player.lowFps:
for session in player.sessions:
for sample in session.samples:
sample.write(file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')",
"def debug_file(self, pkt_count, attack_count, data_list, ds_calc_time, ds_vals, metric_means, distances):\n # Current frame no. //\n # Current frame metric data //\n # Current sliding window data\n # Distances for each metric\n # DS probabilities, BPA's, time to calculate\n # Fusion results for each metric\n # Averages for each metric\n # Final result for frame\n # Current number of malicious frames detected\n metric_list = ['RSSI', 'Rate', 'NAV', 'Seq', 'TTL']\n x = [1, 2, 3, 4, 5]\n with open('debug.txt', 'a') as debug_file:\n debug_file.write('\\nFrame number: %d\\n' % pkt_count)\n debug_file.write('Current frame data. \\n')\n debug_file.writelines('%s : %d \\n ' % (metric, value) for metric, value in zip(self._features_to_analyse,\n data_list))\n debug_file.write('\\nCurrent sliding window data: \\n')\n debug_file.writelines('\\n%s:\\n %s \\nMean value = %f \\n' % (str(metric_array[0]), str(metric_array[1]), mean) for metric_array, mean in zip(self._sw_dict.items(), metric_means))\n debug_file.write('\\nDempster Shafer calculation times: \\n')\n\n if self._ds_timer is True:\n debug_file.writelines('Iteration %d time (s) = %f\\n' % (count, ds_time) for count, ds_time in zip(x, ds_calc_time))\n debug_file.write('Total time to calculate DS = %f (s)\\n' % sum(ds_calc_time))\n\n debug_file.write('Number of malicious frames detected: %d \\n' % attack_count)\n\n\n debug_file.close()",
"def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)",
"def data_log(self, file, spectra):\n if self.datalogflag:\n with open(file, 'a') as f:\n f.write('{0}, '.format(spectra))\n self.vprint(\n 2, 'Writing spectra to data log at {}'.format(file))",
"def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")",
"def create_game_logs_file(team_id):\n # team game log\n path = os.path.join(TEAM_BASE_PATH, TEAM_DICT[team_id] + '.json')\n if not os.path.exists(path):\n print(\"Retrieving team \" + TEAM_DICT[team_id] +\n \" game log, season stats ... Please wait.\")\n game_logs = team.TeamGameLogs(team_id, '2016-17').json\n with open(path, 'w') as outfile:\n json.dump(game_logs, outfile)\n\n # playoff game log\n playoff_path = os.path.join(TEAM_PLAYOFF_PATH, TEAM_DICT[team_id] + '.json')\n if not os.path.exists(playoff_path):\n playoff_games = team.TeamGameLogs(team_id, '2016-17',\n constants.SeasonType.Playoffs).json\n if len(playoff_games['resultSets'][0]['rowSet']):\n with open(playoff_path, 'w') as playoff_files:\n json.dump(playoff_games, playoff_files)\n\n # season stats\n season_path = os.path.join(TEAM_SEASON_PATH, TEAM_DICT[team_id] + '.json')\n if not os.path.exists(season_path):\n season_stats = team.TeamSeasons(team_id).json\n with open(season_path, 'w') as season_files:\n json.dump(season_stats, season_files)",
"def writeRawRatings(self):\n judgeNotesLogger.info(\"writeRawRatings: Writing file containing songs for each rating\")\n try:\n os.chdir(self.fileDir)\n sortedRatings = sorted(self.ratingsRaw.keys(), key=float)\n fileName = \"ratingsRaw_\" + self.judgeName + \".txt\"\n with open(fileName, 'w') as outFile:\n\n # Write out normal raw ratings first.\n for rating in sortedRatings:\n outFile.write(\"[\"+str(rating)+\"/10]:\"+str(self.ratingsRaw[rating])+\"\\n\")\n ratingSum = self.getRatingSum()\n\n # Write out special raw ratings second.\n sortedRatings = sorted(self.specialRatingsRaw.keys(), key=str.lower)\n for rating in sortedRatings:\n outFile.write(\"[\"+str(rating)+\"]:\"+str(self.specialRatingsRaw[rating])+\"\\n\")\n\n # Write out average as well.\n outFile.write(\"TOTAL:\"+str(round(ratingSum, 1))+\"\\n\")\n outFile.write(\"JUDGEDFILES:\"+str(self.numJudgedFiles)+\"\\n\")\n outFile.write(\"SPECIALFILES:\"+str(self.numSpecialFiles)+\"\\n\")\n outFile.write(\"TOTALFILES:\"+str(self.numTotalFiles)+\"\\n\")\n outFile.write(\"AVERAGE:\"+str(round(self.average, 2))+\"\\n\")\n outFile.close()\n judgeNotesLogger.info(\"writeRawRatings: Successfully wrote file '%s'\", fileName)\n except:\n judgeNotesLogger.warning(\"writeRawRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))",
"def dumpRecording(self, files):\n for tone, f in zip(self.tones, files):\n tone.dump_to_file(f)",
"def write_stats(self):\n with open(self.log_file,'a') as output:\n writer = csv.writer(output)\n n_comps,comp_size = self.connected_component() # Calculate number of connected components (sub-colonies)\n writer.writerow([self.pop_size,\n self.get_average_age(),\n self.get_average_survival(),\n # Nearest neighbor logging disabled for speed\n # Use c++ tool to calculate nearest neighbors after runs\n # or uncomment line below to calculate in python (slower)\n # self.get_average_repro()] + [self.get_average_neighbors(r) for r in range(0,16)] +\n self.get_average_repro()] +\n [n_comps,\",\".join(map(str,comp_size))])",
"def writeOut(self):\r\n with open(self.fname, 'w') as f:\r\n for i in range(10):\r\n score = self.getNextHighest()\r\n if score is not None:\r\n f.write('%s %s\\n' % (score.name,\r\n score.score))\r\n pass",
"def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)",
"def record_audio_to_file(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()",
"def test_log_filenames_multiple_date_in_past(self):\n time_lower = datetime.datetime.now() - datetime.timedelta(seconds=7210)\n time_upper = time_lower + datetime.timedelta(seconds=20)\n (tracks, statuses) = self.app.log_filenames(\n [self.track_path('silence.mp3')]*5,\n timestamp='2 hours ago'\n )\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertGreaterEqual(track_obj['timestamp'], time_lower)\n self.assertLess(track_obj['timestamp'], time_upper)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])",
"def test_log_filenames_multiple_no_date(self):\n now = datetime.datetime.now()\n (tracks, statuses) = self.app.log_filenames([self.track_path('silence.mp3')]*5)\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertLess(track_obj['timestamp'], now)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])",
"def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()",
"def data_log(self, file, **kwargs):\n time_string = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n average_data = kwargs.get('average_data')\n if self.datalogflag:\n with open(file, 'a') as f:\n f.write('{0}, {1}'.format(time_string, average_data))\n f.write('\\n')\n self.vprint(2, 'Writing average air quality data to data log at {}'.format(file))",
"def write_data(self, file_path, success_cutoff):\n agg_df = pd.DataFrame(columns=tf.Move)\n for game in self.game_list:\n agg_df = agg_df.add(game, fill_value = 0)\n agg_df.to_csv(file_path)\n pass",
"def main(output_file):\n with open(output_file, 'w+') as fl:\n poor_perf_stats = pstats.Stats('poor_perf.log', stream=fl)\n good_perf_stats = pstats.Stats('good_perf.log', stream=fl)\n\n poor_perf_stats.sort_stats('cumtime')\n\n fl.write('--------------------------------------------\\n')\n fl.write('POOR PERFORMANCE STATS\\n')\n fl.write(f\"Time: {poor_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {poor_perf_stats.total_calls}\\n\")\n fl.write(f\"Top cumulative times\\n\")\n poor_perf_stats.print_stats(20)\n\n fl.write('--------------------------------------------\\n')\n fl.write('GOOD PERFORMANCE STATS\\n')\n fl.write(f\"Time: {good_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {good_perf_stats.total_calls}\\n\")\n fl.write(f\"Top 20 cumulative times\\n\")\n good_perf_stats.print_stats(20)",
"def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self",
"def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()",
"def _logger(self):\r\n\r\n # Create filename for log\r\n filenameF = self._vna.getDateFormatted() + \".txt\"\r\n filenameF = \"Logs/\" + filenameF \r\n f = open(filenameF, \"a+\") # Log saved in directory named logs located in same directory as this file\r\n \r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages)):\r\n # f.write('%s\\t\\t\\t' % self._voltages[i][0])\r\n # else:\r\n for i in range(len(self._voltages)):\r\n f.write('%s\\t\\t' % self._voltages[i][0])\r\n f.write('\\n')\r\n\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages[0])):\r\n # line = \"\"\r\n # for j in range(len(self._voltages)):\r\n # line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][2*i]) + \\\r\n # str(self._intensity[j][2*i + 1]) + '\\t'\r\n # f.write(line)\r\n # f.write('\\n')\r\n # else: \r\n for i in range(len(self._voltages[0])):\r\n line = \"\"\r\n for j in range(len(self._voltages)):\r\n line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][i]) + '\\t' \r\n f.write(line)\r\n f.write('\\n')",
"def _log_performance(i, train_perf, mean_pol_perf, best_perf, results_path,\n tb_logger):\n # tb_logger.add_scalars('performance', {\n # 'train_perf': train_perf,\n # 'eval_perf': mean_pol_perf,\n # 'best_train_perf': best_perf,\n # }, global_step=i)\n\n with open(results_path, 'a') as results_file:\n if i == 0:\n headings = ['Iter', 'Sampled Pol', 'Mean Pol', 'Best (Sampled)']\n headings = ' | '.join(headings)\n results_file.write(headings + '\\n')\n print('Timestamp' + (' ' * 17) + ' | ' + headings)\n\n log = ['{:<4d}', '{:>11.2f}', '{:>8.2f}', '{:>14.2f}']\n log = ' '.join(log).format(i, train_perf, mean_pol_perf, best_perf)\n results_file.write(log + '\\n')\n print('{:<26s} {}'.format(\n timer.asctime(timer.localtime(timer.time())), log))",
"def write(self):\n f, ds = self.opendset()\n #\n # Now add the images\n #\n start_time = time.clock() # time this\n nframes = 0 # number completed\n print_every = 1; marker = \" .\";\n print('Frames written (of %s):' % self.ntowrite, end=\"\")\n for i in range(self.nfiles):\n if nframes >= self.ntowrite: break\n\n logging.debug('processing file %d of %d' % (i+1, self.nfiles))\n img_i = fabio.open(self.files[i])\n nfi = img_i.nframes\n for j in range(nfi):\n msg = '... file %d/image %d' % (i, j)\n logging.debug(msg)\n if j < self.nempty:\n logging.debug('... empty frame ... skipping')\n else:\n ds[nframes, :, :] = img_i.data\n nframes += 1\n if numpy.mod(nframes, print_every) == 0:\n print(marker, nframes, end=\"\")\n print_every *= 2\n sys.stdout.flush()\n logging.debug('... wrote image %s of %s' %\\\n (nframes, self.ntowrite))\n if nframes >= self.ntowrite:\n logging.debug('wrote last frame: stopping')\n break\n if j < nfi - 1:\n # on last frame in file, fabio will look for next file\n img_i = img_i.next()\n\n f.close()\n print(\"\\nTime to write: %f seconds \" %(time.clock()-start_time))",
"def dataRecorder(layerNum, squareDifference ,epoch):\n writeStream = open('layer.' + str(layerNum) + '.csv', 'a')\n writeStream.write(str(epoch) + \",\" + str(squareDifference) + \"\\n\")\n writeStream.close()",
"def write_frame_trajectory_file( filename, times, frames ):\n\n plot = open( filename, \"w\")\n plot.write(\"\"\"# frame trajectory file \n# Each line represents the origin and axis vectors of a moving coordinate frame.\n# A <> in the format represents an (x,y,z) triple.\n# format: timestamp <origin> <X axis> <Y axis> <Z axis>\n# units: seconds, millimeters\n\"\"\")\n\n for i,tool in enumerate( frames ):\n xaxis = tool[0:3,0] # unit X axis basis vector\n yaxis = tool[0:3,1] # unit Y axis basis vector\n zaxis = tool[0:3,2] # unit Z axis basis vector\n origin = tool[0:3,3] # origin vector, expressed in ground frame\n plot.write( \"%f \" % times[i] )\n plot.write( \"%f %f %f \" % tuple(origin) )\n plot.write( \"%f %f %f \" % tuple(xaxis) )\n plot.write( \"%f %f %f \" % tuple(yaxis) )\n plot.write( \"%f %f %f\\n\" % tuple( zaxis) )\n plot.close()\n return",
"def quickAnalyzeCards(self, filename):\n\n assert filename.endswith('.txt')\n file = open(filename, 'r')\n\n quickCards = {}\n for line in file:\n line = line.strip()\n if not line:\n continue\n\n columns = line.split('|')\n if columns[1] != 'client-fps':\n continue\n \n sample = Sample(line, columns)\n if sample.isLoading:\n continue\n\n if sample.vendorId == None or sample.deviceId == None:\n continue\n\n # Now accumulate this sample into the cards table.\n options = quickCards.setdefault((sample.vendorId, sample.deviceId), {})\n totFps, count = options.get(sample.gameOptionsCode, (0, 0))\n totFps += sample.fps\n count += 1\n options[sample.gameOptionsCode] = (totFps, count)\n\n file = open('card_performance.csv', 'w')\n\n deviceList = quickCards.keys()\n deviceList.sort()\n for deviceTuple in deviceList:\n options = quickCards[deviceTuple]\n codes = options.keys()\n codes.sort()\n for gameOptionsCode in codes:\n totFps, count = options[gameOptionsCode]\n avgFps = totFps / count\n print >> file, '%s, %s, %s, %s' % (\n self.__formatDevice(deviceTuple),\n gameOptionsCode, avgFps, count)",
"def write_telemetry(self, telemetry):\n\n _id = telemetry['id']\n _type = telemetry['type']\n\n # If there is no log open for the current ID check to see if there is an existing (closed) log file, and open it.\n if _id not in self.open_logs:\n _search_string = os.path.join(self.log_directory, \"*%s_%s*_sonde.log\" % (_id, _type))\n _existing_files = glob.glob(_search_string)\n if len(_existing_files) != 0:\n # Open the existing log file.\n _log_file_name = _existing_files[0]\n self.log_info(\"Using existing log file: %s\" % _log_file_name)\n # Create entry in open logs dictionary\n self.open_logs[_id] = {'log':open(_log_file_name,'a'), 'last_time':time.time()}\n else:\n # Create a new log file.\n _log_suffix = \"%s_%s_%s_%d_sonde.log\" % (\n datetime.datetime.utcnow().strftime(\"%Y%m%d-%H%M%S\"),\n _id,\n _type,\n int(telemetry['freq_float']*1e3) # Convert frequency to kHz\n )\n _log_file_name = os.path.join(self.log_directory, _log_suffix)\n self.log_info(\"Opening new log file: %s\" % _log_file_name)\n # Create entry in open logs dictionary\n self.open_logs[_id] = {'log':open(_log_file_name,'a'), 'last_time':time.time()} \n\n\n # Produce log file sentence.\n _log_line = self.telemetry_to_string(telemetry)\n\n # Write out to log.\n self.open_logs[_id]['log'].write(_log_line)\n self.open_logs[_id]['log'].flush()\n # Update the last_time field.\n self.open_logs[_id]['last_time'] = time.time()\n self.log_debug(\"Wrote line: %s\" % _log_line.strip())",
"def init():\n for team_id in TEAM_DICT.keys():\n create_game_logs_file(team_id)",
"def gen_fps():\n global data_src ,output_dir \n logger = TaskFileLogger(\"GenFP\")\n\n h_vars = load_hydro_var()\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for h_type,var_d in h_vars.items():\n print \"considering %s\" %h_type\n\n t_output_dir = os.path.join(output_dir,h_type)\n if not os.path.exists(t_output_dir):\n print \"creating path %s\" %t_output_dir\n os.mkdir(t_output_dir)\n logger.log(\"%s started\" %(h_type))\n\n for fname in glob.glob(data_src):\n complex_id = os.path.basename(fname).split('.')[0] \n fp_path = os.path.join(t_output_dir,complex_id + \".fp\" )\n if os.path.exists(fp_path):\n #print \"%s processed\" %complex_id\n continue\n print \"processing %s,fp saved as %s\" %(fname , fp_path )\n c = Complex(fname,hydro_dict = var_d)\n c.get_fp()\n c.write_fp_to_file(fp_path)\n\n logger.log(\"%s finished\" %(h_type))",
"def write_winner(self, player):\n try:\n with open(self.file_path, mode='a') as winner_file:\n winner_writer = csv.writer(winner_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n winner_writer.writerow([datetime.now(), player.name, player.score])\n except IOError as io:\n print('Failed to open file.\\n{}'.format(str(io)))\n self.sense.show_message(str(io), scroll_speed=0.04)"
] | [
"0.6171279",
"0.59173375",
"0.5693433",
"0.5600852",
"0.555415",
"0.5504344",
"0.54048634",
"0.53820014",
"0.5346854",
"0.53364265",
"0.53228056",
"0.52732503",
"0.52012616",
"0.5186444",
"0.517446",
"0.5160905",
"0.51368034",
"0.5127677",
"0.51274544",
"0.5126001",
"0.5123288",
"0.51130354",
"0.5108363",
"0.50895566",
"0.50774586",
"0.50729424",
"0.505699",
"0.50498915",
"0.50465024",
"0.5026243"
] | 0.72644055 | 0 |
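As a point of comparison with the writeLowPlayers record above, here is a minimal standalone sketch of the same filtering idea; the function name and the player.lowFps / player.sessions / sample.write shapes are assumptions inferred from that snippet, not part of the dataset row.

def write_low_fps_samples(players, filename):
    # Sketch only: write every sample belonging to a player whose average
    # frame rate is below 10 fps, one serialized sample per call to write().
    assert filename.endswith('.txt')
    with open(filename, 'w') as out:
        for player in players:
            if player.lowFps:
                for session in player.sessions:
                    for sample in session.samples:
                        sample.write(out)

Using a context manager here also closes the output file, which the original snippet leaves open.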
Returns total number of players whose avg fps is less than 10, total number of players whose avg fps is between 10 and 25, and total number of players whose avg fps is more than 25. | def __countPlayers(self, players):
numLow = sum(map(lambda p: p.lowFps, players))
numHigh = sum(map(lambda p: p.highFps, players))
numMed = len(players) - numLow - numHigh
return '%s, %s, %s' % (numLow, numMed, numHigh) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score",
"def get_avg_duration(persons, fps):\r\n if len(persons) > 0:\r\n total_nb_frames = 0\r\n for person in persons:\r\n total_nb_frames = total_nb_frames + person[5] - person[4] \r\n # return the average number of frames by person, divided by the FPS rate to get a value in seconds \r\n return (total_nb_frames / len(persons)) / fps \r\n else:\r\n return 0",
"def calcFrameRate(self):\n\n tot = 0\n count = 0\n for session in self.sessions:\n for sample in session.samples:\n if not sample.isLoading:\n tot += sample.fps\n count += 1\n if count:\n self.avgFps = tot / count\n self.lowFps = (self.avgFps < 10)\n self.highFps = (self.avgFps > 25)",
"def get_stats(self):\n\n win_points = 0\n lose_points = 0\n\n for username in self.bets:\n bet_for_win, points = self.bets[username]\n if bet_for_win:\n win_points += points\n else:\n lose_points += points\n\n return win_points, lose_points",
"def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def get_fps(self):\n \n return self.fps, self.average_fps",
"def get_tot_objf_and_num_frames(\n tot_scores: Tensor,\n frames_per_seq: Tensor\n ) -> Tuple[torch.Tensor, int, int]:\n mask = torch.ne(tot_scores, -math.inf)\n # finite_indexes is a tensor containing successful segment indexes, e.g.\n # [ 0 1 3 4 5 ]\n finite_indexes = torch.nonzero(mask).squeeze(1)\n ok_frames = frames_per_seq[finite_indexes].sum()\n all_frames = frames_per_seq.sum()\n return tot_scores[finite_indexes].sum(), ok_frames, all_frames",
"def get_player_stats_from_game(team, year, week):",
"def number_of_players(self) -> int:\n return self.param.number_of_players",
"def collect_stats(games: List[BaseGame], date_min = None, date_max = None):\n if not games: games = self.games\n\n under2_5 = len(list(filter(lambda g: g.is_total_under(), games)))\n under3_5 = len(list(filter(lambda g: g.is_total_under(3.5), games)))\n under1_5 = len(list(filter(lambda g: g.is_total_under(1.5), games)))\n\n home_score = sum([g.FTHG for g in games])\n away_score = sum([g.FTAG for g in games])\n\n home_wins = sum(1 for _ in filter(lambda g: g.is_home_win(), games))\n away_wins = sum(1 for _ in filter(lambda g: g.is_away_win(), games))\n draws = sum(1 for _ in filter(lambda g: g.is_draw(), games))\n\n return {\n 'under2.5': float(under2_5) / len(games),\n 'under3.5': float(under3_5) / len(games),\n 'under1.5': float(under1_5) / len(games),\n 'avgScoredHome': float(home_score) / len(games),\n 'avgScoredAway': float(away_score) / len(games),\n \"home_wins\": float(home_wins) / len(games),\n \"away_wins\": float(away_wins) / len(games),\n \"draws\": float(draws) / len(games),\n }",
"def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate",
"def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))",
"def per100_top_stat_players(game_type, stat, player_pk, excluded_pks, season_id=None):\n season = None\n if season_id:\n season = bmodels.Season.objects.get(id=season_id)\n\n if player_pk:\n players = bmodels.Player.objects.filter(pk=player_pk)\n else:\n players = bmodels.Player.objects.all().exclude(\n Q(first_name__contains=\"Team\") | Q(pk__in=excluded_pks))\n player_list = []\n for player in players:\n if season:\n result = player.statline_set.filter(game__game_type=game_type, game__date__range=(\n season.start_date, season.end_date)).aggregate(Sum(stat), Sum('off_pos'))\n else:\n result = player.statline_set.filter(\n game__game_type=game_type).aggregate(Sum(stat), Sum('off_pos'))\n if result['off_pos__sum'] and result['off_pos__sum'] is not 0:\n percentage = (result[stat + '__sum'] /\n result['off_pos__sum']) * 100\n else:\n percentage = 0.0\n player_list.append((player.first_name, percentage))\n return sorted(player_list, key=lambda x: x[1], reverse=True)",
"def find_pcts(p1, p2, start_b = [], iter = 10000):\n win_record = []\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, p1+p2+start_b)\n win_record.append(_who_wins(start_b + b2, p1, p2, printout = False))\n return [win_record.count(1) / float(len(win_record)), \n win_record.count(2) / float(len(win_record))\n ]",
"def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)",
"def get_total_players(self):\n\n self._logger.debug(\"Getting total player count\")\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT COUNT(player_id) FROM player\")\n count = cursor.fetchone()[0]\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return count",
"def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number",
"def get_player_count(url):\r\n page = urlopen(url)\r\n\r\n soup = BeautifulSoup(page, features=\"html.parser\")\r\n\r\n table_divs = soup.findAll('td', attrs={'class': 'ranking-page-table__column ranking-page-table__column--dimmed'})\r\n\r\n sum = 0\r\n for i in range(len(table_divs)//5):\r\n sum += int((table_divs[i*5].text).strip().replace(',', ''))\r\n return sum",
"def countPlayers():\n DB = dbc()\n c = DB.cursor()\n c.execute('SELECT COUNT(*) from players WHERE active = 1')\n total = c.fetchone()\n DB.close()\n return int(total[0])",
"def calc_temp_play_count (total_played, courts, timeslots, max_teams):\n \n temp_play_count = [total_played[i] + courts.count(i) + sum([time.count(i) for time in timeslots]) for i in range(max_teams)]\n return temp_play_count",
"def counts_per_players(cls, date_scope):\n return cls._counts_per(\"player_name\", date_scope)",
"def find_pcts_multi(P, start_b = [], iter = 10000):\n assert len(P) >= 2\n wins_per_player = [0] * len(P)\n all_hole = reduce(lambda x,y: x+y, P)\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, all_hole+start_b)\n s = [evaluator.evaluate(start_b+b2, h) for h in P]\n for i, e in enumerate(s):\n if e == min(s):\n wins_per_player[i] += 1\n return [float(x) / sum(wins_per_player) for x in wins_per_player]",
"def counts_per_teams(cls, date_scope):\n return cls._counts_per(\"player_team\", date_scope)",
"def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games",
"def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def playerStandings():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"select player_id, player_name, wins, (wins + losses) as total_played from normalized_wins_and_losses order by wins desc, total_played desc;\")\n player_standings = db_cursor.fetchall()\n db_conn.commit()\n db_conn.close()\n return player_standings",
"def playerStandings(matchid):\n DB = dbc()\n c = DB.cursor()\n c.execute('SELECT matches.playerid, name, win, total_matches, \\\n score, played, bye \\\n FROM matches JOIN players \\\n ON matches.playerid = players.playerid \\\n WHERE matches.matchid = %s \\\n ORDER BY matches.score DESC', (matchid,))\n player_stats = c.fetchall()\n DB.close()\n return player_stats"
] | [
"0.60739285",
"0.59108806",
"0.5869163",
"0.5824232",
"0.5684692",
"0.56706446",
"0.56420135",
"0.55896056",
"0.557531",
"0.55514854",
"0.5549805",
"0.551023",
"0.5500134",
"0.54782814",
"0.5471912",
"0.544629",
"0.5406683",
"0.5397825",
"0.5371873",
"0.53278965",
"0.5309981",
"0.530516",
"0.5262477",
"0.5261088",
"0.5254532",
"0.5244396",
"0.5244099",
"0.5241984",
"0.5240486",
"0.5233752"
] | 0.63204354 | 0 |
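For comparison with the __countPlayers record above, a hedged sketch of the same bucketing logic with explicit counters; it assumes each player object carries boolean lowFps and highFps flags derived from its average frame rate (below 10 fps and above 25 fps respectively).

def count_players_by_fps(players):
    # Sketch only: bucket players into low / medium / high average-fps counts.
    num_low = sum(1 for p in players if p.lowFps)
    num_high = sum(1 for p in players if p.highFps)
    num_med = len(players) - num_low - num_high
    return '%s, %s, %s' % (num_low, num_med, num_high)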
Reads PCIList, which contains a list of the known PCI devices by vendor ID/device ID. See | def readPCIList(self):
self.vendors = {}
self.devices = {}
vendorId = None
vendorName = None
for line in PCIList.split('\n'):
stripped = line.lstrip()
if not stripped or stripped[0] == ';':
continue
if line[0] != '\t':
# A vendor line.
vendorId, vendorName = line.split('\t', 1)
vendorId = int(vendorId, 16)
self.vendors[vendorId] = vendorName.strip()
else:
# A device line, continuing the previous vendor.
deviceId, deviceName = line[1:].split('\t', 1)
deviceId = deviceId.split(' ', 1)[0]
try:
deviceId = int(deviceId, 16)
except:
deviceId = None
self.devices[(vendorId, deviceId)] = deviceName.strip()
self.addExtraDevices() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_pci_devices(self):\n\n system = self._get_host_details()\n if ('links' in system['Oem']['Hp'] and\n 'PCIDevices' in system['Oem']['Hp']['links']):\n # Get the PCI URI and Settings\n pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href']\n status, headers, pci_device_list = self._rest_get(pci_uri)\n\n if status >= 300:\n msg = self._get_extended_error(pci_device_list)\n raise exception.IloError(msg)\n\n return pci_device_list\n\n else:\n msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp'\n ' does not exist')\n raise exception.IloCommandNotSupportedError(msg)",
"def test_get_pci_device_list(self):\n pass",
"def retrieve_pci_addresses(self):\n debug('Retrieve PCI addresses...')\n try:\n lshw_json = self.run_ssh('lshw -json').stdout\n except SSHError:\n fatal('Cannot connect to node:', self.ip_address)\n lshw = json.loads(lshw_json)\n pci_addresses = []\n for component in lshw[\"children\"][0][\"children\"]:\n if component[\"class\"] == \"bridge\":\n for subsystem in component[\"children\"]:\n if subsystem[\"class\"] == \"network\":\n index = int(subsystem[\"id\"].split(':')[1])\n pci_addresses.append((index, subsystem[\"businfo\"]))\n pci_addresses = [v.strip('pci@') for k, v in sorted(pci_addresses)]\n # iterate over interfaces and set pci address\n i = 0\n for interface in self.interfaces:\n self.interfaces[interface]['pci_address'] = pci_addresses[i]\n i += 1\n if i >= len(pci_addresses):\n break",
"def enumerate_devices(vendor_id: int = 0x2C97) -> List[bytes]:\n devices: List[bytes] = []\n\n for hid_device in hid.enumerate(vendor_id, 0):\n if (hid_device.get(\"interface_number\") == 0 or\n # MacOS specific\n hid_device.get(\"usage_page\") == 0xffa0):\n devices.append(hid_device[\"path\"])\n\n assert len(devices) != 0, (\n f\"Can't find Ledger device with vendor_id {hex(vendor_id)}\")\n\n return devices",
"def test_get_pci_coprocessor_card_list(self):\n pass",
"def _get_gpu_pci_devices(self):\n pci_device_list = self._get_pci_devices()\n\n gpu_list = []\n items = pci_device_list['Items']\n for item in items:\n if item['ClassCode'] in CLASSCODE_FOR_GPU_DEVICES:\n if item['SubclassCode'] in SUBCLASSCODE_FOR_GPU_DEVICES:\n gpu_list.append(item)\n return gpu_list",
"def get_devices_lsscsi(self):\n\n try:\n message = \"Find SCSI Devices\"\n if self._include_enclosures:\n command = \"lsscsi --generic --transport | egrep 'disk|0x14|enclo'\"\n else:\n command = \"lsscsi --generic --transport | fgrep 'disk|0x14'\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n #\n # Format:\n # $ lsscsi --generic --transport\n # [0] [1] [2] [3] [4]\n # [0:0:0:0] disk sas:0x5000cca25103b471 /dev/sda /dev/sg0 \n # [0:0:1:0] disk sas:0x5000cca251029301 /dev/sdb /dev/sg1 \n # ...\n # [0:0:14:0] enclosu sas:0x5001636001caa0bd - /dev/sg14\n # [7:0:0:0] cd/dvd usb: 1-1.3:1.2 /dev/sr0 /dev/sg15\n #\n # Special Case:\n # Handle lines without a transport (spaces only). (screen scrapping danger)\n # [0:0:10:0] enclosu sas:0x50030480091d71fd - /dev/sg10\n # [1:0:0:0] disk <spaces> /dev/sdk /dev/sg11 <- INTEL disk!\n #\n # Another SNAFU! (and why I hate screen scrapping!!!)\n # [15:0:53597:0]disk sas:0x5000cca23b359649 /dev/sdg /dev/sg6 \n # [15:0:53598:0]disk sas:0x5000cca23b0c0a99 /dev/sdh /dev/sg7 \n # [15:0:53599:0]disk sas:0x5000cca23b0b7531 /dev/sdi /dev/sg8 \n # ...\n # [15:0:53686:0]enclosu sas:0x5000ccab040001bc - /dev/sg165\n # [15:0:53766:0]enclosu sas:0x5000ccab040001fc - /dev/sg144\n #\n # Evidently, the author of lsscsi did not think of consistent output! ;(\n #\n for line in pdata['stdout'].splitlines():\n dinfo = line.split()\n device = dict()\n if len(dinfo) < 5:\n m = re.search('(?P<device>disk|\\(0x14\\)|enclosu)', dinfo[0])\n if m:\n device['Device Type'] = m.group('device')\n sas_index = 1\n dev_index = 2\n sg_index = 3\n else:\n continue\n else:\n device['Device Type'] = dinfo[1]\n sas_index = 2\n dev_index = 3\n sg_index = 4\n\n # lsscsi does not understand 'Host Managed' device type.\n if '0x14' in device['Device Type']:\n device['Device Type'] = 'disk'\n\n # Parse remaining information.\n if 'sas:' in dinfo[sas_index]:\n device['SAS Address'] = dinfo[sas_index][4:]\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: Enclosure has no driver, so reports '-' for name.\n if '/dev/' in dinfo[dev_index]:\n if self._drives and not dinfo[dev_index] in self._drives:\n continue\n if self._exclude and dinfo[dev_index] in self._exclude:\n continue\n device['Linux Device Name'] = dinfo[dev_index]\n else:\n device['Linux Device Name'] = \"\"\n if '/dev/sg' in dinfo[sg_index]:\n device['SCSI Device Name'] = dinfo[sg_index]\n else:\n device['SCSI Device Name'] = \"\"\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc",
"def list_devices(cls):\n # get all matching devices\n return usb.core.find(\n find_all=True,\n custom_match=lambda dev: (\n dev.idVendor == cls.vendor_id and dev.idProduct in cls.product_ids\n ),\n )",
"def get_block_device_list(vars = {}, log = sys.stderr):\n\n # make sure we can access to the files/directories in /proc\n if not os.access(PROC_PARTITIONS_PATH, os.F_OK):\n return None\n\n # table with valid scsi/sata/ide/raid block device names\n valid_blk_names = {}\n # add in valid sd and hd block device names\n for blk_prefix in ('sd','hd'):\n for blk_num in map (\\\n lambda x: chr(x), range(ord('a'),ord('z')+1)):\n devicename=\"%s%c\" % (blk_prefix, blk_num)\n valid_blk_names[devicename]=None\n\n # add in valid scsi raid block device names\n for M in range(0,1+1):\n for N in range(0,7+1):\n devicename = \"cciss/c%dd%d\" % (M,N)\n valid_blk_names[devicename]=None\n\n for devicename in valid_blk_names.keys():\n # devfs under 2.4 (old boot cds) used to list partitions\n # in a format such as scsi/host0/bus0/target0/lun0/disc\n # and /dev/sda, etc. were just symlinks\n try:\n devfsname= os.readlink( \"/dev/%s\" % devicename )\n valid_blk_names[devfsname]=None\n except OSError:\n pass\n\n # only do this once every system boot\n if not os.access(DEVICES_SCANNED_FLAG, os.R_OK):\n\n # this is ugly. under devfs, device\n # entries in /dev/scsi/.. and /dev/ide/...\n # don't show up until you attempt to read\n # from the associated device at /dev (/dev/sda).\n # so, lets run sfdisk -l (list partitions) against\n # most possible block devices, that way they show\n # up when it comes time to do the install.\n devicenames = valid_blk_names.keys()\n devicenames.sort()\n for devicename in devicenames:\n os.system( \"sfdisk -l /dev/%s > /dev/null 2>&1\" % devicename )\n\n # touch file\n fb = open(DEVICES_SCANNED_FLAG,\"w\")\n fb.close()\n\n devicelist= {}\n\n partitions_file= file(PROC_PARTITIONS_PATH,\"r\")\n line_count= 0\n for line in partitions_file:\n line_count= line_count + 1\n\n # skip the first two lines always\n if line_count < 2:\n continue\n\n parts= string.split(line)\n\n if len(parts) < 4:\n continue\n\n device= parts[3]\n\n # skip and ignore any partitions\n if not valid_blk_names.has_key(device):\n continue\n\n try:\n major= int(parts[0])\n minor= int(parts[1])\n blocks= int(parts[2])\n except ValueError, err:\n continue\n\n gb_size= blocks/BLOCKS_PER_GB\n\n # check to see if the blk device is readonly\n try:\n # can we write to it?\n dev_name= \"/dev/%s\" % device\n fb = open(dev_name,\"w\")\n fb.close()\n readonly=False\n except IOError, e:\n # check if EROFS errno\n if errno.errorcode.get(e.errno,None) == 'EROFS':\n readonly=True\n else:\n # got some other errno, pretend device is readonly\n readonly=True\n\n devicelist[dev_name]= {'major': major,'minor': minor,'blocks': blocks, 'size': gb_size, 'readonly': readonly}\n return devicelist",
"def get_devices(self): \n devices = []\n \n # get all the keys from the dictionary\n keys = self.SCPI_Data.keys()\n \n # extract the device specifier\n dev_keys = [key.split(':')[0] for key in keys]\n \n # iterate through the devices\n for key in dev_keys:\n if (key not in devices) and (key != 'SUP'):\n # this is a unique device, add it to the list\n devices = devices + [key]\n # end if\n # end for\n \n devices = devices + ['SIM']\n \n # replace the GPS if present with its longer name\n devices = ['GPSRM' if device == 'GPS' else device \n for device in devices]\n return devices",
"def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list",
"def getDevices():\n devices = create_string_buffer(BUF_SIZE)\n daqmx(\n dll.DAQmxGetSysDevNames,\n (\n devices,\n BUF_SIZE\n )\n )\n return parseStringList(devices.value)",
"def detect(self):\n # Get PCI devices\n lines = subprocess.check_output([\"lspci\", \"-n\"]).decode().split(\"\\n\")\n for line in lines:\n if len(line) > 0:\n class_id = \"0x{0}\".format(line.split()[1].rstrip(\":\")[0:2])\n if class_id == self.class_id:\n dev = line.split()[2].split(\":\")\n vendor_id = \"0x{0}\".format(dev[0])\n product_id = \"0x{0}\".format(dev[1])\n if vendor_id == self.vendor_id and product_id in self.devices:\n return True\n return False",
"def get_devices():\n try:\n with open(DEVICES, 'r') as f:\n data = json.load(f)['devices']\n except (IOError, ValueError) as err:\n raise SwiftlmCheckFailure('Failure opening %s: %s' % (DEVICES, err))\n\n devices = []\n for d in data:\n l = d.get('label', LABEL_CHECK_DISABLED)\n devices.append(Device(\n device=d['name'],\n mount=MOUNT_PATH+d['swift_drive_name'],\n label=l\n ))\n\n return devices",
"def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )",
"def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()",
"def get_devices(self):\n\n \"\"\"\n # Note: This code is no longer required with the latest spt updates.\n # But that said, leaving for now so I don't risk breaking folks!\n if not self._use_lsscsi:\n message = \"Find Number of IOM's\"\n command = \"lsscsi | fgrep enclo | egrep 'HGST|WDC' | wc -l\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n ioms = (int)(pdata['stdout'].strip())\n if ioms > 1:\n self._use_lsscsi = True\n if not self._use_lsscsi and os.path.exists('/etc/multipath.conf'):\n self._use_lsscsi = True\n \"\"\"\n # Allow above logic or options to override lsscsi vs. spt usage.\n if not self._use_lsscsi or self._force_spt:\n self.get_devices_spt()\n else:\n self.get_devices_lsscsi()\n return",
"def list_devices(self):\n return [x for x in self.devices.keys()]",
"def _ListUsbDisks(self):\n disk_list = []\n for disk in glob.glob('/sys/block/sd*'):\n with open(disk + '/removable', 'r') as fd:\n if int(fd.readline()) == 1:\n device = '/dev/%s' % disk.split('/')[-1]\n manuf = self._GetDiskInfo(disk, 'manufacturer')\n product = self._GetDiskInfo(disk, 'product')\n capacity = self._GetDiskCapacity(device)\n if capacity:\n desc = '%s: %s %s %d GB' % (device, manuf, product, capacity)\n disk_list.append([device, manuf, product, capacity, desc])\n return disk_list",
"def find_all(cls, vps: Sequence[Tuple[int, int]],\n nocache: bool = False) -> \\\n List[Tuple[UsbDeviceDescriptor, int]]:\n cls.Lock.acquire()\n try:\n devs = set()\n for vid, pid in vps:\n # TODO optimize useless loops\n devs.update(UsbTools._find_devices(vid, pid, nocache))\n devices = set()\n for dev in devs:\n ifcount = max([cfg.bNumInterfaces for cfg in dev])\n # TODO: handle / is serial number strings\n sernum = UsbTools.get_string(dev, dev.iSerialNumber)\n description = UsbTools.get_string(dev, dev.iProduct)\n descriptor = UsbDeviceDescriptor(dev.idVendor, dev.idProduct,\n dev.bus, dev.address,\n sernum, None, description)\n devices.add((descriptor, ifcount))\n return list(devices)\n finally:\n cls.Lock.release()",
"def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices",
"def scan_chip_ble_devices(devCtrl):\n devices = []\n bleMgr = BleManager(devCtrl)\n bleMgr.scan(\"-t 10\")\n\n for device in bleMgr.peripheral_list:\n devIdInfo = bleMgr.get_peripheral_devIdInfo(device)\n if devIdInfo:\n devInfo = devIdInfo.__dict__\n devInfo[\"name\"] = device.Name\n devices.append(devInfo)\n\n return devices",
"def get_generic_pci_devices_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetGenericPciDevicesCount', self.handle)",
"def scan():\n debug(\"CBA4.scan()\")\n num = MpOrLibUsb.get_device_count()\n devices = []\n i = 0\n while i < num:\n cba = CBA4(interface=MpOrLibUsb(i))\n i += 1\n sn = cba.get_serial_number()\n if sn:\n devices.append(sn)\n cba.close()\n #end loop\n return devices\n #end scan()",
"def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices",
"def devices():\n\n ret = {}\n\n p = subprocess.Popen([\"lsusb\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = p.stdout.read()\n err = p.stderr.read()\n\n if err:\n raise salt.exceptions.CommandExecutionError(\"Failed to run lsusb: {}\".format(err))\n\n # Ensure pattern is compiled\n global pattern\n if not pattern:\n log.info(\"Compiling regex pattern {}\".format(LSUSB_OUTPUT_REGEX))\n pattern = re.compile(LSUSB_OUTPUT_REGEX)\n\n # Parse output\n devices = []\n for dev_line in out.split(\"\\n\"):\n if dev_line == \"\":\n # empty line, skip\n continue\n\n match = pattern.match(dev_line)\n if not match:\n log.warning(\"Couldn't match line {}\".format(dev_line))\n continue\n\n devices.append({\n \"bus\": match.group(\"bus\"),\n \"device\": match.group(\"device\"),\n \"vendor\": match.group(\"vendor\"),\n \"product\": match.group(\"product\"),\n \"name\": match.group(\"name\"),\n })\n\n ret[\"values\"] = devices\n return ret",
"def _get_usb_devices(self):\n\n # Get every device on the bus\n device_re = re.compile(\"Bus\\s+(?P<bus>\\d+)\\s+Device\\s+(?P<device>\\d+).+ID\\s(?P<id>\\w+:\\w+)\\s(?P<tag>.+)$\", re.I)\n df = subprocess.check_output(\"lsusb\")\n devices = []\n\n for i in df.decode().split('\\n'):\n if i:\n info = device_re.match(i)\n if info:\n dinfo = info.groupdict()\n dinfo['device'] = '/dev/bus/usb/%s/%s' % (dinfo.pop('bus'), dinfo.pop('device'))\n devices.append(dinfo)\n\n # Filter only for the STLink devices\n st_link_devices = []\n for device in devices:\n if self.STLINK_VENDOR_ID in device['id']:\n st_link_devices.append(device)\n\n self.usb_devices = st_link_devices",
"def _find_devices(cls, vendor: int, product: int,\n nocache: bool = False) -> Set[UsbDevice]:\n backend = cls._load_backend()\n vidpid = (vendor, product)\n if nocache or (vidpid not in cls.UsbDevices):\n # not freed until Python runtime completion\n # enumerate_devices returns a generator, so back up the\n # generated device into a list. To save memory, we only\n # back up the supported devices\n devs = set()\n vpdict = {} # Dict[int, List[int]]\n vpdict.setdefault(vendor, [])\n vpdict[vendor].append(product)\n for dev in backend.enumerate_devices():\n device = UsbDevice(dev, backend)\n if device.idVendor in vpdict:\n products = vpdict[device.idVendor]\n if products and (device.idProduct not in products):\n continue\n devs.add(device)\n if sys.platform == 'win32':\n # ugly kludge for a boring OS:\n # on Windows, the USB stack may enumerate the very same\n # devices several times: a real device with N interface\n # appears also as N device with as single interface.\n # We only keep the \"device\" that declares the most\n # interface count and discard the \"virtual\" ones.\n filtered_devs = dict()\n for dev in devs:\n vid = dev.idVendor\n pid = dev.idProduct\n ifc = max([cfg.bNumInterfaces for cfg in dev])\n k = (vid, pid, dev.bus, dev.address)\n if k not in filtered_devs:\n filtered_devs[k] = dev\n else:\n fdev = filtered_devs[k]\n fifc = max([cfg.bNumInterfaces for cfg in fdev])\n if fifc < ifc:\n filtered_devs[k] = dev\n devs = set(filtered_devs.values())\n cls.UsbDevices[vidpid] = devs\n return cls.UsbDevices[vidpid]",
"def find_valons_with_dmesg():\n \n try:\n dmesg = check_output('dmesg | grep \"FT232RL\"',shell=True)\n except subprocess.CalledProcessError:\n # grep failed so no ports found\n return []\n lines = dmesg.split('\\n')\n lines = [x for x in lines if len(x) > 0]\n m = usbre.search(lines[-1])\n usbport = m.group('port')\n try:\n dmesg = check_output(('dmesg | grep \"usb %s.*now attached to\"' % usbport),shell=True)\n except subprocess.CalledProcessError:\n # grep failed so no ports found\n return []\n lines = dmesg.split('\\n')\n lines = [x for x in lines if len(x) > 0]\n lines = lines[-1:]\n ports = []\n for ln in lines[::-1]:\n idx = ln.find('ttyUSB')\n if idx >= 0:\n port = '/dev/' + ln[idx:]\n if port not in ports:\n ports.append(port)\n return ports",
"def get_device_map():\n ret = []\n vlist = subprocess.check_output(['ceph-volume', 'lvm', 'list',\n '--format=json'])\n for osd_id, data in json.loads(vlist.decode('utf8')).items():\n osd_id = normalize_osd_id(osd_id)\n for elem in data:\n for device in elem['devices']:\n ret.append({'id': osd_id, 'path': device})\n return ret"
] | [
"0.6974894",
"0.6744383",
"0.6441267",
"0.59418535",
"0.588758",
"0.5535207",
"0.55204946",
"0.5497702",
"0.5481925",
"0.5473312",
"0.54184794",
"0.5417257",
"0.54155695",
"0.5406843",
"0.5379454",
"0.53526473",
"0.5350712",
"0.53472716",
"0.53472596",
"0.53421974",
"0.5328102",
"0.53151417",
"0.52709043",
"0.5216705",
"0.52070343",
"0.5192961",
"0.5187462",
"0.5186628",
"0.5179187",
"0.51567006"
] | 0.8187731 | 0 |