query stringlengths 9–9.05k | document stringlengths 10–222k | metadata dict | negatives sequencelengths 30 | negative_scores sequencelengths 30 | document_score stringlengths 4–10 | document_rank stringclasses 2 values |
---|---|---|---|---|---|---|
Provide answer in the return value. This function returns one tuple describing your observations | def answer(self):
return (1, 4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def answer(self):\n\n return (1, 2, 1, 2)",
"def answer(self):\n\n return (1, 1, 2, 4)",
"def task9_answer():\n\n return (1, 3, 4, 5)",
"def results(self):\n\n\t\tresults = {'answer':42}\n\n\t\treturn results",
"def __get_data(self, universe:list, factor_pool:list, ret_label:str, start_date, end_date)->tuple:\n X = None\n y = None\n return (X, y)",
"def get_question_and_answer():\n question = random.randint(MIN_VALUE, MAX_VALUE)\n correct_answer = 'yes' if is_even(question) else 'no'\n return question, correct_answer",
"def get_round_data() -> tuple:\n difference = randint(MIN_DIFF, MAX_DIFF)\n start_num = randint(MIN_START, MAX_START)\n progression = make_progression(start_num, difference)\n\n question_index = randint(0, LENGTH_OF_PROGRESSION - 1)\n answer = str(start_num + difference * question_index)\n\n progression = list(map(str, progression))\n progression[question_index] = LOSE_ELEMENT\n quest = SEPARATOR.join(progression)\n\n return quest, answer",
"def get_answers(self):\r\n pass",
"def generate_answer(self):\n\n for model in Response.get_all_models():\n match = model.matches(self.request.question, self.request.element)\n if match: return model.solve(self.request.question, self.request.element)\n\n return \"I am unable to answer this question. If you think I should be able to answer\\n\" + \\\n \"this, please submit an issue or pull request at:\\n\" + \\\n \"https://github.com/jackcook/the-scientist/compare\"",
"def get_predictions():\n\n print(\"OK1\");\n print(\"OK2\");\n return;",
"def get_response(self) -> Tuple[int, int]:\n pass",
"def main(data: List[str]) -> Tuple[int, int]:\n data = [int(number) for number in data[0].split(\",\")]\n\n answer_one = part_one(data)\n answer_two = part_two(data)\n return answer_one, answer_two",
"def tupleize_answers(self, answer, var_dict_list):\r\n _ = self.capa_system.i18n.ugettext\r\n\r\n out = []\r\n for var_dict in var_dict_list:\r\n try:\r\n out.append(evaluator(\r\n var_dict,\r\n dict(),\r\n answer,\r\n case_sensitive=self.case_sensitive,\r\n ))\r\n except UndefinedVariable as err:\r\n log.debug(\r\n 'formularesponse: undefined variable in formula=%s',\r\n cgi.escape(answer)\r\n )\r\n raise StudentInputError(\r\n _(\"Invalid input: {bad_input} not permitted in answer.\").format(bad_input=err.message)\r\n )\r\n except ValueError as err:\r\n if 'factorial' in err.message:\r\n # This is thrown when fact() or factorial() is used in a formularesponse answer\r\n # that tests on negative and/or non-integer inputs\r\n # err.message will be: `factorial() only accepts integral values` or\r\n # `factorial() not defined for negative values`\r\n log.debug(\r\n ('formularesponse: factorial function used in response '\r\n 'that tests negative and/or non-integer inputs. '\r\n 'Provided answer was: %s'),\r\n cgi.escape(answer)\r\n )\r\n raise StudentInputError(\r\n _(\"factorial function not permitted in answer \"\r\n \"for this problem. Provided answer was: \"\r\n \"{bad_input}\").format(bad_input=cgi.escape(answer))\r\n )\r\n # If non-factorial related ValueError thrown, handle it the same as any other Exception\r\n log.debug('formularesponse: error %s in formula', err)\r\n raise StudentInputError(\r\n _(\"Invalid input: Could not parse '{bad_input}' as a formula.\").format(\r\n bad_input=cgi.escape(answer)\r\n )\r\n )\r\n except Exception as err:\r\n # traceback.print_exc()\r\n log.debug('formularesponse: error %s in formula', err)\r\n raise StudentInputError(\r\n _(\"Invalid input: Could not parse '{bad_input}' as a formula\").format(\r\n bad_input=cgi.escape(answer)\r\n )\r\n )\r\n return out",
"def get_answer(self, problem):\t\n\t\tprint problem.statement\n\t\tresp = self.get_float()\n\t\tfor i in range(3):\n\t\t\tif resp == problem.answer: break\n\t\t\tprint \"Nope, try again.\"\n\t\t\tif i > 1:\n\t\t\t\t#this is the hint\n\t\t\t\tprint \"Remember, you are dividing the recipe by {0}.\".format(problem.ratio)\n\t\t\tresp = self.get_float()\n\t\tif resp != problem.answer:\n\t\t\tself.fail_response(problem)\n\t\telse:\n\t\t\tprint \"Right!\"",
"def get_result(self):\r\n return (self.x, self.lambd)",
"def solution(self):\n return [(\"the\", 1561900)] * 100",
"def __call__(self, inputs, return_attention=False):\n intermediates = self._evaluate(inputs)\n result = [intermediates.output]\n\n if return_attention:\n result.append(intermediates.attention)\n\n if len(result) == 1:\n return result[0]\n\n return tuple(result)",
"def response(cv, cv_fit, dic, query):\n index, accuracy = solve(cv, cv_fit, query)\n book = list(dic)\n answer = dic[book[index]]\n if check_accuracy(accuracy):\n print(\"Sorry, I don’t understand what you mean, could you please describe it more clearly\")\n elif accuracy < 0.6:\n print(f\"Do you ask {book[index]}? I only know {answer[0]}\")\n elif len(answer) > 1:\n print(answer[random.randint(0, len(answer) - 1)])\n else:\n print(answer[0])",
"def retrieve_result(self, x, y):\n pass",
"def show_answer(self,values):\r\n values_converted = []\r\n for entry in values:\r\n new = int(entry.get())\r\n # print(new)\r\n values_converted.append(new)\r\n print(values_converted)\r\n antwoord = self.root.huidig_model.model.predict([values_converted])\r\n # for i in values_converted:\r\n # print(i)\r\n # print(antwoord)\r\n return messagebox.showinfo('Voorspelling','Voorspelling op basis van ingevoerde waarde(s) : {}'.format(antwoord))",
"def score_student_answer(self,question_type,question_data,student_answer):\n\t\treturn (0.0,\"\")",
"def get_result(self):\r\n return (self.x, self.lambd, self.z)",
"def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]",
"def solution(self):\n return [(\"the\", 1579644)] * 100",
"def get_answers(self):\r\n return self.answer_values",
"def choose_question():\r\n random_index_question = randint(1, question.num_question + 1)\r\n random_question = question.question[random_index_question]\r\n correct_answer = question.answer[random_index_question]\r\n return random_question, correct_answer",
"def get_values():\n a1 = insert_data(\"a1\")\n r = insert_data(\"r\")\n n = insert_data(\"n\")\n return a1, r, n",
"def show_answers(queries, answers, aggregation_predictions_string):\n\n ans_list = []\n for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):\n print(query)\n print(answer,type(answer))\n print(predicted_agg)\n answer = [i.strip() for i in answer.split(',')]\n print(answer)\n if (len(answer) == 1):\n if (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n\n if (len(answer) > 1):\n if (predicted_agg == 'SUM'):\n try:\n answer = sum([float(i) for i in answer])\n except ValueError:\n answer = predicted_agg\n elif (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n elif (predicted_agg == 'AVERAGE'):\n answer = sum([float(i) for i in answer]) / len([i for i in answer])\n elif (predicted_agg == 'NONE'):\n answer = answer\n else:\n answer = 'None'\n # if predicted_agg == \"NONE\":\n # print(\"Predicted answer: \" + answer)\n # else:\n # print(\"Predicted answer: \" + predicted_agg + \" > \" + answer)\n\n ans_list.append(answer)\n\n return ans_list",
"def compare_result(res, label):\n infer_res = []\n infer_conf = []\n gt_conf = []\n size = len(label)\n for i in range(size):\n infer_res.append(np.where(res[i] == np.max(res[i]))[3][0])\n infer_conf.append(res[i][0][0][0][infer_res[i]])\n gt_conf.append(res[i][0][0][0][label[i]])\n return infer_res, infer_conf, gt_conf",
"def test_get_results_returns_list_of_tuples(self):\n sim = ss.Simulation()\n sim.run_simulation(5)\n assert type(sim.get_results()) == list\n assert type(sim.get_results()[0]) == tuple"
] | [
"0.6732488",
"0.6670362",
"0.6554476",
"0.62387717",
"0.60515916",
"0.6027523",
"0.60140425",
"0.5975857",
"0.59751713",
"0.5964573",
"0.5942693",
"0.58566",
"0.5853647",
"0.58326477",
"0.5828065",
"0.58170116",
"0.5795604",
"0.5785389",
"0.5769348",
"0.5767244",
"0.57561594",
"0.5721435",
"0.569702",
"0.56867486",
"0.5642359",
"0.56279016",
"0.56258744",
"0.5618655",
"0.55751383",
"0.55579525"
] | 0.6743457 | 0 |
Returns the added item. | def added_item(self, uid: str) -> object:
return self._input[uid] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, item):",
"def get_item(self):\n return self.item",
"def get_item(self):\n return self.item",
"def get_item(self):\n raise NotImplementedError",
"def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")",
"def push(self, item):\n pass",
"def add_item(self, item: OrderItem):\n return self.order_items.append(item)",
"def append(self, item):\n try:\n i = self.index(item)\n return self[i]\n except ValueError:\n list.append(self, item)\n return item",
"def GetItem(self):\r\n \r\n return self._item",
"def getitem(self):\n self.inventory += 1",
"def add(self, item: Any) -> None:\n pass",
"def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n self.list.InsertItems([newItem],index)",
"def add_item(self):\n item = models.Item(item_name=self.test_item,\n list_id=1,\n description=self.test_item_desc)\n item.add()",
"def Item(self) -> object:",
"def Item(self) -> object:",
"def getItem(self):\n return self.getItem(0)",
"def add_item(self,itm):\n itms = self.get_items_list()\n if len(itms) != self.items: self.items = len(itms)\n if self.items >= self.rooms * MAX_ITEMS_PER_ROOM:\n return None\n k = itm\n x = 0\n while k in itms:\n x += 1\n k = '%s_%d'%(itm,x)\n itm_rec = SuiGallery.make_item_record(itm)\n itm_rec['z'] = self.items;\n itms[k] = itm_rec\n self.put_items_list(itms)\n self.items += 1\n return {'items':self.items,'k':k,'id':itm,'x':itm_rec['x'],'y':itm_rec['y'],'z':itm_rec['z']}",
"def addItem(*args):",
"def addItem(*args):",
"def addItem(*args):",
"def append (self, item):\n pass",
"def push(self, item):\n\t\tself.items.append(item)",
"def add_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n text, ok = QInputDialog.getText(self, \"Appending new data\", \"Data:\")\n if ok:\n parent_id = item.data().get_id()\n data = Data(text, parent_id)\n data_node = DataNode(instance=data)\n self.data_cache.append(data_node)\n self._data_controller.update_node_hierarchy(self.data_cache, remove_from_list=True)\n self.sync_tree_cache()",
"def add_item(self, item):\n self.items.append(item)",
"def push(self, new_item):\n self.items.append(new_item)",
"def push(self,item):\n self.items.append(item)",
"def __append_to_item_list(self):\n Item.get_item_list().append(self)",
"def add(self, item):\n self.contents.append(item)",
"def Push(self, item):\n self.list.append(item)",
"def append(self, item):\n self.items.append(item)"
] | [
"0.74160415",
"0.7138136",
"0.7138136",
"0.706266",
"0.6872149",
"0.66928333",
"0.66887194",
"0.6687491",
"0.66214705",
"0.65819716",
"0.6554912",
"0.6549853",
"0.6518295",
"0.6506271",
"0.6506271",
"0.648759",
"0.6485234",
"0.64600724",
"0.64600724",
"0.64600724",
"0.64447075",
"0.64264095",
"0.6423578",
"0.6422313",
"0.6406756",
"0.63974285",
"0.6367496",
"0.636339",
"0.6341521",
"0.6331004"
] | 0.7867647 | 0 |
Get item result by uid. | def get(self, uid: str) -> object:
return self._results[uid] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_item_with_id(self, uid):\n for item in self.get_items():\n if item.id == uid:\n return item\n\n return None",
"def get_by_uid(self, uid, **kwargs):\n items = self.search(UID=uid, **kwargs).get('items')\n\n if not items:\n raise LookupError(\"Did not find object with UID {}\".format(uid))\n\n if len(items) > 1:\n raise LookupError(\"Found multiple objects with the same UID\")\n\n return items[0]",
"def query_by_uid(self, uid):\n return self.query(f\"(uid={uid})\")",
"async def get(self, request, uid):\n return await super(User, self).get_item(request.app.pool, 'user', uid)",
"def get_by_uid(cls, uid, user=None):\n\n raise NotImplementedError()",
"def getObjectByUid( self, uid, implements=() ):\n if uid and type(uid) is StringType:\n kw = {}\n kw['nd_uid'] = uid\n if implements:\n if not type(implements) is StringType:\n kw['implements'] = tuple(implements)\n else:\n kw['implements'] = implements\n results = self.searchResults( **kw )\n if results:\n try: return results[0].getObject()\n except:\n logger.error('getObjectByUid uid: %s' % uid, exc_info=True)\n #pass\n return None",
"def find_by_id(self, uid):\n return self.mapper.find_by_id(uid)",
"def by_id(cls, uid):\n\n return cls.get_by_id(uid, parent=users_key())",
"def by_id(cls, uid):\n return cls.get_by_id(uid, parent = users_key())",
"def get_item(self, id: str, user: User) -> Optional[T]:",
"def users_by_uid(request, uid):\r\n user = User()\r\n user = user.query({\"uid\":str(uid)})\r\n if(len(user) == 0):\r\n badRequest(\"No user for the given uid\")\r\n return HttpResponse(json.dumps(user))",
"def _getObjectByUid(self):\n # check for unique id handler tool\n if not PRODUCTS_CMFUID_INSTALLED:\n return\n uidtool = queryUtility(IUniqueIdHandler)\n if uidtool is None:\n return\n\n # check for remote uid info on object\n uid = getattr(aq_base(self), 'remote_uid', None)\n if uid is None:\n return\n\n return uidtool.queryObject(uid, None)",
"def fetch_user(uid):\n users = find_users(uid=uid)\n if users:\n return users[0]._asdict()\n return (\"Not found\", 404)",
"def get(uid: str):\n try:\n # Get the data for the user in the users DB table and return it\n query = db.child(\"users\").child(uid).get().val()\n return make_response(jsonify(query), 200)\n\n except HTTPError as e:\n # Handle exception and return correct response object\n return create_error_message(e)",
"def get(self, uid):\n user_record = UserRecord.get_user(uid, auth=admin_sdk.auth)\n user = Admin.query.filter_by(uid=user_record.uid).first()\n\n if not user_record or not user:\n raise HandlerException(404, \"Not found user\")\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}",
"def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError",
"def added_item(self, uid: str) -> object:\n return self._input[uid]",
"def get_piece_by_uid(self, uid):\n for piece in self.pieces:\n if piece.uid == uid:\n return piece",
"def get(self, uid):\n user_record = UserRecord.get_user(uid, auth=web_sdk.auth)\n user = User.query.filter_by(uid=user_record.uid).first()\n\n if not user_record or not user:\n raise HandlerException(404, \"Not found user\")\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}",
"def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)",
"def get(self, eventId, uid):\n raise NotImplementedError",
"def member(self, uid):\n try:\n member = self.search(uid=uid)[0]\n except IndexError:\n return None\n\n if self.objects:\n return member\n\n return member[1]",
"def get_item(self, item_id):\n test_info = db.get_test(item_id)\n if not test_info:\n pecan.abort(404)\n test_list = db.get_test_results(item_id)\n test_name_list = [test_dict[0] for test_dict in test_list]\n return {\"cpid\": test_info.cpid,\n \"created_at\": test_info.created_at,\n \"duration_seconds\": test_info.duration_seconds,\n \"results\": test_name_list}",
"def get(self, uid=None):\n if uid is None:\n users = User.query.filter(User.username != 'admin').join(Location).add_columns(Location.location_name).order_by(User.username).all()\n lista = []\n for u, l in users:\n user = u.json()\n user.update({'location': l})\n lista.append(user)\n return lista\n else:\n user = User.query.filter(User.id == uid).join(Location).add_columns(Location.location_name).one_or_none()\n if user:\n data = user[0].json()\n data.update({'location': user[1]})\n return data, 200\n else:\n return None, 404",
"def get(userid):\n\n return ldapi.lookup(ld, 'uid', userid, cfg['ldap_users_base'])",
"def object_by_uid(self, uid, comp_filter=None, comp_class=None):\n if comp_filter:\n assert not comp_class\n if hasattr(comp_filter, \"attributes\"):\n comp_filter = comp_filter.attributes[\"name\"]\n if comp_filter == \"VTODO\":\n comp_class = Todo\n elif comp_filter == \"VJOURNAL\":\n comp_class = Journal\n elif comp_filter == \"VEVENT\":\n comp_class = Event\n else:\n raise error.ConsistencyError(\"Wrong compfilter\")\n\n query = cdav.TextMatch(uid)\n query = cdav.PropFilter(\"UID\") + query\n\n root, comp_class = self.build_search_xml_query(\n comp_class=comp_class, filters=[query]\n )\n\n try:\n items_found = self.search(root)\n if not items_found:\n raise error.NotFoundError(\"%s not found on server\" % uid)\n except Exception as err:\n if comp_filter is not None:\n raise\n logging.warning(\n \"Error %s from server when doing an object_by_uid(%s). search without compfilter set is not compatible with all server implementations, trying event_by_uid + todo_by_uid + journal_by_uid instead\"\n % (str(err), uid)\n )\n items_found = []\n for compfilter in (\"VTODO\", \"VEVENT\", \"VJOURNAL\"):\n try:\n items_found.append(\n self.object_by_uid(uid, cdav.CompFilter(compfilter))\n )\n except error.NotFoundError:\n pass\n if len(items_found) >= 1:\n if len(items_found) > 1:\n logging.error(\n \"multiple items found with same UID. Returning the first one\"\n )\n return items_found[0]\n\n # Ref Lucas Verney, we've actually done a substring search, if the\n # uid given in the query is short (i.e. just \"0\") we're likely to\n # get false positives back from the server, we need to do an extra\n # check that the uid is correct\n items_found2 = []\n for item in items_found:\n ## In v0.10.0 we used regexps here - it's probably more optimized,\n ## but at one point it broke due to an extra CR in the data.\n ## Usage of the icalendar library increases readability and\n ## reliability\n item_uid = item.icalendar_component.get(\"UID\", None)\n if item_uid and item_uid == uid:\n items_found2.append(item)\n if not items_found2:\n raise error.NotFoundError(\"%s not found on server\" % uid)\n error.assert_(len(items_found2) == 1)\n return items_found2[0]",
"def find(self, uid):\n return self._root.find(uid)",
"def get_item(\n self, id_: Union[UUID, str], full_dataset: bool = True\n ) -> Optional[DatasetItem]:\n items = list(\n self.search_items(\n dataset_ids=[id_], full_dataset=full_dataset, order=ItemSort.UNSORTED\n )\n )\n if not items:\n return None\n if len(items) > 1:\n raise RuntimeError(\n \"Something is wrong: Multiple dataset results for a single UUID\"\n )\n\n [item] = items\n return item",
"def unrestrictedGetObjectByUid( self, uid, implements=() ):\n if uid and type(uid) is StringType:\n kw = {}\n kw['nd_uid'] = uid\n if implements:\n if not type(implements) is StringType:\n kw['implements'] = tuple(implements)\n else:\n kw['implements'] = implements\n results = self.unrestrictedSearch( **kw )\n if results:\n try: return results[0].getObject()\n except:\n logger.error('unrestrictedGetObjectByUid uid: %s' % uid, exc_info=True)\n #pass\n else:\n #logger.info('unrestrictedGetObjectByUid not found uid: %s' % uid)\n pass\n return None",
"def result(self) -> Item:\n return self._result"
] | [
"0.7593181",
"0.75671893",
"0.72641826",
"0.719392",
"0.67980444",
"0.6741963",
"0.67064244",
"0.64979196",
"0.64942247",
"0.6450699",
"0.6356397",
"0.6261345",
"0.62500316",
"0.6166366",
"0.6165816",
"0.6129108",
"0.61288935",
"0.6101235",
"0.60794276",
"0.6066852",
"0.60001093",
"0.59986806",
"0.59916234",
"0.5955681",
"0.59330034",
"0.59195495",
"0.5892399",
"0.5890485",
"0.5852518",
"0.58525133"
] | 0.81453663 | 0 |
Resource has pending work. | def pending_work(self) -> bool:
return len(self.ongoing) > 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_pending(self):\n return self._state == \"PENDING\"",
"def is_pending(self):\n status = self.get_status()\n return status[\"status\"] == 3",
"def pending(self):\n return 0",
"def is_pending(self):\n return self.type_id == STATE_PENDING",
"def is_pending(self):\n if self.status == \"PENDING\":\n return True\n else:\n return False",
"def pending(self):\n return self._state == PENDING_STATE",
"def is_pending(self):\n return self.is_disarming() or self.is_arming()",
"def busy(self) -> bool:\n return self.state != SubflowState.Available",
"def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0",
"def busy(self):\n pass",
"def in_waiting(self) -> int:\n pass",
"def is_active(self):\n return not self.pending",
"def _isDone(self) -> bool:\n pass",
"def is_waiting(self):\n return self._is_waiting",
"def is_running(self):\n # do we have a job ID to work with?\n if self.jobid == None:\n return False\n else:\n q_status = self.queue.get_status(self.jobid)\n\n if q_status == self.queue.state[\"active\"]:\n self.meta[\"status\"] = 'PENDING'\n return True\n else:\n return False",
"def isBusy(self):\n return self.busy",
"def in_progress(self):\n return False",
"def freebusy(self):\r\n return FreeBusyResource(self)",
"def get_isDone(self):\n pass",
"def _is_done(self):\n pass",
"def is_ready(self):\n return self.prep_job.is_done()",
"def busy(self) -> bool:\n return self._busy",
"def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0",
"def is_pending(self) -> bool:\n return self.state == Order.OrderState.PENDING.choice_value",
"def rest_api_status(self):\n with self.resource_lock:\n pass",
"def isWaiting(self):\r\n return self.scheduler.isWaiting()",
"def pending_runs(self) -> bool:\n # If there are futures available, it translates\n # to runs still not finished/processed\n return len(self.futures) > 0",
"def is_call_waiting(self) -> bool:",
"def pending(self):\n self.state = Step.State.PENDING",
"def _checkpoint(self,):\n self.outstanding.wait()"
] | [
"0.7410092",
"0.7407186",
"0.7266239",
"0.7127514",
"0.7081727",
"0.6982759",
"0.68075866",
"0.67374724",
"0.6715299",
"0.666442",
"0.6646339",
"0.6521107",
"0.6490966",
"0.6468425",
"0.6454948",
"0.6444217",
"0.64277786",
"0.6398431",
"0.6390363",
"0.63795155",
"0.63734704",
"0.63634205",
"0.6342875",
"0.633686",
"0.6332682",
"0.63324416",
"0.63224673",
"0.63080525",
"0.62779063",
"0.6255388"
] | 0.767412 | 0 |
Gets information about items in ``Executor`` for debugging. Subclasses can override this method and implement a well suited method to get items current status. | def get_current_status_for_debug(self) -> List[str]:
msgs = []
if self.added_items:
msgs.append(f"{self.class_name} {self.cfg.name} added items:")
for item in self.added_items:
msgs.append(f"\t{item}")
else:
msgs.append(f"No added items in {self.class_name}")
if self.ongoing:
msgs.append(f"{self.class_name} {self.cfg.name} pending items:")
for item in self.ongoing:
msgs.append(f"\t{item}")
else:
msgs.append(f"No pending items in {self.class_name}")
return msgs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getInfoOnStatus(self):\n raise NotImplementedError();",
"def execute_info(self):\n return self._execute_info",
"def info(self):\n return self.current_run.info",
"def get_details(self):\n status = []\n for key, container in self.containers.items():\n container.details = container.daemon.connection.inspect_container(self.config['release_name'])\n status.append(container.details)\n return status",
"def celery_task_status(self):\n return self._get_celery_queue_data()",
"def status(self):\n\t\treturn self._status",
"def status(self):\n return self._get(path='status')",
"def status(self):\n return self._query_status()['status']",
"def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)",
"def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)",
"def status(self):\r\n return self._status",
"def status(self):\r\n return self._status",
"def status(self):\n return self.__status",
"def status(self):\n return self.__status",
"def status(self):\n self._refresh_state()\n return self._data.get('status')",
"def status(self):\n return self.status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status"
] | [
"0.6424657",
"0.631913",
"0.61626965",
"0.59027237",
"0.58842343",
"0.588345",
"0.5873142",
"0.58566666",
"0.5813858",
"0.5776814",
"0.5770883",
"0.5770883",
"0.57550246",
"0.57550246",
"0.575277",
"0.57400995",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079",
"0.5728079"
] | 0.6751712 | 0 |
Tests that liking a message works correctly. | def test_like(self):
message = Message(text="hello world", user_id=self.testuser2.id)
db.session.add(message)
db.session.commit()
message_id = message.id
with self.client as c:
with c.session_transaction() as session:
session[CURR_USER_KEY] = self.testuser.id
response = c.post(f"/messages/{message_id}/like", follow_redirects=True)
self.assertEqual(response.status_code, 200)
likes = Likes.query.filter(Likes.message_id==message_id).all()
self.assertEqual(len(likes), 1)
self.assertEqual(likes[0].user_id, self.testuser.id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_like_message(self):\n\n self.u.likes.append(self.m_u2)\n db.session.commit()\n\n self.assertEqual(len(self.u.likes), 1)\n self.assertEqual(self.u.likes[0], self.m_u2)",
"def test_update_like_can_message(self):\n\n self.assertEqual(first=0, second=self.like.likes.all().count())\n url = reverse('like-detail', args=(self.like.id,))\n data = {\n 'likes': [2]\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n self.like_1.likes.add(self.user)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.assertTrue(response.data.get('can_message', False))\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=1, second=self.like.likes.all().count())",
"def test_unlike(self):\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n\n like = Likes(message_id=message.id, user_id=self.testuser.id)\n \n db.session.add(like)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), 0)",
"def test_is_liked_by(self):\n\n u = self.user\n u2 = User(\n email=\"[email protected]\" ,\n username=\"testuser2\" ,\n password=\"HASHED_PASSWORD\"\n )\n\n m = Message(\n user_id=u.id ,\n text='test content'\n )\n\n db.session.add_all([u2 , m])\n db.session.commit()\n\n #not like \n self.assertEqual(len(u2.likes) , 0)\n self.assertEqual(len(m.likes_users) , 0)\n self.assertEqual(u2.is_like(m) , False)\n self.assertEqual(m.is_liked_by(u2) , False)\n\n like = Likes(user_id=u2.id , message_id=m.id)\n db.session.add(like)\n db.session.commit()\n\n self.assertEqual(len(u2.likes) , 1)\n self.assertEqual(len(m.likes_users) , 1)\n self.assertEqual(u2.is_like(m) , True)\n self.assertEqual(m.is_liked_by(u2), True)",
"def test_unauthorized_like(self):\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n\n like = Likes(message_id=message.id, user_id=self.testuser.id)\n \n db.session.add(like)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(response.data))\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), Likes.query.count())",
"def test_associate_message_likes(self):\n\n testlike1 = self.user2.likes.append(self.message2)\n testlike2 = self.user2.likes.append(self.message1)\n db.session.commit()\n\n likes = Likes.query.all()\n self.assertEqual(len(likes), 2)\n self.assertEqual(likes[0].message_id, self.message2.id)\n self.assertEqual(likes[1].message_id, self.message1.id)",
"def test_like_model(self):\n u = User(\n username = \"like_test_user\",\n email = \"[email protected]\",\n password = \"HASHED_POTATOES\" \n )\n\n db.session.add(u)\n db.session.commit()\n\n m = Message(text=\"warble warble\", user_id=u.id)\n\n u.messages.append(m)\n db.session.commit()\n\n l = Like(user_id=u.id, message_id=m.id)\n\n db.session.add(l)\n db.session.commit()\n\n self.assertEqual(l.user_id, u.id)\n self.assertEqual(l.message_id, m.id)",
"def test_update_like_cant_message(self):\n\n self.assertEqual(first=0, second=self.like.likes.all().count())\n url = reverse('like-detail', args=(self.like.id,))\n data = {\n 'likes': [2]\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.assertFalse(response.data.get('can_message', False))\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=1, second=self.like.likes.all().count())",
"def test_new_likes(self):\n\n new_user = User.signup(\"newuser\", \"[email protected]\", \"password123\", None)\n db.session.add(new_user)\n db.session.commit()\n\n new_msg = Message(text=\"A new message from a new user\", user_id=new_user.id)\n db.session.add(new_msg)\n db.session.commit()\n\n new_user.likes.append(new_msg)\n db.session.commit()\n\n likes = Likes.query.all()\n self.assertEqual(len(likes), 1)\n self.assertEqual(likes[0].user_id, new_user.id)\n self.assertEqual(likes[0].message_id, new_msg.id)",
"def test_like_already_liked_activity(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=200)\n\n self.assertEqual(res.json['object']['likes'][0]['username'], username_not_me)\n self.assertEqual(res.json['object']['liked'], True)\n self.assertEqual(res.json['object']['likesCount'], 1)",
"def test_like_activity_by_various(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username), status=201)\n\n self.assertEqual(res.json['object']['likes'][0]['username'], username_not_me)\n self.assertEqual(res.json['object']['likes'][1]['username'], username)\n self.assertEqual(res.json['object']['liked'], True)\n self.assertEqual(res.json['object']['likesCount'], 2)",
"def test_like_activity(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n activity = self.testapp.get('/activities/%s' % activity_id, '', oauth2Header(username), status=200)\n\n self.assertEqual(res.json['object']['likes'][0]['username'], username_not_me)\n self.assertEqual(res.json['object']['liked'], True)\n self.assertEqual(res.json['object']['likesCount'], 1)\n\n self.assertEqual(activity.json['likes'][0]['username'], username_not_me)\n self.assertEqual(activity.json['liked'], False)\n self.assertEqual(activity.json['likesCount'], 1)",
"def test_associate_user_likes(self):\n\n testlike1 = self.user1.likes.append(self.message2)\n testlike2 = self.user2.likes.append(self.message1)\n db.session.commit()\n\n likes = Likes.query.all()\n\n self.assertEqual(len(likes), 2)\n self.assertEqual(likes[0].user_id, self.user1.id)\n self.assertEqual(likes[1].user_id, self.user2.id)",
"def test_is_liked_by_method(self):\n\n self.u2.likes.append(self.m_u1)\n db.session.commit()\n \n self.assertTrue(self.m_u1.is_liked_by(self.u2))\n self.assertFalse(self.m_u1.is_liked_by(self.u))",
"def test_is_liked_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n self.client.post(reverse('api:topics-like', kwargs={'topic_id': topic.id}))\n response = self.client.get(reverse('api:topics-fans', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.data[0]\n self.assertEqual(data.get('username'), self.user.username)\n self.assertEqual(data.get('game_nickname'), self.user.game_nickname)",
"def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))",
"def test_unlike_activity_get_other_likes(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username), status=201)\n res = self.testapp.delete('/activities/%s/likes/%s' % (activity_id, username_not_me), '', oauth2Header(username_not_me), status=204)\n\n activity = self.testapp.get('/activities/%s' % activity_id, '', oauth2Header(username), status=200)\n\n self.assertEqual(activity.json['likes'][0]['username'], username)\n self.assertEqual(activity.json['liked'], True)\n self.assertEqual(activity.json['likesCount'], 1)",
"def test_like_unlike_game(self):\n url = reverse('like-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id\n }\n\n like = self.client.post(url, data, format='json')\n self.assertEqual(True, like.data['value'])\n\n unlike = self.client.post(url, data, format='json')\n self.assertEqual(False, unlike.data['value'])",
"def test_message_user():",
"def test_like_mission(self):\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = self.user2.id\n\n db.session.add(self.mission)\n resp = c.post(f'/v1/mission/like{self.mission.id}')\n\n self.assertEqual(resp.json['success'], 'added')\n mission = Mission.query.get(self.mission.id)\n self.assertEqual(mission.likes, {0, self.user2.id})",
"def test_like_unlike_works_without_error(self):\n create_response = self.client.post(reverse('posts:comment_create'),\n data={\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description':\n 'This is a test_comment'\n }, format='json')\n self.assertEqual(create_response.status_code, status.HTTP_201_CREATED)\n # initial stars_count is zero, so when liked must increase by 1\n like_response = self.like_unlike_comment()\n self.assertEqual(like_response.status_code, status.HTTP_200_OK)\n self.assertEqual(like_response.data['stars_count'], 1)\n like_again = self.like_unlike_comment()\n self.assertEqual(like_again.status_code, status.HTTP_200_OK)\n self.assertEqual(like_again.data['stars_count'], 2)\n # current likes/stars count is 2 so if unliked should become 1\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 1)\n # current likes/stars count is 1 so if unliked should become 0\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 0)\n # current likes/stars count is 0 so if unliked should remain 0\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 0)",
"def test_like_comment(comment, mocker):\n like_mock = mocker.patch(\"instapi.client.client.comment_like\")\n\n comment.like()\n\n like_mock.assert_called_once_with(comment.pk)",
"def test_a_user_can_like_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n like = self.client.post('/api/articles/{}/like/'.format(slug),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n self.assertEqual(like.status_code, 200)",
"def test_not_create_like_for_current_anon_already_liked(self):\n pokemon = Pokemon.objects.all().first()\n\n r = self.client.post(\n reverse('create_like'),\n {'pokemon-like-id': pokemon.id},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n r = self.client.post(\n reverse('create_like'),\n {'pokemon-like-id': pokemon.id},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n self.assertEqual(Like.objects.all().count(), 1)",
"def test_authorized_user_like_field(self):\n lesson_id = 1\n self.create_and_auth(email='[email protected]',\n password='qwerty_1234')\n\n response = self.client.get(reverse('lessons-detail',\n args=(lesson_id,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('like', response.data)\n self.assertFalse(response.data['like'])\n\n self.client.post(reverse('lesson_likes', kwargs={\n 'lesson_id': lesson_id\n }))\n\n response = self.client.get(reverse('lessons-detail',\n args=(lesson_id,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('like', response.data)\n self.assertTrue(response.data['like'])",
"def test_im_chat_messages(self):\n pass",
"def test_guessing(self):\n self.classifier.guess(self.message)",
"def test_reply_from_banned_user(self):\n # Create 2 users. By default neither is banned, but one will be soon\n viewing_user = self.create_user()\n banned_user = self.create_user()\n\n # Create a new group and add both our users to it\n group = self.create_group()\n viewing_user.add_to_group(group.pk)\n banned_user.add_to_group(group.pk)\n\n # Create a new thread sent to the group we created above\n thread = self.create_thread(group=group)\n\n # Create a reply sent by a soon-to-be-banned user\n message = mommy.make(\n 'connectmessages.Message', thread=thread, sender=banned_user)\n\n # Confirm both users can see the message, as neither is banned\n self.assertTrue(message.visible_to_user(viewing_user))\n self.assertTrue(message.visible_to_user(banned_user))\n\n # Ban the banned user\n banned_user.is_banned = True\n banned_user.save()\n\n # Confirm the non-banned user can no longer see the banned user's reply\n # but the banned user can see his or her own message\n self.assertFalse(message.visible_to_user(viewing_user))\n self.assertTrue(message.visible_to_user(banned_user))",
"def test_like_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n response = self.client.post(reverse('api:topics-like', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n obj_type = ContentType.objects.get_for_model(topic)\n likes = Like.objects.filter(\n content_type=obj_type, object_id=topic.id, user=self.user)\n self.assertTrue(likes.exists())",
"def test_say(self):\n self.client.say(\"thechannel\", \"the message\")\n self.assertEqual(self.client.lines, [\"PRIVMSG #thechannel :the message\"])"
] | [
"0.7500867",
"0.73392713",
"0.73344636",
"0.7329332",
"0.7234027",
"0.71456903",
"0.70850676",
"0.68999016",
"0.68899083",
"0.6789816",
"0.67514914",
"0.6699322",
"0.6665291",
"0.651895",
"0.64480066",
"0.6421176",
"0.6401107",
"0.6379729",
"0.6366734",
"0.63360304",
"0.63171196",
"0.6313183",
"0.63087845",
"0.62295926",
"0.6202994",
"0.61808664",
"0.6110086",
"0.60959953",
"0.60645384",
"0.6055482"
] | 0.7895267 | 0 |
Tests that unliking a message works correctly. | def test_unlike(self):
message = Message(text="hello world", user_id=self.testuser2.id)
db.session.add(message)
db.session.commit()
like = Likes(message_id=message.id, user_id=self.testuser.id)
db.session.add(like)
db.session.commit()
message_id = message.id
with self.client as c:
with c.session_transaction() as session:
session[CURR_USER_KEY] = self.testuser.id
response = c.post(f"/messages/{message_id}/like", follow_redirects=True)
self.assertEqual(response.status_code, 200)
likes = Likes.query.filter(Likes.message_id==message_id).all()
self.assertEqual(len(likes), 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unlike_mission(self):\n\n self.mission.likes = {0, self.user2.id}\n db.session.commit()\n mission = Mission.query.get(self.mission.id)\n # make sure like is there in mission.likes.\n self.assertEqual(mission.likes, {0, self.user2.id})\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = self.user2.id\n\n resp = c.post(f'/v1/mission/like{self.mission.id}')\n\n self.assertEqual(resp.json['success'], 'removed')\n mission = Mission.query.get(self.mission.id)\n self.assertEqual(mission.likes, {0})",
"def test_like_unlike_game(self):\n url = reverse('like-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id\n }\n\n like = self.client.post(url, data, format='json')\n self.assertEqual(True, like.data['value'])\n\n unlike = self.client.post(url, data, format='json')\n self.assertEqual(False, unlike.data['value'])",
"def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))",
"def test_unauthenticated_user_disliking(self):\n self.like_dislike(self.like_url(6))",
"def test_unlike_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n self.client.post(reverse('api:topics-like', kwargs={'topic_id': topic.id}))\n response = self.client.post(reverse('api:topics-unlike', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n obj_type = ContentType.objects.get_for_model(topic)\n likes = Like.objects.filter(\n content_type=obj_type, object_id=topic.id, user=self.user)\n self.assertFalse(likes.exists())",
"def test_unauthorized_like(self):\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n\n like = Likes(message_id=message.id, user_id=self.testuser.id)\n \n db.session.add(like)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(response.data))\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), Likes.query.count())",
"def test_unlike_comment(comment, mocker):\n unlike_mock = mocker.patch(\"instapi.client.client.comment_unlike\")\n\n comment.unlike()\n\n unlike_mock.assert_called_once_with(comment.pk)",
"def test_unlike_activity(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n self.testapp.delete('/activities/%s/likes/%s' % (activity_id, username_not_me), '', oauth2Header(username_not_me), status=204)\n activity = self.testapp.get('/activities/%s' % activity_id, '', oauth2Header(username), status=200)\n\n self.assertEqual(activity.json['likes'], [])\n self.assertEqual(activity.json['liked'], False)\n self.assertEqual(activity.json['likesCount'], 0)",
"def test_40_message_vote(self):\n cr, uid = self.cr, self.uid\n # Data: post a message on Pigs\n msg_id = self.group_pigs.message_post(body='My Body', subject='1')\n msg = self.mail_message.browse(cr, uid, msg_id)\n msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)\n\n # Do: Admin vote for msg\n self.mail_message.vote_toggle(cr, uid, [msg.id])\n msg.refresh()\n # Test: msg has Admin as voter\n self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voter')\n # Do: Bert vote for msg\n self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])\n msg_raoul.refresh()\n # Test: msg has Admin and Bert as voters\n self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Bert should be in the voters')\n # Do: Admin unvote for msg\n self.mail_message.vote_toggle(cr, uid, [msg.id])\n msg.refresh()\n msg_raoul.refresh()\n # Test: msg has Bert as voter\n self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')\n self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')",
"def test_unlike_report(self):\n\n self.report.likes = {0, self.user2.id}\n db.session.commit()\n report = Report.query.get(self.report.id)\n # make sure like is there in report.likes.\n self.assertEqual(report.likes, {0, self.user2.id})\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = self.user2.id\n\n resp = c.post(f'/v1/report/like{self.report.id}')\n\n self.assertEqual(resp.json['success'], 'removed')\n report = Report.query.get(self.report.id)\n self.assertEqual(report.likes, {0})",
"def test_like_unlike_works_without_error(self):\n create_response = self.client.post(reverse('posts:comment_create'),\n data={\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description':\n 'This is a test_comment'\n }, format='json')\n self.assertEqual(create_response.status_code, status.HTTP_201_CREATED)\n # initial stars_count is zero, so when liked must increase by 1\n like_response = self.like_unlike_comment()\n self.assertEqual(like_response.status_code, status.HTTP_200_OK)\n self.assertEqual(like_response.data['stars_count'], 1)\n like_again = self.like_unlike_comment()\n self.assertEqual(like_again.status_code, status.HTTP_200_OK)\n self.assertEqual(like_again.data['stars_count'], 2)\n # current likes/stars count is 2 so if unliked should become 1\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 1)\n # current likes/stars count is 1 so if unliked should become 0\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 0)\n # current likes/stars count is 0 so if unliked should remain 0\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 0)",
"def test_unlike_activity_get_other_likes(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username), status=201)\n res = self.testapp.delete('/activities/%s/likes/%s' % (activity_id, username_not_me), '', oauth2Header(username_not_me), status=204)\n\n activity = self.testapp.get('/activities/%s' % activity_id, '', oauth2Header(username), status=200)\n\n self.assertEqual(activity.json['likes'][0]['username'], username)\n self.assertEqual(activity.json['liked'], True)\n self.assertEqual(activity.json['likesCount'], 1)",
"def test_update_like_cant_message(self):\n\n self.assertEqual(first=0, second=self.like.likes.all().count())\n url = reverse('like-detail', args=(self.like.id,))\n data = {\n 'likes': [2]\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.assertFalse(response.data.get('can_message', False))\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=1, second=self.like.likes.all().count())",
"async def test_sent_correct_message(self):\n unsilenced_overwrite = PermissionOverwrite(send_messages=True, add_reactions=True)\n test_cases = (\n (True, silence.MSG_UNSILENCE_SUCCESS, unsilenced_overwrite),\n (False, silence.MSG_UNSILENCE_FAIL, unsilenced_overwrite),\n (False, silence.MSG_UNSILENCE_MANUAL, self.text_overwrite),\n (False, silence.MSG_UNSILENCE_MANUAL, PermissionOverwrite(send_messages=False)),\n (False, silence.MSG_UNSILENCE_MANUAL, PermissionOverwrite(add_reactions=False)),\n )\n\n targets = (None, MockTextChannel())\n\n for (was_unsilenced, message, overwrite), target in itertools.product(test_cases, targets):\n ctx = MockContext()\n ctx.channel.overwrites_for.return_value = overwrite\n if target:\n target.overwrites_for.return_value = overwrite\n\n with (\n mock.patch.object(self.cog, \"_unsilence\", return_value=was_unsilenced),\n mock.patch.object(self.cog, \"send_message\") as send_message,\n self.subTest(was_unsilenced=was_unsilenced, overwrite=overwrite, target=target),\n ):\n await self.cog.unsilence.callback(self.cog, ctx, channel=target)\n\n call_args = (message, ctx.channel, target or ctx.channel)\n send_message.assert_awaited_once_with(*call_args, alert_target=was_unsilenced)",
"def unlike(id):\n spinner = Halo(text=\"Unliking photo...\", spinner=\"dots\").start()\n try:\n api.unlike(id)\n spinner.succeed(\"Unliked\")\n except Exception:\n spinner.fail(\"Failed to unlike photo.\")",
"def test_unseen(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UNSEEN 8] Message 8 is first unseen')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'UNSEEN': 8})",
"def test_remove_item_and_reply_no(qtbot, pathmanager):\n pathmanager.show()\n count = pathmanager.count()\n\n def interact_message_box():\n messagebox = pathmanager.findChild(QMessageBox)\n buttons = messagebox.findChildren(QPushButton)\n for button in buttons:\n if 'no' in button.text().lower():\n qtbot.mouseClick(button, Qt.LeftButton)\n break\n\n timer = QTimer()\n timer.setSingleShot(True)\n timer.timeout.connect(interact_message_box)\n timer.start(100)\n qtbot.mouseClick(pathmanager.remove_button, Qt.LeftButton)\n\n # Back to main thread\n assert pathmanager.count() == count",
"def test_is_liked_by(self):\n\n u = self.user\n u2 = User(\n email=\"[email protected]\" ,\n username=\"testuser2\" ,\n password=\"HASHED_PASSWORD\"\n )\n\n m = Message(\n user_id=u.id ,\n text='test content'\n )\n\n db.session.add_all([u2 , m])\n db.session.commit()\n\n #not like \n self.assertEqual(len(u2.likes) , 0)\n self.assertEqual(len(m.likes_users) , 0)\n self.assertEqual(u2.is_like(m) , False)\n self.assertEqual(m.is_liked_by(u2) , False)\n\n like = Likes(user_id=u2.id , message_id=m.id)\n db.session.add(like)\n db.session.commit()\n\n self.assertEqual(len(u2.likes) , 1)\n self.assertEqual(len(m.likes_users) , 1)\n self.assertEqual(u2.is_like(m) , True)\n self.assertEqual(m.is_liked_by(u2), True)",
"def test_unpopular(self):\n self.assertFalse(self.user3.is_popular())\n self.user3.receive_upvotes(randint(101, 10000))\n self.assertTrue(self.user3.is_popular())",
"def test_bob_unread(self):\n messages = list(self.bob_inbox.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"async def test_on_message_takes_action(self, find_token_in_message, take_action):\n cog = TokenRemover(self.bot)\n found_token = \"foobar\"\n find_token_in_message.return_value = found_token\n\n await cog.on_message(self.msg)\n\n find_token_in_message.assert_called_once_with(self.msg)\n take_action.assert_awaited_once_with(cog, self.msg, found_token)",
"def test_unacknowledge(requests_mock):\n from DarktraceMBs import Client, unacknowledge_model_breach_command\n\n # GIVEN an integration is configured and you would like to unacknowledge a breach\n mock_api_response = util_load_json('test_data/ack_success.json')\n requests_mock.post('https://mock.darktrace.com/modelbreaches/2509/unacknowledge', json=mock_api_response)\n\n client = Client(\n base_url='https://mock.darktrace.com',\n verify=False,\n auth=('examplepub', 'examplepri')\n )\n\n # WHEN the desired model breach has id 111\n args = {\n 'pbid': '2509',\n }\n\n integration_response = unacknowledge_model_breach_command(client, args)\n expected_response = util_load_json('test_data/formatted_unack_success.json')\n\n # THEN the breach should be acknowledged, context updated, and message posted\n assert integration_response.outputs == expected_response\n assert integration_response.outputs_prefix == 'Darktrace.ModelBreach'\n assert integration_response.outputs_key_field == 'pbid'",
"def test_alice_unread(self):\n messages = list(self.alice_inbox.unread)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_remove_item_and_reply_yes(qtbot, pathmanager):\n pathmanager.show()\n count = pathmanager.count()\n\n def interact_message_box():\n messagebox = pathmanager.findChild(QMessageBox)\n buttons = messagebox.findChildren(QPushButton)\n for button in buttons:\n if 'yes' in button.text().lower():\n qtbot.mouseClick(button, Qt.LeftButton)\n break\n\n timer = QTimer()\n timer.setSingleShot(True)\n timer.timeout.connect(interact_message_box)\n timer.start(100)\n qtbot.mouseClick(pathmanager.remove_button, Qt.LeftButton)\n\n # Back to main thread\n assert pathmanager.count() == (count - 1)",
"def test_bob_unread(self):\n messages = list(self.bob_storage.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def test_punish():\n game = Game()\n game.punish()\n assert game.mistake_count == 1",
"def test_alice_unread(self):\n messages = list(self.alice_storage.unread)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)",
"def unlabel_messages(self, org, messages, label):\n pass",
"def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)",
"def test_update_like_can_message(self):\n\n self.assertEqual(first=0, second=self.like.likes.all().count())\n url = reverse('like-detail', args=(self.like.id,))\n data = {\n 'likes': [2]\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n self.like_1.likes.add(self.user)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.assertTrue(response.data.get('can_message', False))\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=1, second=self.like.likes.all().count())"
] | [
"0.6753856",
"0.65746415",
"0.650034",
"0.649709",
"0.64039636",
"0.62669873",
"0.6260182",
"0.61631036",
"0.61529106",
"0.608861",
"0.6024869",
"0.5958756",
"0.587565",
"0.5863406",
"0.5832868",
"0.57908106",
"0.57621634",
"0.57574433",
"0.5739922",
"0.5647676",
"0.5647052",
"0.563948",
"0.56251425",
"0.560804",
"0.5568778",
"0.556797",
"0.55478585",
"0.5512384",
"0.55007094",
"0.54986495"
] | 0.70695925 | 0 |
Tests that attempting to like/unlike a message whilst unauthorized is rejected. | def test_unauthorized_like(self):
message = Message(text="hello world", user_id=self.testuser2.id)
db.session.add(message)
db.session.commit()
like = Likes(message_id=message.id, user_id=self.testuser.id)
db.session.add(like)
db.session.commit()
message_id = message.id
with self.client as c:
response = c.post(f"/messages/{message_id}/like", follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn("Access unauthorized", str(response.data))
likes = Likes.query.filter(Likes.message_id==message_id).all()
self.assertEqual(len(likes), Likes.query.count()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))",
"def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)",
"def test_unauthenticated_user_disliking(self):\n self.like_dislike(self.like_url(6))",
"def test_an_unauthenticated_user_cannot_like_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n like = self.client.post('/api/articles/{}/like/'.format(slug),\n format='json')\n\n self.assertEqual(\n like.data['detail'], \"Authentication credentials were not provided.\")\n self.assertEqual(like.status_code, 401)",
"def test_unlike(self):\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n\n like = Likes(message_id=message.id, user_id=self.testuser.id)\n \n db.session.add(like)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), 0)",
"def test_an_unauthenticated_user_cannot_dislike_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n dislike = self.client.post('/api/articles/{}/dislike/'.format(slug),\n format='json')\n\n self.assertEqual(\n dislike.data['detail'], \"Authentication credentials were not provided.\")\n self.assertEqual(dislike.status_code, 401)",
"def test_unlike_mission(self):\n\n self.mission.likes = {0, self.user2.id}\n db.session.commit()\n mission = Mission.query.get(self.mission.id)\n # make sure like is there in mission.likes.\n self.assertEqual(mission.likes, {0, self.user2.id})\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = self.user2.id\n\n resp = c.post(f'/v1/mission/like{self.mission.id}')\n\n self.assertEqual(resp.json['success'], 'removed')\n mission = Mission.query.get(self.mission.id)\n self.assertEqual(mission.likes, {0})",
"def test_update_like_cant_message(self):\n\n self.assertEqual(first=0, second=self.like.likes.all().count())\n url = reverse('like-detail', args=(self.like.id,))\n data = {\n 'likes': [2]\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.assertFalse(response.data.get('can_message', False))\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=1, second=self.like.likes.all().count())",
"def test_like_unlike_game(self):\n url = reverse('like-game')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id\n }\n\n like = self.client.post(url, data, format='json')\n self.assertEqual(True, like.data['value'])\n\n unlike = self.client.post(url, data, format='json')\n self.assertEqual(False, unlike.data['value'])",
"async def test_regular_member_cannot_use_command_outside_of_bot_commands(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n constants.STAFF_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author, channel=helpers.MockTextChannel(id=100))\n\n msg = \"Sorry, but you may only use this command within <#50>.\"\n with self.assertRaises(InWhitelistCheckFailure, msg=msg):\n await self.cog.user_info(self.cog, ctx)",
"def test_like_unlike_works_without_error(self):\n create_response = self.client.post(reverse('posts:comment_create'),\n data={\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description':\n 'This is a test_comment'\n }, format='json')\n self.assertEqual(create_response.status_code, status.HTTP_201_CREATED)\n # initial stars_count is zero, so when liked must increase by 1\n like_response = self.like_unlike_comment()\n self.assertEqual(like_response.status_code, status.HTTP_200_OK)\n self.assertEqual(like_response.data['stars_count'], 1)\n like_again = self.like_unlike_comment()\n self.assertEqual(like_again.status_code, status.HTTP_200_OK)\n self.assertEqual(like_again.data['stars_count'], 2)\n # current likes/stars count is 2 so if unliked should become 1\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 1)\n # current likes/stars count is 1 so if unliked should become 0\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 0)\n # current likes/stars count is 0 so if unliked should remain 0\n unlike_comment_response = self.like_unlike_comment(pk='1',\n action='unlike')\n self.assertEqual(unlike_comment_response.status_code,\n status.HTTP_200_OK)\n self.assertEqual(unlike_comment_response.data['stars_count'], 0)",
"def test_post_comment_to_project_chat_by_blocked_user_fails(self):\n # setup\n self.test_user = return_canned_user(username=\"test_user\", id=33333)\n self.test_user.create()\n self.test_user.role = UserRole.READ_ONLY.value\n # action\n response = self.client.post(\n self.endpoint_url,\n headers={\"Authorization\": generate_encoded_token(self.test_user.id)},\n json={\"message\": TEST_MESSAGE},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response_body[\"Error\"], \"User is on read only mode\")\n self.assertEqual(response_body[\"SubCode\"], \"ReadOnly\")",
"def test_unlike_activity(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n self.testapp.delete('/activities/%s/likes/%s' % (activity_id, username_not_me), '', oauth2Header(username_not_me), status=204)\n activity = self.testapp.get('/activities/%s' % activity_id, '', oauth2Header(username), status=200)\n\n self.assertEqual(activity.json['likes'], [])\n self.assertEqual(activity.json['liked'], False)\n self.assertEqual(activity.json['likesCount'], 0)",
"def test_noticedDoesntPrivmsg(self):\n\n def privmsg(user, channel, message):\n self.fail(\"privmsg() should not have been called\")\n\n self.protocol.privmsg = privmsg\n self.protocol.irc_NOTICE(\"spam\", [\"#greasyspooncafe\", \"I don't want any spam!\"])",
"def test_post_comment_to_task_chat_by_blocked_user_fails(self):\n # setup\n self.test_user = return_canned_user(\"test_user\", 33333)\n self.test_user.create()\n self.test_user.role = UserRole.READ_ONLY.value\n # action\n response = self.client.post(\n self.endpoint_url,\n headers={\"Authorization\": generate_encoded_token(self.test_user.id)},\n json={\"comment\": TEST_MESSAGE},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response_body[\"Error\"], \"User is on read only mode\")\n self.assertEqual(response_body[\"SubCode\"], \"ReadOnly\")",
"def test_unlike_activity_get_other_likes(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activity_id = res.json['id']\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username_not_me), status=201)\n res = self.testapp.post('/activities/%s/likes' % activity_id, '', oauth2Header(username), status=201)\n res = self.testapp.delete('/activities/%s/likes/%s' % (activity_id, username_not_me), '', oauth2Header(username_not_me), status=204)\n\n activity = self.testapp.get('/activities/%s' % activity_id, '', oauth2Header(username), status=200)\n\n self.assertEqual(activity.json['likes'][0]['username'], username)\n self.assertEqual(activity.json['liked'], True)\n self.assertEqual(activity.json['likesCount'], 1)",
"def test_listing_from_wall_when_blocked_some_users(self):",
"def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')",
"def test_a_user_can_dislike_an_article(self):\n article = self.create_article()\n\n slug = article.data['data']['slug']\n dislike = self.client.post('/api/articles/{}/dislike/'.format(slug),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n self.assertEqual(dislike.status_code, 200)",
"def test_guests_can_not_post_message(self):\n url = reverse('posts-list')\n data = {'title': 'some title', 'body': 'somebody :P'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_update_like_can_message(self):\n\n self.assertEqual(first=0, second=self.like.likes.all().count())\n url = reverse('like-detail', args=(self.like.id,))\n data = {\n 'likes': [2]\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n self.like_1.likes.add(self.user)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.assertTrue(response.data.get('can_message', False))\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=1, second=self.like.likes.all().count())",
"def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def denied(message):\n hf.query_users(message, hf.get_users(), \"denied\")",
"def test_cannot_disllike_article_with_non_exitent_slug(self):\n self.create_article()\n\n dislike = self.client.post('/api/articles/{}/dislike/'.format(\"abc\"),\n HTTP_AUTHORIZATION='Bearer ' +\n self.token,\n format='json')\n\n dislike = json.loads(dislike.content.decode('utf-8'))\n self.assertEqual(dislike['error'], 'Article with slug abc not found')\n self.assertEqual(dislike['status'], 404)",
"def test_status_unauthenticated(self):\n rv = self.client.post('/statusize/', data={'message': 'foo'},\n follow_redirects=True)\n eq_(rv.status_code, 403)",
"def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME",
"def test_is_liked_by(self):\n\n u = self.user\n u2 = User(\n email=\"[email protected]\" ,\n username=\"testuser2\" ,\n password=\"HASHED_PASSWORD\"\n )\n\n m = Message(\n user_id=u.id ,\n text='test content'\n )\n\n db.session.add_all([u2 , m])\n db.session.commit()\n\n #not like \n self.assertEqual(len(u2.likes) , 0)\n self.assertEqual(len(m.likes_users) , 0)\n self.assertEqual(u2.is_like(m) , False)\n self.assertEqual(m.is_liked_by(u2) , False)\n\n like = Likes(user_id=u2.id , message_id=m.id)\n db.session.add(like)\n db.session.commit()\n\n self.assertEqual(len(u2.likes) , 1)\n self.assertEqual(len(m.likes_users) , 1)\n self.assertEqual(u2.is_like(m) , True)\n self.assertEqual(m.is_liked_by(u2), True)",
"def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()",
"async def test_regular_member_cannot_target_another_member(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author)\n\n await self.cog.user_info(self.cog, ctx, self.target)\n\n ctx.send.assert_called_once_with(\"You may not use this command on users other than yourself.\")",
"def test_not_create_like_for_current_anon_already_liked(self):\n pokemon = Pokemon.objects.all().first()\n\n r = self.client.post(\n reverse('create_like'),\n {'pokemon-like-id': pokemon.id},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n r = self.client.post(\n reverse('create_like'),\n {'pokemon-like-id': pokemon.id},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n self.assertEqual(Like.objects.all().count(), 1)"
] | [
"0.74085444",
"0.72086763",
"0.712131",
"0.7073008",
"0.6997627",
"0.6860409",
"0.6685054",
"0.65761566",
"0.6543981",
"0.6496687",
"0.6453958",
"0.6438093",
"0.6388551",
"0.6380686",
"0.6352018",
"0.6291714",
"0.6255084",
"0.624493",
"0.62448287",
"0.62345237",
"0.6231471",
"0.6223502",
"0.620888",
"0.6115852",
"0.6095012",
"0.6074466",
"0.60733116",
"0.60662574",
"0.6044639",
"0.60389984"
] | 0.79207224 | 0 |
Tests that accessing a following page without credentials is rejected. | def test_unauthorized_following(self):
follow = Follows(
user_being_followed_id = self.testuser2.id,
user_following_id = self.testuser.id
)
db.session.add(follow)
db.session.commit()
with self.client as c:
response = c.get(f"/users/{self.testuser.id}/following", follow_redirects=True)
data = str(response.data)
self.assertEqual(response.status_code, 200)
self.assertNotIn("@bob", data)
self.assertIn("Access unauthorized", data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_following_existing_user_not_authenticated(self):\n self.client.credentials()\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_invalid_return_url(self):\r\n self.attempt_login(403, return_to=\"http://apps.cs50.edx.or\")",
"def test_follow_without_auth(self):\n response = self.client.post(self.follow_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_anonymous_required_failure(self):\n rv = self.client.get('/required', follow_redirects=True)\n self.assertNotEqual(b'required', rv.data)",
"def test_unfollowing_existing_user_not_authenticated(self):\n self.client.credentials()\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_no_access_without_login(self):\n response = self.client.get(reverse('question_list'), follow=True)\n expected_url = reverse('home') + \"?next=\" + reverse('question_list')\n self.assertRedirects(response, expected_url, status_code=302, \n target_status_code=200)\n expected_url = reverse('home') + \"?next=\" + reverse('question_add')\n response = self.client.get(reverse('question_add'), follow=True)\n self.assertRedirects(response, expected_url, status_code=302, \n target_status_code=200)",
"def test_unauthorized_followers(self):\n\n follow = Follows(\n user_being_followed_id = self.testuser.id,\n user_following_id = self.testuser2.id\n )\n\n db.session.add(follow)\n db.session.commit()\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}/followers\", follow_redirects=True)\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(\"@bob\", data)\n self.assertIn(\"Access unauthorized\", data)",
"def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def test_follow_non_existent_user(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.post(self.follow_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_need_login_to_see_reading_details(self):\n response = self.client.get(reverse('api_v1:reading-detail', args=[1]), follow=True)\n self.assertEqual(response.status_code, 403)",
"def test_helpful_page_view(self):\n target_url = url_for('dashboard.helpful_pages')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)",
"def test_followers_following_list_unauthorized(self):\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n response = client.get(\"/users/2/following\")\n\n self.assertEqual(response.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())\n\n response2 = client.get(\"/users/2/followers\")\n\n self.assertEqual(response2.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())",
"def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_auto_auth_disabled(self):\r\n response = self.client.get(self.url)\r\n self.assertEqual(response.status_code, 404)",
"def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_get_list_of_followers_without_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n response = self.client.get(self.followers_url)\n self.assertEqual(response.data['detail'],\n \"Authentication credentials were not provided.\")\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_following_non_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def assert_accessible(self, url):\n try:\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.login()\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.logout()\n except Exception as exc: # pragma: no cover\n exc.args += ((url),)\n raise",
"def test_get_un_authenticated(self):\n\n url = reverse('post-detail', args=(self.user.id,))\n response = self.client.get(path=url)\n self.assertEqual(first=401, second=response.status_code)",
"def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_unhappy_path_unauthorized(self):\n\n response = self.client.get(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)",
"def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))",
"def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)"
] | [
"0.7319878",
"0.71709114",
"0.7151044",
"0.71304643",
"0.70958227",
"0.70355564",
"0.7022304",
"0.6899457",
"0.68274623",
"0.68251395",
"0.6824948",
"0.680066",
"0.6796531",
"0.67669344",
"0.67561007",
"0.6736968",
"0.673141",
"0.6721031",
"0.6720389",
"0.6694388",
"0.6694388",
"0.66716594",
"0.6669913",
"0.66281486",
"0.66190535",
"0.66042584",
"0.6599209",
"0.6587571",
"0.6582708",
"0.6582708"
] | 0.73165977 | 1 |
Tests that accessing a followers page without credentials is rejected. | def test_unauthorized_followers(self):
follow = Follows(
user_being_followed_id = self.testuser.id,
user_following_id = self.testuser2.id
)
db.session.add(follow)
db.session.commit()
with self.client as c:
response = c.get(f"/users/{self.testuser.id}/followers", follow_redirects=True)
data = str(response.data)
self.assertEqual(response.status_code, 200)
self.assertNotIn("@bob", data)
self.assertIn("Access unauthorized", data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_list_of_followers_without_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n response = self.client.get(self.followers_url)\n self.assertEqual(response.data['detail'],\n \"Authentication credentials were not provided.\")\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_followers_following_list_unauthorized(self):\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n response = client.get(\"/users/2/following\")\n\n self.assertEqual(response.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())\n\n response2 = client.get(\"/users/2/followers\")\n\n self.assertEqual(response2.location, \"http://localhost/\")\n self.assertIn('Access unauthorized.', get_flashed_messages())",
"def test_followers_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.followers(-1)",
"def test_unauthorized_following(self):\n\n follow = Follows(\n user_being_followed_id = self.testuser2.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add(follow)\n db.session.commit()\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}/following\", follow_redirects=True)\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(\"@bob\", data)\n self.assertIn(\"Access unauthorized\", data)",
"def test_following_existing_user_not_authenticated(self):\n self.client.credentials()\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_follow_without_auth(self):\n response = self.client.post(self.follow_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_follow_non_existent_user(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.post(self.follow_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_unfollowing_existing_user_not_authenticated(self):\n self.client.credentials()\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_get_list_of_followers_with_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.get(self.followers_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_following_non_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_unfollowing_non_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_list_of_following_users_without_auth(self):\n self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n response = self.client.get(self.following_list_url)\n self.assertEqual(response.content,\n b'{\"following\": []}')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_view_followers(self):\n response = self.client.get(\n reverse('followers')\n )\n self.assertIn(b'followers', response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_unfollowing_yourself(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.follower['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)",
"def test_need_login_to_see_readinglist(self):\n response = self.client.get(reverse('api_v1:reading-list'), follow=True)\n self.assertEqual(response.status_code, 403)",
"def test_un_following_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_followers(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/followers?page=2', 'fixture_post.json')\n\n response = self.resource.followers('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)",
"def test_following_yourself(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.follower['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)",
"def test_get_list_of_following_users_with_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.get(self.following_list_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_anonymous_cannot_get_userprofileview(dclient):\n resp = dclient.get(\"/api/record/profile/\", follow=True)\n assert resp.status_code == 403",
"def test_followers_following_list_authorized(self):\n\n # user2 following user1\n # follow = Follows(user_being_followed_id=1, user_following_id=2)\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n\n client.post(\n '/login',\n data = {\n \"username\" : self.u.username,\n \"password\" : \"password\"\n },\n )\n\n response = client.get(\"/users/2/following\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/1\"' ,html)\n \n response = client.get(\"/users/1/followers\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/2\"' ,html)",
"def test_view_those_you_follow(self):\n response = self.client.get(\n reverse('following')\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_need_login_to_see_reading_details(self):\n response = self.client.get(reverse('api_v1:reading-detail', args=[1]), follow=True)\n self.assertEqual(response.status_code, 403)",
"def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])",
"def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)",
"def test_retrieve_author_unlogged(self):\n request = self.client.get(self.epoint)\n\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def test_get_friend_review_list_fail(self):\n client = Client()\n user1_id = Profile.objects.get(nickname='user1').id\n user2_id = Profile.objects.get(nickname='user2').id\n user3_id = Profile.objects.get(nickname='user3').id\n no_user_id = user1_id + user2_id + user3_id\n response = client.get('/api/friend/'+str(user3_id)+'/review/')\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/friend/'+str(user2_id)+'/review/')\n self.assertEqual(response.status_code, 403)\n response = client.get('/api/friend/'+str(no_user_id)+'/review/')\n self.assertEqual(response.status_code, 404)",
"def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert res.status_code == status.HTTP_401_UNAUTHORIZED",
"def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_unfollow_user_without_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.delete(self.unfollow_url,\n data=self.followed_user)\n self.assertEqual(response.data['detail'],\n \"Authentication credentials were not provided.\")\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)"
] | [
"0.80370384",
"0.75863016",
"0.7411454",
"0.73758006",
"0.7353231",
"0.72279394",
"0.7214382",
"0.714913",
"0.70005536",
"0.69480914",
"0.6931344",
"0.68351185",
"0.6685287",
"0.663357",
"0.658625",
"0.6569212",
"0.6565524",
"0.6525673",
"0.6496285",
"0.6492629",
"0.64847374",
"0.645656",
"0.6447884",
"0.638842",
"0.6329453",
"0.63247466",
"0.6311431",
"0.6290491",
"0.6247358",
"0.62387305"
] | 0.7781052 | 1 |
check that is_premium_member returns True for a valid premium member | def test_3(self):
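# the third Customer constructor argument is taken to be the premium-membership flag (an assumption based on the assertions in test_3/test_4)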
c1 = Store.Customer("harold", "qcf", True)
self.assertTrue(c1.is_premium_member(), "not premium member") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_4(self):\n c1 = Store.Customer(\"harold\", \"qcf\", False)\n self.assertFalse(c1.is_premium_member(), \"IS premium member\")",
"async def is_premium(self) -> bool:\n e = await self.request.request(url=f'https://www.roblox.com/mobileapi/userinfo', method='get')\n return e['IsPremium']",
"def membership_valid(self):\n\n today = date.today()\n\n if self.dues_paid is None:\n return False\n\n months = 12 if self.dues_paid_year else 6\n dues_due = datetime.combine(self.dues_paid, datetime.min.time()) + relativedelta(months=+months)\n dues_due = dues_due.date()\n\n return dues_due > today",
"def _include_member(member):\n return (member.provisioning_status in\n MEMBER_STATUSES and member.admin_state_up)",
"def validate(self,admin):\n\n rv=admin.helper.setAmount(admin.userName,\n 'ARS',self.actual+self.cnt)\n if rv is None:\n return False\n else:\n return True",
"def validate(self, data):\n if not models.Subscription.objects.filter(\n is_active=True, user=self.context[\"request\"].user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"You must have an active premium subscription in order to \"\n \"transfer it to another user.\"\n )\n )\n\n if models.Subscription.objects.filter(\n is_active=True, user=self._recipient_email_inst.user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"The intended recipient already has an active premium \"\n \"subscription.\"\n )\n )\n\n if models.AppleReceipt.objects.filter(\n subscription__user=self._recipient_email_inst.user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"The intended recipient has an Apple subscription that \"\n \"must be removed before they can accept a transfer.\"\n )\n )\n\n return data",
"def validate_member_user(self, member):\n if TeamMember.objects.filter(team=self.team, member=member).exists():\n raise forms.ValidationError(_('User is already a team member'),)\n return member",
"def member_id_check(member_id):\n try:\n global mem_num\n mem_num=int(member_id)\n if 1000<=mem_num and 9999>=mem_num:\n member_id_check.mID=\"Accepted\"\n else:\n member_id_check.mID=\"Member number not recognised\"\n except ValueError:\n member_id_check.mID=\"Member number not valid\"\n return member_id_check.mID",
"def validate(self,data):\n if self.context['request'].user!=data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n \n try:\n membership=MemberShip.objects.get(\n user=user,\n circle=circle,\n is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n\n # La llegada tiene que ser despues de la salida, si la salida es mayor o igual a la llegada marca error\n if data['arrival_date']<=data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n \n self.context['membership']=membership\n return data",
"def validate(self,admin,bal_org,bal_dst):\n\n rv=admin.helper.setAmount(admin.userName,self.org,bal_org)\n if rv!= None:\n rv=admin.helper.setAmount(admin.userName,self.dst,bal_dst)\n if rv != None:\n return True\n else:\n return False",
"def is_prime_member(user: User) -> bool:\n if not user:\n raise TypeError('user should not be None')\n return user.name.startswith('W')",
"def membercheck(entry):\n if entry['funct'] == \"Mitglieder\" and (entry[\"firstName\"].isdigit() or entry[\"lastName\"].isdigit()):\n return 1\n return 0",
"def clean_member(self):\n lookup = self.cleaned_data['member']\n\n # Look up user emails first, see if a verified user can be added\n try:\n validator = EmailValidator(code='lookup not an email')\n validator(lookup)\n\n member = (\n User.objects.filter(\n emailaddress__verified=True,\n emailaddress__email=lookup,\n is_active=True,\n ).first()\n )\n if member is not None:\n return self.validate_member_user(member)\n\n invite = TeamInvite(\n organization=self.team.organization,\n team=self.team,\n email=lookup,\n )\n\n return self.validate_member_invite(invite)\n except ValidationError as error:\n if error.code != 'lookup not an email':\n raise\n\n # Not an email, attempt username lookup\n try:\n member = User.objects.get(username=lookup, is_active=True)\n return self.validate_member_user(member)\n except User.DoesNotExist:\n raise forms.ValidationError('User not found')",
"def validate(self):\n if self.amount > 0:\n return True\n return False",
"def positive_balance_check(user):\n return has_positive_balance(user)",
"def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid",
"def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False",
"async def is_zero(self, member: discord.Member, guild: discord.Guild) -> bool:\n if hasattr(member, 'id'):\n cursor = await self.bot.db.execute(\n 'SELECT warns FROM users WHERE user_id = ? AND guild_id = ?', (member.id, guild.id)\n )\n results = await cursor.fetchone()\n if results[0] == 0:\n return True\n return False",
"def verify_player_pending(self, player_email):\n try:\n self.pending_players.index(player_email)\n return True\n except ValueError:\n return False",
"def validate_passanger(self,data):\n try:\n user=User.objects.get(pk=data) \n except User.DoesNotExist:\n raise serializers.ValidationError('Invalid Passanger')\n\n\n circle= self.context['circle']\n try:\n membership=MemberShip.objects.get(\n user=user,circle=circle,is_active=True\n )\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('no es miembro')\n\n self.context['member']=membership\n self.context['user']=user\n return data",
"def test_is_member_ok(self):\n self.add_group('testgroup', ['user:[email protected]'])\n\n # baphomet is not a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 200)\n self.assertEqual({u'is_member': False}, response.json)\n\n # mithras is a member\n request = endpoints_api.MembershipRequest.combined_message_class(\n group='testgroup',\n identity='user:[email protected]')\n response = self.call_api('membership', msg_dict(request), 200)\n self.assertEqual({u'is_member': True}, response.json)",
"def valid(self):\n return (self.expiry is None or self.expiry > timezone.now()) and (\n self.use_limit is None or self.times_used < self.use_limit\n )",
"def allowed(self, user, amount):\n return True",
"def valid_user_data(user_data):\n return 'account_ids' in user_data and 'monthly_expenses' in user_data",
"def check_validity(self):",
"def has_member(self, player):\n return player in self.members",
"def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()",
"def test__Emoji__is_premium():\n role_id_0 = 202212190005\n role_id_1 = 202212190006\n \n role_0 = Role.precreate(role_id_0)\n role_1 = Role.precreate(role_id_1, manager_type = RoleManagerType.subscription)\n \n for emoji, expected_output in (\n (Emoji.precreate(202212190007, roles = None), False),\n (Emoji.precreate(202212190008, roles = [role_id_0]), False),\n (Emoji.precreate(202212190009, roles = [role_id_1]), True),\n ):\n vampytest.assert_eq(emoji.is_premium(), expected_output)",
"def test_member_created_if_valid_fields(self):\n member = Member.objects.create(**self.data)\n\n member_dict = model_to_dict(member)\n self.data[\"ministry\"] = self.data[\"ministry\"].pk\n for key in self.data.keys():\n self.assertEqual(member_dict.get(key), self.data.get(key))\n\n self.assertTrue(member.is_active)",
"def checkStudentcanTake(self,course_object):\r\n\r\n if self.budget >= course_object.paymentBill() and self not in course_object.registered_users:\r\n return True\r\n return False"
] | [
"0.6992384",
"0.66777605",
"0.63307333",
"0.62341",
"0.62267804",
"0.6158541",
"0.61556613",
"0.6062128",
"0.60154915",
"0.59984636",
"0.59928083",
"0.5983695",
"0.5938593",
"0.59137547",
"0.58976066",
"0.5860356",
"0.58563644",
"0.5834168",
"0.5796033",
"0.5796006",
"0.5782751",
"0.5773189",
"0.5756652",
"0.57363605",
"0.57169175",
"0.5694162",
"0.56928927",
"0.56866026",
"0.56653255",
"0.56503254"
] | 0.6929887 | 1 |
check that is_premium_member returns False for a non-premium member | def test_4(self):
c1 = Store.Customer("harold", "qcf", False)
self.assertFalse(c1.is_premium_member(), "IS premium member") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_3(self):\n c1 = Store.Customer(\"harold\", \"qcf\", True)\n self.assertTrue(c1.is_premium_member(), \"not premium member\")",
"async def is_premium(self) -> bool:\n e = await self.request.request(url=f'https://www.roblox.com/mobileapi/userinfo', method='get')\n return e['IsPremium']",
"def validate(self, data):\n if not models.Subscription.objects.filter(\n is_active=True, user=self.context[\"request\"].user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"You must have an active premium subscription in order to \"\n \"transfer it to another user.\"\n )\n )\n\n if models.Subscription.objects.filter(\n is_active=True, user=self._recipient_email_inst.user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"The intended recipient already has an active premium \"\n \"subscription.\"\n )\n )\n\n if models.AppleReceipt.objects.filter(\n subscription__user=self._recipient_email_inst.user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"The intended recipient has an Apple subscription that \"\n \"must be removed before they can accept a transfer.\"\n )\n )\n\n return data",
"def validate(self,admin):\n\n rv=admin.helper.setAmount(admin.userName,\n 'ARS',self.actual+self.cnt)\n if rv is None:\n return False\n else:\n return True",
"def validate(self,admin,bal_org,bal_dst):\n\n rv=admin.helper.setAmount(admin.userName,self.org,bal_org)\n if rv!= None:\n rv=admin.helper.setAmount(admin.userName,self.dst,bal_dst)\n if rv != None:\n return True\n else:\n return False",
"def validate(self,data):\n if self.context['request'].user!=data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n \n try:\n membership=MemberShip.objects.get(\n user=user,\n circle=circle,\n is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n\n # La llegada tiene que ser despues de la salida, si la salida es mayor o igual a la llegada marca error\n if data['arrival_date']<=data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n \n self.context['membership']=membership\n return data",
"def membership_valid(self):\n\n today = date.today()\n\n if self.dues_paid is None:\n return False\n\n months = 12 if self.dues_paid_year else 6\n dues_due = datetime.combine(self.dues_paid, datetime.min.time()) + relativedelta(months=+months)\n dues_due = dues_due.date()\n\n return dues_due > today",
"def validate(self):\n if self.amount > 0:\n return True\n return False",
"def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()",
"def premium(self, premium):\n\n self._premium = premium",
"def valid_user_data(user_data):\n return 'account_ids' in user_data and 'monthly_expenses' in user_data",
"def allowed(self, user, amount):\n return True",
"def check_validity(self):",
"def validate_member_user(self, member):\n if TeamMember.objects.filter(team=self.team, member=member).exists():\n raise forms.ValidationError(_('User is already a team member'),)\n return member",
"def _include_member(member):\n return (member.provisioning_status in\n MEMBER_STATUSES and member.admin_state_up)",
"def validate_passanger(self,data):\n try:\n user=User.objects.get(pk=data) \n except User.DoesNotExist:\n raise serializers.ValidationError('Invalid Passanger')\n\n\n circle= self.context['circle']\n try:\n membership=MemberShip.objects.get(\n user=user,circle=circle,is_active=True\n )\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('no es miembro')\n\n self.context['member']=membership\n self.context['user']=user\n return data",
"def clean_member(self):\n lookup = self.cleaned_data['member']\n\n # Look up user emails first, see if a verified user can be added\n try:\n validator = EmailValidator(code='lookup not an email')\n validator(lookup)\n\n member = (\n User.objects.filter(\n emailaddress__verified=True,\n emailaddress__email=lookup,\n is_active=True,\n ).first()\n )\n if member is not None:\n return self.validate_member_user(member)\n\n invite = TeamInvite(\n organization=self.team.organization,\n team=self.team,\n email=lookup,\n )\n\n return self.validate_member_invite(invite)\n except ValidationError as error:\n if error.code != 'lookup not an email':\n raise\n\n # Not an email, attempt username lookup\n try:\n member = User.objects.get(username=lookup, is_active=True)\n return self.validate_member_user(member)\n except User.DoesNotExist:\n raise forms.ValidationError('User not found')",
"def seal_is_valid(self):\n pass",
"def seal_is_valid(self):\n pass",
"def positive_balance_check(user):\n return has_positive_balance(user)",
"def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False",
"def valid(self):\n return (self.expiry is None or self.expiry > timezone.now()) and (\n self.use_limit is None or self.times_used < self.use_limit\n )",
"def _check_validity(self):\n pass",
"def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False",
"def membercheck(entry):\n if entry['funct'] == \"Mitglieder\" and (entry[\"firstName\"].isdigit() or entry[\"lastName\"].isdigit()):\n return 1\n return 0",
"def is_valid(self):\n return self is not Sugar.INVALID_SUGAR",
"def test__Emoji__is_premium():\n role_id_0 = 202212190005\n role_id_1 = 202212190006\n \n role_0 = Role.precreate(role_id_0)\n role_1 = Role.precreate(role_id_1, manager_type = RoleManagerType.subscription)\n \n for emoji, expected_output in (\n (Emoji.precreate(202212190007, roles = None), False),\n (Emoji.precreate(202212190008, roles = [role_id_0]), False),\n (Emoji.precreate(202212190009, roles = [role_id_1]), True),\n ):\n vampytest.assert_eq(emoji.is_premium(), expected_output)",
"async def is_zero(self, member: discord.Member, guild: discord.Guild) -> bool:\n if hasattr(member, 'id'):\n cursor = await self.bot.db.execute(\n 'SELECT warns FROM users WHERE user_id = ? AND guild_id = ?', (member.id, guild.id)\n )\n results = await cursor.fetchone()\n if results[0] == 0:\n return True\n return False",
"def member_id_check(member_id):\n try:\n global mem_num\n mem_num=int(member_id)\n if 1000<=mem_num and 9999>=mem_num:\n member_id_check.mID=\"Accepted\"\n else:\n member_id_check.mID=\"Member number not recognised\"\n except ValueError:\n member_id_check.mID=\"Member number not valid\"\n return member_id_check.mID",
"def verify_player_pending(self, player_email):\n try:\n self.pending_players.index(player_email)\n return True\n except ValueError:\n return False"
] | [
"0.6888039",
"0.66828996",
"0.6435083",
"0.63407624",
"0.6059652",
"0.6038428",
"0.6002299",
"0.5986008",
"0.5903139",
"0.5873443",
"0.5762152",
"0.5747567",
"0.5742472",
"0.5736066",
"0.5730407",
"0.57073194",
"0.56902134",
"0.5665418",
"0.5665418",
"0.56482524",
"0.56424147",
"0.5599477",
"0.55899924",
"0.5579506",
"0.55700934",
"0.5559879",
"0.55598557",
"0.55418336",
"0.5540034",
"0.5523187"
] | 0.7114507 | 0 |
test that check out equals 6.42 for three items at 2 dollars each plus shipping | def test_6(self):
toothpaste = Store.Product(11, "toothpaste", "dental", 2, 4)
milk = Store.Product(12, "milk", "dairy", 2, 3)
eggs = Store.Product(14, "eggs", "dairy", 2, 2)
apple_juice = Store.Product(13, "apple juice", "drink", 1, 1)
s = Store.Store()
s.add_product(toothpaste)
s.add_product(milk)
s.add_product(eggs)
s.add_product(apple_juice)
henry = Store.Customer("henry", "mrh", False)
s.add_member(henry)
s.add_product_to_member_cart(11, "mrh")
s.add_product_to_member_cart(12, "mrh")
s.add_product_to_member_cart(14, "mrh")
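# 3 items at $2.00 each = $6.00; the expected 6.42 implies a 7% surcharge (presumably the shipping charge for non-premium members)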
self.assertAlmostEqual(s.check_out_member("mrh"), 6.42, msg="not the correct checkout amount") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gold_card(subtotal):\n return subtotal * 0.05",
"def silver_card(subtotal):\n return subtotal * 0.02",
"def test_get_stock_price_summary4(self):\n\n actual = a1.stock_price_summary([0.02, 0.14, 0.10])\n expected = (0.26,0)\n self.assertEqual(actual, expected)",
"def test_calculation(self):\n expected = [['William Gates, III', \"$\", 653784.49, 2, \"$\", 326892.24],\n['Mark Zuckerberg', \"$\", 16396.10, 3, \"$\", 5465.37],\n['Jeff Bezos', \"$\", 877.33, 1, \"$\", 877.33],\n['Paul Allen', \"$\", 708.42, 3, \"$\", 236.14]]\n actual = mailroom4.calculation()\n self.assertEqual(expected,actual)",
"def test_get_stock_price_summary3(self):\n\n actual = a1.stock_price_summary([-0.02, -0.14, -0.10])\n expected = (0,-0.26)\n self.assertEqual(actual, expected)",
"def test_amount_in_tons(self):",
"def test_update_shopping_cart(self):\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n item_price = self.expected_contents[index]['price']\n old_cost = self.expected_contents[index]['cost']\n\n increase_by = randint(5, 10)\n directions = [\n {\n 'action': 'increase',\n 'range': range(1, increase_by + 1)\n },\n {\n 'action': 'decrease',\n 'range': range(increase_by - 1, - 1, -1)\n }\n ]\n for direction in directions:\n for i in direction['range']:\n list_item[direction['action']].click()\n sleep(0.1)\n new_cost = int(list_item['cost'].text)\n new_food_cost = int(food_cost.text)\n self.assertTrue(new_food_cost - old_food_cost ==\n new_cost - old_cost == item_price * i)",
"def test_lots_of_coins_given(self):\n item, change, _ = give_item_and_change('apple', '1.00 0.5 0.2 0.1 0.1 0.05 0.02 0.02 0.01')\n self.assertEqual(item, 'apple')\n self.assertEqual(change, [1.0, 0.5, 0.05, 0.02])",
"def test_product_buy(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 5)\n self.assertEqual(result_buy, 175)",
"def test_check_cost():",
"def test_tax_net_pay_45(self):\n net_pay_age = tc.total_calc_tax(100, 45)\n self.assertEqual(91, net_pay_age)",
"def test_callPrice(self):\n call_price1 = calculator.BlackScholes.call_price(**self.params_1)\n call_price2 = calculator.BlackScholes.call_price(**self.params_2)\n self.assertAlmostEqual(call_price1,10.45,delta=0.01)\n self.assertAlmostEqual(call_price2,7.965,delta=0.01)",
"def test_price_return(self, basic_factory, garment_factory, affiliate_item_factory):\n basic = basic_factory()\n garment = garment_factory(basic=basic)\n affiliate_item_factory(garment=garment, price=Decimal(100))\n\n budget_end, luxury_start = update_basic_price_points(basic)\n\n assert budget_end == Decimal(100)\n assert luxury_start == Decimal(100)",
"def get_price(item):\n return float(item[1])",
"def test_tax_net_pay_65(self):\n net_pay_age = tc.total_calc_tax(100, 65)\n self.assertEqual(95, net_pay_age)",
"def test_updating_the_supply_price(self):\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.unit_cost, Decimal('12.11'))\n self.assertEqual(Log.objects.all().count(), 0)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['items'][0]['unit_cost'] = Decimal('10.05')\n modified_po['items'][0]['id'] = 1\n modified_po['status'] = 'PROCESSED'\n del modified_po['items'][1]\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n self.assertEqual(resp.status_code, 200, msg=resp)\n resp_obj = resp.data\n self.assertEqual(resp_obj['revision'], 1)\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(resp_obj['pdf']['url'])\n \n self.assertEqual(resp_obj['id'], 1)\n self.assertEqual(resp_obj['supplier']['id'], 1)\n self.assertEqual(resp_obj['vat'], 7)\n self.assertEqual(resp_obj['discount'], 0)\n self.assertEqual(resp_obj['revision'], 1)\n self.assertEqual(Decimal(resp_obj['grand_total']), Decimal('107.54'))\n self.assertEqual(len(resp_obj['items']), 1)\n item1 = resp_obj['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['quantity'], Decimal('10.0000000000'))\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('10.05'))\n self.assertEqual(Decimal(item1['total']), Decimal('100.50'))\n \n #Confirm cost change for item and supply in the database\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.grand_total, Decimal('107.54'))\n item1 = po.items.order_by('id').all()[0]\n self.assertEqual(item1.id, 1)\n self.assertEqual(item1.quantity, 10)\n self.assertEqual(item1.unit_cost, Decimal('10.05'))\n supply = item1.supply\n supply.supplier = po.supplier\n self.assertEqual(supply.cost, Decimal('10.05'))\n \n self.assertEqual(Log.objects.all().count(), 1)\n log = Log.objects.all()[0]\n self.assertEqual(log.cost, Decimal('10.05'))\n self.assertEqual(log.supply, supply)\n self.assertEqual(log.supplier, po.supplier)\n self.assertEqual(log.message, \"Price change from 12.11USD to 10.05USD for Pattern: Maxx, Col: Blue [Supplier: Zipper World]\")\n\n # Confirm that there is still only one product for this supply and supplier\n # in the database\n products = Product.objects.filter(supply=supply, supplier=po.supplier)\n self.assertEqual(len(products), 1)",
"def test_tax_net_pay_65(self):\n net_pay_age = tc.total_calc_tax(100, 66)\n self.assertEqual(97, net_pay_age)",
"def test_shopping_cart_displays_total_cost(self):\n expected_cart_cost = 0\n for item in self.fill_session_cart():\n expected_cart_cost += item['price'] * item['amount']\n\n self.client.get(self.SHOP_CART_URL)\n self.assertEqual(self.client.session['cart_cost'], expected_cart_cost)",
"def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)",
"def test_int_install_2():\n expected_output_price = 65000\n output_price = int_installs('+65,000')\n assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \\\n \"\"\"Should show that the installs is 65000.\"\"\"",
"def discount(self, cart):",
"def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])",
"def test_get_stock_price_summary5(self):\n\n actual = a1.stock_price_summary([0.01, 0.03, -0.02, -0.14, 0, 0, 0.10, -0.01])\n expected = (0.14,-0.17)\n self.assertEqual(actual, expected)",
"def test_account_net_worth_3(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000)\n\n net_worth = account_checking.net_worth(base_asset=asset_usd)\n assert net_worth == 1000",
"def test_product_bundle_price_calculation(self):\n template = self.product_apple_bundle\n template.write({'is_calpack_price': False})\n template.write({'is_calpack_price': True})\n self.assertEqual(template.list_price, self.total_price, 'Product: a product bundle canculation sale price')\n self.assertEqual(template.standard_price, self.total_cost, 'Product: a product bundle canculation product cost')",
"def test_tax_age_bracket_45(self):\n net_pay_age = elijah.total_calc_tax()\n self.assertEqual(91, net_pay_age)",
"def test_manipulate_total_pre_auth(self):\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n\n url = reverse('cybersource-sign-auth-request')\n data = {\n \"guest_email\": \"[email protected]\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"total\": \"2.00\", # Try and get $10 of product for only $2\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)",
"def get_expected_cost(self):",
"def main(price, service, vat):\n service = (price * 10)/100\n if service < 50:\n service = 50\n elif service > 1000:\n service = 1000\n price += service\n vat = (price * 7)/100\n price += vat\n print(\"%.2f\" % (price))",
"def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total"
] | [
"0.6775019",
"0.6595498",
"0.635339",
"0.6311632",
"0.62834555",
"0.62146467",
"0.6200542",
"0.6175484",
"0.6144745",
"0.61386776",
"0.6119413",
"0.60917616",
"0.60730153",
"0.6065133",
"0.60586363",
"0.60566044",
"0.6050846",
"0.6050451",
"0.6035097",
"0.6016659",
"0.60060215",
"0.5998142",
"0.5982118",
"0.5967049",
"0.5947231",
"0.5947184",
"0.59412414",
"0.59293884",
"0.5914956",
"0.59122956"
] | 0.68899894 | 0 |
Searches for all drink admins. | def get_drink_admins(self):
admins = self.group('drink')
return admins | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drinkAdmins(self):\n admins = self.group('drink')\n return admins",
"def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins",
"def get_admins(self):\n return self.admins_group.user_set.all()",
"def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins",
"def admins_index(_):\n return {\"admin_users\": [u.username for u in models.User.admins()]}",
"def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()",
"async def _ad_all(self, ctx):\n all_admins = self.database.get_all_admins()\n consumed = []\n out = \"```\"\n for admin in all_admins:\n if admin.guild_id not in consumed:\n out += f\"Guild: {self.bot.get_guild(admin.guild_id)}\\n\"\n consumed.append(admin.guild_id)\n admin = self.bot.get_user(admin.user_id)\n admin = str(admin) if admin is not None else admin.user_id\n out += f\" {admin}\\n\"\n if out != \"```\":\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"No admins currently\")",
"async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")",
"def get_admins(name):\n obj = DataService.objects(name=name).first()\n if obj is None:\n return []\n return list(obj.admins)",
"def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins",
"def admins(message):\n hf.query_users(message, hf.get_users(), \"admin\")",
"def get_admin_users(self):\r\n try:\r\n users = self.list_all(\"users\")\r\n users_admin = [user for user in users if user[\"role\"] == \"admin\"]\r\n return users_admin\r\n except PDClientError as e:\r\n raise e",
"def get_org_admins(self, dataset: Dict) -> List[User]:\n organization_id = dataset[\"organization_id\"]\n orgadmins = list()\n organization = self.organizations[organization_id]\n if \"admin\" in organization:\n for userid in self.organizations[organization_id][\"admin\"]:\n user = self.users.get(userid)\n if user:\n orgadmins.append(user)\n return orgadmins",
"def admin_list(message):\n load_users(message._client.users)\n names = list_to_names(user_list.admin_list)\n message.reply('My admins are: {}'.format(\", \".join(names)))",
"def get_for_admin(self, admin):\n if admin.is_superuser:\n return self.get_query_set()\n return self.get_query_set().filter(owners__user=admin)",
"def get_administerable_researchers():\n researcher_admin = get_session_researcher()\n if researcher_admin.site_admin:\n relevant_researchers = Researcher.get_all_researchers_by_username()\n else:\n relevant_researchers = researcher_admin.get_administered_researchers_by_username()\n return relevant_researchers",
"def get_admin_users() -> User:\n return User.objects.filter(group__name__contains=\"admin\")",
"def get_administrators(self, *args, **kwargs):\n return self.bot.get_chat_administrators(self.id, *args, **kwargs)",
"def organization_get_admins_no_login(self, client, id):\n assert client.get('/organizations/' + id + '/admins',\n headers={}).status == '400 BAD REQUEST'",
"def get_admins(self, uid):\n admin_data = self.list_admin_roles(uid)\n admins = []\n for admin in admin_data:\n admins.append(\n ZenossDeviceManagementAdmin(\n self.api_url,\n self.api_headers,\n self.ssl_verify,\n admin\n )\n )\n\n return admins",
"def get_local_admins():\n admin_list = get_users_config()\n response = []\n\n if \"users\" not in admin_list[\"result\"]:\n return response\n\n if isinstance(admin_list[\"result\"][\"users\"][\"entry\"], list):\n for entry in admin_list[\"result\"][\"users\"][\"entry\"]:\n response.append(entry[\"name\"])\n else:\n response.append(admin_list[\"result\"][\"users\"][\"entry\"][\"name\"])\n\n return response",
"def get_for_admin(self, admin):\n if admin.is_superuser:\n return self.get_queryset()\n return self.get_queryset().filter(owners__user=admin)",
"def get_all_npf_admins(self):\n npf_admins = []\n for user in OrgUser.objects.all():\n u = OcAuth(user.id)\n if u.is_admin_org():\n npf_admins.append(user.user)\n return npf_admins",
"def get_users_admins_list(self, session):\n\n users = session.query(User.chat_id).all()\n return users",
"def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )",
"def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)",
"def allow_egap_admins(queryset, request):\n if hasattr(request, 'user') and not waffle.flag_is_active(request, EGAP_ADMINS):\n return queryset.exclude(name='EGAP Registration')\n return queryset",
"def __reloadAdmins(self, admin_id):\n for admin_username in admin_main.getLoader().getAllUsernames():\n try:\n admin_obj=admin_main.getLoader().getAdminByName(admin_username)\n if admin_obj.creator_id == admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n else:\n for lock_obj in admin_obj.getLocks():\n if lock_obj.getLockerID()==admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n break\n except:\n logException(LOG_DEBUG)",
"def get_all_biz_admins(self):\n biz_admins = []\n for user in OrgUser.objects.all():\n u = OcAuth(user.id)\n if u.is_admin_biz():\n biz_admins.append(user.user)\n\n return biz_admins",
"def administrators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"administrators\")"
] | [
"0.7626615",
"0.68719196",
"0.6805782",
"0.6686546",
"0.66309863",
"0.6579962",
"0.656309",
"0.6484229",
"0.6478381",
"0.6406674",
"0.63949853",
"0.62509245",
"0.613989",
"0.6083141",
"0.60780597",
"0.6051802",
"0.6046379",
"0.604062",
"0.60262364",
"0.5981295",
"0.5968331",
"0.59511167",
"0.594143",
"0.5907171",
"0.58114016",
"0.5779134",
"0.57753474",
"0.57583666",
"0.57503706",
"0.5744349"
] | 0.7636565 | 0 |
Returns a list of groups that a member belongs to. | def get_groups(self, member_dn):
search_result = self.search(base=GROUPS, member=member_dn)
if len(search_result) == 0:
return []
group_list = []
for group in search_result:
group_cn = group['attributes']['cn'][0]
if self.debug:
print("[DEBUG] User {} belongs to group: {}".format(member_dn, group_cn))
group_list.append(group_cn)
return group_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist",
"def find_groups(groups=None, name=None, gid=None, member=None):\n if not groups:\n groups = read_group()\n\n groups = find(groups, \"name\", name)\n groups = find(groups, \"gid\", sanitize_id(gid))\n\n if member:\n for query_member in member:\n groups = [g for g in groups if query_member in g.members]\n\n return groups",
"def getGroups():\r\n return Group.getGroups()",
"def getGroups(self):\n return [g[0] for g in grp.getgrall()]",
"def list_groups(self):\n return self.get_admin(\"groups\")",
"def groups(self):\r\n return users.Groups(self)",
"def get_groups(self, username):\n groups = []\n for group in grp.getgrall():\n if username in group.gr_mem:\n groups.append(group.gr_name)\n\n return groups",
"def get_pingroups(self):\n return self.groups[:]",
"def member_groups(self, user):\n \n if not hasattr(user, '_group_member_groups'):\n # be sure to use \"slow\" version of is_member to prevent it recursing back\n user._group_member_groups = [self[group] for group in self.root.keys() \\\n if self[group].is_member(user, slow=True)]\n\n return user._group_member_groups",
"def groups(self):\n return self.get_data(\"groups\")",
"def groups(self):\n # type: (...) -> Set[str]\n return self._groups",
"def getListOfGroups(self, *args):\n return _libsbml.GroupsModelPlugin_getListOfGroups(self, *args)",
"def groups(self):\n return self._groups",
"def groups(self):\n return self._groups",
"def groups(self):\n return self._groups",
"def get_groups(self):\n result = self.conn.usergroup.get(status=0, output='extend', selectUsers=\"extend\")\n groups = {group[\"name\"]: Group(\n name=group[\"name\"],\n id=group[\"usrgrpid\"],\n members=group[\"users\"],\n ) for group in result}\n return groups",
"def groups(self) -> list[Group]:\n return self._connection.groups",
"def groups(self):\n yield self\n for member_group in self._groups():\n yield member_group",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]",
"def group(self, group_cn):\n group = self.search(base=GROUPS, cn=group_cn)\n\n if len(group) == 0:\n return []\n else:\n group_members = group[0]['attributes']['member']\n\n members = []\n for member in group_members:\n members.append(self.search(dn=member))\n\n if self.objects:\n return self.member_objects(members)\n\n return members",
"def list_user_groups(self, token):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n groups = dataBase['userGroups']\n groupList = list()\n for group in groups:\n members = groups[group]['members']\n owners = groups[group]['owners']\n if requestUser in members or requestUser in owners:\n groupList.append(group)\n return groupList",
"def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)",
"def getPeopleGroups(self):\n return [FoursquarePeopleGroup(le) for le in self.base.get(\"groups\", [])]",
"def get_groups(self, group_name):\r\n assert group_name in self.groups.keys(), group_name\r\n try:\r\n group_list = self.groups[group_name]\r\n except KeyError:\r\n raise GroupKeyError()\r\n return group_list",
"def get_groups(self):\n return Client._get(self)",
"def get_groups(self, principal):\n groups = set()\n for location in lineage(self):\n location_groups = location._groups\n try:\n if self is location:\n groups.update(location_groups[principal])\n else:\n groups.update([x for x in location_groups[principal]])\n except KeyError:\n continue\n\n return tuple(groups)",
"def get_members(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)",
"def GetGroupMembers(self, group):\n return []",
"def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]"
] | [
"0.7758451",
"0.75771236",
"0.7455061",
"0.74303347",
"0.73591954",
"0.72485995",
"0.72438496",
"0.721254",
"0.7209152",
"0.7169427",
"0.7133967",
"0.711703",
"0.710896",
"0.710896",
"0.710896",
"0.70847243",
"0.70678437",
"0.70632833",
"0.7037165",
"0.6990258",
"0.6983493",
"0.697676",
"0.6953817",
"0.6933513",
"0.69290143",
"0.6901126",
"0.68727493",
"0.6859461",
"0.68527734",
"0.6852294"
] | 0.8088219 | 0 |
Searches for all RTPs. | def get_rtps(self):
rtps = self.group('rtp')
return rtps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_roku_devices():\n\n\tfound_devices = set()\n\n\t# The UPD M-SEARCH message\n\tmsg = __getmessage__()\n\n\t# Set up UDP socket\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\ts.settimeout(2)\n\ts.sendto(msg.encode('utf-8'), ('239.255.255.250', 1900) )\n\n\t# Counter for the total number of UPnP devices found on the network\n\ttotal_devices_found = 0\n\t# Flag for whether or not any Roku devices have been found\n\troku_device_found = False\n\n\ttry:\n\t\twhile True:\n\t\t\tdata, addr = s.recvfrom(65507)\n\t\t\tdata_list = data.decode('utf-8').splitlines()\n\t\t\tip = parse_input_for_device(data_list)\n\t\t\tif ip:\n\t\t\t\tfound_devices.add(ip)\n\t\t\ttotal_devices_found += 1\n\n\texcept socket.timeout:\n\t\t\tprint(\"\\n\\nFound: \" + str(len(found_devices)))\n\n\treturn found_devices",
"def scan_addresses(self, root=None):",
"def find_all(self):\n pass",
"def playerSearch(self, start, count, level, formation, position, nationality, league, team, minBid, maxBid, minBIN, maxBIN):\n searchstring = \"\"\n cardList = list()\n\n if level != \"\" and level != \"any\":\n searchstring += \"&lev=\" + level\n if formation != \"\" and formation != \"any\":\n searchstring += \"&form=\" + formation\n if position != \"\" and position != \"any\":\n if position == \"defense\" or position == \"midfield\" or position == \"attacker\":\n searchstring += \"&zone=\" + position\n else:\n searchstring += \"&pos=\" + position\n if nationality > 0:\n searchstring += \"&nat=\" + str(nationality)\n if league > 0:\n searchstring += \"&leag=\" + str(league)\n if team > 0:\n searchstring += \"&team=\" + str(team)\n if minBIN > 0:\n searchstring += \"&minb=\" + str(minBIN)\n if maxBIN > 0:\n searchstring += \"&maxb=\" + str(maxBIN)\n if minBid > 0:\n searchstring += \"&micr=\" + str(minBid)\n if maxBid > 0:\n searchstring += \"¯=\" + str(maxBid)\n\n requestor = UrlRequestor(\"https://utas.fut.ea.com/ut/game/fifa13/auctionhouse?type=player&start=\" + str(start) + \"&num=\" + str(count) + searchstring, {'Content-Type': 'application/json', 'Cookie': self.EASW_KEY + \"; \" + self.EASF_SESS + \"; \" + self.FUTPHISHING + \"; \", 'X-UT-SID': self.XUT_SID, 'x-http-method-override': 'GET'}, \"\")\n requestor.open()\n lol = requestor.getReturnData().get('auctionInfo')\n\n for card in lol:\n cardList.append(Card(card, self))\n return cardList",
"def find_all(self):",
"def _rotofind(searchname, ridsonly=False):\n\n pn = urllib.quote(searchname) # quote the name.\n url = b64decode('aHR0cDovL3d3dy5yb3Rvd29ybGQuY29tL2NvbnRlbnQvcGxheWVyc2VhcmNoLmFzcHg/') + \"searchname=\" + pn + \"&sport=nfl\"\n # do our request.\n try:\n req = urllib2.Request(url)\n r = urllib2.urlopen(req)\n html = r.read()\n except Exception, e:\n print \"ERROR: _rotofind: in HTTP request: {0}\".format(e)\n return None\n # output container.\n output = []\n # process.\n if 'Search Results for:' in html: # usually not a good sign.\n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'id':'cp1_tblSearchResults'})\n if table: # this means we found more than one person.\n rows = table.findAll('tr')[2:]\n for row in rows:\n tds = row.findAll('td')\n pname = tds[0].getText()\n pid = tds[0].find('a')['href'].split('/')[3]\n ppos = tds[1].getText()\n pteam = tds[2].getText()\n if ridsonly:\n output.append(pid)\n else:\n output.append(\"{0} {1} {2} {3}\".format(pname, pid, ppos, pteam))\n else: # didn't find anything.\n return None\n #print \"I did not find any results for {0}\".format(searchname)\n else: # this means we found a person.\n soup = BeautifulSoup(html)\n playername = soup.find('div', attrs={'class':'playername'})\n playerid = soup.find('div', attrs={'class':'fb-like'})['data-href']\n playerid = playerid.split('/')[5]\n playertable = soup.find('table', attrs={'id':'cp1_ctl00_tblPlayerDetails'}).findAll('td')[1]\n if ridsonly:\n output.append(playerid)\n else:\n output.append(\"{0} {1} {2}\".format(playername.getText(), playerid, playertable.getText()))\n # now return.\n return output",
"def search(request):\n template = 'tracks.html'\n search_by = request.GET.get('search_by')\n if search_by != 'genres':\n search_dict = {search_by + '__icontains': request.GET.get('lookup')}\n tracks_list = Tracks.objects.filter(**search_dict)\n else:\n gen_list = [x.strip() for x in request.GET.get('lookup').split(',')]\n id_list = Genres.objects.filter(genre__in=gen_list).values_list('id', flat=True)\n tracks_list = Tracks.objects.filter(genres__in=id_list).distinct()\n context = {'track_list': tracks_list, 'call': 'search', }\n return render(request, template, context)",
"def search(self, sid, group):\n zx = \"\".join([chr(random.randint(97,122)) for i in xrange(0, 11)])\n resdat = self.req(sid, ['{\"a\":\"kA-_jfrF\",\"r\":\"0\",\"t\":2007,\"p\":{\"1000\":[0,0],\"2\":\"kA-_jfrF0\"}}',\n '{\"a\":\"kA-_jfrF\",\"r\":\"1\",\"t\":2602,\"p\":{\"1000\":[0,0],\"2\":\"kA-_jfrF2\",\"3\":\"\",\"4\":{\"2\":25,\"1\":0},\"6\":\"[email protected]\"}}']).read()\n print \"RESULT DATA\",resdat",
"def info_search(profiles: list, session: requests.Session):\n counter = 0\n results = []\n for profile in profiles:\n info = {}\n sleep(0.5)\n soup = BeautifulSoup(session.get(profile).text, \"lxml\")\n details = soup.find(\"div\", class_=\"page-content seaman-page-content\")\n info['name'] = details.h1.text\n try:\n for row in details.find_all(\"div\", class_=\"colmn3\"):\n row_content = row.text.split(\":\")\n if row_content[0] == \"Personal mobile number\":\n info['phone'] = row_content[1]\n if row_content[0] == \"E-Mail\":\n info['email'] = row_content[1]\n if 'phone' in info and duplicates_checker(results, info['phone']):\n results.append(info)\n vcard_handler(info)\n except AttributeError:\n continue\n if len(results) % 10 == 0:\n counter += 10\n logger.info(f'{counter} profiles added out of {len(profiles)}')\n logger.info(f'Finished adding {len(profiles)} profiles')\n return results",
"def find_all(self):\n #TODO: specify above\n\n print(\"Type in:\")\n print(\"b - blink, y - add to list, n (else) - do not add to list\")\n cflib.crtp.init_drivers()\n available = cflib.crtp.scan_interfaces()\n for i in available:\n print \"InterfacewithURI [%s] found, name [%s]\" % (i[0],i[1])\n self.add_uri(i[0], i[1])\n print(\"end\")",
"def processSearchResult(self):",
"def sendResults(results):\n log.msg(\"Search Returned from all sources\")\n master_result = []\n for status, result in results:\n if status:\n master_result += result\n\n for key, mediactr in __controllers.items():\n log.msg(\"\\tSending Result to %s\" % key)\n mediactr.searchCompleted(search_context, master_result)",
"def get_all_cards(self, filter='open'):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter=filter):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources",
"def search(self,name=None):\n\t\taddresses = discover_devices()\n\t\t#if len(addresses) == 0:\n\t\t#\treturn None\n\t\tnames = []\n\t\tfor adr in addresses:\n\t\t\tnames.append(lookup_name(adr))\n\t\t\tif name != None and name == names[-1]:\n\t\t\t\treturn adr\n\n\t\treturn zip(addresses,names)",
"def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)",
"def search():\n pass",
"def search(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n results = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n results.append(Colors.YELLOW + video[\"file\"] + Colors.END + \" - \" + video[\"source\"] + \" - \" +\n video[\"title\"])\n if results:\n for result in results:\n safeprint(result)\n else:\n safeprint(\"No video matching the given query was found.\")",
"def _scan_hosts(self):\n results = []\n for item in glob.glob(self._pattern):\n results.append(item)\n return results",
"def search_service(self, name_filter):\n rs=search_service(name_filter)\n for el in rs:\n print(el)",
"def main():\n entries = get_feed_entries()\n\n while True:\n response = input(\"What you you like to search for? \")\n\n if not response:\n print(\"Please provide a search term\")\n continue\n\n if response.lower() == \"q\":\n print(\"Bye\")\n break\n\n matches = [\n entry\n for entry in list(\n filter(lambda x: filter_entries_by_tag(response.lower(), x), entries)\n )\n ]\n\n if matches:\n for entry in sorted(matches, key=lambda x: x.date):\n print(entry.title)\n\n print(f\"{len(matches)} {'entry' if len(matches) == 1 else 'entries'} matched\")",
"def filterRansac():\n pass",
"def accumulate_packets():\n l = []\n packets = sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=printing)\n print(\"Processing packets!\")\n for packet in packets:\n l.append({\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)})\n return l",
"def scan(backend, timeout=10):\n result = []\n for (mac, name) in backend.scan_for_devices(timeout):\n print(mac + \" \" + name)\n return result",
"def listenRtp(self):\r\n\t\twhile True:\r\n\t\t\tstartTime = time()\r\n\t\t\tdata, address = self.rtpSocket_client.recvfrom(16384)\r\n\t\t\tendTime = time()\r\n\r\n\t\t\tif (data):\r\n\t\t\t\tself.recvRtpPacket.decode(data)\r\n\t\t\t\tself.cacheFile = self.writeFrame(self.recvRtpPacket.getPayload())\r\n\t\t\t\tself.updateMovie(self.cacheFile)\r\n\r\n\t\t\t\tcurrentFrameNbr = self.recvRtpPacket.seqNum()\r\n\t\t\t\tcurrent = self.totalTime - 0.05 * currentFrameNbr\r\n\t\t\t\tcurrMin = current / 60\r\n\t\t\t\tcurrSec = current % 60\r\n\t\t\t\t\r\n\t\t\t\tself.progress['value'] = 0.05 * currentFrameNbr\r\n\r\n\t\t\t\tif currMin < 10:\r\n\t\t\t\t\tself.time.configure(text=\"Time Left: 0%d:%d\" % (currMin, currSec), width=12, heigh=2)\r\n\t\t\t\t\tif currSec < 10:\r\n\t\t\t\t\t\tself.time.configure(text=\"Time Left: 0%d:0%d\" % (currMin, currSec), width=12, heigh=2)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.time.configure(text=\"Time Left: %d:%d\" % (currMin, currSec), width=12, heigh=2)\r\n\t\t\t\t\tif currSec < 10:\r\n\t\t\t\t\t\tself.time.configure(text=\"Time Left: %d:0%d\" % (currMin, currSec), width=12, heigh=2)\r\n\r\n\t\t\t\tself.networkStat.receivedPacketCount += 1\r\n\t\t\t\tself.networkStat.totalADR += (sys.getsizeof(data) / (endTime - startTime))\r\n\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tcontinue",
"def search(self, name, provider='thetvdb'):\n which = [self.providers[provider]]\n results = yield from self._invoke_providers('search', name, which=which, bubble=True)\n return list(itertools.chain.from_iterable(l for p, l in results))",
"def find_all(file):\r\n file_name = os.path.basename(file)[:-4]\r\n print (file_name)\r\n \r\n \"\"\"II. Textinhalt der Datei\"\"\"\r\n with open(file,\"r\", encoding = \"utf-8\") as sourcefile:\r\n full_text = sourcefile.read()\r\n \r\n \"\"\"III. Rollenliste der Datei\"\"\"\r\n roles_ids=re.findall(\"xml:id=\\\"te(.*)\\\">\", full_text)\r\n \r\n \"\"\"IV. Speaker_Ids der Datei\"\"\" \r\n all_ids_in_text = re.findall(\"who=\\\"#te(.*?)\\\">\", full_text)\r\n \r\n \"\"\"V. Eine zweite Id Liste anlegen, falls 90er Ids vorhanden sind\"\"\"\r\n ids_no_ninety = [item for item in all_ids_in_text if not re.search(r\".*-9[0-9]\", item)]\r\n \r\n \"\"\"VI. Eine zweite Rolenliste anlegen, falls 90er Ids vorhanden sind\"\"\" \r\n roles_ninety = roles_ids.copy()\r\n for item in all_ids_in_text:\r\n ninety = re.findall(\".*-9[0-9]\", item)\r\n if ninety not in roles_ids:\r\n roles_ninety.extend(ninety) \r\n \r\n roles_ninety =(set(roles_ninety))\r\n \r\n \r\n \"\"\"\r\n VII. Wer folgt auf welche Figur?\r\n - 2 Sprecher Fenster\r\n - ohne 90er Ids\r\n - ungewichtet und ungerichtet\r\n \"\"\"\r\n list_of_teams = []\r\n for item in zip(ids_no_ninety[:-1],ids_no_ninety[1:]):\r\n list_of_teams.append(item)\r\n GroupofSpeaker=list(set(list_of_teams))\r\n \r\n \r\n \"\"\"\r\n - Entfernt Kante vom Knoten zum gleichen Knoten \r\n - Entfernt doppelte Kante zwischen zwei Knoten\r\n \"\"\"\r\n for item in GroupofSpeaker:\r\n if (item[0] == item[1]):\r\n GroupofSpeaker.remove(item)\r\n simple_edges = set(tuple(sorted(t)) for t in GroupofSpeaker)\r\n \r\n \r\n \"\"\"Anzahl der Knoten und der Verbindungen\"\"\" \r\n all_nodes = (len(roles_ids)) \r\n all_edges = (len(simple_edges)) \r\n \r\n columns = [file_name]\r\n dates_2speaker_noNinety_noDirected = pd.DataFrame(all_nodes, index=['2_uw_no90_nodes'], columns=columns)\r\n dates_edges = pd.DataFrame(all_edges, index=['2_uw_no90_edges'], columns=columns)\r\n dates_2speaker_noNinety_noDirected = dates_2speaker_noNinety_noDirected.append(dates_edges)\r\n \r\n \r\n \"\"\" Netzwerk \"\"\"\r\n G1 = nx.Graph()\r\n G1.add_nodes_from(roles_ids)\r\n G1.add_edges_from(simple_edges)\r\n \r\n \r\n \"\"\"Maximalen und Minimalen Grad\"\"\"\r\n degree_sequence=sorted(nx.degree(G1).values(),reverse=True) \r\n maxdegree = degree_sequence[:1]\r\n mindegree = degree_sequence[-1:]\r\n dates_max = pd.DataFrame(maxdegree, index=['2_uw_no90_maxDegree'], columns=columns)\r\n dates_min = pd.DataFrame(mindegree, index=['2_uw_no90_minDegree'], columns=columns)\r\n dates_2speaker_noNinety_noDirected = dates_2speaker_noNinety_noDirected.append(dates_max)\r\n dates_2speaker_noNinety_noDirected = dates_2speaker_noNinety_noDirected.append(dates_min)\r\n \r\n \r\n \"\"\"Durschnittsgrad\"\"\"\r\n avdegree = sum(G1.degree().values())/float(len(G1))\r\n dates_degree = pd.DataFrame(avdegree, index=['2_uw_no90_avdegree'], columns=columns)\r\n dates_2speaker_noNinety_noDirected = dates_2speaker_noNinety_noDirected.append(dates_degree)\r\n \r\n \r\n \"\"\"Zentralitätsmaß (degree centrality)\"\"\"\r\n dc = sorted(nx.degree_centrality(G1).values(),reverse=True)\r\n maxdc = (dc[:1])\r\n dates_degree_centrality = pd.DataFrame(maxdc, index=['2_uw_no90_maxdc'], columns=columns)\r\n dates_2speaker_noNinety_noDirected = dates_2speaker_noNinety_noDirected.append(dates_degree_centrality)\r\n \r\n \r\n \"\"\"Globaler Cluster-Koeffizient\"\"\"\r\n ck = nx.average_clustering(G1)\r\n dates_global_cluster = pd.DataFrame(ck, index=['2_uw_no90_ck'], columns=columns)\r\n dates_2speaker_noNinety_noDirected = 
dates_2speaker_noNinety_noDirected.append(dates_global_cluster)\r\n \r\n \r\n \"\"\"Dichte\"\"\"\r\n den = nx.density(G1)\r\n dates_density = pd.DataFrame(den, index=['2_uw_no90_density'], columns=columns)\r\n dates_2speaker_noNinety_noDirected = dates_2speaker_noNinety_noDirected.append(dates_density)\r\n \r\n \"\"\"Show the dataframe\"\"\"\r\n print (dates_2speaker_noNinety_noDirected)\r\n \r\n \r\n \"\"\"nx Graph\"\"\"\r\n #nx.draw(G1, with_labels=True, font_weight='bold', alpha=0.5)\r\n #plt.savefig('./output/grafiken/'+file_name+'_simple_path1.pdf')\r\n #plt.show() # display\r\n \r\n \r\n \"\"\"\r\n VIII. Wer folgt auf welche Figur?\r\n - 2 Sprecher Fenster\r\n - mit 90er Ids\r\n - ungewichtet und ungerichtet\r\n \"\"\"\r\n list_of_teams = []\r\n for item in zip(all_ids_in_text[:-1],all_ids_in_text[1:]):\r\n list_of_teams.append(item)\r\n GroupofSpeaker=list(set(list_of_teams))\r\n \r\n \r\n \"\"\"\r\n - Entfernt Kante vom Knoten zum gleichen Knoten \r\n - Entfernt doppelte Kante zwischen zwei Knoten\r\n \"\"\"\r\n for item in GroupofSpeaker:\r\n if (item[0] == item[1]):\r\n GroupofSpeaker.remove(item)\r\n \r\n simple_edges = set(tuple(sorted(t)) for t in GroupofSpeaker)\r\n \r\n \r\n \"\"\"Anzahl der Knoten und der Verbindungen\"\"\" \r\n all_edges = (len(simple_edges)) \r\n all_nodes_with90 = (len(roles_ninety))\r\n \r\n columns = [file_name]\r\n dates_2speaker_Ninety_noDirected = pd.DataFrame(all_nodes_with90, index=['2_uw_90_nodes'], columns=columns)\r\n dates90_edges = pd.DataFrame(all_edges, index=['2_uw_90_edges'], columns=columns)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_edges)\r\n \r\n \r\n \"\"\" Netzwerk \"\"\"\r\n G2 = nx.Graph()\r\n G2.add_nodes_from(roles_ninety)\r\n G2.add_edges_from(simple_edges)\r\n \r\n \r\n \"\"\"Maximalen und Minimalen Grad\"\"\"\r\n degree_sequence=sorted(nx.degree(G2).values(),reverse=True) \r\n maxdegree = degree_sequence[:1]\r\n mindegree = degree_sequence[-1:]\r\n dates90_max = pd.DataFrame(maxdegree, index=['2_uw_90_maxDegree'], columns=columns)\r\n dates90_min = pd.DataFrame(mindegree, index=['2_uw_90_minDegree'], columns=columns)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_max)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_min)\r\n \r\n \r\n \"\"\"Durschnittsgrad\"\"\"\r\n avdegree = sum(G2.degree().values())/float(len(G2))\r\n dates90_degree = pd.DataFrame(avdegree, index=['2_uw_90_avdegree'], columns=columns)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_degree)\r\n \r\n \r\n \"\"\"Zentralitätsmaß (degree centrality)\"\"\"\r\n dc = sorted(nx.degree_centrality(G2).values(),reverse=True)\r\n maxdc = (dc[:1])\r\n dates90_degree_centrality = pd.DataFrame(maxdc, index=['2_uw_90_maxdc'], columns=columns)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_degree_centrality)\r\n \r\n \r\n \"\"\"Globaler Cluster-Koeffizient\"\"\"\r\n ck = nx.average_clustering(G2)\r\n dates90_global_cluster = pd.DataFrame(ck, index=['2_uw_90_ck'], columns=columns)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_global_cluster)\r\n \r\n \r\n \"\"\"Dichte\"\"\"\r\n den = nx.density(G2)\r\n dates90_density = pd.DataFrame(den, index=['2_uw_90_density'], columns=columns)\r\n dates_2speaker_Ninety_noDirected = dates_2speaker_Ninety_noDirected.append(dates90_density)\r\n \r\n \"\"\"Show the dataframe\"\"\"\r\n print 
(dates_2speaker_Ninety_noDirected)\r\n \r\n \"\"\"nx Graph\"\"\"\r\n #nx.draw(G2, with_labels=True, font_weight='bold', alpha=0.5)\r\n #plt.savefig('./output/grafiken/'+file_name+'_simple_path1.pdf')\r\n #plt.show() # display\r\n \r\n \r\n \"\"\"\r\n XI. Wer folgt auf welche Figur?\r\n - 2 Sprecher Fenster\r\n - ohne 90er Ids\r\n - gewichtet und gerichtet\r\n \"\"\"\r\n list_of_teams = []\r\n for z in zip(ids_no_ninety[:-1],ids_no_ninety[1:]):\r\n list_of_teams.append(z)\r\n \r\n \r\n \"\"\"\r\n - Entfernt Kante vom Knoten zum gleichen Knoten \r\n \"\"\"\r\n for item in list_of_teams:\r\n if (item[0] == item[1]):\r\n list_of_teams.remove(item)\r\n counter=dict(Counter(list_of_teams))\r\n \r\n \r\n \"\"\"Anzahl der Knoten und der Verbindungen\"\"\" \r\n all_edges = (len(counter)) \r\n dates_2speaker_noNinety_Directed = pd.DataFrame(all_nodes, index=['2_gw_nodes'], columns=columns)\r\n dates_directed_eges = pd.DataFrame(all_edges, index=['2_gw_no90_edges'], columns=columns)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_eges)\r\n \r\n \r\n \"\"\" Netzwerk \"\"\"\r\n G3 = nx.DiGraph((x, y, {'weight': v}) for (x, y), v in Counter(list_of_teams).items())\r\n G3.add_nodes_from(roles_ids)\r\n \r\n \r\n \"\"\"Maximalen und Minimalen Grad\"\"\"\r\n degree_sequence=sorted(nx.degree(G3, weight='weight').values(),reverse=True) \r\n \r\n maxdegree = degree_sequence[:1]\r\n mindegree = degree_sequence[-1:]\r\n dates_directed_max = pd.DataFrame(maxdegree, index=['2_gw_no90_maxDegree'], columns=columns)\r\n dates_directed_min = pd.DataFrame(mindegree, index=['2_gw_no90_minDegree'], columns=columns)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_max)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_min)\r\n \r\n \"\"\"Durschnittsgrad\"\"\"\r\n avdegree = sum(G3.degree(weight='weight').values())/float(len(G3))\r\n dates_directed_degree = pd.DataFrame(avdegree, index=['2_gw_no90_avdegree'], columns=columns)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_degree)\r\n \r\n \r\n \"\"\"Zentralitätsmaß (in degree centrality and out degree centrality)\"\"\"\r\n dc = sorted(nx.in_degree_centrality(G3).values(),reverse=True)\r\n dc2 = sorted(nx.out_degree_centrality(G3).values(), reverse=True)\r\n indc = (dc[:1])\r\n outdc = (dc2[:1])\r\n \r\n dates_directed_in_degree_centrality = pd.DataFrame(indc, index=['2_gw_no90_indc'], columns=columns)\r\n dates_directed_out_degree_centrality = pd.DataFrame(outdc, index=['2_gw_no90_outdc'], columns=columns)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_in_degree_centrality)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_out_degree_centrality)\r\n \r\n \r\n \"\"\"Globaler Cluster-Koeffizient (entfällt)\"\"\"\r\n #ck = nx.average_clustering(G3)\r\n #ck = sum(nx.clustering(G3).values())/float(len(G3))\r\n \r\n \r\n \"\"\"Dichte\"\"\"\r\n den = nx.density(G3)\r\n dates_directed_degree_density = pd.DataFrame(den, index=['2_gw_no90_density'], columns=columns)\r\n dates_2speaker_noNinety_Directed = dates_2speaker_noNinety_Directed.append(dates_directed_degree_density)\r\n \r\n \"\"\"Show the dataframe\"\"\"\r\n print (dates_2speaker_noNinety_Directed)\r\n \r\n \r\n \"\"\"nx Graph\"\"\"\r\n #nx.draw(G3, with_labels=True, font_weight='bold', alpha=0.5)\r\n 
#plt.savefig('./output/grafiken/'+file_name+'_simple_path1.pdf')\r\n #plt.show() # display\r\n \r\n \r\n \"\"\"\r\n XI. Wer folgt auf welche Figur?\r\n - 2 Sprecher Fenster\r\n - mit 90er Ids\r\n - gewichtet und gerichtet\r\n \"\"\"\r\n list_of_teams = []\r\n for z in zip(all_ids_in_text[:-1],all_ids_in_text[1:]):\r\n list_of_teams.append(z)\r\n \r\n \r\n \"\"\"\r\n - Entfernt Kante vom Knoten zum gleichen Knoten \r\n \"\"\"\r\n for item in list_of_teams:\r\n if (item[0] == item[1]):\r\n list_of_teams.remove(item)\r\n \r\n counter=dict(Counter(list_of_teams))\r\n \r\n \r\n \"\"\"Anzahl der Knoten und der Verbindungen\"\"\" \r\n all_edges = (len(counter)) \r\n \r\n dates_2speaker_Ninety_Directed = pd.DataFrame(all_nodes_with90, index=['2_gw_nodes'], columns=columns)\r\n dates_directed_edges = pd.DataFrame(all_edges, index=['2_gw_90_edges'], columns=columns)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates_directed_edges)\r\n \r\n \r\n \"\"\" Netzwerk \"\"\"\r\n G4 = nx.DiGraph((x, y, {'weight': v}) for (x, y), v in Counter(list_of_teams).items())\r\n G4.add_nodes_from(roles_ids)\r\n \r\n \r\n \"\"\"Maximalen und Minimalen Grad\"\"\"\r\n degree_sequence=sorted(nx.degree(G4, weight='weight').values(),reverse=True) \r\n \r\n maxdegree = degree_sequence[:1]\r\n mindegree = degree_sequence[-1:]\r\n dates_directed_max = pd.DataFrame(maxdegree, index=['2_gw_90_maxDegree'], columns=columns)\r\n dates_directed_min = pd.DataFrame(mindegree, index=['2_gw_90_minDegree'], columns=columns)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates_directed_max)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates_directed_min)\r\n \r\n \r\n \"\"\"Durschnittsgrad\"\"\"\r\n avdegree = sum(G4.degree(weight='weight').values())/float(len(G4))\r\n dates_directed_degree = pd.DataFrame(avdegree, index=['2_gw_90_avdegree'], columns=columns)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates_directed_degree)\r\n \r\n \r\n \"\"\"Zentralitätsmaß (degree centrality)\"\"\"\r\n dc = sorted(nx.in_degree_centrality(G4).values(),reverse=True)\r\n dc2 = sorted(nx.out_degree_centrality(G4).values(), reverse=True)\r\n indc = (dc[:1])\r\n outdc = (dc2[:1])\r\n \r\n dates_directed_in_degree_centrality = pd.DataFrame(indc, index=['2_gw_90_indc'], columns=columns)\r\n dates_directed_out_degree_centrality = pd.DataFrame(outdc, index=['2_gw_90_outdc'], columns=columns)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates_directed_in_degree_centrality)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates_directed_out_degree_centrality)\r\n \r\n \r\n \"\"\"Dichte\"\"\"\r\n den = nx.density(G4)\r\n dates90_density = pd.DataFrame(den, index=['2_gw_90_density'], columns=columns)\r\n dates_2speaker_Ninety_Directed = dates_2speaker_Ninety_Directed.append(dates90_density)\r\n \r\n \"\"\"Show the dataframe\"\"\"\r\n print (dates_2speaker_Ninety_Directed)\r\n \r\n \"\"\"nx Graph\"\"\"\r\n #nx.draw(G4, with_labels=True, font_weight='bold', alpha=0.5)\r\n #plt.savefig('./output/grafiken/'+file_name+'_simple_path1.pdf')\r\n #plt.show() # display\r\n \r\n \r\n \"\"\"Speichern/Save the Files\"\"\"\r\n with open('./output/'+file_name+\"Daten\"+\".csv\", \"w\") as resultsFile_II:\r\n dates_2speaker_noNinety_noDirected.to_csv(resultsFile_II)\r\n dates_2speaker_Ninety_noDirected.to_csv(resultsFile_II)\r\n dates_2speaker_noNinety_Directed.to_csv(resultsFile_II)\r\n 
dates_2speaker_Ninety_Directed.to_csv(resultsFile_II)",
"def search(self, query):",
"def get_search_results(self):\n sleep(10)\n try:\n addresses = self.driver.find_elements_by_class_name('details-title')\n for p in range(len(addresses)):\n address.append(addresses[p].text)\n prices = self.driver.find_elements_by_class_name('price-info')\n for p in range(len(prices)):\n price.append(prices[p].text)\n links = self.driver.find_element_by_tag_name('a.details-titleLink jsCardLinkGA')\n for p in range(len(links)):\n link.append(links[p].text)\n except NoSuchElementException:\n sleep(3)\n self.pop_up()",
"async def async_search(\n cls, source: Optional[AddressTupleVXType] = None, timeout: int = SSDP_MX\n ) -> Set[CaseInsensitiveDict]:\n responses = set()\n\n async def on_response(data: CaseInsensitiveDict) -> None:\n if \"st\" in data and data[\"st\"] in cls.DEVICE_TYPES:\n responses.add(data)\n\n await async_search(async_callback=on_response, source=source, timeout=timeout)\n\n return responses",
"def scan(ip_addr):\n log = logging.getLogger(__name__)\n source_dir = GLOBAL_MUTABLE_CONFIG['--working-dir']\n log.debug('Scanning for local and remote songs in %s', source_dir)\n\n # First get timezone.\n try:\n tzinfo = get_card_time_zone(ip_addr)\n except FlashAirNetworkError:\n raise # To be handled in caller.\n except FlashAirError:\n log.exception('Unexpected exception.')\n return list(), set(), None\n\n # Get songs to upload and items to delete.\n songs, valid_targets, files, empty_dirs = get_songs(source_dir, ip_addr, tzinfo)\n delete_paths = files_dirs_to_delete(valid_targets, files, empty_dirs)\n\n return songs, delete_paths, tzinfo"
] | [
"0.54896796",
"0.5231175",
"0.51695204",
"0.5163761",
"0.51572496",
"0.5063222",
"0.5016381",
"0.49998677",
"0.4977547",
"0.49723232",
"0.4970052",
"0.49620566",
"0.49414483",
"0.49369073",
"0.49265638",
"0.49259934",
"0.49234992",
"0.49125823",
"0.49091136",
"0.49083737",
"0.49021307",
"0.49002802",
"0.488957",
"0.4889231",
"0.48858783",
"0.48774624",
"0.4867728",
"0.48589104",
"0.48534626",
"0.48277715"
] | 0.5989913 | 0 |
Calculates the 2D Hausdorff distance between the predicted and target segmentation masks; works on 2D arrays only. Supports multiclass (semantic segmentation). | def hausdorff_2d_distance(pred: np.ndarray, target: np.ndarray) -> Dict:
    assert len(pred.shape) == 2 and len(target.shape) == 2
labels = np.unique(target)
labels = labels[labels != 0]
scores = {}
for label in labels:
mask_pred = pred == label
mask_gt = target == label
label = str(int(label))
mask_pred = mask_pred.astype(int)
mask_gt = mask_gt.astype(int)
gt_empty = np.sum(mask_gt) == 0
pred_empty = np.sum(mask_pred) == 0
# hausdorff not defined if both are empty ( 0/0 situation)
if gt_empty and pred_empty:
scores[label] = 1.0
else:
hausdorff1 = directed_hausdorff(mask_pred, mask_gt)[0]
hausdorff2 = directed_hausdorff(mask_gt, mask_pred)[0]
hausdorff = max(hausdorff1, hausdorff2)
scores[label] = hausdorff
return scores | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def feature_dist(input_labels, struct=np.ones((3, 3))):\n # remove the pixels inside the coherent\n input_errosion = binary_erosion(input_labels, structure=struct)\n input_labels[input_errosion] = 0\n I, J = np.nonzero(input_labels)\n labels = input_labels[I, J]\n coords = np.column_stack((I, J))\n sorter = np.argsort(labels)\n labels = labels[sorter]\n coords = coords[sorter]\n I = I[sorter]\n J = J[sorter]\n sq_dists = cdist(coords, coords, 'sqeuclidean')\n start_idx = np.flatnonzero(np.r_[1, np.diff(labels)])\n nonzero_vs_feat = np.minimum.reduceat(sq_dists, start_idx, axis=1)\n feat_vs_feat = np.minimum.reduceat(nonzero_vs_feat, start_idx, axis=0)\n # calculate the distance between every two coherent areas\n # distance factor for one pixel to meter: 100\n distance_matrix = 100 * np.sqrt(feat_vs_feat)\n nRow, nCol = sq_dists.shape[0], start_idx.shape[0]\n # add the index of the final element to the slice array\n slice_indices = np.concatenate((start_idx, np.array([nRow])))\n col_args = np.zeros((nRow, nCol)).astype(int)\n row_index = np.zeros((nCol, nCol)).astype(int)\n '''\n How the following commands work:\n find closest pixel from label A to label B:\n Label A --> Label B\n row_index[A-1, B-1] = alpha\n col_index[A-1, B-1] = col_args[alpha, B-1] = beta\n index of pixel from Label A: input_labels[ I[alpha], J[alpha]]\n index of pixel from Label B: input_labels[ I[beta], J[beta]]\n '''\n for i in range(nCol):\n col_args[:, i] = start_idx[i] + \\\n np.argmin(sq_dists[:, slice_indices[i]:\n slice_indices[i+1]], axis=1)\n elements = sq_dists[np.arange(nRow).reshape((nRow, 1)), col_args]\n for i in range(nCol):\n row_index[i, :] = start_idx[i] + \\\n np.argmin(elements[slice_indices[i]:\n slice_indices[i+1], :], axis=0)\n col_index = col_args[row_index, np.arange(nCol).reshape((1, nCol))]\n # Change col_index and row_index to input array index.\n row_index_from_label = I[row_index]\n col_index_from_label = J[row_index]\n row_index_to_label = I[col_index]\n col_index_to_label = J[col_index]\n return distance_matrix, row_index_from_label, col_index_from_label, \\\n row_index_to_label, col_index_to_label",
"def _mahalanobis_classifier(self, X_test: np.array, y_test: np.array):\r\n\r\n dist = np.empty([X_test.shape[0], y_test.shape[0]])\r\n for index, target in enumerate(self.targets):\r\n dist[:, index] = np.array([mahalanobis_distance(sample, self.features[np.where(self.labels == target)])\r\n for sample in X_test])\r\n return dist",
"def hausdorff_distance(image1, image2):\n image1_int = image1.clone(\"unsigned int\")\n image2_int = image2.clone(\"unsigned int\")\n\n libfn = utils.get_lib_fn(\"hausdorffDistance%iD\" % image1_int.dimension)\n d = libfn(image1_int.pointer, image2_int.pointer)\n\n return d",
"def _euclidian_classifier(self, X_test: np.array, y_test: np.array):\r\n dist = np.empty([X_test.shape[0], y_test.shape[0]])\r\n for index, target in enumerate(self.targets):\r\n dist[:, index] = np.array([euclidian_distance(sample, self.features[np.where(self.labels == target)])\r\n for sample in X_test])\r\n return dist",
"def one_hot2dist(seg):\n\n C = seg.shape[0]\n res = np.zeros_like(seg)\n for c in range(1, C): # background is excluded (C=0)\n posmask = seg[c].astype(np.bool)\n if posmask.any():\n negmask = ~posmask\n res[c] = distance(negmask) * negmask - (distance(posmask) - 1) * posmask\n\n return res",
"def calculate(self):\n\n distance_filter = sitk.HausdorffDistanceImageFilter()\n distance_filter.Execute(self.ground_truth, self.segmentation)\n return distance_filter.GetHausdorffDistance()",
"def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result",
"def distance_metric(seg_A, seg_B, dx, k):\n\n # Extract the label k from the segmentation maps to generate binary maps\n seg_A = (seg_A == k)\n seg_B = (seg_B == k)\n\n table_md = []\n table_hd = []\n X, Y, Z = seg_A.shape\n for z in range(Z):\n # Binary mask at this slice\n slice_A = seg_A[:, :, z].astype(np.uint8)\n slice_B = seg_B[:, :, z].astype(np.uint8)\n\n # The distance is defined only when both contours exist on this slice\n if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:\n # Find contours and retrieve all the points\n contours, hierarchy = cv2.findContours(cv2.inRange(slice_A, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_A = contours[0]\n for i in range(1, len(contours)):\n pts_A = np.vstack((pts_A, contours[i]))\n\n contours, hierarchy = cv2.findContours(cv2.inRange(slice_B, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_B = contours[0]\n for i in range(1, len(contours)):\n pts_B = np.vstack((pts_B, contours[i]))\n\n # Distance matrix between point sets\n M = np.zeros((len(pts_A), len(pts_B)))\n for i in range(len(pts_A)):\n for j in range(len(pts_B)):\n M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])\n\n # Mean distance and hausdorff distance\n md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx\n hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx\n table_md += [md]\n table_hd += [hd]\n\n # Return the mean distance and Hausdorff distance across 2D slices\n mean_md = np.mean(table_md) if table_md else None\n mean_hd = np.mean(table_hd) if table_hd else None\n return mean_md, mean_hd",
"def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T",
"def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred",
"def hausdorff_distance(self, other):\n ...",
"def calculate(self):\n\n distance_filter = sitk.HausdorffDistanceImageFilter()\n distance_filter.Execute(self.ground_truth, self.segmentation)\n return distance_filter.GetAverageHausdorffDistance()",
"def accuracy(output, target, imgdata):\n profile = False\n # needs to be as gpu as possible!\n maxk = 1\n\n\n batch_size = target.size(0)\n if profile:\n torch.cuda.synchronize()\n start = time.time() \n #_, pred = output.topk(maxk, 1, True, False) # on gpu. slow AF\n _, pred = output.max( 1, keepdim=False) # on gpu\n if profile:\n torch.cuda.synchronize()\n print (\"time for topk: \"+str(time.time()-start)+\" secs\")\n\n if profile:\n start = time.time()\n #print \"pred \",pred.size(),\" iscuda=\",pred.is_cuda\n #print \"target \",target.size(), \"iscuda=\",target.is_cuda\n targetex = target.resize_( pred.size() ) # expanded view, should not include copy\n\n\n correct = pred.eq( targetex.type(dtypei)) #.to(torch.device(\"cuda\")) ) # on gpu\n #print \"correct \",correct.size(), \" iscuda=\",correct.is_cuda \n if profile:\n torch.cuda.synchronize()\n print (\"time to calc correction matrix: \"+str(time.time()-start)+\" secs\")\n\n # we want counts for elements wise\n\n num_per_class = {}\n corr_per_class = {}\n total_corr = 0\n total_pix = 0\n\n if profile:\n torch.cuda.synchronize() \n start = time.time()\n for c in range(output.size(1)):\n # loop over classes\n classmat = targetex.eq(int(c)).long() # elements where class is labeled\n #print \"classmat: \",classmat.size(),\" iscuda=\",classmat.is_cuda\n num_per_class[c] = classmat.long().sum()\n corr_per_class[c] = (correct.long()*classmat.type(dtypei)).long().sum() # mask by class matrix, then sum\n total_corr += corr_per_class[c].long()\n total_pix += num_per_class[c].long()\n print (\"total_pix: \" + str(total_pix))\n print (\"total_corr: \" + str(total_corr))\n\n if profile:\n torch.cuda.synchronize() \n print (\"time to reduce: \"+str(time.time()-start)+\" secs\")\n \n # make result vector\n res = []\n\n\n\n for c in range(output.size(1)):\n if num_per_class[c]>0:\n res.append( float(corr_per_class[c])/float(num_per_class[c])*100.0 )\n else:\n res.append( 0.0 )\n\n # totals\n if total_pix==0:\n res.append(0.0)\n print (\"Mysteriously in here - total_pix: \" +str(total_pix) )\n else:\n res.append( 100.0*float(total_corr)/float(total_pix) )\n\n\n if num_per_class[1]==0 and num_per_class[2]==0:\n res.append(0.0)\n print (\"Mysteriously in here: num-per-class\" +str(num_per_class[1]) +\", \" +str(num_per_class[2]) )\n else:\n res.append( 100.0*float(corr_per_class[1]+corr_per_class[2])/float(num_per_class[1]+num_per_class[2]) ) # track/shower acc\n\n return res",
"def LevDistMultilabels(y_true, y_pred):\n \n n = y_pred.shape[0]\n D = 0\n for i in range(n):\n D += LevenshteinDistance(y_pred[i,:], y_true[i,:])[-1, -1]\n return D/n",
"def estimate_label_proportion(source_loader,target_loader,feat_extract,cuda,n_clusters,cluster_param): \n feat_extract.eval()\n #n_clusters = 3\n from sklearn.cluster import AgglomerativeClustering\n \n \n X_s,y_s = extract_feature(source_loader,feat_extract,cuda) \n X_t,y_t = extract_feature(target_loader,feat_extract,cuda) \n \n \n \n cluster = AgglomerativeClustering(n_clusters=n_clusters,linkage=cluster_param)\n label_t = cluster.fit_predict(X_t)\n #print(np.unique(label_t))\n mean_mat_S, num_in_class_S = extract_prototypes(X_s,y_s,n_clusters)\n mean_mat_T, num_in_class_T = extract_prototypes(X_t,label_t,n_clusters)\n \n \"\"\"\n We assume that prototypes of classes have been transported in some in the feature\n space \n \"\"\"\n \n import ot\n M = ot.dist(mean_mat_S, mean_mat_T)\n M /= M.max()\n \n n_1 = n_clusters\n a = np.ones((n_1,)) / n_1\n b = np.ones((n_1,)) / n_1\n \n \n gamma = ot.emd(a,b,M)\n nb_sample_S = [ np.sum(y_s==i) for i in range(n_clusters) ]\n proportion_T = num_in_class_T/np.sum(num_in_class_T)\n assignement_source_to_target = gamma.argmax(axis=1)\n \n # proportions are arranged directly per class\n proportion_T = proportion_T[assignement_source_to_target]\n print(proportion_T,assignement_source_to_target)\n \n\n return proportion_T,nb_sample_S, assignement_source_to_target",
"def isFusion(event,buff):\n index,diff,label = event\n label = label[0]\n if diff>0:\n return False,[]\n img_before = np.copy(buff[:,:,index-1])\n img_after = np.copy(buff[:,:,index])\n mask_before = (img_before==label).astype(np.uint8)\n nb_elts_before = np.amax(img_before)\n kernel = np.ones((7,7),np.uint8)\n neighbouring_mask = cv2.dilate(mask_before,kernel,iterations=8)\n\n new_map = np.multiply(img_before,neighbouring_mask.astype(np.uint8))\n \n #Removing the element we are currently looking at\n new_map[img_before==label]=0\n possible_candidates = []\n for i in range(nb_elts_before):\n if np.any(new_map==i+1):\n possible_candidates.append(i+1)\n #Computes the area of the cells and compares them\n size_cell_disappearing = np.count_nonzero(img_before==label)\n match = [] #lists the ratios sizeAfter/sizeBefore for possible matches\n \n for vals in possible_candidates:\n size_other_cell = np.count_nonzero(img_before==vals)\n size_before = size_other_cell+size_cell_disappearing\n size_after = np.count_nonzero(img_after==vals)\n ratio = float(size_after)/float(size_before)\n if ratio>0.8 and ratio<1.2:\n match.append((vals,abs(1-ratio)))\n if len(match)==0:\n return False,[]\n if len(match)>1:\n #Several matches, so pick the best\n values = [y for x,y in match]\n result_label,osef = match[np.argmin(values)]\n else:\n result_label, osef = match[0]\n return True,result_label",
"def hausdorffDistance(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node1 = slicer.mrmlScene.GetNodeByID(id1)\r\n polydata1 = node1.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID(id2)\r\n polydata2 = node2.GetPolyData()\r\n nb1 = polydata1.GetNumberOfPoints()\r\n nb2 = polydata2.GetNumberOfPoints()\r\n minimum = None\r\n maximum = None\r\n JJ, jj = None, None\r\n II, ii = None, None\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n polydata1.GetPoint(1, pt1)\r\n polydata1.GetPoint(nb1 - 1, pt2)\r\n minVal1 = min(pt1[2], pt2[2])\r\n maxVal1 = max(pt1[2], pt2[2])\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n pt1b, pt2b = None, None\r\n polydata2.GetPoint(1, pt1)\r\n polydata2.GetPoint(nb2 - 1, pt2)\r\n minVal2 = min(pt1[2], pt2[2])\r\n maxVal2 = max(pt1[2], pt2[2])\r\n valueBase = max(minVal1, minVal2)\r\n valueTip = min(maxVal1, maxVal2)\r\n\r\n # truncate polydatas\r\n truncatedPolydata1 = self.clipPolyData(node1, valueBase)\r\n truncatedPolydata2 = self.clipPolyData(node2, valueBase)\r\n\r\n cellId = vtk.mutable(1)\r\n subid = vtk.mutable(1)\r\n dist = vtk.mutable(1)\r\n cl2 = vtk.vtkCellLocator()\r\n cl2.SetDataSet(truncatedPolydata2)\r\n cl2.BuildLocator()\r\n # Hausforff 1 -> 2\r\n minima = []\r\n for i in range(int(nb1 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata1.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl2.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff12 = max(minima)\r\n\r\n # Hausforff 2 -> 1\r\n minima = []\r\n cl1 = vtk.vtkCellLocator()\r\n cl1.SetDataSet(truncatedPolydata1)\r\n cl1.BuildLocator()\r\n for i in range(int(nb2 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata2.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl1.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff21 = max(minima)\r\n return max(hausdorff12, hausdorff21)",
"def distance_metric_new(seg_A, seg_B, vox_size=1):\n import cc3d # pip install connected-components-3d --no-binary :all: (https://pypi.org/project/connected-components-3d/)\n print(\"Number of cpu : \", multiprocessing.cpu_count())\n\n # Number of classes\n n_class = int(np.max(seg_B.ravel()))\n\n # Dimensions\n X, Y, Z = seg_A.shape\n\n hd = np.zeros(n_class) # Hausdorff distance per class\n msd = np.zeros(n_class) # Mean surface distance per class\n for k in range(n_class):\n # Extract the label k from the segmentation maps to generate binary maps\n seg_A_tmp = copy.deepcopy(seg_A)\n seg_B_tmp = copy.deepcopy(seg_B)\n\n # Exclude the background (0)\n seg_A_tmp[seg_A != (k + 1)] = 0\n seg_B_tmp[seg_B != (k + 1)] = 0\n seg_A_tmp[seg_A_tmp != 0] = 1\n seg_B_tmp[seg_B_tmp != 0] = 1\n\n # Calculate the Hausdorff distance per each slice, only if both slices contain information\n tmp_hd = 0\n first_time_flag = 1\n\n # Get all contour voxels for the 3D objects\n print(\"Extracting contours, k = {}\".format(k))\n for z in range(Z):\n # Binary mask at this slice\n slice_A = seg_A_tmp[:, :, z].astype(np.uint8)\n slice_B = seg_B_tmp[:, :, z].astype(np.uint8)\n\n # Create a list of indices of non-zero pixels\n if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:\n # Get the contours of the slices\n edge_img_A = find_edges_seg(slice_A)\n edge_img_B = find_edges_seg(slice_B)\n\n # The distance is defined only when both contours exist on this slice\n tmp1 = np.array(np.where(edge_img_A != 0))\n tmp1_1 = z * np.ones(tmp1.shape[1]) # Add the slice dimension\n tmp2 = np.array(np.where(edge_img_B != 0))\n tmp2_1 = z * np.ones(tmp2.shape[1]) # Add the slice dimension\n if first_time_flag == 1:\n qA = np.append(tmp1, tmp1_1.reshape(1, tmp1.shape[1]), axis=0).transpose() # List of XYZ coordinates\n qB = np.append(tmp2, tmp2_1.reshape(1, tmp2.shape[1]), axis=0).transpose() # List of XYZ coordinates\n first_time_flag = 0\n else:\n q_tmp = np.append(tmp1, tmp1_1.reshape(1, tmp1.shape[1]), axis=0).transpose()\n p_tmp = np.append(tmp2, tmp2_1.reshape(1, tmp2.shape[1]), axis=0).transpose()\n qA = np.append(qA, q_tmp, axis=0)\n qB = np.append(qB, p_tmp, axis=0)\n\n # Rescale points according to voxel size (for now voxel is assumed to be isotropic) [mm]\n qA = qA * vox_size\n qB = qB * vox_size\n\n # Mean surface distance\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print(\"Calculating distance matrix\")\n # Distance matrix between point sets\n\n # Serial calculation\n # ####################################\n # M = np.zeros((len(qA), len(qB)))\n # for i in tqdm(range(len(qA))):\n # for j in range(len(qB)):\n # M[i, j] = np.linalg.norm(qA[i, :] - qB[j, :])\n # ####################################\n\n # Compute the mean surface distance in parallel\n M = mp_run(qA, qB)\n\n msd[k] = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1)))\n hd[k] = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))])\n return hd, msd",
"def calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties, on_dev, data_folder):\n with open(os.path.join(data_folder,'label_map.json'), 'r') as j:\n label_map = json.load(j)\n \n rev_label_map = {v: k for k, v in label_map.items()} \n if on_dev:\n device = det_boxes[0].device # torch.device('cuda:1')\n \n # these are all lists of tensors of the same length, i.e. number of images\n assert len(det_boxes) == len(det_labels) == len(det_scores) == len(true_boxes) == len(true_labels) == len(true_difficulties) \n n_classes = len(label_map)\n \n # Store all (true) objects in a single continuous tensor while keeping track of the image it is from\n true_images = list()\n for i in range(len(true_labels)):\n true_images.extend([i] * true_labels[i].size(0))\n # ==============================================================================================================================\n if on_dev:\n true_images = torch.LongTensor(true_images).to(device)#-(n_objects), n_objects is the total no. of objects across all images\n else:\n true_images = torch.LongTensor(true_images).cuda()#-----(n_objects), n_objects is the total no. of objects across all images\n \n true_boxes = torch.cat(true_boxes, dim=0)#------------------(n_objects, 4)\n true_labels = torch.cat(true_labels, dim=0)#----------------(n_objects)\n true_difficulties = torch.cat(true_difficulties, dim=0)#----(n_objects)\n # ==============================================================================================================================\n assert true_images.size(0) == true_boxes.size(0) == true_labels.size(0)\n\n # Store all detections in a single continuous tensor while keeping track of the image it is from\n det_images = list()\n for i in range(len(det_labels)):\n det_images.extend([i] * det_labels[i].size(0))\n \n if on_dev:\n det_images = torch.LongTensor(det_images).to(device) # (n_detections)\n else:\n det_images = torch.LongTensor(det_images).cuda() # (n_detections)\n \n det_boxes = torch.cat(det_boxes, dim=0) # (n_detections, 4)\n det_labels = torch.cat(det_labels, dim=0) # (n_detections)\n det_scores = torch.cat(det_scores, dim=0) # (n_detections)\n\n assert det_images.size(0) == det_boxes.size(0) == det_labels.size(0) == det_scores.size(0)\n\n # Calculate APs for each class (except background)\n average_precisions = torch.zeros((n_classes - 1), dtype=torch.float) # (n_classes - 1)\n for c in range(1, n_classes):\n # Extract only objects with this class\n true_class_images = true_images[true_labels == c] # (n_class_objects)\n true_class_boxes = true_boxes[true_labels == c] # (n_class_objects, 4)\n true_class_difficulties = true_difficulties[true_labels == c] # (n_class_objects)\n n_easy_class_objects = (1 - true_class_difficulties).sum().item() # ignore difficult objects\n\n # Keep track of which true objects with this class have already been 'detected'\n # So far, none\n if on_dev:\n true_class_boxes_detected = torch.zeros((true_class_difficulties.size(0)), dtype=torch.uint8).to(device)#(n_class_objects)\n else:\n true_class_boxes_detected = torch.zeros((true_class_difficulties.size(0)), dtype=torch.uint8).cuda() # (n_class_objects)\n\n # Extract only detections with this class\n det_class_images = det_images[det_labels == c] # (n_class_detections)\n det_class_boxes = det_boxes[det_labels == c] # (n_class_detections, 4)\n det_class_scores = det_scores[det_labels == c] # (n_class_detections)\n n_class_detections = det_class_boxes.size(0)\n if n_class_detections == 0:\n continue\n\n # 
Sort detections in decreasing order of confidence/scores\n det_class_scores, sort_ind = torch.sort(det_class_scores, dim=0, descending=True) # (n_class_detections)\n det_class_images = det_class_images[sort_ind] # (n_class_detections)\n det_class_boxes = det_class_boxes[sort_ind] # (n_class_detections, 4)\n\n # In the order of decreasing scores, check if true or false positive\n if on_dev:\n true_positives = torch.zeros((n_class_detections), dtype=torch.float).to(device) # (n_class_detections)\n false_positives = torch.zeros((n_class_detections), dtype=torch.float).to(device) # (n_class_detections)\n else:\n true_positives = torch.zeros((n_class_detections), dtype=torch.float).cuda() # (n_class_detections)\n false_positives = torch.zeros((n_class_detections), dtype=torch.float).cuda() # (n_class_detections)\n \n for d in range(n_class_detections):\n this_detection_box = det_class_boxes[d].unsqueeze(0) # (1, 4)\n this_image = det_class_images[d] # (), scalar\n\n # Find objects in the same image with this class, their difficulties, and whether they have been detected before\n object_boxes = true_class_boxes[true_class_images == this_image] # (n_class_objects_in_img)\n object_difficulties = true_class_difficulties[true_class_images == this_image] # (n_class_objects_in_img)\n # If no such object in this image, then the detection is a false positive\n if object_boxes.size(0) == 0:\n false_positives[d] = 1\n continue\n\n # Find maximum overlap of this detection with objects in this image of this class\n overlaps = find_jaccard_overlap(this_detection_box, object_boxes) # (1, n_class_objects_in_img)\n max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) # (), () - scalars\n\n # 'ind' is the index of the object in these image-level tensors 'object_boxes', 'object_difficulties'\n # In the original class-level tensors 'true_class_boxes', etc., 'ind' corresponds to object with index...\n original_ind = torch.LongTensor(range(true_class_boxes.size(0)))[true_class_images == this_image][ind]\n # We need 'original_ind' to update 'true_class_boxes_detected'\n\n # If the maximum overlap is greater than the threshold of 0.5, it's a match\n if max_overlap.item() > 0.5:\n # If the object it matched with is 'difficult', ignore it\n if object_difficulties[ind] == 0:\n # If this object has already not been detected, it's a true positive\n if true_class_boxes_detected[original_ind] == 0:\n true_positives[d] = 1\n true_class_boxes_detected[original_ind] = 1 # this object has now been detected/accounted for\n # Otherwise, it's a false positive (since this object is already accounted for)\n else:\n false_positives[d] = 1\n # Otherwise, the detection occurs in a different location than the actual object, and is a false positive\n else:\n false_positives[d] = 1\n\n # Compute cumulative precision and recall at each detection in the order of decreasing scores\n cumul_true_positives = torch.cumsum(true_positives, dim=0) # (n_class_detections)\n cumul_false_positives = torch.cumsum(false_positives, dim=0) # (n_class_detections)\n cumul_precision = cumul_true_positives / (\n cumul_true_positives + cumul_false_positives + 1e-10) # (n_class_detections)\n cumul_recall = cumul_true_positives / n_easy_class_objects # (n_class_detections)\n\n # Find the mean of the maximum of the precisions corresponding to recalls above the threshold 't'\n recall_thresholds = torch.arange(start=0, end=1.1, step=.1).tolist() # (11)\n if on_dev:\n precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).to(device) # (11)\n else:\n 
precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).cuda()\n for i, t in enumerate(recall_thresholds):\n recalls_above_t = cumul_recall >= t\n if recalls_above_t.any():\n precisions[i] = cumul_precision[recalls_above_t].max()\n else:\n precisions[i] = 0.\n average_precisions[c - 1] = precisions.mean() # c is in [1, n_classes - 1]\n\n # Calculate Mean Average Precision (mAP)\n mean_average_precision = average_precisions.mean().item()\n\n # Keep class-wise average precisions in a dictionary\n average_precisions = {rev_label_map[c + 1]: v for c, v in enumerate(average_precisions.tolist())}\n\n return average_precisions, mean_average_precision",
"def test_multilabel_hamming_distance_differentiability(self, inputs):\n preds, target = inputs\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=MultilabelHammingDistance,\n metric_functional=multilabel_hamming_distance,\n metric_args={\"num_labels\": NUM_CLASSES, \"threshold\": THRESHOLD},\n )",
"def test_multiclass_hamming_distance(self, ddp, inputs, ignore_index, multidim_average, average):\n preds, target = inputs\n if ignore_index == -1:\n target = inject_ignore_index(target, ignore_index)\n if multidim_average == \"samplewise\" and target.ndim < 3:\n pytest.skip(\"samplewise and non-multidim arrays are not valid\")\n if multidim_average == \"samplewise\" and ddp:\n pytest.skip(\"samplewise and ddp give different order than non ddp\")\n\n self.run_class_metric_test(\n ddp=ddp,\n preds=preds,\n target=target,\n metric_class=MulticlassHammingDistance,\n reference_metric=partial(\n _sklearn_hamming_distance_multiclass,\n ignore_index=ignore_index,\n multidim_average=multidim_average,\n average=average,\n ),\n metric_args={\n \"ignore_index\": ignore_index,\n \"multidim_average\": multidim_average,\n \"average\": average,\n \"num_classes\": NUM_CLASSES,\n },\n )",
"def directed_Hausdorff_hyperbox(b1,b2): \n return max(0,np.max(np.hstack((b1.u-b2.u,b2.l-b1.l))))",
"def get_threshold_binary_confusion_matrix(input_, target, device, pixel = None, reduction='sum'):\n if not input_.shape == target.shape:\n raise ValueError\n\n if not ((target.max() == 1.0 and target.min() == 0.0 and(target.unique().numel() == 2)) \n or (target.max() == 0.0 and target.min() == 0.0 and(target.unique().numel() == 1))):\n raise ValueError('{}, {}, {}'.format(target.max(),target.min(),target.unique().numel()))\n \n fusion_mat = torch.empty(0).to(device)\n for i in range(1,100):\n threshold = i/100\n input_threshed = input_.clone()\n input_threshed[input_ < threshold] = 0.0\n input_threshed[input_ >= threshold] = 1.0\n \n target_neg = -1.0 * (target - 1.0)\n input_threshed_neg = -1.0 * (input_threshed - 1.0)\n \n \n true_negative_mat = target_neg * input_threshed_neg\n false_negative_mat = target * input_threshed_neg\n \n \n if reduction == 'none':\n pass\n \n elif reduction == 'sum':\n \n true_negative = torch.sum(true_negative_mat)\n false_negative = torch.sum(false_negative_mat)\n \n if pixel == None:\n true_positive_mat = target * input_threshed\n false_positive_mat = target_neg * input_threshed \n true_positive = torch.sum(true_positive_mat)\n false_positive = torch.sum(false_positive_mat)\n else:\n kernel = torch.ones(1,1,2*pixel+1,2*pixel+1).to(device)\n target_dilation = F.conv2d(target, kernel, stride = 1, padding = pixel)\n target_dilation[target_dilation > 0] = 1\n\n true_positive = torch.sum(target_dilation * input_threshed)\n# if torch.sum(input_threshed).item()>true_positive.item():\n false_positive = torch.sum(input_threshed) - true_positive\n# else:\n# false_positive= torch.tensor(0.0).to(device)\n mat = torch.stack((true_positive,false_positive,true_negative,false_negative),0)\n mat = mat.expand(1,4)\n fusion_mat = torch.cat((fusion_mat,mat),0)\n\n return fusion_mat",
"def distance_metric(seg_A, seg_B, dx):\n table_md = []\n table_hd = []\n X, Y, Z = seg_A.shape\n for z in range(Z):\n # Binary mask at this slice\n slice_A = seg_A[:, :, z].astype(np.uint8)\n slice_B = seg_B[:, :, z].astype(np.uint8)\n\n # The distance is defined only when both contours exist on this slice\n if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:\n # Find contours and retrieve all the points\n _, contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_A = contours[0]\n for i in range(1, len(contours)):\n pts_A = np.vstack((pts_A, contours[i]))\n\n _, contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_B = contours[0]\n for i in range(1, len(contours)):\n pts_B = np.vstack((pts_B, contours[i]))\n\n # Distance matrix between point sets\n M = np.zeros((len(pts_A), len(pts_B)))\n for i in range(len(pts_A)):\n for j in range(len(pts_B)):\n M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])\n\n # Mean distance and hausdorff distance\n md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx\n hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx\n table_md += [md]\n table_hd += [hd]\n\n # Return the mean distance and Hausdorff distance across 2D slices\n mean_md = np.mean(table_md) if table_md else None\n mean_hd = np.mean(table_hd) if table_hd else None\n return mean_md, mean_hd",
"def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False):\n\n # construct gt objects\n class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}\n npos = 0\n for img_id in gt.keys():\n bbox = np.array(gt[img_id])\n det = [False] * len(bbox)\n npos += len(bbox)\n class_recs[img_id] = {'bbox': bbox, 'det': det}\n # pad empty list to all other imgids\n for img_id in pred.keys():\n if img_id not in gt:\n class_recs[img_id] = {'bbox': np.array([]), 'det': []}\n\n # construct dets\n image_ids = []\n confidence = []\n BB = []\n for img_id in pred.keys():\n for box,score in pred[img_id]:\n image_ids.append(img_id)\n confidence.append(score)\n BB.append(box)\n confidence = np.array(confidence)\n BB = np.array(BB) # (nd,4 or 8,3)\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, ...]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n if d%100==0: \n print(d)\n R = class_recs[image_ids[d]]\n bb = BB[d,:].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n for j in range(BBGT.shape[0]):\n iou = get_iou(bb, BBGT[j,...]) \n if iou > ovmax:\n ovmax = iou\n jmax = j\n\n #print d, ovmax\n if ovmax > ovthresh:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n print('NPOS: ', npos)\n print('ND:', nd)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap",
"def test_multilabel_hamming_distance(self, ddp, inputs, ignore_index, multidim_average, average):\n preds, target = inputs\n if ignore_index == -1:\n target = inject_ignore_index(target, ignore_index)\n if multidim_average == \"samplewise\" and preds.ndim < 4:\n pytest.skip(\"samplewise and non-multidim arrays are not valid\")\n if multidim_average == \"samplewise\" and ddp:\n pytest.skip(\"samplewise and ddp give different order than non ddp\")\n\n self.run_class_metric_test(\n ddp=ddp,\n preds=preds,\n target=target,\n metric_class=MultilabelHammingDistance,\n reference_metric=partial(\n _sklearn_hamming_distance_multilabel,\n ignore_index=ignore_index,\n multidim_average=multidim_average,\n average=average,\n ),\n metric_args={\n \"num_labels\": NUM_CLASSES,\n \"threshold\": THRESHOLD,\n \"ignore_index\": ignore_index,\n \"multidim_average\": multidim_average,\n \"average\": average,\n },\n )",
"def per_target_transform(y_pred, y_true):\r\n # - `y_pred` must be in the following shape (batch_size, num_categories, ...), float32 possibility\r\n # - `y_true` must be in the following shape (batch_size, ...) or (batch_size, num_categories, ...), int64.\r\n assert y_pred.ndim - y_true.ndim in [0, 1]\r\n assert y_pred.ndim > 2, 'only image can be transformed to per_target metric'\r\n\r\n device = y_pred.device\r\n num_classes = y_pred.shape[1]\r\n assert num_classes == 2, 'now only support binary classes'\r\n\r\n # reduce num_categories axis\r\n if y_pred.ndim == y_true.ndim:\r\n y_true = torch.argmax(y_true, 1)\r\n y_pred = torch.argmax(y_pred, 1)\r\n\r\n def _is_match(center_1, area_1, center_2, area_2):\r\n ndim = len(center_1)\r\n if sum([(center_1[i] - center_2[i]) ** 2 for i in range(ndim)]) ** 0.5 < (\r\n 0.62 * (area_1 ** (1 / ndim) + area_2 ** (1 / ndim))): # for 3d case using 0.62 factor\r\n return True\r\n return False\r\n\r\n per_target_preds = []\r\n per_target_trues = []\r\n # split batch\r\n for y_p, y_t in zip(y_pred, y_true):\r\n assert y_p.shape == y_t.shape\r\n # pred Morph Close\r\n y_p = torch.unsqueeze(torch.unsqueeze(y_p, 0), 0).type(torch.float32)\r\n kernel_size = 7\r\n padding = 3\r\n # Dilated\r\n y_p = torch.nn.MaxPool3d(kernel_size, stride=1, padding=padding)(y_p)\r\n # Eroded\r\n y_p = 1.0 - torch.nn.MaxPool3d(kernel_size, stride=1, padding=padding)(1.0 - y_p)\r\n y_p = torch.squeeze(torch.squeeze(y_p, 0), 0).type(torch.int64)\r\n\r\n y_p = y_p.detach().cpu().numpy()\r\n y_t = y_t.detach().cpu().numpy()\r\n region_area_threshold = 10\r\n y_p_label = measure.label(y_p)\r\n y_p_props = measure.regionprops(y_p_label)\r\n y_p_props = [item for item in y_p_props if item.area > region_area_threshold] # reduce small noise\r\n y_t_label = measure.label(y_t)\r\n y_t_props = measure.regionprops(y_t_label)\r\n y_t_props = [item for item in y_t_props if item.area > region_area_threshold] # reduce small noise\r\n\r\n t_matches = []\r\n target_pred = []\r\n target_true = []\r\n for i in range(len(y_p_props)):\r\n i_match = False\r\n for j in range(len(y_t_props)):\r\n if _is_match(y_p_props[i].centroid, y_p_props[i].area, y_t_props[j].centroid, y_t_props[j].area):\r\n i_match = True\r\n t_matches.append(j)\r\n if not i_match: # false positive\r\n target_pred.append(1)\r\n target_true.append(0)\r\n t_matches = set(t_matches)\r\n for _ in range(len(t_matches)): # true positive\r\n target_pred.append(1)\r\n target_true.append(1)\r\n for _ in range(len(y_t_props) - len(t_matches)): # false negative\r\n target_pred.append(0)\r\n target_true.append(1)\r\n\r\n per_target_preds.append(target_pred)\r\n per_target_trues.append(target_true)\r\n max_len = max([len(item) for item in per_target_preds])\r\n if max_len == 0:\r\n max_len = 1 # add one true negative if no targets\r\n for i in range(len(per_target_preds)):\r\n for _ in range(max_len - len(per_target_preds[i])): # pseudo true negative to unify batch len\r\n per_target_preds[i].append(0)\r\n per_target_trues[i].append(0)\r\n per_target_preds = torch.tensor(per_target_preds, dtype=torch.int64, device=device)\r\n per_target_trues = torch.tensor(per_target_trues, dtype=torch.int64, device=device)\r\n per_target_preds = one_hot(per_target_preds, 2, axis=1)\r\n per_target_trues = one_hot(per_target_trues, 2, axis=1)\r\n return per_target_preds, per_target_trues",
"def test_multiclass_hamming_distance_differentiability(self, inputs):\n preds, target = inputs\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=MulticlassHammingDistance,\n metric_functional=multiclass_hamming_distance,\n metric_args={\"num_classes\": NUM_CLASSES},\n )",
"def count_accuracy(G_true, G):\n B_true = G_true != 0# nx.to_numpy_array(G_true) != 0\n B = G != 0# nx.to_numpy_array(G) != 0\n d = B.shape[0]\n # linear index of nonzeros\n pred = np.flatnonzero(B)\n cond = np.flatnonzero(B_true)\n cond_reversed = np.flatnonzero(B_true.T)\n cond_skeleton = np.concatenate([cond, cond_reversed])\n # true pos\n true_pos = np.intersect1d(pred, cond, assume_unique=True)\n\n # false pos\n false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)\n # reverse\n extra = np.setdiff1d(pred, cond, assume_unique=True)\n reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)\n # compute ratio\n pred_size = len(pred)\n cond_neg_size = 0.5 * d * (d - 1) - len(cond)\n fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)\n tpr = float(len(true_pos)) / max(len(cond), 1)\n fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)\n # structural hamming distance\n B_lower = np.tril(B + B.T)\n pred_lower = np.flatnonzero(B_lower)\n cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))\n extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)\n missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)\n shd = len(extra_lower) + len(missing_lower) + len(reverse)\n return shd, tpr, fpr, fdr, pred_size",
"def hausdorffDistance(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n\n # truncate polydatas\n truncatedPolydata1 = self.clipPolyData(node1,valueBase)\n truncatedPolydata2 = self.clipPolyData(node2,valueBase)\n\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n cl2.SetDataSet(truncatedPolydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(10))):\n pt=[0,0,0]\n polydata1.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(truncatedPolydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)"
] | [
"0.6469436",
"0.6366787",
"0.6106287",
"0.61034775",
"0.60753113",
"0.59953755",
"0.5954967",
"0.59157926",
"0.5876313",
"0.58337986",
"0.58266443",
"0.5809017",
"0.58086526",
"0.5808168",
"0.5779059",
"0.5766238",
"0.5747568",
"0.5744262",
"0.5732244",
"0.5729079",
"0.57066333",
"0.5689473",
"0.56854635",
"0.56811595",
"0.56724024",
"0.56633496",
"0.5636182",
"0.56249005",
"0.5624786",
"0.5608065"
] | 0.7233766 | 0 |
Saves the dict with the new numbers of the users to the json file | def _save_users_data_to_json(self):
with open(self._users_numbers_json_path, 'wb') as f:
json.dump(dict(self._users_numbers), f) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_json_file(self):\n with open(\"data/save.txt\", \"r+\") as file:\n dictionary = json.load(file)\n user = dictionary[\"Actual Username\"]\n dictionary[user].append(self.score)\n\n with open(\"data/save.txt\", \"w\") as file:\n json.dump(dictionary, file, indent=3, sort_keys=True)",
"def save(self):\n with open(DEFAULT_DATA_FILE, \"r\") as f:\n data = json.load(f)\n\n if self.user.is_valid_index():\n data['users'][self.user.data_index]['balance'] = self.user.balance\n if self.user.record:\n for i in self.user.record:\n data['users'][self.user.data_index]['record'].append(i)\n data['isLocked'] = self.locked\n with open(DEFAULT_DATA_FILE, \"w\") as f:\n json.dump(data, f, indent=4)",
"def backup_users_dict(users_dict: dict) -> None:\n with open('users.json', 'w') as usrs:\n json.dump(users_dict, usrs)",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def SaveData(self):\n \n try:\n with open(self.users_file, 'r+') as outfile:\n json.dump(self.user_db, outfile, indent=4)\n outfile.truncate()\n except:\n messagebox.showerror('Error',\n f'{self.users_file} could not be accessed.' \\\n 'New user information won\\'t be saved')",
"def _add_data_to_user(self, user, number):\n self._users_numbers[user]['sum'] += number\n self._users_numbers[user]['counter'] += 1\n self._save_users_data_to_json()",
"def store_number():\n\tfilename = 'favorite_number.json'\n\twith open(filename, 'w') as f_obj:\n\t\tjson.dump(get_number(), f_obj)",
"def saveUser(self):\n self.user[\"Video\"] = \"\"\n with open(self.user_file, \"w+\") as json_file:\n json.dump(self.user, json_file, indent=4)",
"def my_click():\n name = user_name.get(1.0, END).strip()\n self.res = {}\n try:\n with open(\"data/save.txt\", \"r\") as file:\n self.res = json.load(file)\n self.res[\"Actual Username\"] = str(name)\n if not name.isalnum():\n raise ValueError\n if '' in self.res:\n del self.res['']\n\n if name not in self.res:\n self.res[str(name)] = [0]\n except FileNotFoundError:\n print('File not found !')\n except IOError:\n print('Error IO.')\n try:\n with open(\"data/save.txt\", \"w\") as file_write:\n json.dump(self.res, file_write)\n\n except FileNotFoundError:\n print('File not found !')\n except IOError:\n print('Error IO.')",
"def store_fn(fn):\r\n filename = 'user_fn.json'\r\n with open(filename, 'w') as f:\r\n json.dump(fn, f)\r\n print(f\"{fn} has been saved as your favorite number.\")",
"def save(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.fileList ) )\n f.close()\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.userList ) )\n f.close()",
"def saveData(self):\n file_location = self.json_File_Location.replace(\".json\", \"_Update.json\")\n json_file = open(file_location, \"w+\")\n json_file.write(json.dumps(self.data, indent=4, separators=(', ', ' : ')))\n json_file.close()",
"def write_new():\n if User.new_users:\n print(\"Creando nuevos usuarios {}\".format(User.new_users))\n with open(User.get_users_path(), 'a', encoding='utf8') as file:\n for username, user in User.new_users.items():\n print(\" Usuario {}\".format(user))\n user.create_user_file()\n user.write(file=file)",
"def _generate_users(self):\n users = {}\n args = self._add_user()\n #Grab info from args\n users[args[\"userID\"]] = {}\n users[args[\"userID\"]][\"name\"] = args[\"name\"]\n users[args[\"userID\"]][\"webhook_url\"] = args[\"webhook_url\"]\n users[args[\"userID\"]][\"blacklist\"] = args[\"blacklist\"]\n #Try to grab override info, default to blank if doesn't exist\n users[args[\"userID\"]][\"override_user\"] = args.get(\"overrideUser\", \"\")\n users[args[\"userID\"]][\"override_userid\"] = args.get(\"overrideUserID\", \"\")\n users[args[\"userID\"]][\"override_oauth\"] = args.get(\"overrideOauth\", \"\")\n fileIO.save_json(\"users.json\", users)",
"def get_number():\n new_number = input(\"What's your favorite number? \")\n with open('favorite_number.json', 'w') as f:\n json.dump(new_number, f)\n print(\"Now I know your favorite number!\")",
"def export_to_json(users_id):\n todo_url = 'http://jsonplaceholder.typicode.com/todos'\n user_url = 'http://jsonplaceholder.typicode.com/users/{}'.format(\n users_id)\n user_employ = requests.get(user_url)\n to_do = requests.get(todo_url)\n\n \"\"\"get information\"\"\"\n employee_name = user_employ.json().get('username')\n n_task, comp_tasks = 0, []\n for task in to_do.json():\n if task.get('userId') == int(users_id):\n comp_tasks.append(task)\n\n \"\"\"open file of json and add data\"\"\"\n with open(\"{}.json\".format(users_id), mode=\"w\") as fd:\n \"\"\"Write in the file user_id.json\"\"\"\n\n write_file = {users_id: []}\n for obj in comp_tasks:\n new_obj = {\n 'task': obj.get('title'),\n 'completed': obj.get('completed'),\n 'username': employee_name\n }\n write_file[users_id].append(new_obj)\n json.dump(write_file, fd)",
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def save_new_json():\n json.dump(\n seals_data,\n open(os.path.join(seals_root, 'seals.json'), 'w'),\n sort_keys=True,\n indent=4,\n )",
"def write_json_user(self, response):\n\n name = str(self.get_json_cursor) + \".json\"\n dst_path = os.path.join(self.get_json_folder_path, name)\n self.write_json(name, dst_path, response)",
"def add_numbers():\n global index_counter_customers\n customer = User(index_counter_customers)\n customer.save()\n index_counter_customers += 1\n\n random_number = utils.random_int()\n longitude = longitudes[random_number % 9000]\n latitude = latitudes[random_number % 9000]\n\n global index_counter_items\n global lines\n item = Item(index_counter_items, lines)\n item.save()\n index_counter_items += 1\n\n return jsonify(x=latitude, y=longitude, name = customer.user.name(), city = customer.user.city(),\n unit = item.unit, category = item.category)",
"def update_count(count):\n data = None\n\n with open(JSON_FILE) as json_file:\n data = json.load(json_file)\n\n if data is not None:\n data['count'] = count\n\n with open(JSON_FILE, 'w') as json_file:\n json.dump(data, json_file, sort_keys=True, indent=4)",
"def save(self) -> None:\n filename = \"users/\" + \"_\".join([self.name, self.lang, self.mode, self.time]) + '.json'\n\n state = {\n 'name': self.name,\n 'lang': self.lang,\n 'mode': self.mode,\n 'time': self.time,\n 'has_times': self.has_times,\n 'has_persons': self.has_persons,\n 'persons_translation': self.persons_translation,\n 'persons': self.persons,\n 'min_to_review': self.min_to_review,\n 'practice_list': self.practice_list,\n 'total_right': self.total_right,\n 'total_answers': self.total_answers\n }\n\n with open(filename, 'w') as file:\n json.dump(state, file, indent=2)",
"def get_new_username():\r\n username = input(\"What is your name? \")\r\n filename = 'username.json'\r\n with open(filename, 'w') as f_obj:\r\n json.dump(username, f_obj)\r\n return username",
"def save_users(users):\n with open(STORAGE_PATH, \"wb\") as fp:\n pickle.dump(users, fp)",
"def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)",
"def get_new_username():\n username = input(\"What is your name? \")\n filename = 'character.json'\n with open(filename, 'w') as f_obj:\n json.dump(username, f_obj)\n return username",
"def register_new_user():\n\n username = input('Write down your Instagram username:\\n>> ')\n password = input('Write down your instagram password:\\n>> ')\n\n read_write_to_json(json_file, {'username': username, 'password': password}, 'w')",
"def update_heroes(self):\n _save_dict_to_file(self.get_heroes(), \"heroes.json\")",
"def save(self):\n d1 = {}\n with open(self.__file_path, mode=\"w\") as f:\n for k, v in self.__objects.items():\n d1[k] = v.to_dict()\n json.dump(d1, f)",
"def add(username):\n path = users_folder_file_path + username\n with open(path + '/preferences.txt', 'r+') as json_file:\n data = json.load(json_file)\n\n if data['runner_type'] == 1:\n data['training_level_increase'] = \\\n int(request.form['training_level_increase'])\n data['max_days_per_week'] = 99\n\n json_file.seek(0) # rewind\n json.dump(data, json_file)\n json_file.truncate()\n return redirect(url_for('.gohome', username=username))"
] | [
"0.7745593",
"0.7146998",
"0.7131358",
"0.70276266",
"0.69209224",
"0.6906528",
"0.6896065",
"0.67190963",
"0.6694532",
"0.65293515",
"0.64926326",
"0.6472562",
"0.6410419",
"0.64005834",
"0.6341129",
"0.6338325",
"0.63258356",
"0.6311408",
"0.62698877",
"0.6129035",
"0.61145324",
"0.6084567",
"0.60663384",
"0.600821",
"0.599266",
"0.59876317",
"0.5978575",
"0.5977828",
"0.5970148",
"0.59693235"
] | 0.83313555 | 0 |
Adds the new number of the user to the user's avg data | def _add_data_to_user(self, user, number):
self._users_numbers[user]['sum'] += number
self._users_numbers[user]['counter'] += 1
self._save_users_data_to_json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_mean_user_rating(self):\n self.mean_user_rating = self.ratings.groupby(['user_id'])['rating'].mean().reset_index()",
"def _get_user_avg(self, user):\n return calculate_user_numbers_avg(self._users_numbers, user)",
"def add_to_average(total_count, total_value, new_value):\n return ((1.0 * total_count * total_value) + new_value) / (total_count + 1)",
"def updateUserRating(definition, increase):\n user = mongo.db.users.find_one({\"_id\": definition[\"submitted_by\"]})\n mongo.db.users.update_one(\n {\"_id\": user[\"_id\"]},\n {\"$inc\": {\"total_rating\": increase}})",
"def calcUserMeanRating(userRatingGroup):\n userID = userRatingGroup[0]\n ratingSum = 0.0\n ratingCnt = len(userRatingGroup[1])\n if ratingCnt == 0:\n return (userID, 0.0)\n for item in userRatingGroup[1]:\n ratingSum += item[1]\n return (userID, 1.0 * ratingSum / ratingCnt)",
"def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return",
"def compute_average_user_ratings(user_ratings):\n ave_ratings = {}\n \n for user,value in user_ratings.items():\n sum = 0\n movie_num=0\n for movieId, rating in value.items():\n sum += float(rating)\n movie_num += 1\n average = sum / movie_num\n ave_ratings[user]=average\n return ave_ratings",
"def add_rating(self):\n ratings = Comment.query.filter(\n Comment.user_to_id == self.user_to_id).all()\n rate = 0\n tot = 0\n ave = 0\n for r in ratings:\n if r.rating:\n tot += 1\n rate += r.rating\n ave = rate / tot\n user = User.query.get_or_404(self.user_to_id)\n user.rating = ave\n db.session.add(user)\n db.session.commit()\n return user",
"def get_mean_for_user(df,genres, userID):\n #PROFIL UŻYTWKONIKA#\n\n\n mean_for_user = {}\n for genre in genres:\n mean_for_user[genre] = df[(df['userID'] == userID ) & (df['genre'] == genre)]['rating'].mean()\n change_nan(mean_for_user)\n return mean_for_user",
"def broadcastUserRatingAvg(sContext, uRRDDTrain):\n userRatingAvgList = uRRDDTrain.map(lambda x: calcUserMeanRating(x)).collect()\n userRatingAvgDict = {}\n for (user, avgscore) in userRatingAvgList:\n userRatingAvgDict[user] = avgscore\n uRatingAvgBC = sContext.broadcast(userRatingAvgDict)# broadcast\n return uRatingAvgBC",
"def averaging(self, value: int):\n self._averaging = value\n\n self.events.averaging()\n self._update_avg()\n\n self.refresh()",
"def user_mean(trainset, finalpredset):\n\n train = testset_to_sparse_matrix(trainset.build_testset())\n\n num_items, num_users = train.shape\n pred = np.zeros(train.shape)\n\n for user_index in range(num_users):\n # find the non-zero ratings for each user in the training dataset\n train_ratings = train[:, user_index]\n nonzeros_train_ratings = train_ratings[train_ratings.nonzero()]\n\n # calculate the mean if the number of elements is not 0\n if nonzeros_train_ratings.shape[0] != 0:\n user_train_mean = nonzeros_train_ratings.mean()\n pred[:, user_index] = user_train_mean\n\n finalpred_usr_idx, finalpred_movies_idx, _ = get_testset_indices(finalpredset)\n return pred[finalpred_usr_idx, finalpred_movies_idx]",
"def add_mean(mean):\n return sum(mean)/len(mean)",
"def score_up(self, increment_by):\n self.user_score += increment_by",
"def get_mean_user_rating(self, user_id):\n return self.mean_user_rating[self.mean_user_rating['user_id'] == user_id]['rating'].item()",
"def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt",
"def add_rating(user, item, rating):\n users.append(user)\n items.append(item)\n ratings.append(rating)",
"def add_user_rating(self, user_id, movie_id, rating):\r\n new_row = {'user_id': int(user_id), 'item_id': int(movie_id), 'rating': rating}\r\n self.df_app_data = self.df_app_data.append(new_row, ignore_index=True)",
"def _set_total_guess(self, user, guess, db_session):\n db_user = db_session.query(db.User).filter(db.User.name == user).first()\n if not db_user:\n db_user = db.User(name=user)\n db_session.add(db_user)\n db_user.total_guess = guess",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count",
"def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)",
"def average(self, key, value):\n self._average_metrics[key] += value\n self._average_metrics_count[key] += 1",
"def update_average(self,result):\n a = 1/self.iters\n b = 1 - a\n self.average = a * result + b * self.average\n self.iters += 1",
"def avg_rate(self, instance, avg=0):\n\n try:\n # user_count = self.filter_by_model(\n # instance=instance).annotate(Count('user')).count()\n # avg = sum(x.rating for x in self.filter_by_model(\n # instance=instance)) / int(user_count)\n my_avg = self.filter_by_model(\n instance).aggregate(Avg('rating'))\n except ZeroDivisionError:\n logging.error(error_handling())\n\n # f = ''\n # if avg <= 1.0:\n # f = \"خیلی بد\"\n # if 1.0 <= avg < 3.0:\n # f = \"بد\"\n # if 3.0 <= avg < 4.0:\n # f = \"متوسط\"\n # if 4.0 <= avg < 5.0:\n # f = \"خوب\"\n # if avg >= 5.0:\n # f = \"خیلی خوب\"\n # if avg == 0:\n # f = 'نظری داده نشده'\n\n # return float(\"%.1f\" % round(my_avg, 2))\n if my_avg['rating__avg'] is None:\n return 0.0\n return my_avg['rating__avg']",
"def print_avg():",
"def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def all_average(user_email):\r\n import statistics as st\r\n import json\r\n check_email = Check_For_User(user_email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(user_email) + \" not found\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n heart_rate_list = get_all_rates(user_email)\r\n all_average = st.mean(heart_rate_list)\r\n return_dict = {\r\n \"user\": user_email,\r\n \"average\": all_average\r\n }\r\n return jsonify(return_dict), 200",
"def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()"
] | [
"0.7370114",
"0.72103995",
"0.6370001",
"0.6363194",
"0.63482326",
"0.6336928",
"0.6293731",
"0.6291411",
"0.61962384",
"0.61153215",
"0.60947543",
"0.60275847",
"0.59987086",
"0.5959771",
"0.5927861",
"0.5895361",
"0.5879943",
"0.5874055",
"0.58350474",
"0.57980376",
"0.57965964",
"0.5777411",
"0.5770953",
"0.5742117",
"0.5736624",
"0.5698155",
"0.5689029",
"0.5685291",
"0.56728953",
"0.56690574"
] | 0.741907 | 0 |
Calculates the avg of the user's numbers | def _get_user_avg(self, user):
return calculate_user_numbers_avg(self._users_numbers, user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean(numbers):\n return float(sum(numbers)) / float(len(numbers))",
"def calcUserMeanRating(userRatingGroup):\n userID = userRatingGroup[0]\n ratingSum = 0.0\n ratingCnt = len(userRatingGroup[1])\n if ratingCnt == 0:\n return (userID, 0.0)\n for item in userRatingGroup[1]:\n ratingSum += item[1]\n return (userID, 1.0 * ratingSum / ratingCnt)",
"def average(num):\n return sum(num) / len(num)",
"def average(nums):\n avg = sum(nums) / len(nums)\n return avg",
"def average(nums):\n\n return sum(nums) / float(len(nums))",
"def calc_average(numbers):\n return sum(numbers) // len(numbers) # integer division //",
"def mean(numbers):\n return int(sum(numbers)) / max(len(numbers), 1)",
"def avg(values):\n return sum(values) / float(len(values))",
"def compute_average_user_ratings(user_ratings):\n ave_ratings = {}\n \n for user,value in user_ratings.items():\n sum = 0\n movie_num=0\n for movieId, rating in value.items():\n sum += float(rating)\n movie_num += 1\n average = sum / movie_num\n ave_ratings[user]=average\n return ave_ratings",
"def average(num1, num2):\n\n return (num1 + num2) / 2",
"def avg(a,b):\r\n return (a+b)/2",
"def number_list_average(numbers):\n return sum(numbers)/len(numbers)",
"def mean(mean_numbers):\n return sum(mean_numbers) / float(len(mean_numbers))",
"def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average",
"def average(values):\n return sum(values) / len(values)",
"def average(values):\n return sum(values) / len(values)",
"def print_avg():",
"def avg(nbs):\n try:\n sum = 0\n for nb in nbs:\n sum += float(nb) \n except TypeError:\n print(\"Hmmm, I guess you haven't only entered valid numbers\")\n return\n print(\"And the average is.... : {0} \".format(sum/len(nbs)))",
"def average(self):\n return self.summation() / self.count()",
"def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)",
"def average(numbers):\n numbers_sum = 0\n numbers_count = 0\n for number in numbers:\n numbers_sum += number\n numbers_count += 1\n\n return numbers_sum / numbers_count",
"def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)",
"def avg(arr):\n return sum(arr) / float(len(arr))",
"def average(values):\n\treturn sum(values)/len(values)",
"def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)",
"def average(self, num_list):\n try:\n total = 0\n accumulator = 0\n\n for number in num_list:\n try:\n total += number\n accumulator += 1\n except Exception as e:\n print (\"Error: \", e)\n\n average = total / accumulator\n except Exception as e:\n print(\"Error: \", e)\n\n return average",
"def calcAverage(dat):\n return sum(dat)/len(dat)",
"def _mean(items):\n return sum(items) / len(items)",
"def get_average(value): # fine\r\n average_assignment = 0\r\n average_exam = 0\r\n student_count = 0\r\n if value == 'Assignment':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_assignment += int(student.assignment)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_assignment/student_count\r\n print('{:.2f}'.format(calc))\r\n elif value == 'Exam':\r\n for student in StudentRoster:\r\n student_count += 1\r\n average_exam += int(student.exam)\r\n if student_count == 0:\r\n print(0)\r\n else:\r\n calc = average_exam/student_count\r\n print('{:.2f}'.format(calc))",
"def average(grade1, grade2, grade3):\n return (grade1 + grade2 + grade3) / 3"
] | [
"0.73072195",
"0.7117617",
"0.7076336",
"0.70757735",
"0.7068994",
"0.6980192",
"0.69689524",
"0.695918",
"0.69573164",
"0.6951897",
"0.6939342",
"0.68884647",
"0.6868578",
"0.67754674",
"0.67590624",
"0.67590624",
"0.6749915",
"0.6743124",
"0.673626",
"0.6716683",
"0.6716052",
"0.670352",
"0.66984224",
"0.66961837",
"0.6679344",
"0.6674252",
"0.6657213",
"0.6612981",
"0.66084164",
"0.65924853"
] | 0.81641376 | 0 |
Checks if the data of a message is a number or not | def _check_message_is_number(message):
try:
float(message)
return True
except ValueError:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def is_number(self) -> bool:\n return False",
"def is_number(G):\n return True",
"def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False",
"def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False",
"def isNumber(number):\n try:\n # Try to cast the string\n int(number)\n # The cast was successful\n return True\n # The cast was unsuccessful, the string is not a number\n except ValueError as err:\n # Write the exception in logging\n logging.exception(str(err))\n return False",
"def is_number(value):\n\n return isinstance(value, (int, long, float))",
"def ISNUMBER(value):\n return isinstance(value, numbers.Number)",
"def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False",
"def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False",
"def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False",
"def isnumber(number,message):\n print \"isnumber \", number\n try:\n num=int(number) \n except ValueError:\n fulmes=message+' '+number+'is not number'\n print fulmes\n MywError(fulmes)\n return None\n else:\n return num",
"def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False",
"def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True",
"def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False",
"def is_numberish(G):\n return True",
"def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False",
"def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False",
"def is_number(n):\n return isinstance(n, (int, float))",
"def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False",
"def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False",
"def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False",
"def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False",
"def check_type(number):\r\n if number.find(\"i\") != -1:\r\n return 1\r\n return 0",
"def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False",
"def isnumeric(self):\n return isnumeric(self)",
"def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False",
"def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False"
] | [
"0.7506115",
"0.70249045",
"0.698771",
"0.6913425",
"0.6894809",
"0.68555146",
"0.6834443",
"0.6791668",
"0.6768796",
"0.6759166",
"0.67426765",
"0.6738768",
"0.67286825",
"0.67249095",
"0.67126197",
"0.6678519",
"0.6674555",
"0.665162",
"0.66426355",
"0.6605666",
"0.65988195",
"0.65900093",
"0.65754396",
"0.6550616",
"0.6547988",
"0.65449554",
"0.6534035",
"0.6527239",
"0.65248567",
"0.6517821"
] | 0.8340901 | 0 |
Gets the user name from the user id by api call from the slack client | def _get_user_name_from_user_id_by_slack_client(self, user_id):
user_info = self.slack_client.api_call('users.info', user=user_id)
if not user_info['ok']:
return
user_name = user_info['user']['name']
self._user_id_to_user_name[user_id] = user_name
return user_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_username_from_api(self):\n result = self.api_query(action=\"query\", meta=\"userinfo\")\n return result[\"query\"][\"userinfo\"][\"name\"]",
"def _get_user_name_from_user_id(self, user_id):\n if user_id in self._user_id_to_user_name.keys():\n return self._user_id_to_user_name[user_id]\n return self._get_user_name_from_user_id_by_slack_client(user_id)",
"def get_username(self, tg_user_id):\n\n data = {\n 'user_id': tg_user_id\n }\n result = self._send_data('getUser', data)\n if result.update:\n return result.update.get('username','')",
"def get_user(user_id):\r\n data = slack_client.api_call(\"users.info\", user=user_id)\r\n if not data[\"ok\"]:\r\n return False\r\n response = {}\r\n response[\"username\"] = data[\"user\"][\"name\"]\r\n response[\"name\"] = data[\"user\"][\"profile\"][\"real_name_normalized\"]\r\n response[\"user_id\"] = data[\"user\"][\"id\"]\r\n return response",
"def get_username(self, auth_token, user_id=None):\n self.headers['Authorization'] = f'Bearer {auth_token}'\n user = self.http_client.get(\n f'{self.api_endpoint}/users/@me', self.headers)\n return f'{user[\"username\"]}#{user[\"discriminator\"]}'",
"def get_id_from_name(slack_client, name):\n api_call = slack_client.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n for user in users:\n if 'name' in user and user['name'] == name:\n return user.get('id')\n return None",
"def get_user_details(self, response):\n # Build the username with the team $username@$team_url\n # Necessary to get unique names for all of slack\n username = response.get('user')\n if self.setting('USERNAME_WITH_TEAM', True):\n match = re.search(r'//([^.]+)\\.slack\\.com', response['url'])\n username = '{0}@{1}'.format(username, match.group(1))\n\n out = {'username': username}\n if 'profile' in response:\n out.update({\n 'email': response['profile'].get('email'),\n 'fullname': response['profile'].get('real_name'),\n 'first_name': response['profile'].get('first_name'),\n 'last_name': response['profile'].get('last_name'),\n 'team_name': response.get('team_name')\n })\n return out",
"def get_slack_id(user):\n members = get_slack_users()\n user_name = [member for member in members if member.get('profile').get(\n 'email') == user['email']]\n return user_name[0].get('id') if user_name else ''",
"def get_user(id):\n pass",
"def get_user_name_by_id(self, user_id):\n try:\n res = self.db_handler.get_user_name_by_id(user_id)\n\n self.logger.write_to_log('user full name got', user_id)\n\n return res\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def _get_user_id(self, name):\n try:\n apiResponse = twitchAPI.twitchAPIGet(\"/users\", {\"login\": name}) #Try to get user id from API\n userID = apiResponse[\"data\"][0][\"id\"]\n except (KeyError, APIUnavailable):\n userID = input(\"Please enter the user id of the user: \")\n except IndexError: #If Twitch API does not return user id\n print(\"That user does not exist on Twitch.\")\n userID = False\n return(userID)",
"def slack_info(request):\n params = slack_callback(request)\n\n if not params:\n # Authorization failed.\n return redirect(\"codedoor:login\")\n\n # if user is already in database, return redirect(url)\n # else, if it's a new user, redirect to the finishprofile page for the user to input the rest of their info\n user = authenticate(params[\"user\"][\"email\"])\n if user is None:\n slack_name = params[\"user\"][\"name\"].split(\" \")\n if len(slack_name) == 2:\n first_name, last_name = slack_name\n else:\n first_name = slack_name[0]\n last_name = \"\"\n return render(\n request,\n 'codedoor/finish_profile.html',\n {\n \"id\": params['user']['email'],\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"email\": params[\"user\"][\"email\"],\n \"pic\": params[\"user\"]['image_512']\n }\n )\n else:\n auth_login(request, user)\n return redirect(\"codedoor:viewprofile\", pk=user.profile.id)",
"def _get_user_id(self):\n auth_response = self._slack_client.api_call(\"auth.test\")\n\n if auth_response.get(\"ok\") is not None and auth_response[\"ok\"]:\n bot_id = auth_response[\"user_id\"]\n logger.info(\"Connected to slack with user id: {}\".format(bot_id))\n return bot_id\n else:\n raise PermissionError(auth_response[\"error\"])",
"def get_user_name(url, session=None):\n return get_whole_response_as_json(url, session)['name']",
"def get_user_id(self, details, response):\n return details['username']",
"def get_sound_cloud_user(handler):\n user_id = handler.get_argument('user_id')\n sound_cloud_client = Petitions.instantiate_user(user_id)\n current_user = sound_cloud_client.get('/me').username\n return current_user # Improve messages. Change to Json",
"def find_slack_user(self, slack_team_id, slack_user_id):\n\t\tif not isinstance(slack_team_id, str):\n\t\t\traise ValueError('slack_team_id must be a string, was %s' % (slack_team_id,))\n\t\tif not isinstance(slack_user_id, str):\n\t\t\traise ValueError('slack_team_id must be a string, was %s' % (slack_user_id,))\n\n\t\tcur = self.db.cursor()\n\t\ttry:\n\t\t\tsql = 'SELECT user_id FROM slack_user_001 WHERE slack_team_id = %s AND slack_user_id = %s'\n\t\t\tcur.execute(sql, (slack_team_id, slack_user_id))\n\t\t\tresults = cur.fetchall()\n\t\t\tif len(results) == 0:\n\t\t\t\treturn None\n\t\t\telif len(results) == 1:\n\t\t\t\treturn str(results[0][0])\n\t\t\telse:\n\t\t\t\traise WeirdStateError('Multiple users returned from slack_user which doesn\\'t make sense because that should have been a primary key')\n\t\tfinally:\n\t\t\tcur.close()",
"def username(user_id):\n return UserIndex.instance().name(user_id)",
"def get_username_by_id(self, id):\n return User.query.get(id).username",
"def get_user_id(self, details, response):\n return response.get(\"sub\")",
"def get_slack_token_owner():\n response = slack_client.api_call(\n \"auth.test\",\n )\n if not response.get(\"ok\", False):\n raise SlackError('Failed to get slack token owner {}'.format(response['error']))\n return response['user_id']",
"def get_user(api, user_name):\r\n # Checking input:\r\n if not isinstance(user_name, str) and not isinstance(user_name, int):\r\n raise ValueError(\"You can only get user by his/her id (int) or name (str).\")\r\n\r\n # Main part:\r\n try:\r\n user = api.get_user(user_name)\r\n return user\r\n except tweepy.error.TweepError:\r\n raise UserNotFoundError(\"No Twitter user with such name/id exists.\")",
"def get_identifier(self, request):\r\n username, api_key = self.extract_credentials(request)\r\n return username or 'nouser'",
"def get_username_for_api_key(self, api_key):\n raise NotImplementedError()",
"def user_profile(user_id):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.users_info(user=user_id)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response",
"def get_user_by_id(cur, id) -> str:\n cur.execute(f'''\n SELECT name FROM user WHERE id = {id} ''')\n return cur.fetchone()[0]",
"def get_user(id=None, name=None):\n found_id = get_user_id(id, name)\n if not found_id:\n return\n response = utils.checked_api_call(users_api, 'get_specific', id=found_id)\n if response:\n return response.content",
"def username(self) -> str:",
"def username(self) -> str:",
"def _get_username(user_id):\n username = select(u.username for u in UserInformationData if u.user_id == user_id).first()\n\n return username"
] | [
"0.748013",
"0.71902126",
"0.71136624",
"0.69366384",
"0.68196195",
"0.6777821",
"0.67232335",
"0.661219",
"0.6596149",
"0.6536261",
"0.65273553",
"0.63704205",
"0.6370292",
"0.6364513",
"0.6342001",
"0.6297765",
"0.6273787",
"0.62715864",
"0.6270835",
"0.62575704",
"0.6244287",
"0.62361425",
"0.62153745",
"0.61985195",
"0.618255",
"0.61807686",
"0.6178042",
"0.6174033",
"0.6174033",
"0.6149533"
] | 0.79836494 | 0 |
For each message in the chat, calculates the user's numbers avg and sends it to the chat | def process_message(self, data):
if not self._check_message_is_number(data['text']):
return
user_name = self._get_user_name_from_user_id(data['user'])
self._add_data_to_user(user_name, float(data['text']))
self.outputs.append([data['channel'], str(self._get_user_avg(user_name))]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def voice_message_tally(user_name, msg_logs, scorecard_map):\n for row in msg_logs:\n msg = ujson.loads(row[0])\n if is_my_outgoing_msg(msg):\n if msg['Type'] == RECORDING:\n # I sent a voice msg, that shows my interest, therefore bump my pondness value\n scorecard_map[user_name].my_pval += 1\n else: # this is an incoming message from my friend\n if msg['Type'] == RECORDING:\n # Someone sent me a voice msg, that shows their interest, therefore bump their pondness value\n scorecard_map[user_name].their_pval += 1",
"def average_length(_: Bot, update: Update):\n\n average = analytics.average_message_length(\n update.message.from_user.id, update.message.chat.id\n )\n response = f'{average:.3}'\n\n update.message.reply_text(response)",
"def aggregate_msgs(self, connected_msgs_list):\n msg_num = len(connected_msgs_list)\n agg_msg = connected_msgs_list[0]\n for i in range(1, msg_num):\n agg_msg += connected_msgs_list[i]\n\n if self.msg_aggrgt == 'AVG':\n return agg_msg / msg_num\n elif self.msg_aggrgt == 'SUM':\n return agg_msg",
"def average_num_reply():\n post_count = Post.select().count()\n reply_count = Comment.select().count()\n if post_count == 0:\n return 0\n else:\n average = round(reply_count / post_count, 2)\n return average",
"def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))",
"async def on_message(self, message: Message) -> None:\n messages = [\n msg.created_at\n for msg in await message.channel.history(limit=MESSAGE_HISTORY_AMOUNT)\n if msg.author == message.author\n ]\n\n avg = sum(np.diff(messages)) / MESSAGE_HISTORY_AMOUNT\n\n if avg < MAX_AVERAGE_TIME_DIFFERENCE:\n if await self.bot.database.check_if_blocked(message.author.id):\n async with self.bot.pool.acquire() as conn:\n await conn.execute(\"INSERT INTO Blocked_Users(user_id) VALUES ($1)\", message.author.id)",
"def guesstotal(self, message, db_session):\n user = self.ts.get_user(message)\n if db_session.query(db.MiscValue).filter(db.MiscValue.mv_key == 'guess-total-enabled').one().mv_value == \"True\":\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n guess = msg_list[1]\n if guess.isdigit() and int(guess) >= 0:\n self._set_total_guess(user, guess, db_session)\n self._add_to_whisper_queue(user, \"{} your guess has been recorded.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, that's not a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user,\n \"Sorry {}, you need to include a number after your guess.\".format(user))\n else:\n self._add_to_whisper_queue(user,\n \"Sorry {}, guessing for the total number of deaths is disabled.\".format(user))",
"def computeFraction( poi_messages, all_messages ):\n\n\n ### you fill in this code, so that it returns either\n ### the fraction of all messages to this person that come from POIs\n ### or\n ### the fraction of all messages from this person that are sent to POIs\n ### the same code can be used to compute either quantity\n\n ### beware of \"NaN\" when there is no known email address (and so\n ### no filled email features), and integer division!\n ### in case of poi_messages or all_messages having \"NaN\" value, return 0.\n fraction = 0.\n \n if math.isnan(float(poi_messages)) or math.isnan(float(all_messages)):\n fraction = 0.\n else:\n fraction = (poi_messages * 1.0) / (all_messages * 1.0)\n\n\n\n return fraction",
"def ping_pong_tally(user_name, msg_logs, scorecard_map):\n for row in msg_logs:\n msg = ujson.loads(row[0])\n if is_my_outgoing_msg(msg):\n # I sent a msg, that shows my interest, therefore bump my pondness value\n scorecard_map[user_name].my_pval += 1\n else: # this is an incoming message from my friend\n # Someone sent me a msg, that shows their interest, therefore bump their pondness value\n scorecard_map[user_name].their_pval += 1",
"def get_mean_time_between_split_messages(messages):\n remote_frame_and_response_indices = __get_remote_frame_and_response_indices(messages)\n\n times_between_messages = []\n\n # Using the list of non-normal indices to find all neighbours of remote frames and remote frame responses.\n for index in remote_frame_and_response_indices:\n times_between_messages.append(messages[index + 1].timestamp - messages[index - 1].timestamp)\n\n return np.mean(times_between_messages) if len(times_between_messages) > 0 else 0",
"def average(data, event):\n if len(data) == 0:\n return 0\n\n score = 0\n # scores = []\n count = 0\n for i in data:\n count += 1\n if event == 'Swim' or event == 'Run':\n num = time_seconds(i[event])\n #print(\"first if\")\n #Sprint(num)\n else:\n num = int(i[event])\n #print(\"second if\")\n #print(num)\n #scores[count] =\n #print(\"end of loop count\" + str(count))\n score += num\n #print (\"score\" + str(score))\n\n # total = 0\n # for x in range(0,len(scores)):\n # total += scores[x]\n score = float(score)\n\n return score / count",
"def scores(bot, update):\n chat_id = update.message.chat_id\n bot.send_message(chat_id,\n chats[chat_id].displayScores(),\n parse_mode=ParseMode.MARKDOWN,\n isgroup=True)",
"def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message",
"def avgLength(afz, output=True):\n count = 0\n length = 0\n for msg in msgs:\n if msg.afz == afz:\n count = count + 1\n length = length + len(msg.msg)\n avg = length/count\n if output:\n print afz, 'heeft', count, 'berichten verzonden met een gemiddelde lengte van', avg, 'tekens.'\n return avg\n else:\n return avg",
"def on_message(client, userdata, message):\n # TODO: Refactor me!! Reduce the number of globals we're using here by\n # adding them to the client var\n\n log = logging.getLogger('iob')\n \n try:\n global count\n global data\n global outsideCount\n global binOutside\n global lastMessage\n \n lastMessage = time.time()\n \n if count > 9:\n count = 1\n average = reduce(lambda x, y: x + y, data) / len(data)\n log.debug(\"average: \" + str(average))\n \n if average > 85:\n outsideCount = outsideCount + 1\n else:\n outsideCount = 0\n \n x = 0\n \n log.debug(outsideCount)\n \n packet = json.loads(message.payload)\n x = abs(packet[\"rssi\"])\n\n data[count] = x\n\n # TODO: Clean up logic, it appears to be backwards here\n if outsideCount > 10:\n if not binOutside:\n log.debug(\"Bin is outside!\")\n slackMessage(True)\n binOutside = True\n else: \n if binOutside:\n log.debug(\"Bin is inside\")\n slackMessage(False)\n binOutside = False\n \n count += 1\n \n except Exception as e:\n log.exception(e)\n import sys\n sys.exit(1)\n return",
"def event_details_score_mal_email(self, g, w):\n sen = self.sender_score(g, w)\n sub = self.subject_score(g, w)\n lin = self.link_score(g, w)\n att = self.attachments_score(g, w)\n\n try: scores = list(filter(lambda x: x is not None, [sen, sub, max(lin, att)]))\n \n except TypeError:\n scores=0\n return np.mean(scores)",
"def avg_time_to_reply():\n date_from_post = []\n for record in Post.select().join(Comment, on=(Post.id == Comment.post_id)):\n # record.date refers to the date of each post\n date_from_post.append(record.date)\n\n date_from_comment = []\n for record in Comment.select().join(Post, on=(Post.id == Comment.post_id)):\n # record.date refers to the date of each comment\n date_from_comment.append(record.date)\n\n hr = []\n for x, y in zip(date_from_comment, date_from_post):\n time1 = datetime.datetime.strptime(x, \"%Y-%m-%d %H:%M\")\n time2 = datetime.datetime.strptime(y, \"%Y-%m-%d %H:%M\")\n d = time1 - time2\n days_in_hours = d.days * 24\n sec_in_hour = d.seconds / 3600\n hr.append(days_in_hours + sec_in_hour)\n if len(hr) == 0:\n return 0\n else:\n avg_time = round(sum(hr)/(len(hr)), 2)\n return avg_time",
"def compute_fraction(poi_messages, all_messages):\n\n # you fill in this code, so that it returns either\n # the fraction of all messages to this person that come from POIs\n # or\n # the fraction of all messages from this person that are sent to POIs\n # the same code can be used to compute either quantity\n\n # beware of \"NaN\" when there is no known email address (and so\n # no filled email features), and integer division!\n # in case of poi_messages or all_messages having \"NaN\" value, return 0.\n fraction = 0.\n\n if poi_messages != 'NaN' and all_messages != 'NaN':\n fraction = poi_messages/all_messages\n\n return fraction",
"def calculate_member_perc(history: List[discord.Message]) -> dict:\n msg_data = {\"total_count\": 0, \"users\": {}}\n for msg in history:\n # Name formatting\n if len(msg.author.display_name) >= 20:\n short_name = \"{}...\".format(msg.author.display_name[:20]).replace(\"$\", \"\\\\$\")\n else:\n short_name = msg.author.display_name.replace(\"$\", \"\\\\$\").replace(\"_\", \"\\\\_ \").replace(\"*\", \"\\\\*\")\n whole_name = \"{}#{}\".format(short_name, msg.author.discriminator)\n if msg.author.bot:\n pass\n elif whole_name in msg_data[\"users\"]:\n msg_data[\"users\"][whole_name][\"msgcount\"] += 1\n msg_data[\"total_count\"] += 1\n else:\n msg_data[\"users\"][whole_name] = {}\n msg_data[\"users\"][whole_name][\"msgcount\"] = 1\n msg_data[\"total_count\"] += 1\n return msg_data",
"async def share(message):\n # get information about the user\n uid = message['from']['id']\n name = message['from']['first_name']\n\n if not re.fullmatch(r'/share [1-9]+[0-9]*', message['text']):\n # message fails to parse\n return 'Message does not match the required format. Check rules in /help.'\n\n # get amount of money from message\n money = int(message['text'].split(' ')[1])\n # number of users\n users = await queries.get_users()\n number_users = len(users)\n # amount on money per each user\n share_money = money / number_users\n\n # update table debts for all users except creditor\n for user in users:\n if user['uid'] != uid:\n # all users except creditor\n debtor_uid = user['uid']\n # update table debts\n await _change_debts_dictionary(debtor_uid, uid, share_money)\n message_share = name + ', ' + str(money) + ' was shared among all users of the bot.'\n return message_share",
"def streak_bonus_tally(user_name, msg_logs, scorecard_map):\n my_streak_factor = 0\n their_streak_factor = 0\n\n for row in msg_logs:\n msg = ujson.loads(row[0])\n if is_my_outgoing_msg(msg):\n # I sent a msg, that shows my interest, therefore bump my pondness value with applicable streak bonus\n scorecard_map[user_name].my_pval += 0.1 * my_streak_factor\n my_streak_factor += 1\n their_streak_factor = 0\n else: # this is an incoming message from my friend\n # Someone sent me a msg, that shows their interest,\n # therefore bump their pondness value with applicable streak bonus\n scorecard_map[user_name].their_pval += 0.1 * their_streak_factor\n their_streak_factor += 1\n my_streak_factor = 0",
"def lightening_reply_tally(user_name, msg_logs, scorecard_map):\n if not msg_logs:\n return\n\n prev_msg_ts = 0\n is_prev_msg_outgoing = is_my_outgoing_msg(ujson.loads(msg_logs[0][0]))\n\n for row in msg_logs:\n msg = ujson.loads(row[0])\n msg_ts = msg['CreateTime']\n time_delta = msg_ts - prev_msg_ts\n\n if is_my_outgoing_msg(msg):\n if not is_prev_msg_outgoing and time_delta <= ONE_MIN:\n # I replied quickly, bump my p value\n scorecard_map[user_name].my_pval += (60 - time_delta) / 120\n else:\n if is_prev_msg_outgoing and time_delta <= ONE_MIN:\n # Someone replied quickly, bump their p value\n scorecard_map[user_name].their_pval += (60 - time_delta) / 120\n\n prev_msg_ts = msg_ts\n is_prev_msg_outgoing = is_my_outgoing_msg(msg)",
"def computeFraction(poi_messages, all_messages):\n fraction = 0.\n if all_messages != \"NaN\":\n fraction = float(poi_messages)/float(all_messages)\n else:\n fraction = 0\n return fraction",
"def _get_user_avg(self, user):\n return calculate_user_numbers_avg(self._users_numbers, user)",
"def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average",
"def _broadcast_scores(self, participants, game_id, round_num):\n pscores = {}\n for p in participants:\n pscores[p.plus_id] = (\n {'score': p.score, 'game_score': p.game_score,\n 'hangout_score': p.hangout_score})\n message = simplejson.dumps(\n {'scores_info':\n {'participant_scores': pscores, 'game_id': game_id,\n 'round': round_num}})\n logging.info(\"scores message: %s\", message)\n for p in participants:\n logging.info(\"sending channel msg to participant %s\", p.plus_id)\n channel.send_message(p.channel_id, message)",
"def get_query_sentiment_avg(tweets):\r\n\r\n total = 0\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg",
"def msgStats():\n r = {}\n r[\"users\"] = User.count()\n return jsonify(r)",
"def get_mean_time_between_normal_messages(messages):\n remote_frame_and_response_indices = __get_remote_frame_and_response_indices(messages)\n\n # The times between two normal messages.\n times_between_messages = []\n\n # Changing the list to a set to improve the time complexity of the \"in\" operation.\n remote_frame_and_response_indices = set(remote_frame_and_response_indices)\n\n # Using the list of non-normal indices to find the time between all neighbour pairs of normal messages.\n for i in range(len(messages) - 1):\n if i not in remote_frame_and_response_indices and i + 1 not in remote_frame_and_response_indices:\n times_between_messages.append(messages[i + 1].timestamp - messages[i].timestamp)\n\n return np.mean(times_between_messages)",
"def repeating_char_tally(user_name, msg_logs, scorecard_map):\n for row in msg_logs:\n msg = ujson.loads(row[0])\n content = msg['Text']\n\n # emojis are written with multiple characters but we want to treat them as one unit\n # this line replaces emojis with a special character for easy counting\n content = re.sub(r'\\[[a-zA-Z]+\\]', '@', content)\n\n max_char, cnt = get_max_repeating_char(content)\n if cnt < 3 or max_char in [' ', '.', '。', '-', '_', '+', '=', ',', '`', '*', '|', '\\\\']:\n continue\n\n if is_my_outgoing_msg(msg):\n scorecard_map[user_name].my_pval += 0.1 * (cnt - 2)\n else: # this is an incoming message from my friend\n scorecard_map[user_name].their_pval += 0.1 * (cnt - 2)"
] | [
"0.66360503",
"0.6507629",
"0.6444656",
"0.60974026",
"0.60704195",
"0.6050756",
"0.59103894",
"0.58927846",
"0.5799758",
"0.5769411",
"0.57484597",
"0.5740269",
"0.5736444",
"0.5718476",
"0.5707916",
"0.56380105",
"0.5597593",
"0.5594113",
"0.5568487",
"0.5565372",
"0.5494416",
"0.54614973",
"0.54540133",
"0.54146796",
"0.54019094",
"0.5382758",
"0.5374172",
"0.5367413",
"0.5363168",
"0.53519493"
] | 0.73034394 | 0 |
Add the product selected in the list of favorite of the user | def add_favorite(request):
print("La fonction pour ajouté un produit est appelé")
query = request.GET.get('_substitute_product','')
print(query)
# query_favorite = query.id
query_name = Product.objects.get(name=query)
print(query_name)
print("ID DU PRODUIT")
username = request.user
user_id = request.user.id
# user = User.objects.get(id=username)
print(username)
print("ID DE L'USER")
if query_name is not None:
try:
UserFavorite.objects.get(user_name=username, product=query_name)
print("Ce produit est déjà dans vos favoris.")
except ObjectDoesNotExist:
new_favorite = UserFavorite.objects.create(user_name=username,product=query_name)
new_favorite.save()
print("Le produit a bien été enregistré.")
else:
pass
return redirect('favorits')
# return render(request,'index.html') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def see_favorits(request):\n user_name = request.user\n print(user_name)\n # product = UserFavorite.objects.filter(user_name=user_name)\n list_favorits = UserFavorite.objects.all().filter(user_name=user_name)\n favorits_query = list_favorits\n favorits_list = []\n for favorite in favorits_query:\n favorits_list.append(Product.objects.get(pk=favorite.product.id))\n print(favorits_list)\n context = {\n # 'product' : product,\n 'user_name' : user_name,\n 'product' : favorits_list\n }\n\n\n return render(request,\"favorits.html\",context)",
"def get_favorite_by_product(self, product):\n products = self.db.query(f\"\"\"\n SELECT product.id, product.name from store\n JOIN product_store ON product_store.store_id = store.id\n JOIN product ON product_store.product_id = product.id\n WHERE store.id = :id\n \"\"\", id=product.id).all(as_dict=True)\n return [self.model(**product) for product in products]",
"def add_favorite(self, product_id: str, substitute_id: str) -> None:\n add_favorite_request = \"INSERT INTO substituted_product VALUES (%s, %s)\"\n self.insert(add_favorite_request, (substitute_id, product_id))",
"def save(self, substitute_choice, product_choice):\n self.db.query(f\"\"\"\n INSERT INTO {self.table} (substitut_id, original_id)\n VALUES (:substitut_id, :original_id)\n ON DUPLICATE KEY UPDATE substitut_id = :substitut_id\n \"\"\", substitut_id=substitute_choice.id, original_id=product_choice.id)\n favorite = (substitute_choice.id, product_choice.id)\n return favorite",
"def set_favorite(request):\n company_id = request.data.get('id')\n company = Company.objects.get(id=company_id)\n\n request.user.profile.companies.add(company)\n return Response({'favorite': True})",
"def get_favorites(self) -> Dict:\n return self.query(\"SELECT * FROM substituted_product\")",
"def add_favorite(self, pk: int) -> Response:\n try:\n TagDAO.favorite_tag_by_id_for_current_user(pk)\n return self.response(200, result=\"OK\")\n except TagNotFoundError:\n return self.response_404()\n except MissingUserContextException as ex:\n return self.response_422(message=str(ex))",
"def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)",
"def add(self, product):\n product_id = str(product.id)\n self.wishlist[product_id] = {'price': str(product.price)}\n self.save()",
"def affiche_favoris():\r\n # Liste des favoris utilisés pour la fonction \"select_favorite\"\r\n favorite_dict = {}\r\n # pour les produits dans Count\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"SELECT F1.name as Product, F2.name as Substitute \\\r\n FROM Backup \\\r\n INNER JOIN Food F1 ON Backup.produit_id = F1.id \r\n INNER JOIN Food F2 ON Backup.substitut_id = F2.id\"\"\")\r\n favorite = cursor.fetchall()\r\n index = 1\r\n for i in favorite:\r\n favorite_tuple = (i[0], i[1])\r\n print(\"\\n {}. {}, Peut être remplacé par {}.\".format(index, \\\r\n favorite_tuple[0], favorite_tuple[1]))\r\n favorite_dict[index] = favorite_tuple\r\n index += 1\r\n\r\n if not favorite_dict:\r\n print (\"La liste des favoris est vide.\")\r\n else:\r\n print('Choisissez un chiffre pour plus de détail.')\r\n select_favorite(favorite_dict)",
"def fetch_favourites(self):\n while True:\n self.cur.execute(\"SELECT DISTINCT product_id FROM Product_substitute\")\n response = self.cur.fetchall()\n\n for i, element in enumerate (response):\n print (\"Tapez {} pour voir les substituts de:\".format(i+1))\n self.display_product_from_id(element[0])\n \n choice_id = response[self.secure_input(1, len(response))-1]\n \n self.cur.execute(\"SELECT substitute_id FROM Product_substitute WHERE product_id = %s\", (choice_id[0], ))\n response = self.cur.fetchall()\n\n print(\"Voici les substituts trouves pour:\")\n self.display_product_from_id(choice_id[0])\n for element in response:\n self.display_product_from_id(element[0])\n\n print(\"Faire une autre recherche dans vos favoris? Oui = 1 non =0\")\n again = self.secure_input(0, 1)\n if again == 1:\n continue\n else:\n break",
"def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()",
"def select_favorite(favoris_dict):\r\n choice = user_choix_input(len(favoris_dict))\r\n # Extract the specifitions of the product to display it\r\n product = extract_product(favoris_dict[choice][0])\r\n # Extract the specifitions of the substitute to display it\r\n substitute = extract_product(favoris_dict[choice][1])\r\n print_product(product)\r\n print('\\n Vous pouvez remplacer ceci par: \\n')\r\n print_product(substitute)",
"def myfood(request):\n my_fav = Favorite.objects.filter(user=request.user)\n my_fav_products = [fav.product_favorite for fav in my_fav]\n paginator = Paginator(my_fav_products, 6)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n context = {\n \"favorites\": page_obj\n }\n return render(request, \"myfood.html\", context)",
"def favourite(self, favourite):\n\n self._favourite = favourite",
"def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')",
"def get_all_favorite(self):\n products = self.db.query(f\"\"\"\n SELECT original.`name` as \"product_as_original\", substitute.`name`\n as \"product_as_substitut\", substitute.`url` as \"url\",\n substitute.`nutrition_grade`,\n GROUP_CONCAT(DISTINCT store.`name` SEPARATOR ', ')\n as stores FROM favorite as fav\n JOIN product as original ON original.id = fav.original_id\n JOIN product as substitute ON substitute.id = fav.substitut_id\n JOIN product_store ON product_store.product_id = substitute.id\n JOIN store ON store.id = product_store.store_id\n GROUP BY original.name, substitute.name, substitute.url,\n substitute.nutrition_grade\n \"\"\").all(as_dict=True)\n return [self.model(**product) for product in products]",
"def test_Favourite(self):\n self.assertEquals(self.fav_1.pk, 1)\n self.assertEquals(self.fav_1.date_added, '2019-12-20 09:00:00')\n self.assertEquals(self.fav_1.user.pk, 1)\n self.assertEquals(self.fav_1.product.pk, 1)",
"async def create(self, favorite: Favorite) -> Favorite:",
"def product_detail(request, pk):\n product = get_object_or_404(Product, pk=pk)\n if request.method == \"POST\":\n if not request.user.is_authenticated:\n return redirect('login')\n\n else:\n form = ReviewForm(request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.user = request.user\n review.product = product\n review.save()\n messages.success(request, \"Thanks for your review, it has been sent for approval!\")\n\n return redirect(product_detail, product.pk)\n\n else:\n form = ReviewForm()\n review_count = product.reviews.filter(approved=True).count()\n sum = 0 \n avg = 0 \n if review_count > 0:\n for score in product.reviews.filter(approved=True).values(\"score\"):\n sum += score[\"score\"]\n avg = sum / review_count \n\n is_favourite = False\n if request.user.is_authenticated:\n user = request.user \n if Favourite.objects.filter(user=user, product=product).count() > 0:\n is_favourite = True\n return render(request, \"productdetail.html\", {'product': product,\n 'form': form,\n 'is_favourite': is_favourite,\n 'score': avg,\n 'review_count': review_count})",
"def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")",
"def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")",
"def products_list(request, product):\n product_found = get_object_or_404(Products, product=product)\n\n nut = product_found.nutrition_grade_fr\n\n query_set_product = (\n Products.objects.filter(category=product_found.category)\n .filter(\n Q(nutrition_grade_fr__lte=nut) \n ) # propose products with value less or equal at the search product\n .exclude(product=product_found.product)\n )\n\n if len(query_set_product) >= 6:\n random_six_products = random.sample(\n list(query_set_product), 6\n ) # select 6 products randomly\n \n else:\n query_set_product = Products.objects.filter(\n Q(nutrition_grade_fr__lte=nut) \n ).exclude(product=product_found.product)\n\n random_six_products = random.sample(\n list(query_set_product), 6\n ) # select 6 products randomly \n\n\n if \"submit\" in request.POST: # do something with interview_HTML button is clicked\n save_product = request.POST.get(\"submit\")\n save_product = Products.objects.get(product=save_product)\n if not request.user.is_authenticated:\n return redirect(\"%s?next=%s\" % (settings.LOGIN_URL, request.path))\n user = request.user\n\n user = CustomUser.objects.get(email=user)\n\n save = History(\n user=user,\n chosen_product=product_found,\n remplacement_product=save_product,\n )\n save.save()\n\n context = {\n \"proposed_product\": product_found,\n \"products\": random_six_products,\n }\n\n return render(request, \"products/products.html\", context)",
"def add_to_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.add_to_user_fav(request.session['id'], q_id)\n return redirect('/quotes')",
"def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite",
"def get_favorite(self):\n raise NotImplementedError()",
"def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))",
"def search_in_fav(request):\n query = request.GET.get('user_search')\n\n if query:\n # Returns the query in lower case and without accents\n query = unidecode(query).lower()\n result = True\n\n cur_user = request.user\n # Returns all favorites\n favorites = Favorite.objects.all()\n\n # Returns current user filtered favorites\n fav_filtered = favorites.filter(\n users_id=cur_user\n ).filter(products__name__icontains=query).order_by('id')\n\n if not fav_filtered.exists():\n result = False\n fav_filtered = favorites.filter(\n users_id=cur_user).order_by('id')\n\n # Init pagination with 6 products\n paginator = Paginator(fav_filtered, 6)\n page = request.GET.get('page')\n\n try:\n fav_filtered = paginator.page(page)\n except PageNotAnInteger:\n fav_filtered = paginator.page(1)\n except EmptyPage:\n fav_filtered = paginator.page(paginator.num_pages)\n\n if result:\n title = \"Résultats de la recherche : {}\".format(query)\n else:\n title = \"Aucun résultat pour la recherche : {}\".format(query)\n\n context = {\n 'is_result': result,\n 'fav_filtered': fav_filtered,\n 'title': title,\n 'paginate': True,\n }\n\n return render(request, 'favorites/search_in_fav.html', context)",
"def insert_in_favourite(self, food_id, substitute_id):\n\n ref = (food_id, substitute_id)\n print(\"\"\"\\n Souhaitez-vous ajouter cette recherche dans vos favoris ?\n 1. Oui\n 0. Non \"\"\")\n\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice == 1:\n self.cursor.execute(\"\"\"INSERT INTO favourite\n (food_id, substitute_id)\n VALUES (%s, %s)\"\"\", ref)\n else:\n return",
"def save_substitute(request, product_id, substitute_id):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user)\n product = Products.objects.get(pk=product_id)\n substitute = Products.objects.get(pk=substitute_id)\n favorite, created = User_Favorites_Substitutes.objects.update_or_create(prod_base=product,\n prod_substitute=substitute,\n user=user)\n if created:\n messages.success(request, 'Le produit \" {0} \" à été enregistré dans vos favoris !'.\n format(Products.objects.get(pk=substitute_id)))\n else:\n messages.warning(request, 'Le produit \" {0} \" existe déjà dans vos favoris !'.\n format(Products.objects.get(pk=substitute_id)))\n return redirect('store:substitutes', product_id)"
] | [
"0.7478756",
"0.6982559",
"0.6971553",
"0.65613437",
"0.65594006",
"0.6503341",
"0.6487205",
"0.646612",
"0.6430867",
"0.63798237",
"0.6359114",
"0.62904596",
"0.62575287",
"0.61742616",
"0.6173648",
"0.6143775",
"0.6127563",
"0.6113219",
"0.6091968",
"0.6089672",
"0.60802436",
"0.6068859",
"0.60648966",
"0.6054797",
"0.60358846",
"0.6028079",
"0.6014734",
"0.60059893",
"0.5986564",
"0.59859365"
] | 0.7733449 | 0 |
See the favorits of the User | def see_favorits(request):
user_name = request.user
print(user_name)
# product = UserFavorite.objects.filter(user_name=user_name)
list_favorits = UserFavorite.objects.all().filter(user_name=user_name)
favorits_query = list_favorits
favorits_list = []
for favorite in favorits_query:
favorits_list.append(Product.objects.get(pk=favorite.product.id))
print(favorits_list)
context = {
# 'product' : product,
'user_name' : user_name,
'product' : favorits_list
}
return render(request,"favorits.html",context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_favor(self):\n if \"all\" in self.switches:\n favors = Reputation.objects.exclude(favor=0).order_by(\"-date_gossip_set\")\n self.msg(\"Characters with favor: %s\" % \", \".join(str(ob) for ob in favors))\n return\n org = self.get_organization(check_perm=False)\n favors = org.reputations.filter(Q(favor__gt=0) | Q(favor__lt=0)).order_by(\n \"-favor\"\n )\n msg = \"{wThose Favored/Disfavored by %s{n\\n\" % org\n msg += \"\\n\\n\".join(\n \"{c%s{w (%s):{n %s\" % (ob.player, ob.favor, ob.npc_gossip) for ob in favors\n )\n self.msg(msg)",
"def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)",
"def cmd_account_favorites(client, args):\n account_favorites = client.get_account_favorites(args.username)\n data = [item.__dict__ for item in account_favorites]\n generate_output({'account_favorites': data}, args.output_file)",
"def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)",
"def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)",
"def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)",
"def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)",
"def show_fav_recipes():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n \n data = search_recipes(request) \n favorite_list = [l.id for l in g.user.recipes]\n favorites = [f['id'] for f in data['results'] if f['id'] in favorite_list]\n \n\n return render_template(\"favs/show.html\", favorites=favorites)",
"def affiche_favoris():\r\n # Liste des favoris utilisés pour la fonction \"select_favorite\"\r\n favorite_dict = {}\r\n # pour les produits dans Count\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"SELECT F1.name as Product, F2.name as Substitute \\\r\n FROM Backup \\\r\n INNER JOIN Food F1 ON Backup.produit_id = F1.id \r\n INNER JOIN Food F2 ON Backup.substitut_id = F2.id\"\"\")\r\n favorite = cursor.fetchall()\r\n index = 1\r\n for i in favorite:\r\n favorite_tuple = (i[0], i[1])\r\n print(\"\\n {}. {}, Peut être remplacé par {}.\".format(index, \\\r\n favorite_tuple[0], favorite_tuple[1]))\r\n favorite_dict[index] = favorite_tuple\r\n index += 1\r\n\r\n if not favorite_dict:\r\n print (\"La liste des favoris est vide.\")\r\n else:\r\n print('Choisissez un chiffre pour plus de détail.')\r\n select_favorite(favorite_dict)",
"def favourite_screen(self):\n\n self.cursor.execute(\"\"\" SELECT *\n FROM favourite\n ORDER BY id \"\"\")\n rows = self.cursor.fetchall()\n print(\"Voici vos recherches sauvegardées: \\n\")\n for row in rows:\n ref = row[1], row[2]\n self.cursor.execute(\"\"\" SELECT name\n FROM food\n WHERE id = %s\n UNION\n SELECT name\n FROM food\n WHERE id = %s \"\"\", ref)\n food_names = self.cursor.fetchall()\n i = 0\n for element in food_names:\n if i == 0:\n print(\"Produit initial : \" + element[0].upper(), end=\"\")\n i += 1\n else:\n print(\" substitué par : \" + element[0].upper())\n print(\"----------------------------------------------------------\")",
"def get_favorites(self, user_id=None):\n if not user_id:\n user_id = self.user_id\n\n favorite_decks = self.data_source.get_favorites(user_id)\n\n return favorite_decks",
"def displayAlsoLike(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"11925205\", result.data)",
"def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)",
"def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Favorites.objects.filter(user=user, is_used=True)\n\n return queryset",
"def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites",
"def show_faves():\n\n user_id = session.get(\"user_id\")\n\n if user_id:\n user = crud.get_user_by_id(user_id)\n joined_faves = crud.get_bus_by_user_id(user_id)\n\n return render_template('favorites.html', user=user, joined_faves=joined_faves)\n \n else:\n flash('Please sign in')\n return render_template('login.html')",
"def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out",
"def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')",
"def display_search_page():\n favorite_players = []\n favorites = Favorite.query.filter_by(id = current_user.id).all()\n\n if len(favorites) > 0:\n for favorite in favorites:\n player = get_favorites(favorite.favorited_item)\n player_info = player[0]\n favorite_players.append(player_info)\n else:\n favorite_players = []\n\n\n return render_template('searchpage.html',\n favorite_players = favorite_players)",
"def display_account(request):\n form = ProductSearch(request.POST or None)\n favoris = Favorite.objects.filter(\n user_link=request.user).order_by('movie_saved')\n paginator = Paginator(favoris, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'display_account.html', {'page_obj': page_obj, 'form': form})",
"def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")",
"def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]",
"def fetch_favourites(self):\n while True:\n self.cur.execute(\"SELECT DISTINCT product_id FROM Product_substitute\")\n response = self.cur.fetchall()\n\n for i, element in enumerate (response):\n print (\"Tapez {} pour voir les substituts de:\".format(i+1))\n self.display_product_from_id(element[0])\n \n choice_id = response[self.secure_input(1, len(response))-1]\n \n self.cur.execute(\"SELECT substitute_id FROM Product_substitute WHERE product_id = %s\", (choice_id[0], ))\n response = self.cur.fetchall()\n\n print(\"Voici les substituts trouves pour:\")\n self.display_product_from_id(choice_id[0])\n for element in response:\n self.display_product_from_id(element[0])\n\n print(\"Faire une autre recherche dans vos favoris? Oui = 1 non =0\")\n again = self.secure_input(0, 1)\n if again == 1:\n continue\n else:\n break",
"def profile():\n from flickrAPI import FlickrAPI\n #flickr = FlickrAPI(key=session['resource_owner_key'], secret=session['resource_owner_secret'])\n flickr = FlickrAPI(key=request.cookies.get('oauth_token'), secret=request.cookies.get('oauth_token_secret'))\n faves = flickr.favorites_getList(user_id=\"44124394781@N01\", page=1, per_page=5, extras='owner_name')\n return str(faves)",
"def search_in_fav(request):\n query = request.GET.get('user_search')\n\n if query:\n # Returns the query in lower case and without accents\n query = unidecode(query).lower()\n result = True\n\n cur_user = request.user\n # Returns all favorites\n favorites = Favorite.objects.all()\n\n # Returns current user filtered favorites\n fav_filtered = favorites.filter(\n users_id=cur_user\n ).filter(products__name__icontains=query).order_by('id')\n\n if not fav_filtered.exists():\n result = False\n fav_filtered = favorites.filter(\n users_id=cur_user).order_by('id')\n\n # Init pagination with 6 products\n paginator = Paginator(fav_filtered, 6)\n page = request.GET.get('page')\n\n try:\n fav_filtered = paginator.page(page)\n except PageNotAnInteger:\n fav_filtered = paginator.page(1)\n except EmptyPage:\n fav_filtered = paginator.page(paginator.num_pages)\n\n if result:\n title = \"Résultats de la recherche : {}\".format(query)\n else:\n title = \"Aucun résultat pour la recherche : {}\".format(query)\n\n context = {\n 'is_result': result,\n 'fav_filtered': fav_filtered,\n 'title': title,\n 'paginate': True,\n }\n\n return render(request, 'favorites/search_in_fav.html', context)",
"def show_likes(user_id):\n\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user whose favorites are being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged-in user for navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if session[CURRENT_USER_KEY] == user_id:\n like_active = 'active'\n else:\n like_active = ''\n\n return render_template('likes.html', user=user, profuser=profuser, likes=profuser.likes, like_active=like_active)",
"def myfood(request):\n my_fav = Favorite.objects.filter(user=request.user)\n my_fav_products = [fav.product_favorite for fav in my_fav]\n paginator = Paginator(my_fav_products, 6)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n context = {\n \"favorites\": page_obj\n }\n return render(request, \"myfood.html\", context)",
"def favorited(self: Article, request: Request):\n if not request.user:\n return False\n\n if self in request.user.favorites:\n return True\n\n return False",
"def get_favorites_questions(user_id, api_site_parameter, page = 1, body = False, comments = False, pagesize = 100, sort = 'added'):\n path = \"users/%d/favorites\" % user_id\n \n query_filter = ')(Ybxw_gbz'\n \n if body:\n query_filter = '9F)u(CSWCtKt'\n if comments:\n query_filter = ')(YbxuzQQ.'\n if body and comments:\n query_filter = ')(YbxuzQTp'\n \n results = __fetch_results(path, api_site_parameter, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results",
"def favourite(self, favourite):\n\n self._favourite = favourite"
] | [
"0.7833284",
"0.7363097",
"0.7361918",
"0.7231697",
"0.71256995",
"0.70869046",
"0.70236176",
"0.69554955",
"0.6939694",
"0.69286466",
"0.68833476",
"0.6857968",
"0.6854023",
"0.68082047",
"0.67787",
"0.677694",
"0.67252666",
"0.6696489",
"0.66769177",
"0.6671317",
"0.6668169",
"0.66528744",
"0.6638332",
"0.658368",
"0.6576945",
"0.6446462",
"0.64409864",
"0.64158213",
"0.63574845",
"0.63393825"
] | 0.7412779 | 1 |
reverse black and white color | def reverse_black_and_white(img):
img = 255 - img
return img | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reverse(color):\n return 255 - color",
"def negative(img): \n for pixel in img:\n x, y, col = pixel \n r, g, b = col\n \n new_color = create_color(255 - r, 255 - g, 255 - b)\n set_color(img, x, y, new_color)",
"def unflip_colors(self):\n self.colors[self.bondA] = self.colA\n self.colors[self.bondB] = self.colB\n self.set_bcol(self.bondA)\n self.set_bcol(self.bondB)\n return",
"def revert_color(cls, colors):\n # 0.5 is to map the color to the center of the range\n return [int((c+0.5) / cls.color_level * 256) for c in colors]",
"def inverse_color_hex(hx):\n return inverse_color_rgb(hex_to_rgb(hx))",
"def colorize(self):\n return",
"def lr_flip(self):\n for g in self.grid:\n g.reverse()",
"def invert(self):\n\t\tself.bitmap_data = \"\".join([chr(255-ord(x)) for x in self.bitmap_data])\n\t\treturn self",
"def flip(self):\n if self.color != \"empty\":\n self.color = self.oppositeColor()\n else:\n raise pieceError(self.col, self.row)",
"def unconvert_from_RGB_255(colors):\n un_rgb_color = (colors[0]/(255.0),\n colors[1]/(255.0),\n colors[2]/(255.0))\n\n return un_rgb_color",
"def invert_value(c):\n try:\n rgba = mpl.colors.to_rgba(c)\n except ValueError:\n if isinstance(c, Iterable):\n return [invert_value(color) for color in c]\n else:\n raise\n else:\n r, g, b, a = rgba\n h, s, v = mpl.colors.rgb_to_hsv((r,g,b))\n v = 1 - v\n rgb = tuple(mpl.colors.hsv_to_rgb((h, s, v)))\n if a == 1:\n new_c = mpl.colors.to_hex(rgb)\n else:\n new_c = rgb + (a,)\n return new_c",
"def flip_color(self):\n self._spots[constants.CROSSING_LOCATION - 1].flip_color()\n self._spots[constants.CROSSING_LOCATION + constants.NUM_LANES].flip_color()",
"def strip_color(piece):\n return piece[1:]",
"def resetColor(self):\n self.setColor(255, 255, 255 ,255)",
"def convert_to_black_and_white(picture):\n picture_black_white = picture.convert('1')\n return picture_black_white",
"def test_reversed(self):\n \n color0 = pero.Color.Red\n color1 = pero.Color.Green\n color2 = pero.Color.Blue\n color3 = pero.Color.Cyan\n color4 = pero.Color.Magenta\n color5 = pero.Color.Yellow\n \n colors = (color0, color1, color2, color3, color4, color5)\n palette = pero.Palette(colors)\n \n rev = palette.reversed(\"Reversed\")\n \n self.assertEqual(rev.name, \"Reversed\")\n self.assertEqual(len(rev), len(palette))\n \n self.assertTrue(rev[0] is palette[5])\n self.assertTrue(rev[1] is palette[4])\n self.assertTrue(rev[2] is palette[3])\n self.assertTrue(rev[3] is palette[2])\n self.assertTrue(rev[4] is palette[1])\n self.assertTrue(rev[5] is palette[0])",
"def rotate_color(self, forwards):\n self.color_index += 1 if forwards else -1\n if self.color_index > color.MAX_COLOR:\n # Overflow\n self.color_index = color.MIN_COLOR\n if self.color_index < color.MIN_COLOR:\n # Underflow\n self.color_index = color.MAX_COLOR",
"def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0",
"def flip_vertical(original_image: Image) -> Image :\r\n \r\n new_image = copy(original_image)\r\n \r\n pixel_width = get_width(original_image)\r\n pixel_height = get_height(original_image) \r\n\r\n \r\n for x in range(pixel_width) :\r\n for y in range(pixel_height) :\r\n original_vertical_pixel = get_color(original_image, x, y)\r\n opposite_vertical_pixel = pixel_height - 1 - y\r\n set_color(new_image, x, opposite_vertical_pixel, original_vertical_pixel)\r\n \r\n return new_image",
"def setSurfaceColors(topcolor=-1,bottomcolor=-1):\n dislin.surclr(topcolor, bottomcolor)",
"def complementary(R, G, B):\r\n RGB = [(R/255), (G/255), (B/255)]\r\n HLS = colorsys.rgb_to_hls(RGB[0], RGB[1], RGB[2])\r\n HLS = [((HLS[0] + 0.5) % 1), HLS[1], HLS[2]]\r\n RGB = colorsys.hls_to_rgb(HLS[0], HLS[1], HLS[2])\r\n return RGB255(RGB)",
"def color_invert(self, lower=None, upper=None, wise='pixel', prob=1, p=None):\n if self._max_aug_nums>0:\n if self._nums>self._max_aug_nums:\n return self\n self._nums += 1\n if p is None:\n p = self._p\n self.image = color_invert(self.image, lower, upper, wise, prob, p)\n return self",
"def Color(red, green, blue, white = 0):\n\treturn (white << 24) | (red << 16)| (green << 8) | blue",
"def off(l):\n l.do_static_colour(255, 0, 0, 0)",
"def swap_black_white(img):\n \n black = create_color(0, 0, 0)\n\n white = create_color(255, 255, 255)\n\n for x, y, col in img:\n red, green, blue = col\n\n # Check if the pixel's colour is black; i.e., all three of its\n # components are 0.\n\n if red == 0 and green == 0 and blue == 0:\n # The pixel is black, make it white.\n set_color(img, x, y, white)\n\n # Check if the pixel's colour is white; i.e., all three of its\n # components are 255.\n\n elif red == 255 and green == 255 and blue == 255:\n # The pixel is white, make it black.\n set_color(img, x, y, black)",
"def inverse_translate_color_index(translated_index):\n assert 1 <= translated_index <= len(PUSH_INDEX_TO_COLOR_INDEX)\n return PUSH_INDEX_TO_COLOR_INDEX[(translated_index - 1)]",
"def colorWipe(strip, color, wait_ms=50, reverse=False):\n for i in range(strip.numPixels()):\n pixel_i = strip.numPixels() -1 - i if reverse else i\n strip.setPixelColor(pixel_i, color)\n strip.show()\n time.sleep(wait_ms/1000.0)",
"def _blacken(self, node):\n #node = self._follow(ref)\n if node is None:\n return node\n newnode = RedBlackNode.from_node(node, color=Color.BLACK)\n return RedBlackNodeRef(newnode)",
"def reset_color():\n global CURRENT_COLOR\n CURRENT_COLOR = 0",
"def remove_color(image):\n return image[:, :, 0]"
] | [
"0.8254546",
"0.66949975",
"0.66061807",
"0.653569",
"0.6441206",
"0.63987577",
"0.6311348",
"0.6268284",
"0.6259552",
"0.6226291",
"0.62072444",
"0.6176793",
"0.61412317",
"0.6112735",
"0.61056167",
"0.60907745",
"0.60805494",
"0.60057807",
"0.5991006",
"0.5955161",
"0.59390897",
"0.58872867",
"0.58746874",
"0.58629256",
"0.5862761",
"0.5852613",
"0.5842322",
"0.5838207",
"0.5825196",
"0.5813235"
] | 0.8097395 | 1 |
Handler for the create_post Lambda function | def handler(event, _context):
model = PostModel()
post_id = model.create(**json.loads(event['body']))
return dump_result({'post_id': post_id}, status_code=201) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n data = request.json\n create_entry(data)\n return None, 201",
"def handle_post(cls, **kwargs):\n raise NotImplementedError",
"def post(self):\n data = request.json\n create_ue(data)\n return None, 201",
"def post(self, *args, **kwargs):\n return self.handle_post_request()",
"def _post(self, *args, **kwargs):\n return self._request('post', *args, **kwargs)",
"def post():\n pass",
"def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)",
"def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)",
"def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)",
"def post(self, request, *args, **kwargs):\n return super().create(*args, **kwargs)",
"def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)",
"def post(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)",
"def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)",
"def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)",
"def post(self):\n data = request.json\n return save_new_post(data=data)",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):"
] | [
"0.71404606",
"0.70493096",
"0.70490444",
"0.70192987",
"0.6999904",
"0.69663024",
"0.6955587",
"0.6955587",
"0.6955587",
"0.693566",
"0.6901573",
"0.6828901",
"0.67827344",
"0.6765577",
"0.67457926",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.6565804",
"0.65560657"
] | 0.7746745 | 0 |
Test single shot run. | def test_qasm_simulator_single_shot(self):
shots = 1
self.qobj.config.shots = shots
result = self.backend.run(self.qobj).result()
self.assertEqual(result.success, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_takes_shot(self):\n player = TestPlayer()\n self.ai.take_shot(player)\n self.assertEqual(1, player.shots_taken)",
"def test_with_shots_option(self):\n params, target = self._generate_params_target([1])\n sampler = Sampler()\n result = sampler.run(\n circuits=[self._pqc], parameter_values=params, shots=1024, seed=15\n ).result()\n self._compare_probs(result.quasi_dists, target)",
"def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass",
"async def test_single_shot(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death, single_shot=True)\n flag = asyncio.Event()\n\n async def wait_for(event: auraxium.event.Event) -> None:\n _ = event\n self.assertGreaterEqual(event.age, 0.0, 'event age is negative')\n flag.set()\n\n trigger.action = wait_for\n self.client.add_trigger(trigger)\n try:\n await asyncio.wait_for(flag.wait(), 5.0)\n except asyncio.TimeoutError:\n self.skipTest('no game event received after 5 seconds, '\n 'is the game in maintenance?')\n self.assertEqual(len(self.client.triggers), 0)",
"def run_single(self):\n self.run_sim_time(1)",
"def testRunSmoke(self):\n stage = self.ConstructStage()\n with self.OutputCapturer():\n stage.Run()",
"def run_experiment():\n pass",
"def test_run(self):\n engine = Engine(self.config_file, self.api_token, 23)\n engine.msg_wait_iterations = 0\n\n # Put some stuff on the task queue\n self.setup_helper.add_volumetric_tasks(self.aws_creds[\"access_key\"],\n self.aws_creds['secret_key'],\n self.upload_queue_url, engine.backend)\n\n engine.join()\n engine.run()\n\n # Check for tile to exist\n s3 = boto3.resource('s3')\n ingest_bucket = s3.Bucket(self.ingest_bucket_name)\n\n with tempfile.NamedTemporaryFile() as test_file:\n with open(test_file.name, 'wb') as raw_data:\n ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)\n with open(test_file.name, 'rb') as raw_data:\n # Using an empty CloudVolume dataset so all values should be 0.\n # dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type\n cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')\n unique_vals = np.unique(cuboid)\n assert 1 == len(unique_vals)\n assert 0 == unique_vals[0]",
"def test_run_started(self):",
"def test_get_run(self):\n pass",
"def run_one_step(self):\n pass",
"def runtest(self):",
"def test_screenshots_generated():\n with temporary_dir() as output_dir:\n output_dir = Path(output_dir)\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_screenshots.xml\",\n output_dir / \"output.xml\",\n )\n open(output_dir / \"selenium-screenshot-1.png\", mode=\"w+\")\n open(output_dir / \"selenium-screenshot-2.png\", mode=\"w+\")\n\n flowtask = FlowTaskFactory()\n robot_importer.import_robot_test_results(flowtask, output_dir)\n\n # output.xml asset created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-output\").count()\n # suite setup screenshot assets created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-screenshot\").count()\n # No screenshots created for 'Via API' test\n tr_method = models.TestMethod.objects.get(name=\"Via API\")\n test_api = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 0 == test_api.assets.count()\n\n # One screenshot created for 'Via UI' test\n tr_method = models.TestMethod.objects.get(name=\"Via UI\")\n test_ui = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 1 == test_ui.assets.count()",
"def test_simulator_0_shots():\n dev = _aws_device(wires=2, device_type=AwsDeviceType.SIMULATOR, shots=0)\n assert dev.shots == 1\n assert dev.analytic",
"def TestOneStep(self):\n pass",
"def test_multiple_commands_at_same_time(self):",
"def startTestRun(self):",
"def test_runs(self):\n\n with TemporaryDirectory() as tmp_dir:\n\n mock_raw_data(tmp_dir, raw_dim=256, num_channels=3, num_images=40)",
"def test_RawRun_process():\n for style in test_runs:\n test_runs[style].process()\n # now compare all images with ref\n ref = sorted(glob.glob('tests/data/processed_ref/*/*/*'))\n outputs = sorted(glob.glob('tests/data/processed/*/*/*'))\n for ref, out in zip(ref, outputs):\n print ref\n print out\n assert_image_equal(ref, out)",
"def test_run_ended(self):",
"def createNewShot(*args):\n createDir.createShot(pi.shotsFolder)",
"def test_create_run(self):\n pass",
"def one_experiment(monkeypatch, storage):\n monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))\n name = \"test_single_exp\"\n orion.core.cli.main(\n [\"hunt\", \"--init-only\", \"-n\", name, \"./black_box.py\", \"--x~uniform(0,1)\"]\n )\n ensure_deterministic_id(name, storage)\n return storage.fetch_experiments({\"name\": name})[0]",
"def selfTest():\n try:\n loop = asyncio.get_event_loop()\n r34 = Rule34(loop)\n data = loop.run_until_complete(r34.getImageURLS(\"gay\", singlePage=True))\n if data is not None and len(data) != 0:\n print(\"self test passed\")\n exit(0)\n else:\n raise SelfTest_Failed(\"Automated self test failed to gather images\")\n except Exception as e:\n raise SelfTest_Failed(\"Automated self test failed with this error:\\n{}\".format(e))",
"def test_run(self):\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n def run_once_and_update_timestamp(self):\n \"\"\"Set a variable.\"\"\"\n self.was_run = True\n return None\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # run_once_and_update_timestamp() was called.\n assert True == provider.was_run\n\n # run() returned a CoverageProviderProgress with basic\n # timing information, since run_once_and_update_timestamp()\n # didn't provide anything.\n assert isinstance(result, CoverageProviderProgress)\n now = utc_now()\n assert result.start < result.finish\n for time in (result.start, result.finish):\n assert (now - time).total_seconds() < 5",
"def startTest(asset):",
"def run_one_test_cycle(self):\n logging.info(\n \"{0} operations remaining: {1}\".format(\n self.args.pm_operation, self.args.repetitions\n )\n )\n\n self.check_last_cycle_duration()\n if self.args.repetitions > 0:\n self.run_pm_command()\n else:\n self.summary()",
"def is_shot(event):\n event_id = event['eventId']\n return event_id == 10",
"def step_run(environment):\n cmd = \"/bin/sh %(testname)s\" % environment\n return execute(environment, cmd, timeout=10)",
"def test_qpu_0_shots():\n _aws_device(wires=2, shots=0)"
] | [
"0.6926298",
"0.6844326",
"0.65808654",
"0.64677674",
"0.64463276",
"0.63522184",
"0.62838167",
"0.6224712",
"0.62038606",
"0.61977786",
"0.6191925",
"0.61900747",
"0.61691684",
"0.6144995",
"0.61292636",
"0.6105518",
"0.60740775",
"0.605795",
"0.5980819",
"0.59777087",
"0.59565836",
"0.59514576",
"0.59460604",
"0.5945699",
"0.5928272",
"0.5925504",
"0.59249336",
"0.5919602",
"0.59039754",
"0.59036934"
] | 0.7838827 | 0 |
Test teleportation as in tutorials | def test_teleport(self):
self.log.info('test_teleport')
pi = np.pi
shots = 2000
qr = QuantumRegister(3, 'qr')
cr0 = ClassicalRegister(1, 'cr0')
cr1 = ClassicalRegister(1, 'cr1')
cr2 = ClassicalRegister(1, 'cr2')
circuit = QuantumCircuit(qr, cr0, cr1, cr2, name='teleport')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.ry(pi/4, qr[0])
circuit.cx(qr[0], qr[1])
circuit.h(qr[0])
circuit.barrier(qr)
circuit.measure(qr[0], cr0[0])
circuit.measure(qr[1], cr1[0])
circuit.z(qr[2]).c_if(cr0, 1)
circuit.x(qr[2]).c_if(cr1, 1)
circuit.measure(qr[2], cr2[0])
job = execute(circuit, backend=self.backend, shots=shots, seed_simulator=self.seed)
results = job.result()
data = results.get_counts('teleport')
alice = {
'00': data['0 0 0'] + data['1 0 0'],
'01': data['0 1 0'] + data['1 1 0'],
'10': data['0 0 1'] + data['1 0 1'],
'11': data['0 1 1'] + data['1 1 1']
}
bob = {
'0': data['0 0 0'] + data['0 1 0'] + data['0 0 1'] + data['0 1 1'],
'1': data['1 0 0'] + data['1 1 0'] + data['1 0 1'] + data['1 1 1']
}
self.log.info('test_teleport: circuit:')
self.log.info(circuit.qasm())
self.log.info('test_teleport: data %s', data)
self.log.info('test_teleport: alice %s', alice)
self.log.info('test_teleport: bob %s', bob)
alice_ratio = 1/np.tan(pi/8)**2
bob_ratio = bob['0']/float(bob['1'])
error = abs(alice_ratio - bob_ratio) / alice_ratio
self.log.info('test_teleport: relative error = %s', error)
self.assertLess(error, 0.05) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def teleport(self, agent_host, move_up):\n\n move_by = 4\n if move_up:\n tel_y= self.curr_y+move_by\n else:\n tel_y= self.curr_y-move_by\n tp_command = \"tp {} {} {}\".format(self.curr_x,tel_y,self.curr_z)\n #print(\"X,Y,Z----: {},{},{}\".format(self.curr_x,tel_y,self.curr_z))\n return tp_command\n '''agent_host.sendCommand(tp_command)\n good_frame = False\n start = timer()\n while not good_frame:\n world_state = agent_host.getWorldState()\n if not world_state.is_mission_running:\n print \"Mission ended prematurely - error.\"\n exit(1)\n if not good_frame and world_state.number_of_video_frames_since_last_state > 0:\n frame_x = world_state.video_frames[-1].xPos\n frame_z = world_state.video_frames[-1].zPos\n if math.fabs(frame_x - teleport_x) < 0.001 and math.fabs(frame_z - teleport_z) < 0.001:\n good_frame = True\n end_frame = timer()'''",
"def test_teleport(self):\n from qiskit.circuit.library import U3Gate\n filename = self._get_resource_path('test_teleport.tex')\n qr = QuantumRegister(3, 'q')\n cr = ClassicalRegister(3, 'c')\n qc = QuantumCircuit(qr, cr)\n # Prepare an initial state\n qc.append(U3Gate(0.3, 0.2, 0.1), [qr[0]])\n # Prepare a Bell pair\n qc.h(qr[1])\n qc.cx(qr[1], qr[2])\n # Barrier following state preparation\n qc.barrier(qr)\n # Measure in the Bell basis\n qc.cx(qr[0], qr[1])\n qc.h(qr[0])\n qc.measure(qr[0], cr[0])\n qc.measure(qr[1], cr[1])\n # Apply a correction\n qc.z(qr[2]).c_if(cr, 1)\n qc.x(qr[2]).c_if(cr, 2)\n qc.measure(qr[2], cr[2])\n\n circuit_drawer(qc, filename=filename, output='latex_source')\n\n self.assertEqualToReference(filename)",
"def test_legit_player(self):\n board = Board()\n player1 = LegitPlayer()\n player2 = LegitPlayer()\n player_guard1 = PlayerGuard(player1)\n player_guard2 = PlayerGuard(player2)\n\n # set ids\n p1id = uuid.uuid4()\n p2id = uuid.uuid4()\n player_guard1.set_id(p1id)\n player_guard2.set_id(p2id)\n\n # test methods don't error out\n player_guard1.start_of_game()\n player_guard2.start_of_game()\n board.place_worker(*player_guard1.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard1.place_worker(board))\n player_guard1.play_turn(board)\n player_guard2.play_turn(board)\n player_guard1.end_of_game(\"legit player\")\n player_guard2.end_of_game(\"legit player\")",
"def main_tunnel():\r\n print 'yay you beat the boss'",
"def test_portalPortal(self):\n streets = (\"9:00 Portal\", \"9:00 Portal\")\n for front, cross in (streets, reversed(streets)):\n location = parseLocation(\n \"Theme Camp\",\n \"Camp at Portal\",\n \"9:00 Portal @ 9:00 Portal\",\n front, cross,\n \"50 x 200\"\n )\n self.assertEquals(\n location,\n Location(\n name=\"Camp at Portal\",\n address=RodGarettAddress(\n concentric=None, radialHour=9, radialMinute=0,\n description=\"9:00 Portal, Theme Camp 50x200\",\n ),\n )\n )",
"def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32",
"def test_travel():\n print('Testing travel')\n\n # Case given to test this problem\n assert_equals('(-1, 4)', hw1.travel('NW!ewnW', 1, 2))\n\n # Additional cases to test this problem\n assert_equals('(3, 5)', hw1.travel('!!WNEwssw.', 5, 6))\n assert_equals('(0, 0)', hw1.travel(('eNN@'), -1, -2))",
"def test_easy_bot_map1(self):\n game = self.bot_test_map1(Difficulty.easy)\n self.assertEqual(game.first_player.ask_for_move(), (0, 7))",
"def test_turn_ai_and_players(lab):\n print('Test turn')\n lab.update_game()\n test_print(lab)\n print(\"Turn: {}\".format(lab.turn_count))\n print('Test completed')",
"def test_get_player(self):\n pass",
"def test(simulation=False):\n\tsimulation = False\n\tif simulation:\n\t\tdyn.enable_vrep()\n\t\n\tctrl = init_ctrl()\n\n\tif simulation:\n\t\tpeter = SymbiotSpidey(ctrl)\n\telse:\n\t\tpeter = Spidey(ctrl)\n\n\tif simulation:\n\t\tctrl.start_sim()\n\n\tpeter.compliant = False\n\tprint peter.legs_references\n\n\tleg = peter.legs[0]\n\tpos = leg.position()\n\tpos = Vector3D(pos.x+6, pos.y, pos.z)\n\tleg.move(pos)\n\tctrl.wait(200)\n\tprint pos.x, leg.position().x, pos.x == leg.position().x\n\n\tpeter.compliant = True\n\n\tif simulation:\n\t\tctrl.stop_sim()",
"def test_actionWithTargetInAdjacentDarkRoom(self):\n self.otherRoom = objects.Thing(store=self.store, name=u'Elsewhere')\n objects.Container.createFor(self.otherRoom, capacity=1000)\n objects.Exit.link(self.location, self.otherRoom, u'west')\n self.player.moveTo(self.otherRoom)\n self.observer.moveTo(self.otherRoom)\n self.assertCommandOutput(\n \"wear pants\",\n [commandutils.E(u\"Who's that?\")],\n [])",
"def teleport(self, screen):\n if screen in self.screens:\n self._sendCommand('TELEPORT ' + screen)\n else:\n print 'No such screen: %s' % screen",
"def test_make_pathways(self):\n basic_test_runner(self, 'pathways')",
"def test_fly_and_dive(self, startcoord, cmdstring, success, endcoord):\n start_room = xyzgrid.XYZRoom.objects.get_xyz(xyz=startcoord)\n self.char1.move_to(start_room)\n\n self.call(commands.CmdFlyAndDive(), \"\", \"You\" if success else \"Can't\", cmdstring=cmdstring)\n\n self.assertEqual(self.char1.location.xyz, endcoord)",
"def test_hard_bot_map1(self):\n game = self.bot_test_map1(Difficulty.hard)\n self.assertEqual(game.first_player.ask_for_move(), (0, 11))",
"def testPointSystem():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n registerPlayer(\"MewTwo\")\n standings = playerStandings()\n [id1, id2, id3, id4, id5] = [row[0] for row in standings]\n reportMatch(id1, id2)\n reportMatch(id3, id4, True)\n reportMatch(id5, id5, False, True)\n reportMatch(id1, id5)\n reportMatch(id3, id4)\n reportMatch(id2, id2, False, True)\n reportMatch(id1, id3)\n reportMatch(id5, id2)\n reportMatch(id4, id4, False, True)\n standings = playerStandings()\n if not (standings[0][0]==id2 and standings[0][2]==2 and\n standings[1][0]==id4 and standings[0][2]==2 and\n standings[2][0]==id3 and standings[0][2]==2 and\n standings[3][0]==id5 and standings[0][2]==2 and\n standings[4][0]==id1 and standings[0][2]==2):\n raise ValueError(\n \"Points are not tallied correctly.\"\n )\n\n print \"4. Points are tallied correctly.\"",
"def test3():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n exp1.trafico.pingMeasure(filename='ensayo_ping.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador",
"def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador",
"def all():\n lab = test_loading()\n\n for _ in range(1):\n print('🦅🐀🐙')\n\n test_spawn(lab)\n\n pc = test_spawn_player(lab)\n\n while True:\n pc.store_move(PlayerMove(random.choice(['walk left', 'walk up', 'walk down', 'walk right'])))\n test_turn_ai_and_players(lab)\n if input() == '0':\n break",
"def test_teams_invite_member(self):\n pass",
"def game_loop(args):\n\n pygame.init()\n pygame.font.init()\n world = None\n tot_target_reached = 0\n num_min_waypoints = 21\n counter=0\n\n try:\n client = carla.Client(args.host, args.port)\n client.set_timeout(4.0)\n\n display = pygame.display.set_mode(\n (args.width, args.height),\n pygame.HWSURFACE | pygame.DOUBLEBUF)\n\n hud = HUD(args.width, args.height)\n world = World(client.load_world('Town01'), hud, args)\n # Changing The Map\n #world = World(client.load_world('Town03'), hud, args)\n # Town04 ,Town06 is highway | Town07 is country |Town03 default\n controller = KeyboardControl(world)\n\n if args.agent == \"Roaming\":\n agent = RoamingAgent(world.player)\n elif args.agent == \"Basic\":\n agent = BasicAgent(world.player)\n spawn_point = world.map.get_spawn_points()[0]\n agent.set_destination((spawn_point.location.x,\n spawn_point.location.y,\n spawn_point.location.z))\n else:\n agent = BehaviorAgent(world.player, behavior=args.behavior)\n\n spawn_points = world.map.get_spawn_points()\n random.shuffle(spawn_points)\n\n if spawn_points[0].location != agent.vehicle.get_location():\n destination = spawn_points[0].location\n else:\n destination = spawn_points[1].location\n\n agent.set_destination(agent.vehicle.get_location(), destination, clean=True)\n\n clock = pygame.time.Clock()\n\n while True:\n clock.tick_busy_loop(60)\n if controller.parse_events(client, world, clock):\n return\n\n # As soon as the server is ready continue!\n if not world.world.wait_for_tick(10.0):\n continue\n\n if args.agent == \"Roaming\" or args.agent == \"Basic\":\n if controller.parse_events(client, world, clock):\n return\n\n # as soon as the server is ready continue!\n world.world.wait_for_tick(10.0)\n\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n control = agent.run_step(world.player)\n control.manual_gear_shift = False\n world.player.apply_control(control)\n else:\n agent.update_information(world)\n\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n\n # Set new destination when target has been reached\n if len(agent.get_local_planner()._waypoints_queue) < num_min_waypoints and args.loop:\n agent.reroute(spawn_points)\n tot_target_reached += 1\n world.hud.notification(\"The target has been reached \" +\n str(tot_target_reached) + \" times.\", seconds=4.0)\n\n elif len(agent.get_local_planner()._waypoints_queue) == 0 and not args.loop:\n print(\"Target reached, mission accomplished...\")\n break\n\n speed_limit = world.player.get_speed_limit()\n agent.get_local_planner().set_speed(speed_limit)\n\n control = agent.run_step()\n world.player.apply_control(control)\n\n # #################################################\n # # it's my code\n # pt1_sum_ri = (0, 0)\n # pt2_sum_ri = (0, 0)\n # pt1_avg_ri = (0, 0)\n # count_posi_num_ri = 0\n #\n # pt1_sum_le = (0, 0)\n # pt2_sum_le = (0, 0)\n # pt1_avg_le = (0, 0)\n #\n # count_posi_num_le = 0\n #\n #\n # global Camera_image\n # RGB_Camera_im = cv2.cvtColor(Camera_image, cv2.COLOR_BGR2RGB)\n #\n # # Test lane dectection ,object detecion based on SSD, Yolo and Semantic Segmentation\n # #lines,size_im= lane_detectionv3(RGB_Camera_im)\n # #lines,size_im=object_detection_SSD(RGB_Camera_im)\n # #lines, size_im = object_detection_Yolo(RGB_Camera_im)\n # #lines, size_im = object_detection_mask(RGB_Camera_im)\n # #lines, size_im = lane_detectionv2(RGB_Camera_im)\n #\n # if lines is None: #in case HoughLinesP fails to return a set of lines\n # #make sure that this is the right shape [[ ]] and ***not*** []\n # lines = 
[[0,0,0,0]]\n # else:\n #\n # cv2.imshow('frame_size_im', size_im)\n # cv2.waitKey(1)\n # #cv2.imshow(\"test_im\", test_im) # original size image\n # #cv2.waitKey(1)\n\n #####################################################3\n # test= WorldRepresentation(world.world, world.player, args)\n # counter += 1\n # if ((counter % 10) == 0):\n # print(test.dynamic_objects())\n ##########################################################3\n\n\n finally:\n if world is not None:\n world.destroy()\n\n pygame.quit()",
"def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass",
"def test_move_default_dropped_steps(self):\n player = ss.LazyPlayer()\n random.seed(2)\n player.move()\n random.seed(5)\n player.move()\n assert player.position == 44",
"def test_theft_and_stealing(self):",
"def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador",
"def test_get_goal(self):\n pass",
"def go_near(furniture_name, robot_teleport):\n if furniture_name == \"livingroom_coffeetable\":\n print(\"Request to put robot at livingroom_coffeetable.\")\n x_y_z_yaw_pitch_roll = {\"x\": 4.5, \"y\": 7.3, \"z\": 0, \"yaw\": 3.8, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_chest\":\n print(\"Request to put robot at bedroom_chest.\")\n x_y_z_yaw_pitch_roll = {\"x\": 5, \"y\": 11.3, \"z\": 0, \"yaw\": 0.0, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_console\":\n print(\"Request to put robot at bedroom_console.\")\n x_y_z_yaw_pitch_roll = {\"x\": 4.2, \"y\": 12.2, \"z\": 0, \"yaw\": math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_bedsidetable\":\n print(\"Request to put robot at bedroom_bedsidetable.\")\n x_y_z_yaw_pitch_roll = {\"x\": 3.1, \"y\": 12.1, \"z\": 0, \"yaw\": math.pi, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_shelf\":\n print(\"Request to put robot at bedroom_shelf.\")\n x_y_z_yaw_pitch_roll = {\"x\": 2.4, \"y\": 9.8, \"z\": 0, \"yaw\": 3*math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"kitchen_cupboard\":\n print(\"Request to put robot at kitchen_cupboard.\")\n x_y_z_yaw_pitch_roll = {\"x\": 6.7, \"y\": 10.6, \"z\": 0, \"yaw\": math.pi, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"kitchen_table\":\n print(\"Request to put robot at kitchen_table\")\n x_y_z_yaw_pitch_roll = {\"x\": 7.8, \"y\": 10.2, \"z\": 0, \"yaw\": math.pi/8, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"livingroom_table\":\n print(\"Request to put robot at livingroom_table\")\n x_y_z_yaw_pitch_roll = {\"x\": 7.4, \"y\": 7.6, \"z\": 0, \"yaw\": 3*math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n else:\n print(\"Unknown furniture: \" + furniture_name)",
"def test_med_bot_map1(self):\n game = self.bot_test_map1(Difficulty.med)\n self.assertEqual(game.first_player.ask_for_move(), (0, 11))",
"def test_move_pose(self):\n\n global sendPlayCallParams\n \n req = self.get_moves(1)\n \n with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n src.drivers.hyundai_robot.udp = UdpConnector(\"localhost\", 8000)\n \n\n with patch.object(UdpConnector, 'appendToQueue') as u:\n \n src.drivers.hyundai_robot.move_pose(req)\n\n assert u.call_count == 1\n\n src.drivers.hyundai_robot.udp.stopConsumeThread()"
] | [
"0.64154094",
"0.6116679",
"0.60524917",
"0.5920475",
"0.58300376",
"0.58175176",
"0.5786501",
"0.5777824",
"0.57068455",
"0.57001305",
"0.567256",
"0.56699467",
"0.56529784",
"0.5641191",
"0.5601332",
"0.5596604",
"0.5578883",
"0.55724204",
"0.55621916",
"0.5547075",
"0.554393",
"0.5535322",
"0.5525315",
"0.5515861",
"0.55137116",
"0.55067325",
"0.54954815",
"0.5489369",
"0.54651546",
"0.5458895"
] | 0.664552 | 0 |
Check if file contents contain potentially malicious scripts | def is_file_containing_malicious_content(self, content: str, file_name: str):
for suspicious_string in self.stringsCausingSuspicions:
# if it's a regexp
if str(suspicious_string.__class__) == "<class '_sre.SRE_Pattern'>":
if len(suspicious_string.findall(content)) > 0:
return True
continue
# regular, simple string check
if suspicious_string in content:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid_file(file):\n return file.endswith('.py')",
"def test_6_1_1_script_isfile(host):\n assert host.file(AUDIT_SYSTEM_SCRIPT).is_file",
"def is_js_file(fname):\r\n return REJS.search(fname) and \\\r\n TEST_INDICATOR not in fname",
"def allowed_code_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config['ALLOWED_CODE_EXTENSIONS']",
"def checkscript(self, content):\n if \"VERSION\" not in self.__capabilities:\n raise NotImplementedError(\n \"server does not support CHECKSCRIPT command\")\n content = self.__prepare_content(content)\n code, data = self.__send_command(\"CHECKSCRIPT\", [content])\n if code == \"OK\":\n return True\n return False",
"def has_inspection(fileContent: str) -> bool:\n\t\tif not DISABLE_PYCHARM_INSPECTION_TUPLE:\n\t\t\treturn True\n\n\t\tfor inspectionName in DISABLE_PYCHARM_INSPECTION_TUPLE:\n\t\t\tif inspectionName in fileContent:\n\t\t\t\treturn True\n\t\treturn False",
"def hasContents():",
"def custom_lint_rules(filepath):\n result = True\n with open(filepath, 'r') as stream:\n for lineno, line in enumerate(stream, start=1):\n # Lines must only include ASCII characters, and no tab\n stripped_line = line\n if stripped_line.endswith('\\n'):\n stripped_line = stripped_line[:-1]\n invalid_chars = set(re.findall(r'[^- !\"#$%&()*+,./0-9:;<=>?@A-Z\\[\\\\\\]^_`a-z{|}~' + \"']\", stripped_line))\n if invalid_chars:\n print(\"Error: using invalid characters %r\" % (sorted(invalid_chars),))\n print(\" in %s:%d: %r\" % (filepath, lineno, line))\n result = False\n\n # Match 'echo \"$VAR\"', 'echo 2>&1 \"$VAR\"', etc.\n if re.search(r'echo [^\"|;]*\"\\$', line):\n # http://www.etalabs.net/sh_tricks.html\n # NEVER use echo like this. According to POSIX, echo has\n # unspecified behavior if any of its arguments contain \"\\\" or\n # if its first argument is \"-n\"\n print(\"Error: using echo \\\"$var\\\" is dangerous. Use printf %s\\\\\\\\n \\\"$var\\\" instead\")\n print(\" in %s:%d: %r\" % (filepath, lineno, line))\n result = False\n\n # Match \"read\" without -r, and not \"dconf read\"\n if re.match(r'^([^#]*\\s)?read\\s+(?!-r)', line.replace('dconf read', 'dconf_read')):\n print(\"Error: using read without -r is dangerous. Use read -r instead\")\n print(\" in %s:%d: %r\" % (filepath, lineno, line))\n result = False\n\n # Match \"witch\" but not \"/usr/bin/which\" if the line starts with it\n # And filter-out special lines which are okay\n if (\n not line.lstrip().startswith('/usr/bin/which ')\n and line.strip() not in (\n '[ -n \"$DUMPCAP\" ] || DUMPCAP=\"$(which dumpcap)\"',\n 'if [ -x /usr/bin/which ] ; then',\n )\n and re.match(r'^[^#]*which\\s+', line)):\n print(\"Error: using which is not portable. Use command -v instead\")\n print(\" in %s:%d: %r\" % (filepath, lineno, line))\n result = False\n\n # Match \"command\" without a comment nor an echo nor a zstyle beforehand\n if re.match(r'^\\s*((?!echo|zstyle)[^ #][^#]*\\s)?command\\s+(?!-v)', line):\n print(\"Error: using command without -v is unknown. Use command -v instead\")\n print(\" in %s:%d: %r\" % (filepath, lineno, line))\n result = False\n\n return result",
"def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n self.assertEquals(a,True)",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)",
"def check(filename):\n with open(filename) as f:\n try:\n if 'skip' in f.read():\n return False\n except:\n return True\n return True",
"def valid_filename(filename):\n if filename in IGNORED_FILES:\n return False\n if not os.path.exists(filename):\n return False\n _, ext = os.path.splitext(filename)\n return ext == '.py'",
"def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION",
"def _assert_perl_script(path):\n if not os.path.isfile(path):\n raise NotImplementedError('\"%s\" is not a file' % path)\n\n _stem, ext = os.path.splitext(path)\n if ext == \".pl\":\n return\n with open(path) as stream:\n if \"perl\" in stream.readline():\n return\n raise NotImplementedError(\"%s is not a perl script\" % path)",
"def test_acceptance_stripe_script_has_been_inserted(self):\r\n pattern = re.compile(r'<script src=\"https://js.stripe.com/v3\"></script>',\r\n re.I | re.M)\r\n res = re.search(pattern, self.dom_str)\r\n self.assertTrue(hasattr(res, 'group'),\r\n msg=\"You didn't insert a Stripe script file.\")",
"def is_file_already_processed(file_content, file_name):\n\tchar_found = False\n\tfile_content_to_search = file_content.encode(SOURCE_ENCODING)\n\tfor des_char, char in CHARS_TO_REPLACE:\n\t\tif char in file_content_to_search:\n\t\t\tchar_found = True\n\t\t\tbreak\n\tif char_found:\n\t\tprint \"--> %s already processed, skipping...\" % file_name\n\treturn char_found",
"def __is_file_eligible_to_scan(cls, path_to_test):\n return path_to_test.endswith(\".md\")",
"def oktomodify(installscript):\n \n if os.path.exists(installscript):\n if scripttag in open(installscript).readline():\n return True\n else:\n return False\n else:\n return True",
"def test_extract_no_python():\n assert uflash.extract_script(uflash._RUNTIME) == ''",
"def is_blacklisted(fname):\n return is_dot(fname) or is_excluded_filetype(fname)",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def file_check( self ):\n try:\n file_handler = open( self.PATH_TO_SOURCE_FILE )\n log.info( 'annex requests found' )\n except Exception, e:\n message = 'no annex requests found; quitting\\n\\n'\n log.info( message )\n sys.exit( message )\n utf8_data = file_handler.read()\n assert type(utf8_data) == str, type(utf8_data)\n data = utf8_data.decode( 'utf-8' )\n return data",
"def _is_bad_code():\n if _total_lines_of_code >= 5:\n return True\n else:\n return False",
"def _file_can_be_compressed(filename):\n content_type = ''\n with open(filename, 'rb') as f:\n content_type = _get_content_type(f)\n return content_type in TEXT_TYPES",
"def check_script_command(self, line):\n line, _ = self.find_vars_in_str(line)\n words = line.split()\n self.E_str = \"check_script_command\"\n\n # Check script calling syntax\n if 1 > len(words) > 3:\n self.print_error(\"Syntax Error: correct syntax is script <filepath>\")\n\n # Check the script type\n if len(words) == 3:\n if words[2] not in VALID_SCRIPT_TYPES:\n self.print_error(f\"I don't know how to handle the '{words[2]}' script type\")\n\n # Check the script exists\n words[1] = gen_parse.rm_quotation_marks(words[1])\n if not os.path.isfile(words[1]):\n self.print_error(f\"IO Error: Can't find script '{words[1]}'\")\n\n # Parse any variables from the script (if a python script)\n if len(words) == 2 or words[2] == \"python\":\n with open(words[1], \"r\") as f:\n _vars = [i.strip('= ') for i in re.findall(VAR_REGEX+\" *=\", f.read())]\n for var in _vars:\n self.set_var(var, \"^EMPTY^\")\n\n return _vars",
"def scriptChecker(filename):\n if not os.path.exists(filename):\n print 'ERROR: %s does not exist' % filename\n import errno\n return errno.ENOENT\n\n # The script-checker program is called directly. If we call the code\n # from this python interpreter, any changes to an observing script will\n # not be noticed.\n #\n # This is due to the way python works: a second import statement of the\n # same module does nothing!\n import subprocess\n script = helpers.getCarmaBuildPath() + '/scripts/script-checker'\n cmd = [script, filename]\n ret = subprocess.call(cmd)\n if ret != 0:\n print 'ERROR: script-checker returned status code:', ret",
"def is_valid_test_file(test_file):\n return '.pyc' not in test_file and '__pycache__' not in test_file",
"def allowed_file(filename):\r\n return '.' in filename and \\\r\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS",
"def is_valid_file(self, file_path):\n return True",
"def test_6_1_1_script_exists(host):\n assert host.file(AUDIT_SYSTEM_SCRIPT).exists"
] | [
"0.6459682",
"0.6125505",
"0.60841477",
"0.60508496",
"0.6039843",
"0.5953196",
"0.59159344",
"0.5843728",
"0.5819328",
"0.5814489",
"0.57866204",
"0.5785759",
"0.575395",
"0.57477343",
"0.5743713",
"0.57432175",
"0.5721233",
"0.571638",
"0.57074296",
"0.5701537",
"0.5692168",
"0.5681616",
"0.5670301",
"0.5666565",
"0.56653935",
"0.5663075",
"0.56525284",
"0.56353796",
"0.56320035",
"0.5609671"
] | 0.7002745 | 0 |
Normalizes the input value between a and b | def normalizeToRange(v, a, b):
return (v - a) / (b - a) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalise(a, b):\n dy = b[1] - a[1]\n dx = b[0] - a[0]\n vector = (dy ** 2 + dx ** 2) ** 0.5\n # Normalise, round and cast to int\n dx = int(round(dx / vector))\n dy = int(round(dy / vector))\n \n return (dx, dy)",
"def normalize(self, factor):",
"def normalize(a, new_max=1.0):\n a = (a - a.min())\n a = a/a.max()\n a *= new_max\n return a",
"def __normalize(self, value, lower_bound, upper_bound):\n\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound",
"def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))",
"def __normalize(input, type, a, b):\n return cv2.normalize(input, None, a, b, type)",
"def normalize(image_data, a=0.1, b=0.9):\n # Implement Min-Max scaling for image data\n return a + (((image_data-np.min(image_data)) * (b - a)) / (np.max(image_data) - np.min(image_data)))",
"def normalize(x, x_max, x_min):\n return (x - x_min) / (x_max - x_min)",
"def normalize(x, min_x, max_x):\n\treturn (x - min_x) / (max_x - min_x)",
"def normalize(x):\r\n return x/norm(x)",
"def normalize(v):\n\n return v * (1.0 / magnitude(v))",
"def normalise_between_2_values(arraylike, min_value, max_value, invert=False):\n # normalise array between min and max values\n normalised = (arraylike - min_value) / (max_value - min_value)\n # replace anything above 1 with 1\n normalised[normalised > 1] = 1\n # replace anything below 0 with 0\n normalised[normalised < 0] = 0\n # if desired, invert the normalised values\n if invert:\n normalised = abs(normalised - 1)\n return normalised",
"def normalize(vals):\n min_val = torch.min(vals)\n max_val = torch.max(vals)\n return (vals - min_val) / (max_val - min_val)",
"def _normalize(self, inp):\n \n return inp/inp.sum()",
"def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))",
"def normalize (a_data,a_column,b_method='MinMax') :\n if b_method == 'MinMax' :\n loc_scaler = __minmax()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n elif b_method == 'Standard' :\n loc_scaler = __standard()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])",
"def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)",
"def normalize(self,x,xmin,xmax):\n return (x-xmin)/(xmax-xmin)",
"def normalise(da):\n return (da - da.min()) / (da.max() - da.min())",
"def normalize_value(self, value, ref_value):\n _, norm, _ = _normalizers[self.name]\n return norm(value, ref_value)",
"def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x",
"def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())",
"def normalize_input(inputs: [float]) -> [float]:",
"def normalize(p):\n return p / mod(p)",
"def normalize(av, vmin=0., vmax=1.):\n if vmin == vmax:\n return np.ones_like(av)*vmin\n elif vmax < vmin:\n warnings.warn(\"swapping vmin and vmax, because vmax < vmin.\")\n vmin, vmax = vmax, vmin\n\n norm_one = (av - np.min(av))/(np.max(av)-np.min(av))\n return norm_one * (vmax-vmin) + vmin",
"def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)",
"def normalize(self, x):\n self.max = x.max()\n self.min = x.min()\n return (2 * (x - x.min()) / (x.max() - x.min()) - 1)",
"def normalize(value):\n while value > 1:\n value = value / 10\n return value",
"def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))",
"def normalize(w):\n s = sum(w)\n for i in range(len(w)):\n w[i] /= s\n return w"
] | [
"0.73133886",
"0.72756827",
"0.72715324",
"0.72277474",
"0.71215093",
"0.7071475",
"0.6914894",
"0.6854425",
"0.6828973",
"0.6812168",
"0.6810086",
"0.6793221",
"0.67737037",
"0.67714167",
"0.6674313",
"0.6660673",
"0.66357183",
"0.66268647",
"0.6608468",
"0.6604566",
"0.66005486",
"0.6582707",
"0.6578456",
"0.6525031",
"0.6492199",
"0.6480119",
"0.6472199",
"0.64700544",
"0.6462059",
"0.6460311"
] | 0.78294766 | 0 |
Add the individual elements of the input vectors together, such that a = [1, 2, 3] added to b = [3, 1, 2] returns [4, 3, 5] | def vectorAdd(a, b):
return [a[i] + b[i] for i, j in enumerate(a)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vector_sum(a, b):\n return a[0] + b[0], a[1] + b[1]",
"def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator import add\n return tuple(map(add, a, b))",
"def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector",
"def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results",
"def add_lists(a,b):\r\n\r\n for i in range(len(a)):\r\n a[i] += b[i]\r\n return a",
"def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result",
"def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])",
"def add_vectorlist(vectors):\n x, y, z = zip(*vectors)\n return sum(x), sum(y), sum(z)",
"def _add_list_values(a, b):\n new_list = []\n for i in range(len(a)):\n new_list.append(a[i] + b[i])\n return new_list",
"def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result",
"def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]",
"def _addVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] + X2[i] for i in range(len(X1))]",
"def vector_add(v1, v2):\n return v1[0] + v2[0], v1[1] + v2[1]",
"def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]",
"def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]",
"def sum_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < vector_1.dimension:\n new_value = vector_1.coordinates[index] + vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector",
"def add(a, b):\n return np.array([x + y for x, y in zip(a, b)])",
"def add(a, b):\n return [a[i] + b[i] for i in range(2)]",
"def add(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],1.0]",
"def add_multiple_vectors(vectors: List[Vector]) -> Vector:\n\n #Checks if `vectors` has values\n assert vectors\n\n num_el = len(vectors[0])\n\n # Check if all elements are of same length\n assert all(len(v) == num_el for v in vectors)\n\n return [sum(vector[i] for vector in vectors) for i in range(num_el)]",
"def add4(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]",
"def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]",
"def vector_sum(vectors: List[Vector]) -> Vector:\n assert vectors, 'no vectors provided'\n\n num_elements = len(vectors[0])\n assert all(\n len(v) == num_elements for v in vectors), 'vectors must be the same length'\n\n return [sum(vec[i] for vec in vectors) for i in range(num_elements)]",
"def vector_sum(vectors: List[Vector]) -> Vector:\n # Check that vectors is not empty\n assert vectors, \"no vectors provided!\"\n\n # Check the vectors are all the same size\n num_elements = len(vectors[0])\n assert all(len(v) == num_elements for v in vectors), \"different sizes!\"\n\n return [sum(vector[i] for vector in vectors)\n for i in range(num_elements)]",
"def testSum(self):\n v1 = Vector(1, 2, 3)\n v2 = Vector(4, 5, 6)\n v1 += v2\n assert(len(v1) == 3)\n assert v1[0] == 5\n assert v1[1] == 7\n assert v1[2] == 9\n\n v1 = Vector(9, 8, 7)\n v2 = Vector(3, 2, 1)\n v1 -= v2\n assert len(v1) == 3\n assert v1[0] == 6\n assert v1[1] == 6\n assert v1[2] == 6",
"def add_vectors(coord, vector):\n return tuple(c1+c2 for c1,c2 in zip(coord, vector))",
"def vector_sum(vectors: List[Vector]) -> Vector:\n # Check that vectors is not empty\n assert vectors, \"no vectors provided!\"\n\n # Check the vectors are all the same size\n num_elements = len(vectors[0])\n assert all(len(v) == num_elements for v in vectors), \"different sizes!\"\n\n # the i-th element of the result is the sum of every vector[i]\n return [sum(vector[i] for vector in vectors)\n for i in range(num_elements)]",
"def add_lists(first, second):\n\n copy = []\n for (i, j) in zip(first, second):\n i += j\n copy.append(i)\n\n return copy",
"def vec_product(vec1: List[int], vec2: List[int]) -> int:\n return sum(map(lambda v1, v2: v1 * v2, vec1, vec2))",
"def test__vector_addition__given_two_vector__return_correct_vector():\n assert Vector((0, 1, 2)) + Vector((3, 4, 5)) == Vector((3, 5, 7))"
] | [
"0.8196496",
"0.79972655",
"0.7761827",
"0.7735001",
"0.7697612",
"0.7670196",
"0.76058555",
"0.7546141",
"0.752927",
"0.748474",
"0.7452506",
"0.7419544",
"0.7374155",
"0.7345036",
"0.7340425",
"0.73381585",
"0.72459424",
"0.716269",
"0.7106756",
"0.70690817",
"0.70615125",
"0.6976053",
"0.6924945",
"0.69007593",
"0.68244624",
"0.68232137",
"0.67824537",
"0.6648141",
"0.66351",
"0.6485694"
] | 0.8270033 | 0 |
Given a start point, a normalized direction, and a distance, returns a point Distance away from Point along Direction | def getPointAwayFrom(startPoint, direction, distance):
x = vectorMultiply(direction, distance)
return vectorAdd(startPoint, x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_distance_from_point(self, pstart, p_end):\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n\n distance = numpy.linalg.norm(a - b)\n\n return distance",
"def dist(self, point: np.array):\n return np.linalg.norm(\n np.cross(point - self.r_start, self.direction), axis=1) / \\\n np.linalg.norm(self.direction)",
"def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm",
"def distance(self, p=None, l=None):\n if l is None:\n d = p - self.zero\n n = np.zeros(3)\n # try:\n # n = d - np.dot(d, self.direction) * self.direction\n # except RuntimeWarning:\n # print(d, self.direction)\n # return norm(n)\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n n = d - np.dot(d, self.direction) * self.direction\n # print(n, norm(n))\n if len(w) > 0 and issubclass(w[-1].category, RuntimeWarning):\n # Todo: check w/ Ram if this is what he meant to do when catch a warning: n = np.zeros(3)\n # n = np.zeros(3)\n # print(d, self.direction)\n pass\n return norm(n)\n else:\n normal = np.cross(self.direction, l.direction)\n n = norm(normal)\n if n < sys.float_info.min:\n # Lines are parallel.\n return self.distance(p=l.zero)\n offset = np.dot(l.zero - self.zero, normal) / n\n return np.abs(offset)",
"def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2",
"def dir_vector(p1: Vec2, p2: Vec2) -> Vec2:\n return Vec2(p2.x - p1.x, p2.y - p1.y)",
"def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)",
"def distanceToPoint(self, point):\n\n length = self.length\n if not length:\n raise ValueError('Cannot calculate point distance. Invalid line segment.')\n\n s = self.start\n e = self.end\n deltaX = e.x - s.x\n deltaY = e.y - s.y\n\n distance = abs(deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y)/length.raw\n\n B = deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y\n AbsB = abs(B)\n D = math.sqrt(deltaX*deltaX + deltaY*deltaY)\n DPrime = 1.0/math.pow(deltaX*deltaX + deltaY*deltaY, 3.0/2.0)\n bBD = B/(AbsB*D)\n\n pointXErr = point.xUnc*abs(deltaY*B/(AbsB*D))\n pointYErr = point.yUnc*abs(deltaX*B/(AbsB*D))\n startXErr = s.xUnc*abs(AbsB*DPrime + bBD*(point.y - e.y))\n startYErr = s.yUnc*abs(AbsB*DPrime + bBD*(e.x - point.x))\n endXErr = e.xUnc*abs(bBD*(s.y - point.y) - AbsB*DPrime)\n endYErr = e.yUnc*abs(bBD*(point.x - s.x) - AbsB*DPrime)\n error = pointXErr + pointYErr + startXErr + startYErr + endXErr + endYErr\n\n return NumericUtils.toValueUncertainty(distance, error)",
"def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5",
"def distance_to(self, p):\n closest_pt = self.closest_point_to(p)\n return np.linalg.norm(p - closest_pt)",
"def direction(point0, point1):\n d = [0, 0, 0]\n vector = [point1[0] - point0[0], point1[1] - point0[1]]\n d[1] = math.atan2(vector[1], vector[0])\n while d[1] <= -np.pi / 2:\n d[1] += np.pi\n return d",
"def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance",
"def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )",
"def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance",
"def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5",
"def driving_distance(self, area_graph, startpoint, endpoint):\n\n # Find nodes closest to the specified Coordinates\n node_start = ox.utils.get_nearest_node(area_graph, startpoint)\n node_stop = ox.utils.get_nearest_node(area_graph, endpoint)\n # Calculate the shortest network distance between the nodes via the edges\n # \"length\" attribute\n try:\n distance = nx.shortest_path_length(\n self.area_graph, node_start, node_stop, weight=\"length\")\n except:\n logger.error(str(self.thread_count) + \" Can not calculate path from (\" + str(startpoint[0]) +\n \",\" + str(startpoint[0]) + \")\" + \" to (\" +\n str(endpoint[0]) + \",\" +\n str(endpoint[1]) + \"). Using fallback function\")\n distance = self.point_distance(startpoint, endpoint)\n return distance",
"def point_to_point(p1: Vec2, p2: Vec2):\n return length(dir_vector(p1, p2))",
"def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)",
"def distanceFromPoint(self, point):\n return Vector.createFromTwoPoints(point, self.crossLine(self.getHeight(point))).norm",
"def distance(point_1, point_2, units=1):\n\n distance = (((point_2[0]-point_1[0])*units)**2.0\n + ((point_2[1]-point_1[1])*units)**2.0\n + ((point_2[2]-point_1[2])*units)**2.0)**0.5\n \n return distance",
"def getDistanceToPoint(self, p, returnParaPerp = False):\n if not isinstance(p, VectorN) or len(p) != len(self.mOrigin):\n raise ValueError(\"p must be a point of dimension \" + str(len(self.mOrigin)))\n dirToP = p - self.mOrigin\n if dirToP.dot(self.mDirection) < 0:\n return None\n paraPart = dirToP.dot(self.mDirection) * self.mDirection\n perpPart = dirToP - paraPart\n if returnParaPerp:\n return (perpPart.magnitude(), paraPart, perpPart)\n else:\n return perpPart.magnitude()",
"def distance_point_line_3d(point: Vector, start: Vector, end: Vector) -> float:\n if start.isclose(end):\n raise ZeroDivisionError('Not a line.')\n v1 = point - start\n # point projected onto line start to end:\n v2 = (end - start).project(v1)\n # Pythagoras:\n return math.sqrt(v1.magnitude_square - v2.magnitude_square)",
"def get_y_dir_distance_from_start_point(self, start_point):\n y_dist_dir = self.odom.pose.pose.position.y - start_point.y\n\n return y_dist_dir",
"def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)",
"def distance(p1, p2):\n return math.hypot(p1.x-p2.x, p1.y-p2.y)",
"def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)",
"def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1):\n point = np.array(self.image.size()) / 2\n return spsd.euclidean(point, [self.x, self.y])",
"def distance(d1, d2):\n projection_onto_plane = d2 - projection(d1, d2)\n dist = np.linalg.norm(projection_onto_plane)\n\n return dist",
"def get_direction(self, start_direction, **kwargs):\n return self.directions.get(start_direction)",
"def getDistance(p1, p2):\n\tdist = la.norm(p2 - p1)\n\treturn dist"
] | [
"0.6927091",
"0.6801812",
"0.6334007",
"0.630479",
"0.62662554",
"0.6253193",
"0.62369823",
"0.62275094",
"0.6216382",
"0.61950785",
"0.61929655",
"0.61823815",
"0.61805975",
"0.6173806",
"0.61560535",
"0.61286265",
"0.61158824",
"0.61145276",
"0.6091202",
"0.6085183",
"0.607863",
"0.6072974",
"0.6069169",
"0.6061574",
"0.6048127",
"0.602786",
"0.60263234",
"0.6026263",
"0.60232204",
"0.60214823"
] | 0.77925324 | 0 |
Swap empty space with puzzle symbol above it in the grid. y is the index of the tuple containing the empty space in the grid, and x is the index of the empty space in its tuple. | def swap_up(grid, y, x):
symbol = grid[y - 1][x]
# creates new tuples for the rows changed
new_toprow = grid[y - 1][:x] + tuple('*')
new_botrow = grid[y][:x] + tuple(symbol)
# adds the space after the swapped piece for both rows
if x < len(grid[y]):
new_toprow += tuple(grid[y - 1][x + 1:])
new_botrow += tuple(grid[y][x + 1:])
return grid[:y-1] + (new_toprow, new_botrow) + grid[y+1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swap_down(grid, y, x):\n symbol = grid[y + 1][x]\n\n new_botrow = grid[y + 1][:x] + ('*',)\n new_toprow = grid[y][:x] + (symbol,)\n\n if x < len(grid[y]):\n new_toprow += tuple(grid[y][x + 1:])\n new_botrow += tuple(grid[y + 1][x + 1:])\n\n return grid[:y] + (new_toprow, new_botrow) + grid[y + 2:]",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def swap_left(grid, y, x):\n symbol = grid[y][x - 1]\n\n new_row = grid[y][:x - 1] + ('*',) + (symbol,)\n\n if x < len(grid[y]) - 1:\n new_row += grid[y][x + 1:]\n\n return grid[:y] + (new_row,) + grid[y+1:]",
"def swap_right(grid, y, x):\n symbol = grid[y][x + 1]\n\n # creates a new tuple for the changed row\n new_row = grid[y][:x] + (symbol,) + ('*',)\n\n # adds the space after the swapped piece for the changed row\n if x < len(grid[y]) - 2:\n new_row += tuple(grid[y][x + 2:])\n\n return grid[:y] + (new_row,) + grid[y+1:]",
"def switch(self, x1, y1, x2, y2):\n # both positions should not be empty\n assert (self.is_empty(x1, y1) is not True) or (self.is_empty(x2, y2) is not True)\n # x1,y1 is empty\n if self.is_empty(x1, y1):\n self.grid[y1][x1] = self.grid[y2][x2]\n self.cells[self.grid[y2][x2]].x = x1\n self.cells[self.grid[y2][x2]].y = y1\n self.grid[y2][x2] = ' '\n self.update_cost(self.grid[y1][x1])\n # x2,y2 is empty\n elif self.is_empty(x2, y2):\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = ' '\n self.update_cost(self.grid[y2][x2])\n else:\n n = self.grid[y2][x2]\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = n\n self.cells[n].x = x1\n self.cells[n].y = y1\n self.update_cost(self.grid[y1][x1])\n self.update_cost(self.grid[y2][x2])",
"def __swap(self, x1, y1, x2, y2):\n temp = self.puzzle.copy()\n temp[x1, y1] = temp[x2, y2]\n temp[x2, y2] = self.puzzle[x1, y1]\n return temp",
"def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)",
"def _swap(self, x1, y1, x2, y2):\n puzzle_copy = [list(row) for row in self.position] # copy the puzzle\n puzzle_copy[x1][y1], puzzle_copy[x2][y2] = puzzle_copy[x2][y2], puzzle_copy[x1][y1]\n\n return puzzle_copy",
"def shade_neighbours(x: int, y: int) -> None:\r\n if x > 0:\r\n safeboard[x-1, y] = 0\r\n if x < shape-1:\r\n safeboard[x+1, y] = 0\r\n if y > 0:\r\n safeboard[x, y-1] = 0\r\n if y < shape-1:\r\n safeboard[x, y+1] = 0\r\n safeboard[x, y] = 0",
"def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '",
"def toggle_xy(self, x, y):\r\n\t\tself.grid[y, x] = False if self.grid[y,x] else True",
"def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False",
"def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return",
"def put_piece(self, x: int, y: int):\n if self.game_board.board_values[x, y] != 0:\n print(x, y)\n raise exceptions.NonEmptySlotError(\"You must select a empty slot!\")\n\n self.game_board.put_piece(x, y, self.current_piece)",
"def swap(marker, mx, x2, my, y2):\n # creates a deep copy\n # each if statement checks whether to move the piece N S E W by\n # comparing the current coordinates and the new coordinates\n map = [x[:] for x in marker]\n map[my][mx], map[y2][x2] = map[y2][x2], map[my][mx]\n if my < y2:\n map[my+1][mx] = \".\"\n elif my > y2:\n map[my-1][mx] = \".\"\n elif mx < x2:\n map[my][mx+1] = \".\"\n else:\n map[my][mx-1] = \".\"\n return map",
"def __erase(self, x: int, y: int) -> None:\n self.__maze[x, y] = 0",
"def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return",
"def swap(self, direction):\n directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),}\n new_row = self.__blank_box[0] + directions[direction][0]\n new_col = self.__blank_box[1] + directions[direction][1]\n new_position = self.__get_box((new_row*self.__length)+new_col)\n self.__board[self.__blank_box[0]][self.__blank_box[1]] \\\n = new_position\n self.__board[new_row][new_col] = None\n self.__blank_box = (new_row, new_col)\n self.__set_possibilities()\n self.__previous_move = direction",
"def extensions(self):\n def check_empty_space(gridcopy):\n \"\"\"\n Return the place of the empty space.\n\n @type gridcopy: tuple[tuple[str]]\n @rtype: tuple\n\n # >>> grid = ((\"*\", \"2\", \"3\"), (\"4\", \"5\", \"6\"))\n # >>> check_empty_space(grid)\n # (0, 0)\n # >>> grid = ((\"1\", \"2\", \"3\"), (\"4\", \"5\", \"6\"), (\"7\" , \"8\" , \"*\"))\n # >>> check_empty_space(grid)\n # (2, 2)\n \"\"\"\n for i in range(len(gridcopy)):\n if \"*\" in gridcopy[i]:\n return i, gridcopy[i].index(\"*\")\n # Raise Error if there is no empty space in the puzzle.\n return AssertionError, \"No empty space in the puzzle.\"\n\n def tuple_to_list(tup):\n \"\"\"\n Return a list which was originally tuple.\n\n @type tup: tuple\n @rtype: list[str]\n \"\"\"\n return [element for element in tup]\n\n def shift_right_left(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n # Extract the specific row to change.\n current_row = gridcopy[row_num]\n # Change the current_row to list in order to mutate.\n current_row_lst = tuple_to_list(current_row)\n if location[1] != 0:\n # Going left!\n # (\"5\", \"*\", \"6\") to (\"*\", \"5\", \"6\")\n current_row_lst[column_num] = current_row_lst[column_num - 1]\n current_row_lst[column_num - 1] = \"*\"\n # Switch back to tuple\n left_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = left_altered\n result.append(tuple(board_lst))\n if location[1] != self.m - 1:\n # Going right!\n # (\"5\", \"*\", \"6\") to (\"5\", \"6\", \"*\")\n # Reset the values to swap right.\n current_row = gridcopy[row_num]\n current_row_lst = tuple_to_list(current_row)\n current_row_lst[column_num] = current_row_lst[column_num + 1]\n current_row_lst[column_num + 1] = \"*\"\n # Switch back to tuple\n right_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = right_altered\n result.append(tuple(board_lst))\n return result\n\n def shift_down_right(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. 
If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n if location[0] != 0:\n current_row = gridcopy[location[0]]\n upper_row = gridcopy[location[0] - 1]\n current_row_lst = tuple_to_list(current_row)\n upper_row_lst = tuple_to_list(upper_row)\n current_row_lst[column_num] = upper_row_lst[column_num]\n upper_row_lst[column_num] = \"*\"\n current_row, upper_row = tuple(current_row_lst), \\\n tuple(upper_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = current_row\n board_lst[row_num - 1] = upper_row\n upper_altered = tuple(board_lst)\n result.append(upper_altered)\n if location[0] != self.n - 1:\n upper_row = gridcopy[location[0] + 1]\n lower_row = gridcopy[location[0]]\n upper_lst = tuple_to_list(upper_row)\n lower_lst = tuple_to_list(lower_row)\n lower_lst[location[1]] = upper_lst[location[1]]\n upper_lst[location[1]] = \"*\"\n upper_row, lower_row = tuple(upper_lst), tuple(lower_lst)\n big_lst = tuple_to_list(gridcopy)\n big_lst[location[0]] = lower_row\n big_lst[location[0] + 1] = upper_row\n changed = tuple(big_lst)\n result.append(changed)\n return result\n\n grid = self.from_grid\n # Location is the tuple indicator of location of the empty space.\n # (Row, Column)\n location = check_empty_space(grid)\n row = location[0]\n column = location[1]\n possibilities = shift_right_left(grid, row, column) +\\\n shift_down_right(grid, row, column)\n return [MNPuzzle(x, self.to_grid) for x in possibilities]",
"def respawn(x,y):\n\t\tpos = Board.prev_j+1\n\t\twhile pos<y:\n\t\t\tif (Board.board[x][pos]==' ' or Board.board[x][pos]=='.') and Board.board[x+1][pos]!='-':\n\t\t\t\tBoard.board[x][pos]='M'\n\t\t\t\tbreak\n\t\t\tpos+=1",
"def __fix_tuple(self, xy_tup):\n if self.__swapxy:\n return xy_tup[::-1]\n return xy_tup",
"def position_to_grid(i, j):\n i -= i % SPACING - X % SPACING\n j -= j % SPACING - Y % SPACING\n return [i, j]",
"def grid2alg(grid_x=None, grid_y=None):\n return (\n chr(0x61 + grid_x) if grid_x is not None else '',\n chr(7 - grid_y + 0x31) if grid_y is not None else '',\n )",
"def insert_word_vertically(grid, word, x, y):\n # check if the word fits in this space\n for i in range(len(word)):\n grid_char = grid[y + i][x]\n if grid_char is not None and grid_char != word[i]:\n return False\n\n # insert the word\n for i in range(len(word)):\n grid[y + i][x] = word[i]\n\n return True",
"def setBusy(self, x, y):\n changes = []\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n changes.append((new_x, new_y))\n self.gridBusy[new_x][new_y] = self.onBoard\n self.correctPending()\n return changes",
"def swap(self, x, y):\n self._data[x], self._data[y] = self._data[y], self._data[x]",
"def _swapxy(data):\n return [(y, x) for (x, y) in data]",
"def wrap(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'wrap' action, then exit the function\n if not self.can_move_wrap(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile to the other side of the grid\n else:\n # We need to find the opposite position of the '0' tile\n index_to_swap = index\n\n # If the '0' tile is in the top-left corner, then we need to switch it with the tile in the top-right corner\n if index == 0:\n index_to_swap = self.puzzle_width - 1\n\n # If the '0' tile is in the top-right corner, then we need to switch it with the tile in the top-left corner\n elif index == self.puzzle_width - 1:\n index_to_swap = 0\n\n # If the tile is in the bottom-left corner, then we need to switch with the tile in the bottom-right corner\n elif index == self.puzzle_length - self.puzzle_width:\n index_to_swap = self.puzzle_length - 1\n\n # If the tile is in the bottom-right corner, then we need to switch with the tile in the bottom-left corner\n elif index == self.puzzle_length - 1:\n index_to_swap = self.puzzle_length - self.puzzle_width\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_wrap_move, tile_that_was_swapped",
"def swap_tile(grid: tuple[int, ...], move: int) -> tuple[int, ...]:\n tile_to_swap: int = grid.index(0) + move\n value_to_swap: int = grid[tile_to_swap]\n\n mutable_grid: list[int] = list(grid)\n mutable_grid[grid.index(0)] = value_to_swap\n mutable_grid[tile_to_swap] = 0\n swapped_grid = tuple(mutable_grid)\n\n return swapped_grid",
"def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\""
] | [
"0.696965",
"0.6735839",
"0.64703715",
"0.6397285",
"0.61380106",
"0.607606",
"0.59848136",
"0.5904753",
"0.5846186",
"0.5727123",
"0.56595033",
"0.5636198",
"0.56330717",
"0.5625095",
"0.5558129",
"0.5525417",
"0.54873925",
"0.54723173",
"0.5454302",
"0.54322326",
"0.54318273",
"0.5421151",
"0.54210275",
"0.5406118",
"0.53986025",
"0.5347716",
"0.53410876",
"0.53295976",
"0.53131694",
"0.531137"
] | 0.7374504 | 0 |
Swap empty space with puzzle symbol below it in the grid. y is the index of the tuple containing the empty space in grid, and x is the index of the empty space in its tuple. | def swap_down(grid, y, x):
symbol = grid[y + 1][x]
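    # creates new tuples for the two rows being swapped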
new_botrow = grid[y + 1][:x] + ('*',)
new_toprow = grid[y][:x] + (symbol,)
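    # adds the rest of each row after the swapped piece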
if x < len(grid[y]):
new_toprow += tuple(grid[y][x + 1:])
new_botrow += tuple(grid[y + 1][x + 1:])
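    # reassemble the grid with the two updated rows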
return grid[:y] + (new_toprow, new_botrow) + grid[y + 2:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swap_up(grid, y, x):\n symbol = grid[y - 1][x]\n\n # creates new tuples for the rows changed\n new_toprow = grid[y - 1][:x] + tuple('*')\n new_botrow = grid[y][:x] + tuple(symbol)\n\n # adds the space after the swapped piece for both rows\n if x < len(grid[y]):\n new_toprow += tuple(grid[y - 1][x + 1:])\n new_botrow += tuple(grid[y][x + 1:])\n\n return grid[:y-1] + (new_toprow, new_botrow) + grid[y+1:]",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def swap_right(grid, y, x):\n symbol = grid[y][x + 1]\n\n # creates a new tuple for the changed row\n new_row = grid[y][:x] + (symbol,) + ('*',)\n\n # adds the space after the swapped piece for the changed row\n if x < len(grid[y]) - 2:\n new_row += tuple(grid[y][x + 2:])\n\n return grid[:y] + (new_row,) + grid[y+1:]",
"def swap_left(grid, y, x):\n symbol = grid[y][x - 1]\n\n new_row = grid[y][:x - 1] + ('*',) + (symbol,)\n\n if x < len(grid[y]) - 1:\n new_row += grid[y][x + 1:]\n\n return grid[:y] + (new_row,) + grid[y+1:]",
"def switch(self, x1, y1, x2, y2):\n # both positions should not be empty\n assert (self.is_empty(x1, y1) is not True) or (self.is_empty(x2, y2) is not True)\n # x1,y1 is empty\n if self.is_empty(x1, y1):\n self.grid[y1][x1] = self.grid[y2][x2]\n self.cells[self.grid[y2][x2]].x = x1\n self.cells[self.grid[y2][x2]].y = y1\n self.grid[y2][x2] = ' '\n self.update_cost(self.grid[y1][x1])\n # x2,y2 is empty\n elif self.is_empty(x2, y2):\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = ' '\n self.update_cost(self.grid[y2][x2])\n else:\n n = self.grid[y2][x2]\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = n\n self.cells[n].x = x1\n self.cells[n].y = y1\n self.update_cost(self.grid[y1][x1])\n self.update_cost(self.grid[y2][x2])",
"def __swap(self, x1, y1, x2, y2):\n temp = self.puzzle.copy()\n temp[x1, y1] = temp[x2, y2]\n temp[x2, y2] = self.puzzle[x1, y1]\n return temp",
"def _swap(self, x1, y1, x2, y2):\n puzzle_copy = [list(row) for row in self.position] # copy the puzzle\n puzzle_copy[x1][y1], puzzle_copy[x2][y2] = puzzle_copy[x2][y2], puzzle_copy[x1][y1]\n\n return puzzle_copy",
"def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)",
"def shade_neighbours(x: int, y: int) -> None:\r\n if x > 0:\r\n safeboard[x-1, y] = 0\r\n if x < shape-1:\r\n safeboard[x+1, y] = 0\r\n if y > 0:\r\n safeboard[x, y-1] = 0\r\n if y < shape-1:\r\n safeboard[x, y+1] = 0\r\n safeboard[x, y] = 0",
"def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '",
"def put_piece(self, x: int, y: int):\n if self.game_board.board_values[x, y] != 0:\n print(x, y)\n raise exceptions.NonEmptySlotError(\"You must select a empty slot!\")\n\n self.game_board.put_piece(x, y, self.current_piece)",
"def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False",
"def toggle_xy(self, x, y):\r\n\t\tself.grid[y, x] = False if self.grid[y,x] else True",
"def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return",
"def respawn(x,y):\n\t\tpos = Board.prev_j+1\n\t\twhile pos<y:\n\t\t\tif (Board.board[x][pos]==' ' or Board.board[x][pos]=='.') and Board.board[x+1][pos]!='-':\n\t\t\t\tBoard.board[x][pos]='M'\n\t\t\t\tbreak\n\t\t\tpos+=1",
"def swap(self, direction):\n directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),}\n new_row = self.__blank_box[0] + directions[direction][0]\n new_col = self.__blank_box[1] + directions[direction][1]\n new_position = self.__get_box((new_row*self.__length)+new_col)\n self.__board[self.__blank_box[0]][self.__blank_box[1]] \\\n = new_position\n self.__board[new_row][new_col] = None\n self.__blank_box = (new_row, new_col)\n self.__set_possibilities()\n self.__previous_move = direction",
"def swap(marker, mx, x2, my, y2):\n # creates a deep copy\n # each if statement checks whether to move the piece N S E W by\n # comparing the current coordinates and the new coordinates\n map = [x[:] for x in marker]\n map[my][mx], map[y2][x2] = map[y2][x2], map[my][mx]\n if my < y2:\n map[my+1][mx] = \".\"\n elif my > y2:\n map[my-1][mx] = \".\"\n elif mx < x2:\n map[my][mx+1] = \".\"\n else:\n map[my][mx-1] = \".\"\n return map",
"def __erase(self, x: int, y: int) -> None:\n self.__maze[x, y] = 0",
"def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return",
"def insert_word_vertically(grid, word, x, y):\n # check if the word fits in this space\n for i in range(len(word)):\n grid_char = grid[y + i][x]\n if grid_char is not None and grid_char != word[i]:\n return False\n\n # insert the word\n for i in range(len(word)):\n grid[y + i][x] = word[i]\n\n return True",
"def __fix_tuple(self, xy_tup):\n if self.__swapxy:\n return xy_tup[::-1]\n return xy_tup",
"def grid2alg(grid_x=None, grid_y=None):\n return (\n chr(0x61 + grid_x) if grid_x is not None else '',\n chr(7 - grid_y + 0x31) if grid_y is not None else '',\n )",
"def swap(self, x, y):\n self._data[x], self._data[y] = self._data[y], self._data[x]",
"def extensions(self):\n def check_empty_space(gridcopy):\n \"\"\"\n Return the place of the empty space.\n\n @type gridcopy: tuple[tuple[str]]\n @rtype: tuple\n\n # >>> grid = ((\"*\", \"2\", \"3\"), (\"4\", \"5\", \"6\"))\n # >>> check_empty_space(grid)\n # (0, 0)\n # >>> grid = ((\"1\", \"2\", \"3\"), (\"4\", \"5\", \"6\"), (\"7\" , \"8\" , \"*\"))\n # >>> check_empty_space(grid)\n # (2, 2)\n \"\"\"\n for i in range(len(gridcopy)):\n if \"*\" in gridcopy[i]:\n return i, gridcopy[i].index(\"*\")\n # Raise Error if there is no empty space in the puzzle.\n return AssertionError, \"No empty space in the puzzle.\"\n\n def tuple_to_list(tup):\n \"\"\"\n Return a list which was originally tuple.\n\n @type tup: tuple\n @rtype: list[str]\n \"\"\"\n return [element for element in tup]\n\n def shift_right_left(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n # Extract the specific row to change.\n current_row = gridcopy[row_num]\n # Change the current_row to list in order to mutate.\n current_row_lst = tuple_to_list(current_row)\n if location[1] != 0:\n # Going left!\n # (\"5\", \"*\", \"6\") to (\"*\", \"5\", \"6\")\n current_row_lst[column_num] = current_row_lst[column_num - 1]\n current_row_lst[column_num - 1] = \"*\"\n # Switch back to tuple\n left_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = left_altered\n result.append(tuple(board_lst))\n if location[1] != self.m - 1:\n # Going right!\n # (\"5\", \"*\", \"6\") to (\"5\", \"6\", \"*\")\n # Reset the values to swap right.\n current_row = gridcopy[row_num]\n current_row_lst = tuple_to_list(current_row)\n current_row_lst[column_num] = current_row_lst[column_num + 1]\n current_row_lst[column_num + 1] = \"*\"\n # Switch back to tuple\n right_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = right_altered\n result.append(tuple(board_lst))\n return result\n\n def shift_down_right(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. 
If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n if location[0] != 0:\n current_row = gridcopy[location[0]]\n upper_row = gridcopy[location[0] - 1]\n current_row_lst = tuple_to_list(current_row)\n upper_row_lst = tuple_to_list(upper_row)\n current_row_lst[column_num] = upper_row_lst[column_num]\n upper_row_lst[column_num] = \"*\"\n current_row, upper_row = tuple(current_row_lst), \\\n tuple(upper_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = current_row\n board_lst[row_num - 1] = upper_row\n upper_altered = tuple(board_lst)\n result.append(upper_altered)\n if location[0] != self.n - 1:\n upper_row = gridcopy[location[0] + 1]\n lower_row = gridcopy[location[0]]\n upper_lst = tuple_to_list(upper_row)\n lower_lst = tuple_to_list(lower_row)\n lower_lst[location[1]] = upper_lst[location[1]]\n upper_lst[location[1]] = \"*\"\n upper_row, lower_row = tuple(upper_lst), tuple(lower_lst)\n big_lst = tuple_to_list(gridcopy)\n big_lst[location[0]] = lower_row\n big_lst[location[0] + 1] = upper_row\n changed = tuple(big_lst)\n result.append(changed)\n return result\n\n grid = self.from_grid\n # Location is the tuple indicator of location of the empty space.\n # (Row, Column)\n location = check_empty_space(grid)\n row = location[0]\n column = location[1]\n possibilities = shift_right_left(grid, row, column) +\\\n shift_down_right(grid, row, column)\n return [MNPuzzle(x, self.to_grid) for x in possibilities]",
"def position_to_grid(i, j):\n i -= i % SPACING - X % SPACING\n j -= j % SPACING - Y % SPACING\n return [i, j]",
"def setBusy(self, x, y):\n changes = []\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n changes.append((new_x, new_y))\n self.gridBusy[new_x][new_y] = self.onBoard\n self.correctPending()\n return changes",
"def wrap(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'wrap' action, then exit the function\n if not self.can_move_wrap(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile to the other side of the grid\n else:\n # We need to find the opposite position of the '0' tile\n index_to_swap = index\n\n # If the '0' tile is in the top-left corner, then we need to switch it with the tile in the top-right corner\n if index == 0:\n index_to_swap = self.puzzle_width - 1\n\n # If the '0' tile is in the top-right corner, then we need to switch it with the tile in the top-left corner\n elif index == self.puzzle_width - 1:\n index_to_swap = 0\n\n # If the tile is in the bottom-left corner, then we need to switch with the tile in the bottom-right corner\n elif index == self.puzzle_length - self.puzzle_width:\n index_to_swap = self.puzzle_length - 1\n\n # If the tile is in the bottom-right corner, then we need to switch with the tile in the bottom-left corner\n elif index == self.puzzle_length - 1:\n index_to_swap = self.puzzle_length - self.puzzle_width\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_wrap_move, tile_that_was_swapped",
"def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"",
"def swap_tile(grid: tuple[int, ...], move: int) -> tuple[int, ...]:\n tile_to_swap: int = grid.index(0) + move\n value_to_swap: int = grid[tile_to_swap]\n\n mutable_grid: list[int] = list(grid)\n mutable_grid[grid.index(0)] = value_to_swap\n mutable_grid[tile_to_swap] = 0\n swapped_grid = tuple(mutable_grid)\n\n return swapped_grid",
"def update_player(self, old_y, old_x, new_y, new_x):\n self.maze[old_y][old_x] = \" \"\n self.maze[new_y][new_x] = \"m\""
] | [
"0.74180424",
"0.66296875",
"0.6549329",
"0.6465497",
"0.6142597",
"0.6029525",
"0.5879505",
"0.5810912",
"0.57844555",
"0.5749099",
"0.5671146",
"0.56583124",
"0.55875087",
"0.5543172",
"0.55128974",
"0.5506123",
"0.5487629",
"0.5451147",
"0.54361534",
"0.5432416",
"0.54190916",
"0.5414266",
"0.54118836",
"0.54002005",
"0.53995365",
"0.53366536",
"0.5307845",
"0.53002757",
"0.5293325",
"0.52873677"
] | 0.71059775 | 1 |
Swap empty space with puzzle symbol to the right of it in the grid. y is the index of the tuple containing the empty space in grid, and x is the index of the empty space in its tuple. | def swap_right(grid, y, x):
symbol = grid[y][x + 1]
# creates a new tuple for the changed row
new_row = grid[y][:x] + (symbol,) + ('*',)
# adds the space after the swapped piece for the changed row
if x < len(grid[y]) - 2:
new_row += tuple(grid[y][x + 2:])
return grid[:y] + (new_row,) + grid[y+1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swap_up(grid, y, x):\n symbol = grid[y - 1][x]\n\n # creates new tuples for the rows changed\n new_toprow = grid[y - 1][:x] + tuple('*')\n new_botrow = grid[y][:x] + tuple(symbol)\n\n # adds the space after the swapped piece for both rows\n if x < len(grid[y]):\n new_toprow += tuple(grid[y - 1][x + 1:])\n new_botrow += tuple(grid[y][x + 1:])\n\n return grid[:y-1] + (new_toprow, new_botrow) + grid[y+1:]",
"def swap_down(grid, y, x):\n symbol = grid[y + 1][x]\n\n new_botrow = grid[y + 1][:x] + ('*',)\n new_toprow = grid[y][:x] + (symbol,)\n\n if x < len(grid[y]):\n new_toprow += tuple(grid[y][x + 1:])\n new_botrow += tuple(grid[y + 1][x + 1:])\n\n return grid[:y] + (new_toprow, new_botrow) + grid[y + 2:]",
"def swap_left(grid, y, x):\n symbol = grid[y][x - 1]\n\n new_row = grid[y][:x - 1] + ('*',) + (symbol,)\n\n if x < len(grid[y]) - 1:\n new_row += grid[y][x + 1:]\n\n return grid[:y] + (new_row,) + grid[y+1:]",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def switch(self, x1, y1, x2, y2):\n # both positions should not be empty\n assert (self.is_empty(x1, y1) is not True) or (self.is_empty(x2, y2) is not True)\n # x1,y1 is empty\n if self.is_empty(x1, y1):\n self.grid[y1][x1] = self.grid[y2][x2]\n self.cells[self.grid[y2][x2]].x = x1\n self.cells[self.grid[y2][x2]].y = y1\n self.grid[y2][x2] = ' '\n self.update_cost(self.grid[y1][x1])\n # x2,y2 is empty\n elif self.is_empty(x2, y2):\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = ' '\n self.update_cost(self.grid[y2][x2])\n else:\n n = self.grid[y2][x2]\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = n\n self.cells[n].x = x1\n self.cells[n].y = y1\n self.update_cost(self.grid[y1][x1])\n self.update_cost(self.grid[y2][x2])",
"def __swap(self, x1, y1, x2, y2):\n temp = self.puzzle.copy()\n temp[x1, y1] = temp[x2, y2]\n temp[x2, y2] = self.puzzle[x1, y1]\n return temp",
"def _swap(self, x1, y1, x2, y2):\n puzzle_copy = [list(row) for row in self.position] # copy the puzzle\n puzzle_copy[x1][y1], puzzle_copy[x2][y2] = puzzle_copy[x2][y2], puzzle_copy[x1][y1]\n\n return puzzle_copy",
"def swap_right(self, index):\n \n grid = self.from_grid\n new_grid = []\n for row in grid:\n new_grid.append(list(row))\n \n new_grid[index[0]][index[1]], new_grid[index[0]][index[1] + 1] = \\\n new_grid[index[0]][index[1] + 1], new_grid[index[0]][\n index[1]]\n for i in range(len(grid)):\n new_grid[i] = tuple(new_grid[i])\n \n return MNPuzzle(tuple(new_grid), self.to_grid)",
"def swap(self, direction):\n directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),}\n new_row = self.__blank_box[0] + directions[direction][0]\n new_col = self.__blank_box[1] + directions[direction][1]\n new_position = self.__get_box((new_row*self.__length)+new_col)\n self.__board[self.__blank_box[0]][self.__blank_box[1]] \\\n = new_position\n self.__board[new_row][new_col] = None\n self.__blank_box = (new_row, new_col)\n self.__set_possibilities()\n self.__previous_move = direction",
"def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return",
"def push_right (grid):\r\n \r\n #moves values right\r\n for row in range(4):\r\n for column in range(3,0,-1):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column-1]\r\n grid[row][column-1]=0\r\n \r\n \r\n #moves values right\r\n for row in range(4):\r\n for column in range(3,0,-1):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column-1]\r\n grid[row][column-1]=0 \r\n \r\n \r\n #checks for similar values and combine\r\n for row in range(4):\r\n for column in range(2,-1,-1):\r\n if grid[row][column]==grid[row][column+1]:\r\n grid[row][column+1]=2*grid[row][column+1]\r\n grid[row][column]=0\r\n \r\n \r\n #moves remaining values right \r\n for row in range(4):\r\n for column in range(3,0,-1):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column-1]\r\n grid[row][column-1]=0",
"def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '",
"def __fix_tuple(self, xy_tup):\n if self.__swapxy:\n return xy_tup[::-1]\n return xy_tup",
"def push_right (grid): \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j+1]==0: \r\n grid[i][j+1]=grid[i][j] \r\n grid[i][j]=0\r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j]==grid[i][j-1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j-1]=0\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j+1]==0: \r\n grid[i][j+1]=grid[i][j] \r\n grid[i][j]=0",
"def shade_neighbours(x: int, y: int) -> None:\r\n if x > 0:\r\n safeboard[x-1, y] = 0\r\n if x < shape-1:\r\n safeboard[x+1, y] = 0\r\n if y > 0:\r\n safeboard[x, y-1] = 0\r\n if y < shape-1:\r\n safeboard[x, y+1] = 0\r\n safeboard[x, y] = 0",
"def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False",
"def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)",
"def wrap(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'wrap' action, then exit the function\n if not self.can_move_wrap(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile to the other side of the grid\n else:\n # We need to find the opposite position of the '0' tile\n index_to_swap = index\n\n # If the '0' tile is in the top-left corner, then we need to switch it with the tile in the top-right corner\n if index == 0:\n index_to_swap = self.puzzle_width - 1\n\n # If the '0' tile is in the top-right corner, then we need to switch it with the tile in the top-left corner\n elif index == self.puzzle_width - 1:\n index_to_swap = 0\n\n # If the tile is in the bottom-left corner, then we need to switch with the tile in the bottom-right corner\n elif index == self.puzzle_length - self.puzzle_width:\n index_to_swap = self.puzzle_length - 1\n\n # If the tile is in the bottom-right corner, then we need to switch with the tile in the bottom-left corner\n elif index == self.puzzle_length - 1:\n index_to_swap = self.puzzle_length - self.puzzle_width\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_wrap_move, tile_that_was_swapped",
"def respawn(x,y):\n\t\tpos = Board.prev_j+1\n\t\twhile pos<y:\n\t\t\tif (Board.board[x][pos]==' ' or Board.board[x][pos]=='.') and Board.board[x+1][pos]!='-':\n\t\t\t\tBoard.board[x][pos]='M'\n\t\t\t\tbreak\n\t\t\tpos+=1",
"def _apply_tore(y_coordinate, x_coordinate, game_data):\n\n board_x = game_data['variables']['board_size']['x']\n board_y = game_data['variables']['board_size']['y']\n\n if x_coordinate > board_x:\n x_coordinate -= board_x\n if y_coordinate > board_y:\n y_coordinate -= board_y\n\n if x_coordinate < 1:\n x_coordinate += board_x\n if y_coordinate < 1:\n y_coordinate += board_y\n\n return y_coordinate, x_coordinate",
"def put_piece(self, x: int, y: int):\n if self.game_board.board_values[x, y] != 0:\n print(x, y)\n raise exceptions.NonEmptySlotError(\"You must select a empty slot!\")\n\n self.game_board.put_piece(x, y, self.current_piece)",
"def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return",
"def solve(self, board: List[List[str]]) -> None:\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def help(x: int, y: int):\n if x < 0 or x > height or y < 0 or y > width:\n return\n if board[x][y] is \"O\":\n board[x][y] = \"g\"\n help(x - 1, y)\n help(x, y - 1)\n help(x + 1, y)\n help(x, y + 1)\n\n for i in range(width + 1):\n if board[0][i] is \"O\":\n help(0, i)\n if board[height][i] is \"O\":\n help(height, i)\n for i in range(1, height):\n if board[i][0] is \"O\":\n help(i, 0)\n if board[i][width] is \"O\":\n help(i, width)\n\n print(board)\n for i in range(width + 1):\n for j in range(height + 1):\n if board[j][i] is \"O\":\n board[j][i] = \"X\"\n elif board[j][i] is \"g\":\n board[j][i] = \"O\"",
"def grid2alg(grid_x=None, grid_y=None):\n return (\n chr(0x61 + grid_x) if grid_x is not None else '',\n chr(7 - grid_y + 0x31) if grid_y is not None else '',\n )",
"def push_right (grid):\r\n for i in range(4):\r\n row = grid[i]\r\n \r\n if row == [0, 0 ,0 ,0]:\r\n continue\r\n for k in range(4):\r\n for j in range(2 ,-1, -1):\r\n if row[j+1] == 0:\r\n row[j+1] = row[j]\r\n row[j] = 0\r\n for l in range(2 ,-1, -1):\r\n if row[l+1] == row[l]:\r\n row[l+1] = row[l]*2\r\n row[l] = 0\r\n for j in range(2 ,-1, -1):\r\n if row[j+1] == 0:\r\n row[j+1] = row[j]\r\n row[j] = 0 \r\n grid[i] = row\r\n return grid",
"def solve_2x2(self):\n # move zero tile to the left-up corner\n self.update_puzzle(\"lu\")\n movements = \"rdlu\"\n for _ in range(3):\n self.update_puzzle(\"rdlu\")\n if self.row0_invariant(0):\n return \"lu\" + movements\n movements += \"rdlu\"\n # the final 2x2 cannot be solved\n return \"\"",
"def solve(self, board: List[List[str]]) -> None:\n n = len(board)\n if n == 0:\n return\n m = len(board[0])\n for i in range(m):\n self.traverse(board, 0, i, n, m)\n self.traverse(board, n - 1, i, n, m)\n for i in range(n):\n self.traverse(board, i, 0, n, m)\n self.traverse(board, i, m - 1, n, m)\n for i in range(n):\n for j in range(m):\n board[i][j] = 'X' if board[i][j] == 'O' else board[i][j]\n for i in range(n):\n for j in range(m):\n board[i][j] = 'O' if board[i][j] == '_' else board[i][j]",
"def solve_2x2(self):\r\n moves_str = \"\"\r\n # move zero to the most upper left\r\n zero_row, zero_col = self.current_position(0, 0)\r\n ups = zero_row - 0\r\n lefts = zero_col - 0\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n \r\n # get zero, one, two, three positions\r\n self.update_puzzle(moves_str)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n counter = 0\r\n while counter <= 3 and \\\r\n (zero_row != 0 or zero_col != 0 or \\\r\n one_row!= 0 or one_col != 1 or \\\r\n two_row != 1 or two_col != 0 or \\\r\n three_row != 1 or three_col != 1):\r\n move = CIRCLES[\"RIGHT_CIRCLES\"]\r\n moves_str += move\r\n self.update_puzzle(move)\r\n counter += 1\r\n zero_row, zero_col = self.current_position(0, 0)\r\n one_row, one_col = self.current_position(0, 1)\r\n two_row, two_col = self.current_position(1, 0)\r\n three_row, three_col = self.current_position(1, 1)\r\n \r\n print \"solve_2x2\"\r\n print moves_str\r\n print self._grid\r\n return moves_str",
"def solve_2x2(self):\n cur_row, cur_col = self.current_position(0, 0)\n move_str = 'u' * cur_row + 'l' * cur_col\n self.update_puzzle(move_str)\n if self.check_2x2_solved():\n return move_str\n else:\n while not self.check_2x2_solved():\n move_str += 'rdlu'\n self.update_puzzle('rdlu')\n return move_str",
"def toggle_xy(self, x, y):\r\n\t\tself.grid[y, x] = False if self.grid[y,x] else True"
] | [
"0.73663217",
"0.72030604",
"0.68011993",
"0.6653473",
"0.60992813",
"0.59700453",
"0.5856919",
"0.57826674",
"0.57598525",
"0.5718073",
"0.57124233",
"0.57094514",
"0.56896",
"0.5682443",
"0.56505144",
"0.5583225",
"0.5578579",
"0.55665314",
"0.5561678",
"0.55576843",
"0.55470455",
"0.55149275",
"0.5502888",
"0.5484517",
"0.54744685",
"0.54702777",
"0.5463805",
"0.54264355",
"0.5418915",
"0.5417509"
] | 0.7315443 | 1 |
Swap empty space with puzzle symbol to the left of it in the grid. y is the index of the tuple containing the empty space in grid, and x is the index of the empty space in its tuple. | def swap_left(grid, y, x):
symbol = grid[y][x - 1]
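    # creates a new tuple for the changed row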
new_row = grid[y][:x - 1] + ('*',) + (symbol,)
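    # adds the rest of the row after the swapped piece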
if x < len(grid[y]) - 1:
new_row += grid[y][x + 1:]
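    # reassemble the grid with the updated row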
return grid[:y] + (new_row,) + grid[y+1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swap_up(grid, y, x):\n symbol = grid[y - 1][x]\n\n # creates new tuples for the rows changed\n new_toprow = grid[y - 1][:x] + tuple('*')\n new_botrow = grid[y][:x] + tuple(symbol)\n\n # adds the space after the swapped piece for both rows\n if x < len(grid[y]):\n new_toprow += tuple(grid[y - 1][x + 1:])\n new_botrow += tuple(grid[y][x + 1:])\n\n return grid[:y-1] + (new_toprow, new_botrow) + grid[y+1:]",
"def swap_down(grid, y, x):\n symbol = grid[y + 1][x]\n\n new_botrow = grid[y + 1][:x] + ('*',)\n new_toprow = grid[y][:x] + (symbol,)\n\n if x < len(grid[y]):\n new_toprow += tuple(grid[y][x + 1:])\n new_botrow += tuple(grid[y + 1][x + 1:])\n\n return grid[:y] + (new_toprow, new_botrow) + grid[y + 2:]",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def swap_right(grid, y, x):\n symbol = grid[y][x + 1]\n\n # creates a new tuple for the changed row\n new_row = grid[y][:x] + (symbol,) + ('*',)\n\n # adds the space after the swapped piece for the changed row\n if x < len(grid[y]) - 2:\n new_row += tuple(grid[y][x + 2:])\n\n return grid[:y] + (new_row,) + grid[y+1:]",
"def switch(self, x1, y1, x2, y2):\n # both positions should not be empty\n assert (self.is_empty(x1, y1) is not True) or (self.is_empty(x2, y2) is not True)\n # x1,y1 is empty\n if self.is_empty(x1, y1):\n self.grid[y1][x1] = self.grid[y2][x2]\n self.cells[self.grid[y2][x2]].x = x1\n self.cells[self.grid[y2][x2]].y = y1\n self.grid[y2][x2] = ' '\n self.update_cost(self.grid[y1][x1])\n # x2,y2 is empty\n elif self.is_empty(x2, y2):\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = ' '\n self.update_cost(self.grid[y2][x2])\n else:\n n = self.grid[y2][x2]\n self.grid[y2][x2] = self.grid[y1][x1]\n self.cells[self.grid[y1][x1]].x = x2\n self.cells[self.grid[y1][x1]].y = y2\n self.grid[y1][x1] = n\n self.cells[n].x = x1\n self.cells[n].y = y1\n self.update_cost(self.grid[y1][x1])\n self.update_cost(self.grid[y2][x2])",
"def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)",
"def swap_left(self, index):\n grid = self.from_grid\n new_grid = []\n for row in grid:\n new_grid.append(list(row))\n \n new_grid[index[0]][index[1]], new_grid[index[0]][index[1] -1] = \\\n new_grid[index[0]][index[1] -1], new_grid[index[0]][\n index[1]]\n for i in range(len(grid)):\n new_grid[i] = tuple(new_grid[i])\n \n return MNPuzzle(tuple(new_grid), self.to_grid)",
"def __swap(self, x1, y1, x2, y2):\n temp = self.puzzle.copy()\n temp[x1, y1] = temp[x2, y2]\n temp[x2, y2] = self.puzzle[x1, y1]\n return temp",
"def _swap(self, x1, y1, x2, y2):\n puzzle_copy = [list(row) for row in self.position] # copy the puzzle\n puzzle_copy[x1][y1], puzzle_copy[x2][y2] = puzzle_copy[x2][y2], puzzle_copy[x1][y1]\n\n return puzzle_copy",
"def remove(self, xcord, ycord, g_board):\n for i in range(xcord, xcord + 2):\n for j in range(ycord, ycord + self.size):\n g_board[i][j] = ' '",
"def put_piece(self, x: int, y: int):\n if self.game_board.board_values[x, y] != 0:\n print(x, y)\n raise exceptions.NonEmptySlotError(\"You must select a empty slot!\")\n\n self.game_board.put_piece(x, y, self.current_piece)",
"def move_left(self):\r\n\r\n counter = 0\r\n for y in range(1, self._col):\r\n for x in reversed(range(self._row)):\r\n if '[' in self._board[x][y] and self._board[x][y-1] == ' ':\r\n counter += 1\r\n elif '|' in self._board[x][y] and self._board[x][y-1] == ' ':\r\n counter += 1\r\n if counter == 3:\r\n\r\n for y in range(1, self._col):\r\n for x in reversed(range(self._row)):\r\n if '[' in self._board[x][y] and self._board[x][y - 1] == ' ':\r\n self._board[x][y-1] = self._board[x][y]\r\n self._board[x][y] = ' '\r\n elif '|' in self._board[x][y] and self._board[x][y-1] == ' ':\r\n self._board[x][y-1] = self._board[x][y]\r\n self._board[x][y] = ' '\r\n\r\n return self._board",
"def push_left (grid):\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0\r\n \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j]==grid[i][j+1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j+1]=0 \r\n \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0",
"def shade_neighbours(x: int, y: int) -> None:\r\n if x > 0:\r\n safeboard[x-1, y] = 0\r\n if x < shape-1:\r\n safeboard[x+1, y] = 0\r\n if y > 0:\r\n safeboard[x, y-1] = 0\r\n if y < shape-1:\r\n safeboard[x, y+1] = 0\r\n safeboard[x, y] = 0",
"def push_left (grid):\r\n \r\n #moves values left\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0\r\n \r\n \r\n #moves values left\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0 \r\n \r\n \r\n #checks for similar values and combines\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==grid[row][column+1]:\r\n grid[row][column]=2*grid[row][column]\r\n grid[row][column+1]=0\r\n \r\n #moves remaining values left \r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0",
"def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False",
"def solve(self, board) -> None:\n x_length = len(board)\n if x_length == 0: \n return\n\n y_length = len(board[0])\n confirmed = set()\n dfs = []\n for i in range(x_length):\n if board[i][0] == 'O':\n board[i][0] = 'temp'\n dfs.append((i, 0))\n if board[i][y_length - 1] == 'O':\n board[i][y_length - 1] = 'temp'\n dfs.append((i, y_length - 1))\n for j in range(y_length):\n if board[0][j] == 'O':\n board[0][j] = 'temp'\n dfs.append((0, j))\n if board[x_length - 1][j] == 'O':\n board[x_length - 1][j] = 'temp'\n dfs.append((x_length - 1, j))\n while dfs:\n i, j = dfs.pop()\n confirmed.add((i, j))\n if i+1 < x_length and board[i+1][j] == 'O':\n board[i+1][j] = 'temp'\n dfs.append((i + 1, j))\n if i > 0 and board[i-1][j] == 'O':\n board[i-1][j] = 'temp'\n dfs.append((i-1, j))\n if j+1 < y_length and board[i][j+1] == 'O':\n board[i][j+1] = 'temp'\n dfs.append((i, j + 1))\n if j > 0 and board[i][j-1] == 'O':\n board[i][j-1] = 'temp'\n dfs.append((i, j-1))\n for i in range(x_length):\n for j in range(y_length):\n if (i, j) in confirmed:\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n return",
"def wrap(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'wrap' action, then exit the function\n if not self.can_move_wrap(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile to the other side of the grid\n else:\n # We need to find the opposite position of the '0' tile\n index_to_swap = index\n\n # If the '0' tile is in the top-left corner, then we need to switch it with the tile in the top-right corner\n if index == 0:\n index_to_swap = self.puzzle_width - 1\n\n # If the '0' tile is in the top-right corner, then we need to switch it with the tile in the top-left corner\n elif index == self.puzzle_width - 1:\n index_to_swap = 0\n\n # If the tile is in the bottom-left corner, then we need to switch with the tile in the bottom-right corner\n elif index == self.puzzle_length - self.puzzle_width:\n index_to_swap = self.puzzle_length - 1\n\n # If the tile is in the bottom-right corner, then we need to switch with the tile in the bottom-left corner\n elif index == self.puzzle_length - 1:\n index_to_swap = self.puzzle_length - self.puzzle_width\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_wrap_move, tile_that_was_swapped",
"def realign(self, x, dx, y, dy, ignore=set()):\n return ['', '', '']",
"def respawn(x,y):\n\t\tpos = Board.prev_j+1\n\t\twhile pos<y:\n\t\t\tif (Board.board[x][pos]==' ' or Board.board[x][pos]=='.') and Board.board[x+1][pos]!='-':\n\t\t\t\tBoard.board[x][pos]='M'\n\t\t\t\tbreak\n\t\t\tpos+=1",
"def push_left (grid):\n #moves the block if there is a 0 value\n for i in range(3):\n for j in range(1,4):\n for k in range(4):\n if grid[k][j-1]==0 or grid[k][j-1]==\" \":\n grid[k][j-1] = grid[k][j]\n grid[k][j]= 0\n #checks if adjacent blocks have the same values and adds them\n for i in range(1,4):\n for j in range(4):\n if grid[j][i-1]==grid[j][i]:\n grid[j][i-1]+=grid[j][i]\n grid[j][i]= 0 \n #moves the rest of the grid up\n for i in range(1,4):\n for j in range(4):\n if grid[j][i-1]== 0:\n grid[j][i-1] = grid[j][i]\n grid[j][i] = 0\n #if there is a value in the position\n return grid",
"def swap(self, direction):\n directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),}\n new_row = self.__blank_box[0] + directions[direction][0]\n new_col = self.__blank_box[1] + directions[direction][1]\n new_position = self.__get_box((new_row*self.__length)+new_col)\n self.__board[self.__blank_box[0]][self.__blank_box[1]] \\\n = new_position\n self.__board[new_row][new_col] = None\n self.__blank_box = (new_row, new_col)\n self.__set_possibilities()\n self.__previous_move = direction",
"def extensions(self):\n def check_empty_space(gridcopy):\n \"\"\"\n Return the place of the empty space.\n\n @type gridcopy: tuple[tuple[str]]\n @rtype: tuple\n\n # >>> grid = ((\"*\", \"2\", \"3\"), (\"4\", \"5\", \"6\"))\n # >>> check_empty_space(grid)\n # (0, 0)\n # >>> grid = ((\"1\", \"2\", \"3\"), (\"4\", \"5\", \"6\"), (\"7\" , \"8\" , \"*\"))\n # >>> check_empty_space(grid)\n # (2, 2)\n \"\"\"\n for i in range(len(gridcopy)):\n if \"*\" in gridcopy[i]:\n return i, gridcopy[i].index(\"*\")\n # Raise Error if there is no empty space in the puzzle.\n return AssertionError, \"No empty space in the puzzle.\"\n\n def tuple_to_list(tup):\n \"\"\"\n Return a list which was originally tuple.\n\n @type tup: tuple\n @rtype: list[str]\n \"\"\"\n return [element for element in tup]\n\n def shift_right_left(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n # Extract the specific row to change.\n current_row = gridcopy[row_num]\n # Change the current_row to list in order to mutate.\n current_row_lst = tuple_to_list(current_row)\n if location[1] != 0:\n # Going left!\n # (\"5\", \"*\", \"6\") to (\"*\", \"5\", \"6\")\n current_row_lst[column_num] = current_row_lst[column_num - 1]\n current_row_lst[column_num - 1] = \"*\"\n # Switch back to tuple\n left_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = left_altered\n result.append(tuple(board_lst))\n if location[1] != self.m - 1:\n # Going right!\n # (\"5\", \"*\", \"6\") to (\"5\", \"6\", \"*\")\n # Reset the values to swap right.\n current_row = gridcopy[row_num]\n current_row_lst = tuple_to_list(current_row)\n current_row_lst[column_num] = current_row_lst[column_num + 1]\n current_row_lst[column_num + 1] = \"*\"\n # Switch back to tuple\n right_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = right_altered\n result.append(tuple(board_lst))\n return result\n\n def shift_down_right(gridcopy, row_num, column_num):\n \"\"\"\n Return the list of affected grid. 
If * cannot move to the specific\n place, it returns an empty list\n\n @type gridcopy: tuple[tuple[str]]\n @type row_num: int\n @type column_num: int\n @rtype: list[tuple[tuple[str]]]\n \"\"\"\n result = []\n if location[0] != 0:\n current_row = gridcopy[location[0]]\n upper_row = gridcopy[location[0] - 1]\n current_row_lst = tuple_to_list(current_row)\n upper_row_lst = tuple_to_list(upper_row)\n current_row_lst[column_num] = upper_row_lst[column_num]\n upper_row_lst[column_num] = \"*\"\n current_row, upper_row = tuple(current_row_lst), \\\n tuple(upper_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = current_row\n board_lst[row_num - 1] = upper_row\n upper_altered = tuple(board_lst)\n result.append(upper_altered)\n if location[0] != self.n - 1:\n upper_row = gridcopy[location[0] + 1]\n lower_row = gridcopy[location[0]]\n upper_lst = tuple_to_list(upper_row)\n lower_lst = tuple_to_list(lower_row)\n lower_lst[location[1]] = upper_lst[location[1]]\n upper_lst[location[1]] = \"*\"\n upper_row, lower_row = tuple(upper_lst), tuple(lower_lst)\n big_lst = tuple_to_list(gridcopy)\n big_lst[location[0]] = lower_row\n big_lst[location[0] + 1] = upper_row\n changed = tuple(big_lst)\n result.append(changed)\n return result\n\n grid = self.from_grid\n # Location is the tuple indicator of location of the empty space.\n # (Row, Column)\n location = check_empty_space(grid)\n row = location[0]\n column = location[1]\n possibilities = shift_right_left(grid, row, column) +\\\n shift_down_right(grid, row, column)\n return [MNPuzzle(x, self.to_grid) for x in possibilities]",
"def insert_word_horizontally(grid, word, x, y):\n # check if the word fits in this space\n for i in range(len(word)):\n grid_char = grid[y][x + i]\n if grid_char is not None and grid_char != word[i]:\n return False # the word doesn't fit into this space\n\n # insert the word\n for i in range(len(word)):\n grid[y][x + i] = word[i]\n\n return True",
"def push_left (grid):\r\n \r\n for i in range(4):\r\n row = grid[i]\r\n \r\n if row == [0, 0 ,0 ,0]:\r\n continue\r\n for k in range(4):\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0\r\n for l in range(1, 4):\r\n if row[l-1] == row[l]:\r\n row[l-1] = row[l]*2\r\n row[l] = 0\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0 \r\n grid[i] = row\r\n return grid",
"def push_right (grid):\r\n \r\n #moves values right\r\n for row in range(4):\r\n for column in range(3,0,-1):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column-1]\r\n grid[row][column-1]=0\r\n \r\n \r\n #moves values right\r\n for row in range(4):\r\n for column in range(3,0,-1):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column-1]\r\n grid[row][column-1]=0 \r\n \r\n \r\n #checks for similar values and combine\r\n for row in range(4):\r\n for column in range(2,-1,-1):\r\n if grid[row][column]==grid[row][column+1]:\r\n grid[row][column+1]=2*grid[row][column+1]\r\n grid[row][column]=0\r\n \r\n \r\n #moves remaining values right \r\n for row in range(4):\r\n for column in range(3,0,-1):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column-1]\r\n grid[row][column-1]=0",
"def push_right (grid): \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j+1]==0: \r\n grid[i][j+1]=grid[i][j] \r\n grid[i][j]=0\r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j]==grid[i][j-1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j-1]=0\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j+1]==0: \r\n grid[i][j+1]=grid[i][j] \r\n grid[i][j]=0",
"def grid2alg(grid_x=None, grid_y=None):\n return (\n chr(0x61 + grid_x) if grid_x is not None else '',\n chr(7 - grid_y + 0x31) if grid_y is not None else '',\n )",
"def setBlank(self, pos):\n self.tiles[-1] = pos",
"def __erase(self, x: int, y: int) -> None:\n self.__maze[x, y] = 0"
] | [
"0.72094786",
"0.68437946",
"0.6833086",
"0.65959716",
"0.61538726",
"0.60560024",
"0.5920952",
"0.5818987",
"0.5784264",
"0.5751945",
"0.57293934",
"0.57154316",
"0.57044053",
"0.56647724",
"0.56304044",
"0.56044596",
"0.555539",
"0.5541564",
"0.5538938",
"0.55242604",
"0.55162656",
"0.55042094",
"0.55012727",
"0.5493339",
"0.5474775",
"0.54508984",
"0.5443035",
"0.54378194",
"0.5421747",
"0.5420665"
] | 0.7223119 | 0 |
Construct agg_endog on dept_store level | def construct_agg_endog(self):
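    # aggregate endog to the dept_id x store_id level; ratio_df holds the ratio information and agg_idx used below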
agg_endog, ratio_df = construct_grouped_ts(self.sales_train, self.endog, agg_1='dept_id', agg_2='store_id', drop_inactive=True, return_ratio=True)
agg_sales_train = self.sales_train.groupby(['dept_id', 'store_id']).size().reset_index() #indicator for dept_store_id
agg_idx = ratio_df.agg_idx
self.agg_endog = agg_endog.copy()
self.ratio_df = ratio_df.copy()
self.agg_sales_train = agg_sales_train.copy()
self.agg_idx = agg_idx.copy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_aggregate_table(agg: str, dataset: ObservatoryDataset) -> List[Dict]:\n\n data = []\n repos = []\n for paper in dataset.papers:\n for author in paper.authors:\n inst = author.institution\n at = paper.access_type\n oa_coki = paper.oa_coki\n\n # Choose id and name\n if agg == \"country\":\n id = inst.country_code\n name = inst.country\n elif agg == \"institution\":\n id = inst.ror_id\n name = inst.name\n else:\n raise ValueError(f\"make_aggregate_table: agg type unknown: {agg}\")\n\n # Add repository info\n for repo in paper.repositories:\n if paper.in_unpaywall:\n repos.append(\n {\n \"paper_id\": paper.id,\n \"agg_id\": id,\n \"time_period\": paper.published_date.year,\n \"name\": repo.name,\n \"name_lower\": repo.name.lower(),\n \"endpoint_id\": repo.endpoint_id,\n \"pmh_domain\": repo.pmh_domain,\n \"url_domain\": repo.url_domain,\n \"category\": repo.category,\n \"ror_id\": repo.ror_id,\n \"total_outputs\": 1,\n }\n )\n\n data.append(\n {\n \"doi\": paper.doi,\n \"id\": id,\n \"time_period\": paper.published_date.year,\n \"name\": name,\n \"country\": inst.country,\n \"country_code\": inst.country_code,\n \"country_code_2\": inst.country_code_2,\n \"region\": inst.region,\n \"subregion\": inst.subregion,\n \"coordinates\": None,\n \"total_outputs\": 1,\n # Access Types\n \"oa\": at.oa,\n \"green\": at.green,\n \"gold\": at.gold,\n \"gold_doaj\": at.gold_doaj,\n \"hybrid\": at.hybrid,\n \"bronze\": at.bronze,\n \"green_only\": at.green_only,\n \"black\": at.black,\n # COKI Open Access Types\n \"open\": oa_coki.open,\n \"closed\": oa_coki.closed,\n \"publisher\": oa_coki.publisher,\n \"other_platform\": oa_coki.other_platform,\n \"publisher_only\": oa_coki.publisher_only,\n \"both\": oa_coki.both,\n \"other_platform_only\": oa_coki.other_platform_only,\n # Publisher Open categories\n \"publisher_categories_oa_journal\": oa_coki.publisher_categories.oa_journal,\n \"publisher_categories_hybrid\": oa_coki.publisher_categories.hybrid,\n \"publisher_categories_no_guarantees\": oa_coki.publisher_categories.no_guarantees,\n # Other Platform categories\n \"publisher_categories_preprint\": oa_coki.other_platform_categories.preprint,\n \"publisher_categories_domain\": oa_coki.other_platform_categories.domain,\n \"publisher_categories_institution\": oa_coki.other_platform_categories.institution,\n \"publisher_categories_public\": oa_coki.other_platform_categories.public,\n \"publisher_categories_aggregator\": oa_coki.other_platform_categories.aggregator,\n \"publisher_categories_other_internet\": oa_coki.other_platform_categories.other_internet,\n \"publisher_categories_unknown\": oa_coki.other_platform_categories.unknown,\n }\n )\n\n # Repos\n df_repos = pd.DataFrame(repos)\n df_repos.drop_duplicates(inplace=True)\n agg = {\n \"agg_id\": \"first\",\n \"time_period\": \"first\",\n \"name\": \"first\",\n \"name_lower\": \"first\",\n \"endpoint_id\": \"first\",\n \"pmh_domain\": \"first\",\n \"url_domain\": \"first\",\n \"category\": \"first\",\n \"ror_id\": \"first\",\n \"total_outputs\": \"sum\",\n }\n df_repos = df_repos.groupby([\"agg_id\", \"name\", \"time_period\"], as_index=False).agg(agg)\n df_repos.sort_values(\n by=[\"agg_id\", \"time_period\", \"total_outputs\", \"name_lower\"], ascending=[True, False, False, True], inplace=True\n )\n\n # Aggregate info\n df = pd.DataFrame(data)\n df.drop_duplicates(inplace=True)\n agg = {\n \"id\": \"first\",\n \"time_period\": \"first\",\n \"name\": \"first\",\n \"country\": \"first\",\n \"country_code\": \"first\",\n \"country_code_2\": 
\"first\",\n \"region\": \"first\",\n \"subregion\": \"first\",\n \"coordinates\": \"first\",\n \"total_outputs\": \"sum\",\n # Access types\n \"oa\": \"sum\",\n \"green\": \"sum\",\n \"gold\": \"sum\",\n \"gold_doaj\": \"sum\",\n \"hybrid\": \"sum\",\n \"bronze\": \"sum\",\n \"green_only\": \"sum\",\n \"black\": \"sum\",\n # COKI OA types\n \"open\": \"sum\",\n \"closed\": \"sum\",\n \"publisher\": \"sum\",\n \"other_platform\": \"sum\",\n \"publisher_only\": \"sum\",\n \"both\": \"sum\",\n \"other_platform_only\": \"sum\",\n # Publisher Open categories\n \"publisher_categories_oa_journal\": \"sum\",\n \"publisher_categories_hybrid\": \"sum\",\n \"publisher_categories_no_guarantees\": \"sum\",\n # Other Platform categories\n \"publisher_categories_preprint\": \"sum\",\n \"publisher_categories_domain\": \"sum\",\n \"publisher_categories_institution\": \"sum\",\n \"publisher_categories_public\": \"sum\",\n \"publisher_categories_aggregator\": \"sum\",\n \"publisher_categories_other_internet\": \"sum\",\n \"publisher_categories_unknown\": \"sum\",\n }\n df = df.groupby([\"id\", \"time_period\"], as_index=False).agg(agg).sort_values(by=[\"id\", \"time_period\"])\n\n records = []\n for i, row in df.iterrows():\n total_outputs = row[\"total_outputs\"]\n\n # Access types\n oa = row[\"oa\"]\n green = row[\"green\"]\n gold = row[\"gold\"]\n gold_doaj = row[\"gold_doaj\"]\n hybrid = row[\"hybrid\"]\n bronze = row[\"bronze\"]\n green_only = row[\"green_only\"]\n black = row[\"black\"]\n\n # COKI access types\n open = row[\"open\"]\n closed = row[\"closed\"]\n publisher = row[\"publisher\"]\n other_platform = row[\"other_platform\"]\n publisher_only = row[\"publisher_only\"]\n both = row[\"both\"]\n other_platform_only = row[\"other_platform_only\"]\n\n # Publisher Open\n publisher_categories_oa_journal = row[\"publisher_categories_oa_journal\"]\n publisher_categories_hybrid = row[\"publisher_categories_hybrid\"]\n publisher_categories_no_guarantees = row[\"publisher_categories_no_guarantees\"]\n\n # Other Platform categories\n publisher_categories_preprint = row[\"publisher_categories_preprint\"]\n publisher_categories_domain = row[\"publisher_categories_domain\"]\n publisher_categories_institution = row[\"publisher_categories_institution\"]\n publisher_categories_public = row[\"publisher_categories_public\"]\n publisher_categories_aggregator = row[\"publisher_categories_aggregator\"]\n publisher_categories_other_internet = row[\"publisher_categories_other_internet\"]\n publisher_categories_unknown = row[\"publisher_categories_unknown\"]\n\n # Get repositories for year and id\n id = row[\"id\"]\n time_period = row[\"time_period\"]\n df_repos_subset = df_repos[(df_repos[\"agg_id\"] == id) & (df_repos[\"time_period\"] == time_period)]\n repositories = []\n for j, repo_row in df_repos_subset.iterrows():\n ror_id = repo_row[\"ror_id\"]\n home_repo = id == ror_id\n repositories.append(\n {\n \"id\": repo_row[\"name\"],\n \"total_outputs\": repo_row[\"total_outputs\"],\n \"category\": repo_row[\"category\"],\n \"home_repo\": home_repo,\n }\n )\n\n # fmt: off\n records.append(\n {\n \"id\": id,\n \"time_period\": row[\"time_period\"],\n \"name\": row[\"name\"],\n \"country\": row[\"country\"],\n \"country_code\": row[\"country_code\"],\n \"country_code_2\": row[\"country_code_2\"],\n \"region\": row[\"region\"],\n \"subregion\": row[\"subregion\"],\n \"coordinates\": row[\"coordinates\"],\n \"total_outputs\": total_outputs,\n \"coki\": {\n \"oa\": {\n \"color\": {\n \"oa\": {\"total_outputs\": oa, 
\"percent\": calc_percent(oa, total_outputs)},\n \"green\": {\"total_outputs\": green, \"percent\": calc_percent(green, total_outputs)},\n \"gold\": {\"total_outputs\": gold, \"percent\": calc_percent(gold, total_outputs)},\n \"gold_doaj\": {\"total_outputs\": gold_doaj, \"percent\": calc_percent(gold_doaj, total_outputs)},\n \"hybrid\": {\"total_outputs\": hybrid, \"percent\": calc_percent(hybrid, total_outputs)},\n \"bronze\": {\"total_outputs\": bronze, \"percent\": calc_percent(bronze, total_outputs)},\n \"green_only\": {\"total_outputs\": green_only, \"percent\": calc_percent(green_only, total_outputs)},\n \"black\": {\"total_outputs\": black, \"percent\": calc_percent(black, total_outputs)},\n },\n \"coki\": {\n \"open\": {\"total\": open, \"percent\": calc_percent(open, total_outputs)},\n \"closed\": {\"total\": closed, \"percent\": calc_percent(closed, total_outputs)},\n \"publisher\": {\"total\": publisher, \"percent\": calc_percent(publisher, total_outputs)},\n \"other_platform\": {\"total\": other_platform, \"percent\": calc_percent(other_platform, total_outputs)},\n \"publisher_only\": {\"total\": publisher_only, \"percent\": calc_percent(publisher_only, total_outputs)},\n \"both\": {\"total\": both, \"percent\": calc_percent(both, total_outputs)},\n \"other_platform_only\": {\"total\": other_platform_only, \"percent\": calc_percent(other_platform_only, total_outputs)},\n \"publisher_categories\": {\n \"oa_journal\": {\"total\": publisher_categories_oa_journal, \"percent\": calc_percent(publisher_categories_oa_journal, publisher)},\n \"hybrid\": {\"total\": publisher_categories_hybrid, \"percent\": calc_percent(publisher_categories_hybrid, publisher)},\n \"no_guarantees\": {\"total\": publisher_categories_no_guarantees, \"percent\": calc_percent(publisher_categories_no_guarantees, publisher)}\n },\n \"other_platform_categories\": {\n \"preprint\": {\"total\": publisher_categories_preprint, \"percent\": calc_percent(publisher_categories_preprint, other_platform)},\n \"domain\": {\"total\": publisher_categories_domain, \"percent\": calc_percent(publisher_categories_domain, other_platform)},\n \"institution\": {\"total\": publisher_categories_institution, \"percent\": calc_percent(publisher_categories_institution, other_platform)},\n \"public\": {\"total\": publisher_categories_public, \"percent\": calc_percent(publisher_categories_public, other_platform)},\n \"aggregator\": {\"total\": publisher_categories_aggregator, \"percent\": calc_percent(publisher_categories_aggregator, other_platform)},\n \"other_internet\": {\"total\": publisher_categories_other_internet, \"percent\": calc_percent(publisher_categories_other_internet, other_platform)},\n \"unknown\": {\"total\": publisher_categories_unknown, \"percent\": calc_percent(publisher_categories_unknown, other_platform)},\n },\n }\n },\n \"repositories\": repositories\n },\n \"citations\": {},\n \"output_types\": [],\n \"disciplines\": {},\n \"funders\": [],\n \"members\": [],\n \"publishers\": [],\n \"journals\": [],\n \"events\": [],\n }\n )\n # fmt: on\n\n return records",
"def _aggregation_target(self):\n ...",
"def envelope_aggr(self):\n return GeoSeries(arctern.ST_Envelope_Aggr(self))",
"def add_aggregators(self, stat_agg):\n stat_agg.add_aggregator(self.key_precision, '{:05.4f}') \n stat_agg.add_aggregator(self.key_precision+'_std', '{:05.4f}')\n stat_agg.add_aggregator(self.key_recall, '{:05.4f}') \n stat_agg.add_aggregator(self.key_recall+'_std', '{:05.4f}')\n stat_agg.add_aggregator(self.key_f1score, '{:05.4f}') \n stat_agg.add_aggregator(self.key_f1score+'_std', '{:05.4f}')",
"def _agency_agg_key(agency_type, agency_tier, record: dict) -> Optional[str]:\n if record[f\"{agency_type}_{agency_tier}_agency_name\"] is None:\n return None\n result = {\"name\": record[f\"{agency_type}_{agency_tier}_agency_name\"]}\n if f\"{agency_type}_{agency_tier}_agency_abbreviation\" in record:\n result[\"abbreviation\"] = record[f\"{agency_type}_{agency_tier}_agency_abbreviation\"]\n if f\"{agency_type}_{agency_tier}_agency_code\" in record:\n result[\"code\"] = record[f\"{agency_type}_{agency_tier}_agency_code\"]\n result[\"id\"] = record[f\"{agency_type}_toptier_agency_id\"]\n return json.dumps(result)",
"def build_agg_data(self, data, total_subscribers, agg_type, market=None, year_filter=None):\n\n if year_filter:\n df = data[data.year == year_filter]\n else:\n df = data\n\n if market:\n df = df[df.market == market]\n\n agg_data = df.groupby([agg_type, 'year']).agg(\n {'new_subscriptions': sum,\n 'self_install': sum,\n 'professional_install': sum,\n 'disconnects': sum,\n 'post_install_returns': sum,\n 'total_disconnects': sum})\n\n agg_data['net_gain'] = agg_data.new_subscriptions - agg_data.total_disconnects\n\n merged_agg_data = agg_data.reset_index().merge(total_subscribers, on=[agg_type, 'year']).sort(['year',agg_type])\n merged_agg_data['beginning_subs'] = merged_agg_data.total_subscribers.shift(1)\n\n\n end_df = merged_agg_data.set_index(['year', agg_type]).sort_index().transpose().reindex(['beginning_subs', 'new_subscriptions', 'self_install', 'professional_install',\\\n 'total_disconnects', 'post_install_returns', 'disconnects',\\\n 'net_gain', 'total_subscribers']).rename(index={\n 'beginning_subs':'Beginning Subscribers',\n 'new_subscriptions':'Total Connects',\n 'self_install':'Self Installs',\n 'professional_install':'Pro Installs',\n 'total_disconnects':'Total Disconnects',\n 'post_install_returns':'Post Install Returns',\n 'disconnects':'Disconnects',\n 'net_gain':'Net Gain',\n 'total_subscribers':'Ending Subs'\n })\n\n return end_df",
"def auto_agg(sco_type, prop, col_type):\n\n # Don't aggregate certain columns; ignore them\n last = get_last(prop)\n if last in ['x_root', 'x_contained_by_ref', 'type', 'id']:\n return None\n\n if prop == 'number_observed':\n return 'SUM(\"number_observed\") AS \"number_observed\"'\n elif prop in ['first_observed', 'start']:\n return f'MIN(\"{prop}\") AS \"{prop}\"'\n elif prop in ['last_observed', 'end']:\n return f'MAX(\"{prop}\") AS \"{prop}\"'\n\n if ((sco_type == 'network-traffic' and prop.endswith('_port'))\n or (sco_type == 'process' and prop.endswith('pid'))):\n agg = f'COUNT(DISTINCT \"{prop}\")'\n alias = f'\"unique_{prop}\"'\n elif col_type.lower() in ['integer', 'bigint']:\n agg = f'AVG(\"{prop}\")'\n alias = f'\"mean_{prop}\"'\n else:\n agg = f'COUNT(DISTINCT \"{prop}\")'\n alias = f'\"unique_{prop}\"'\n\n if len(alias) > 63:\n # PostgreSQL has a limit of 63 chars per identifier\n return None\n\n return f'{agg} AS {alias}'",
"def convert_entity_aggregations(result, output, args):\n output = convert_facets(result, output, args)\n aggs = result.get('aggregations', {})\n scoped = aggs.get('scoped', {})\n collections = scoped.get('collections', {}).get('collections', {})\n output['facets']['collections'] = convert_collections(collections)\n return output",
"def _rewrite_aggregate(self, node: [saldag.Aggregate, saldag.IndexAggregate]):\n\n in_group_cols = node.group_cols\n out_group_cols = node.out_rel.columns[:-1]\n for i in range(len(out_group_cols)):\n out_group_cols[i].coll_sets |= copy.deepcopy(in_group_cols[i].coll_sets)\n in_agg_col = node.agg_col\n out_agg_col = node.out_rel.columns[-1]\n out_agg_col.coll_sets |= copy.deepcopy(in_agg_col.coll_sets)",
"def init_agg(self, col):\n\t\traise NotImplementedError()",
"def last_agg(self, col):\n\t\traise NotImplementedError()",
"def generateAggregatedCsvData(self, context, obj, entities):\n raise NotImplementedError()",
"def aggregate_query(self):\n raise NotImplementedError",
"def aggregate(self, agpath):\n return data.Aggregate(self, agpath)",
"def _aggregate(modelclass, window_date, supply_point, base_supply_points, fields,\n additonal_query_params=None):\n additonal_query_params = additonal_query_params or {}\n additonal_query_params[\"date\"] = window_date\n return _aggregate_raw(modelclass, supply_point, base_supply_points, fields, \n additonal_query_params)",
"def aggregate(self, arg):\n return self.agg(arg)",
"def testAggregateDetailedCorrectly(self):\n\tscaler = pf.LinearScaler()\n\tQBp = pf.ProductQuoteBasis(base_price = 1.53, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(100, '1/gal'))\n\tesc = pf.NoEscalationEscalator()\n\tpr1 = pf.Product(name = 'gasoline', description = 'People', quote_basis = QBp, escalator = esc)\n\tpro1 = pf.Production(name = 'stream1', product = pr1, rate = uv.UnitVal(15000, 'gal/hr'), startup_discounter = None, init_date = dt.datetime(2015,01,01))\n\n\tQB = pf.VariableExpenseQuoteBasis(base_price = 0.062, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(100, '1/(kW*hr)'))\n\tvex1 = pf.VariableExpense(name = 'Electricity', description = 'Power consumption by plant', quote_basis = QB, production = pro1, rate = uv.UnitVal(1, 'kW*hr/gal'), escalator = esc)\n\n\tQB2 = pf.VariableExpenseQuoteBasis(base_price = 75, date = dt.datetime(2012,01,01), source= 'Tom Miles', scaler = scaler, size_basis = uv.UnitVal(1, '1/ton'))\n\tvex2 = pf.VariableExpense(name = 'Biomass', description = 'Biomass used by plant', quote_basis = QB2, production = pro1, rate = uv.UnitVal(1.0/150.0, 'ton/gal'), escalator = esc)\n\n\n\tdates = [dt.datetime(2012,01,31), dt.datetime(2013,01,31), dt.datetime(2020, 03, 31), dt.datetime(2021, 12,31)]\n \n\tcosts = pf.VariableCosts()\n costs.add_variable_expense(vex1)\n costs.add_variable_expense(vex2)\n\tcosts.detailed = True\n\tend_date = dt.datetime(2034,12,31)\n costs.build_vex_schedule(end_date)\n\t\n for d in dates:\n self.assertTrue((vex1.schedule['variable_consumption'] == costs.schedule['Electricity_variable_consumption']).all())\n\t self.assertTrue((vex1.schedule['variable_costs'] == costs.schedule['Electricity_variable_costs']).all())\n\t self.assertTrue((vex2.schedule['variable_consumption'] == costs.schedule['Biomass_variable_consumption']).all())\n\t self.assertTrue((vex2.schedule['variable_costs'] == costs.schedule['Biomass_variable_costs']).all())",
"def testAggregateCorrectly(self):\n\n\tscaler = pf.LinearScaler()\n\tQBp = pf.ProductQuoteBasis(base_price = 1.53, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(1, '1/gal'))\n\tesc = pf.NoEscalationEscalator()\n\tpr1 = pf.Product(name = 'gasoline', description = 'People', quote_basis = QBp, escalator = esc)\n\n\tpro1 = pf.Production(name = 'stream1', product = pr1, rate = uv.UnitVal(15000, 'gal/hr'), startup_discounter = None, init_date = dt.datetime(2012,01,01))\n\n\tQB = pf.VariableExpenseQuoteBasis(base_price = 0.062, date = dt.datetime(2012,01,01), source = \"P&T\", scaler = scaler, size_basis = uv.UnitVal(1, '1/(kW*hr)'))\n\tvex1 = pf.VariableExpense(name = 'Electricity', description = 'Power consumption by plant', quote_basis = QB, production = pro1, rate = uv.UnitVal(1, 'kW*hr/gal'), escalator = esc)\n\n\tQB2 = pf.VariableExpenseQuoteBasis(base_price = 75, date = dt.datetime(2012,01,01), source= 'Tom Miles', scaler = scaler, size_basis = uv.UnitVal(1, '1/ton'))\n\tvex2 = pf.VariableExpense(name = 'Biomass', description = 'Biomass used by plant', quote_basis = QB2, production = pro1, rate = uv.UnitVal(1.0/150.0, 'ton/gal'), escalator = esc)\n\n\tdates = [dt.datetime(2012,01,31), dt.datetime(2013,01,31), dt.datetime(2020, 03, 31), dt.datetime(2021, 12,31)]\n vals = [202320,202320,202320,202320]\n\tend_date = dt.datetime(2034,12,31)\n\tcosts = pf.VariableCosts()\n costs.add_variable_expense(vex1)\n costs.add_variable_expense(vex2)\n costs.build_vex_schedule(end_date)\n\t\n for d, v in zip(dates, vals):\n self.assertAlmostEqual(v, costs.schedule.loc[d, 'variable_costs'],4)",
"def aggregator():\n return Aggregator(\n agg_col=\"col_a\", values_col=\"col_b\", aggregates=[\"min\", \"max\", \"avg\", \"sum\"]\n )",
"def _congressional_agg_key(location_type, record: dict) -> Optional[str]:\n if record[f\"{location_type}_state_code\"] is None or record[f\"{location_type}_congressional_code\"] is None:\n return None\n return json.dumps(\n {\n \"country_code\": record[f\"{location_type}_country_code\"],\n \"state_code\": record[f\"{location_type}_state_code\"],\n \"state_fips\": record[f\"{location_type}_state_fips\"],\n \"congressional_code\": record[f\"{location_type}_congressional_code\"],\n \"population\": record[f\"{location_type}_congressional_population\"],\n }\n )",
"def get_admins_per_region():\n chart_data = {}\n try:\n pipe = [\n {'$match': {VAX_AREA_KEY: {'$not': {'$eq': 'ITA'}}}},\n {\n '$group': {\n '_id': f'${VAX_AREA_KEY}',\n 'first': {'$sum': f'${VAX_FIRST_DOSE_KEY}'},\n 'second': {'$sum': f'${VAX_SECOND_DOSE_KEY}'},\n 'booster': {'$sum': f'${VAX_BOOSTER_DOSE_KEY}'}\n }\n }\n ]\n cursor = vax_admins_summary_coll.aggregate(pipeline=pipe)\n data = list(cursor)\n df = pd.DataFrame(data)\n df['region'] = df['_id'].apply(lambda x: OD_TO_PC_MAP[x])\n pop_dict = get_region_pop_dict()\n df['population'] = df['region'].apply(lambda x: pop_dict[x])\n df['percentage_2nd'] = df['second'].div(df['population'])\n df['percentage_3rd'] = df['booster'].div(df['population'])\n df.sort_values(by=['population'], ascending=False, inplace=True)\n chart_data = {\n \"title\": gettext('Admins per region'),\n \"categories\": df['region'].values.tolist(),\n \"pop_dict\": pop_dict,\n \"first\": {\n 'name': gettext(\"First Dose\"),\n 'data': df['first'].values.tolist()\n },\n \"second\": {\n 'name': gettext(\"Second Dose\"),\n 'data': df['second'].values.tolist()\n },\n \"booster\": {\n 'name': gettext(\"Booster Dose\"),\n 'data': df['booster'].values.tolist()\n },\n \"population\": {\n 'name': gettext(\"Population\"),\n 'data': df['population'].values.tolist()\n }\n }\n app.logger.debug(f\"region df : \\n{df}\")\n except Exception as e:\n app.logger.error(f\"While getting region chart data: {e}\")\n return chart_data",
"def generateAggregatedCsvData(self, context, obj, entities):\n return sum([long(e.prop1.replace('-', ''), 16) for e in entities])",
"def get_age_chart_data(area=None):\n chart_data = {}\n vax_group = {\n '$group': {\n '_id': {\n VAX_AGE_KEY: f'${VAX_AGE_KEY}',\n VAX_AREA_KEY: f'${VAX_AREA_KEY}'\n },\n f'{VAX_FIRST_DOSE_KEY}': {'$sum': f'${VAX_FIRST_DOSE_KEY}'},\n f'{VAX_SECOND_DOSE_KEY}': {'$sum': f'${VAX_SECOND_DOSE_KEY}'},\n f'{VAX_BOOSTER_DOSE_KEY}': {'$sum': f'${VAX_BOOSTER_DOSE_KEY}'}\n }\n }\n vax_sort = {'$sort': {'_id': 1}}\n try:\n if area is not None:\n area = PC_TO_OD_MAP[area]\n vax_match = {'$match': {VAX_AREA_KEY: area}}\n vax_pipe = [vax_match, vax_group, vax_sort]\n else:\n vax_pipe = [vax_group, vax_sort]\n age_pop_dict = get_age_pop_dict(area)\n app.logger.debug(age_pop_dict)\n vax_cursor = vax_admins_coll.aggregate(pipeline=vax_pipe)\n pop_cursor = pop_coll.find()\n df_vax = pd.json_normalize(list(vax_cursor))\n df_pop = pd.json_normalize((list(pop_cursor)))\n out_df = df_pop.merge(\n df_vax,\n left_on=[VAX_AREA_KEY, VAX_AGE_KEY],\n right_on=['_id.' + VAX_AREA_KEY, '_id.' + VAX_AGE_KEY]\n )\n out_df = out_df.groupby('_id.' + VAX_AGE_KEY).sum()\n categories = df_vax[f'_id.{VAX_AGE_KEY}'].unique().tolist()\n chart_data = {\n \"title\": gettext('Admins per age'),\n \"yAxisTitle\": gettext('Counts'),\n \"categories\": categories,\n \"age_dict\": age_pop_dict,\n \"first\": {\n 'name': gettext(\"First Dose\"),\n 'data': out_df[VAX_FIRST_DOSE_KEY].tolist()\n },\n \"second\": {\n 'name': gettext(\"Second Dose\"),\n 'data': out_df[VAX_SECOND_DOSE_KEY].tolist()\n },\n \"booster\": {\n 'name': gettext(\"Booster Dose\"),\n 'data': out_df[VAX_BOOSTER_DOSE_KEY].tolist()\n },\n \"population\": {\n 'name': gettext(\"Population\"),\n 'data': out_df[OD_POP_KEY].tolist()\n }\n }\n except Exception as e:\n app.logger.error(f\"While getting age chart data: {e}\")\n return chart_data",
"def _aggregate_raw(modelclass, supply_point, base_supply_points, fields,\n additonal_query_params=None):\n additonal_query_params = additonal_query_params or {}\n # hack: remove test district users from national level\n if supply_point == get_country_sp():\n base_supply_points = base_supply_points.exclude(code='99')\n\n period_instance = get_or_create_singular_model(\n modelclass,\n supply_point=supply_point,\n **additonal_query_params\n )[0]\n children_qs = modelclass.objects.filter\\\n (supply_point__in=base_supply_points, **additonal_query_params)\n\n totals = children_qs.aggregate(*[Sum(f) for f in fields])\n [setattr(period_instance, f, totals[\"%s__sum\" % f] or 0) for f in fields]\n period_instance.save()\n return period_instance",
"def fetch_aggregation(self):\n return None",
"def __init__(self, *args, **kwargs):\n super(DateWindowEOCMeasure, self).__init__(*args, **kwargs)\n self.fields_to_group_by = ['bene_sk']",
"def agg_func(config,parent_child_node):\n parent_child_node = list(parent_child_node)\n parent_geocode = parent_child_node[0] \n # a list of the node objects\n nodes = list(list(parent_child_node)[1])\n \n #calculate the length of each of the geocodes (to determine which is the parent)\n geocode_lens = [len(node.geocode) for node in nodes]\n #the parent is the shortest geocode\n parent = nodes[np.argmin(geocode_lens)]\n \n #subset the children nodes\n children = nodes[:np.argmin(geocode_lens)] + nodes[np.argmin(geocode_lens)+1:]\n children = sorted(children, key=lambda geocode_data: int(geocode_data.geocode))\n child_geos = [child.geocode for child in children]\n \n parent.backup_solve = children[0].parent_backup_solve\n syn_agg = sparse.multiSparse(np.zeros(parent.syn.shape))\n \n for child in children:\n syn_agg = syn_agg + child.syn\n parent.syn = syn_agg\n \n return parent",
"def add_aggregation_data(self, payload):\n raise NotImplementedError()",
"def get_grouped_prod(all_customers_data, trans_column, prod_l_cat):\n return all_customers_data.select(trans_column, prod_l_cat)\\\n.groupBy(prod_l_cat).agg(F.countDistinct(trans_column).alias('hhds'))",
"def create_area_entery(conn,var, case, model, type_avg,area, keys,var_entery, avg_over_lev=False, pressure_coords='',\n to_lev='', at_lev=''):\n var_info= fetch_var_case(conn, model, case, var)\n lev_is_dim = bool(var_info['lev_is_dim'].values)\n id_name = make_area_mean_id(area, type_avg, var, case, model, bool(lev_is_dim), pressure_coords=bool(pressure_coords),\n to_lev=to_lev, at_lev=at_lev, avg_over_lev=bool(avg_over_lev))\n dict = {'var':var, 'case_name':case,'model':model, 'type_avg':type_avg,'model_case': model+' '+case,\n 'case_var':case+' '+var,'area':area}\n if pressure_coords!='':#isinstance(pressure_coords, bool):\n dict['pressure_coords'] = boolstr2int(pressure_coords)\n if bool(lev_is_dim):\n if isinstance(avg_over_lev, bool) or isinstance(avg_over_lev, int):\n dict['avg_over_lev'] = int(avg_over_lev)\n if isinstance(to_lev, float) and avg_over_lev:\n dict['to_lev'] = to_lev\n if isinstance(at_lev, float) and not avg_over_lev:\n dict['at_lev'] = at_lev\n\n keys = list(keys)\n var_entery= list(var_entery)\n for key in dict.keys():\n if key not in keys:\n keys.append(key)\n var_entery.append(dict[key])\n keys.append('var_case_model_avgtype_pressure_coords_to_lev')\n var_entery.append(id_name)\n key_str = '('\n val_str = '('#%s, '%id_name\n for i in np.arange(len(keys)):\n key_str= key_str+ keys[i] +', '\n val_str = val_str + '?,'\n key_str=key_str[:-2]+')'\n val_str= val_str[:-1]+')'\n\n sql = ''' INSERT INTO Area_means %s \n VALUES%s'''%(key_str, val_str)\n #print(sql)\n try:\n cur = conn.cursor()\n cur.execute(sql, tuple(var_entery))\n return cur.lastrowid\n except Error as e:\n #print(e)\n #print('Tries updating')\n out = update_area_table_entery(conn, keys, var_entery, id_name)\n return out#update_area_entery(conn, model,case,var,keys, var_entery)\n except Error as e:\n print(e)\n return"
] | [
"0.51928",
"0.519098",
"0.5178761",
"0.50921845",
"0.5083517",
"0.5013119",
"0.49784026",
"0.49634597",
"0.48751375",
"0.48575234",
"0.4815863",
"0.48117444",
"0.48115775",
"0.48054242",
"0.47820938",
"0.47588515",
"0.4725788",
"0.4705937",
"0.46681166",
"0.4654781",
"0.46298748",
"0.4628641",
"0.4609804",
"0.46075296",
"0.4606316",
"0.45643994",
"0.4564326",
"0.45533782",
"0.45529",
"0.4540003"
] | 0.7497369 | 0 |
Load encoder_exog at timestamps (exog_index) and dept_stores (exog_columns). encoder_exog has shape (n_ts, n_timestamp, n_features). Currently, no encoder exog is used at the dept_store level, so it is set to all zeros. | def load_encoder_exog(self, exog_index, exog_cols):
encoder_exog = np.zeros((exog_cols.shape[0], exog_index.shape[0], 1))
return(encoder_exog.astype(np.float32)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_decoder_exog(self, exog_index, exog_cols):\n decoder_exog = np.repeat(np.array(self.calendar_exog.loc[exog_index]).reshape((1,exog_index.shape[0],-1)), repeats=exog_cols.shape[0], axis=0)\n decoder_exog = np.concatenate([decoder_exog, np.array(self.snap_exog.loc[exog_index, exog_cols].T).reshape((exog_cols.shape[0], -1, 1))], axis=-1)\n decoder_exog = np.concatenate([decoder_exog, np.array(self.price_discount.loc[exog_index, exog_cols].T).reshape((exog_cols.shape[0], -1, 1))], axis=-1)\n \n #Append with id features\n id_features = self.dept_store_encoder.transform(self.agg_sales_train.loc[exog_cols, ['dept_id', 'store_id']])\n id_features = np.repeat(np.expand_dims(id_features, axis=1), repeats=exog_index.shape[0], axis=1)\n decoder_exog = np.concatenate([decoder_exog, id_features], axis=-1)\n \n return(decoder_exog.astype(np.float32))",
"def prepare_dataset_encoder(self):\n calendar, sales_train, prices_df = self.calendar, self.sales_train, self.prices_df\n agg_endog, agg_idx, agg_sales_train = self.agg_endog, self.agg_idx, self.agg_sales_train\n \n #Prepare exog dataset ---------------------------------------------------------------\n #Prepare calendar exog: event_type & wday on a date\n calendar_exog = pd.DataFrame(index=calendar.index)\n for event_type in ['Sporting', 'Cultural', 'National', 'Religious']:\n calendar_exog['is_{}'.format(event_type)] = np.where((calendar.loc[calendar_exog.index, ['event_type_1', 'event_type_2']] == event_type).any(axis=1), 1, 0)\n wday_encoder = OneHotEncoder(drop='first', sparse=False) #drop Sat.\n wday_df = pd.DataFrame(wday_encoder.fit_transform(calendar.loc[calendar_exog.index, ['wday']]), columns=['w7'] + ['w{}'.format(i) for i in range(1,6)])\n calendar_exog = pd.concat([calendar_exog, wday_df], axis=1)\n \n #Prepare snap_exog: if there is snap event on that date & dept_store ts\n snap_exog = pd.DataFrame(0., index=calendar.index, columns=agg_endog.columns)\n for idx in snap_exog.columns:\n state = sales_train[agg_idx == idx].state_id.unique()[0]\n snap_exog[idx] = calendar.loc[snap_exog.index, 'snap_{}'.format(state)]\n \n #Prepare price discount on that date & dept_store ts\n price_exog = pd.DataFrame(index=calendar.index, columns=agg_endog.columns) #mean price across item_store for a dept_store ts\n for idx in price_exog.columns:\n price_exog[idx] = prices_df.T.loc[agg_idx == idx].mean()\n price_discount = price_exog / price_exog.max() #normalized\n \n self.calendar_exog = calendar_exog\n self.snap_exog = snap_exog\n self.price_discount = price_discount\n \n #Prepare encoder ----------------------------------------------------------------------\n #Create encoder for dept_store_id\n dept_store_encoder = OneHotEncoder(drop='first', sparse=False).fit(agg_sales_train[['dept_id', 'store_id']])\n \n #Create encoder for event name\n calendar['event_name_1'].fillna('missing', inplace=True)\n event_encoder = LabelEncoder().fit(calendar['event_name_1'])\n \n self.dept_store_encoder = dept_store_encoder\n self.event_encoder = event_encoder",
"def training_ae(num_epochs, reduced_dim, file_index, save_model):\n normal, post_normal = read_in(file_index, 1, 3, 0.3)\n three, four, five, six = split(post_normal, 4)\n signal_shape = normal.shape[1:]\n batch_size = round(len(normal) * 0.15)\n\n encoder, decoder = build_model(reduced_dim)\n\n inp = Input(signal_shape)\n encode = encoder(inp)\n reconstruction = decoder(encode)\n\n autoencoder = Model(inp, reconstruction)\n opt = keras.optimizers.Adam(learning_rate=0.001)\n autoencoder.compile(optimizer=opt, loss='mse')\n\n autoencoder.fit(x=normal, y=normal, epochs=num_epochs, batch_size=batch_size)\n\n if save_model:\n # save out the model\n filename = 'Working_Data/CDAE_patient_' + str(file_index) + '_iter' + str(0) + '_model'\n autoencoder.save_weights(filename, save_format = \"tf\")\n print('Model saved for patient: ' + str(file_index))\n\n # using autoencoder to encode all of the patient data\n encoded = encoder.predict(three)\n reconstruction = decoder.predict(encoded)\n\n # save reconstruction and encoded files\n reconstruction_save = \"Working_Data/reconstructed_10hb_cae_\" + str(file_index) + \"_hour2_4\" + \".npy\"\n # encoded_save = \"Working_Data/encoded_10hb_cae_\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstruction)\n # np.save(encoded_save, encoded)",
"def train_ev_ea(self):\n # Set data loader.\n data_loader = self.data_loader\n \n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device) # noise vector z\n \n start_iters = 0\n\n # Start training.\n print('Start encoder_a and encoder_v training...')\n start_time = time.time()\n \n ev_ea_c_iters = self.ev_ea_c_iters\n c_pre_iters = self.c_pre_iters\n \n C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(ev_ea_c_iters))\n \n encoder_a_path = os.path.join(self.model_save_dir, '{}-encoder_a.ckpt'.format(ev_ea_c_iters))\n \n encoder_v_path = os.path.join(self.model_save_dir, '{}-encoder_v.ckpt'.format(ev_ea_c_iters))\n \n \n if os.path.exists(C_path):\n self.C.load_state_dict(torch.load(C_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(C_path))\n \n self.encoder_a.load_state_dict(torch.load(encoder_a_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(encoder_a_path))\n \n self.encoder_v.load_state_dict(torch.load(encoder_v_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(encoder_v_path))\n else:\n C_pre_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(c_pre_iters))\n if os.path.exists(C_pre_path):\n self.C.load_state_dict(torch.load(C_pre_path, map_location=lambda storage, loc: storage))\n print('Load model pretrained checkpoints from {}'.format(C_pre_path))\n else:\n for i in range(0, c_pre_iters):\n # Fetch real images, attributes and labels.\n x_real, wrong_images, attributes, _, label_org = data_loader.train.next_batch(self.batch_size,10)\n\n\n x_real = x_real.to(self.device) # Input images.\n attributes = attributes.to(self.device) # Input attributes\n label_org = label_org.to(self.device) # Labels for computing classification loss.\n \n ev_x = self.encoder_v(x_real)\n cls_x = self.C(ev_x.detach())\n # Classification loss from only images for C training\n c_loss_cls = self.classification_loss(cls_x, label_org) \n # Backward and optimize.\n self.c_optimizer.zero_grad()\n c_loss_cls.backward()\n self.c_optimizer.step()\n \n if (i+1) % self.log_step == 0:\n loss = {}\n loss['c_loss_cls'] = c_loss_cls.item()\n prec1, prec5 = accuracy(cls_x.data, label_org.data, topk=(1, 5))\n loss['prec1'] = prec1\n loss['prec5'] = prec5\n log = \"C pretraining iteration [{}/{}]\".format(i+1, c_pre_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n torch.save(self.C.state_dict(), C_pre_path)\n print('Saved model pretrained checkpoints into {}...'.format(C_pre_path))\n \n for i in range(c_pre_iters, ev_ea_c_iters):\n # Fetch real images, attributes and labels.\n x_real, wrong_images, attributes, _, label_org = data_loader.train.next_batch(self.batch_size,10)\n\n\n x_real = x_real.to(self.device) # Input images.\n attributes = attributes.to(self.device) # Input attributes\n label_org = label_org.to(self.device) # Labels for computing classification loss.\n \n\n # =================================================================================== #\n # Train the domain-specific features discriminator \n # =================================================================================== #\n \n noise.normal_(0, 1)\n # Compute embedding of both images and attributes\n ea_a = self.encoder_a(attributes, noise)\n ev_x = self.encoder_v(x_real)\n \n \n ev_x_real = self.D_s(ev_x, attributes)\n ds_loss_real = -torch.mean(ev_x_real)\n \n \n ea_a_fake = self.D_s(ea_a, 
attributes)\n ds_loss_fake = torch.mean(ea_a_fake)\n \n # Compute loss for gradient penalty.\n alpha = torch.rand(ev_x.size(0), 1).to(self.device)\n ebd_hat = (alpha * ev_x.data + (1 - alpha) * ea_a.data).requires_grad_(True)\n \n ebd_inter = self.D_s(ebd_hat, attributes)\n ds_loss_gp = self.gradient_penalty(ebd_inter, ebd_hat)\n \n ds_loss = ds_loss_real + ds_loss_fake + self.lambda_gp * ds_loss_gp #+ ds_loss_realw\n #self.reset_grad_eb()\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n\n ds_loss.backward()\n self.ds_optimizer.step()\n if (i+1) % self.n_critic == 0:\n # =================================================================================== #\n # Train the encoder_a and C \n # =================================================================================== #\n ev_x = self.encoder_v(x_real)\n ev_x_real = self.D_s(ev_x, attributes)\n ev_loss_real = torch.mean(ev_x_real)\n \n cls_x = self.C(ev_x)\n c_loss_cls = self.classification_loss(cls_x, label_org)\n\n # Backward and optimize.\n ev_c_loss = ev_loss_real + c_loss_cls\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n ev_c_loss.backward()\n self.ev_optimizer.step()\n \n # =================================================================================== #\n # Train the encoder_v #\n # =================================================================================== #\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes,noise)\n ea_a_fake = self.D_s(ea_a, attributes)\n ea_loss_fake = -torch.mean(ea_a_fake)\n \n cls_a = self.C(ea_a)\n ebn_loss_cls = self.classification_loss(cls_a, label_org)\n \n\n # Backward and optimize.\n ea_loss = ea_loss_fake + ebn_loss_cls\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n ea_loss.backward()\n self.ea_optimizer.step()\n \n # Logging.\n loss = {}\n \n loss['ds/ds_loss_real'] = ds_loss_real.item()\n loss['ds/ds_loss_fake'] = ds_loss_fake.item()\n loss['ds/ds_loss_gp'] = ds_loss_gp.item()\n \n # Print out training information.\n if (i+1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n prec1, prec5 = accuracy(cls_x.data, label_org.data, topk=(1, 5))\n loss['prec1'] = prec1\n loss['prec5'] = prec5\n prec1e, prec5e = accuracy(cls_a.data, label_org.data, topk=(1, 5))\n loss['prec1e'] = prec1e\n loss['prec5e'] = prec5e\n log = \"Encoder_a and Encoder_v Training Elapsed [{}], Iteration [{}/{}]\".format(et, i+1, ev_ea_c_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n\n \n # Save model checkpoints.\n if (i+1) % self.model_save_step == 0:\n C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(i+1))\n torch.save(self.C.state_dict(), C_path)\n print('Saved model checkpoints into {}...'.format(C_path))\n \n encoder_a_path = os.path.join(self.model_save_dir, '{}-encoder_a.ckpt'.format(i+1))\n torch.save(self.encoder_a.state_dict(), encoder_a_path)\n print('Saved model checkpoints into {}...'.format(encoder_a_path))\n \n encoder_v_path = os.path.join(self.model_save_dir, '{}-encoder_v.ckpt'.format(i+1))\n torch.save(self.encoder_v.state_dict(), encoder_v_path)\n print('Saved model checkpoints into {}...'.format(encoder_v_path))",
"def load_expt_gaps():\n path = os.path.join(DATA_DIR, \"bandgap-zhuo-4604.csv\")\n df = pd.read_csv(path, index_col=False)\n return df",
"def ohe_preprocessing_pipeline(encoder, ddf, categorical_columns_to_transform = [], datetime_columns_to_transform = []):\n from project.utils.preprocessing.ohe_ddf_transformer import TransformerOHE\n from project.utils.preprocessing.datetime_to_cat import add_datetime_cat\n \n ddf, new_categorical_columns = add_datetime_cat(ddf, datetime_columns_to_transform)\n categorical_columns_to_transform = categorical_columns_to_transform + new_categorical_columns\n\n # Get OHE for columns to transform\n transformer = TransformerOHE(ddf, encoder, categorical_columns_to_transform)\n ohe_ddf = transformer.fit_transform()\n \n # Select input columns, here we'll take all floats and ints and OHE columns we prepared\n original_float_columns = list(ddf.select_dtypes(['float']).columns)\n original_int_columns = list(ddf.select_dtypes(['int']).columns)\n original_input_columns = original_float_columns + original_int_columns\n ohe_columns = list(ohe_ddf.columns.values)\n\n # Remove old categories and update \n ddf = ddf[original_input_columns]\n ddf[ohe_columns] = ohe_ddf\n \n return ddf",
"def prepare_training_dataset(self, train_endog, w, sliding_freq=1):\n\n input_window, output_window = self.input_window, self.output_window\n timestamp = np.arange(input_window, train_endog.shape[0]-output_window+1, sliding_freq)\n\n X_endog_f, Y_endog_f, X_decoder_f, encoder_exog_f, decoder_exog_f, decoder_event_f, w_f = [], [], [], [], [], [], []\n for t in timestamp:\n endog_slide = train_endog.iloc[(t-input_window):(t+output_window), :].T.dropna().copy()\n endog_slide = endog_slide - np.array(endog_slide.iloc[:,0]).reshape((-1,1)) #remove the first obs as to remove trend\n sample_batch_index = np.arange(endog_slide.shape[0])\n\n X_endog = endog_slide.iloc[sample_batch_index, :input_window]\n Y_endog = endog_slide.iloc[sample_batch_index, input_window:]\n\n X_decoder = np.zeros((*Y_endog.shape, 1)) #decoder endog input, which is always 0 as training unconditionally\n\n encoder_exog = self.dataset_generator.load_encoder_exog(X_endog.columns, X_endog.index)\n decoder_exog = self.dataset_generator.load_decoder_exog(Y_endog.columns, Y_endog.index)\n decoder_event = self.dataset_generator.load_event_name(Y_endog.columns, Y_endog.index)\n\n X_endog_f.append(np.array(X_endog).reshape((-1, input_window, 1)))\n Y_endog_f.append(np.array(Y_endog).reshape((-1, output_window, 1)))\n X_decoder_f.append(X_decoder)\n encoder_exog_f.append(encoder_exog)\n decoder_exog_f.append(decoder_exog)\n decoder_event_f.append(decoder_event)\n w_f.append(np.array(w[X_endog.index]))\n \n X_endog_f = np.concatenate(X_endog_f, axis=0)\n Y_endog_f = np.concatenate(Y_endog_f, axis=0)\n X_decoder_f = np.concatenate(X_decoder_f, axis=0)\n encoder_exog_f = np.concatenate(encoder_exog_f, axis=0)\n decoder_exog_f = np.concatenate(decoder_exog_f, axis=0)\n decoder_event_f = np.concatenate(decoder_event_f, axis=0)\n w_f = np.concatenate(w_f, axis=0)\n\n return([[X_endog_f, encoder_exog_f, X_decoder_f, decoder_exog_f, decoder_event_f], Y_endog_f, w_f])",
"def load_evictions_data(data_path, date_col, min_year, max_year, create_geoid = True):\n ev_raw = (\n pd.read_csv(\n data_path,\n parse_dates=[date_col],\n infer_datetime_format=True,\n )\n .dropna(how=\"all\")\n .drop_duplicates()\n )\n ev_raw[\"year\"] = ev_raw[date_col].dt.year\n ev_df = ev_raw[(ev_raw.year >= min_year) & (ev_raw.year <= max_year)]\n ev_df[\"year\"] = ev_df[\"year\"].astype(int)\n ev_df[\"month\"] = get_month_as_str_col(ev_df, date_col)\n \n if create_geoid == True:\n # Convert to strings\n ev_df[\"state_code\"] = ev_df[\"state_code\"].astype(str).replace(r'\\.0', '', regex = True) \n ev_df[\"county_code\"] = ev_df[\"county_code\"].astype(str).replace(r'\\.0', '', regex = True) \n ev_df[\"tract_code\"] = ev_df[\"tract_code\"].astype(str).replace(r'\\.0', '', regex = True)\n # Add zeroes as necessary\n ev_df[\"state_code\"] = ev_df[\"state_code\"].apply(lambda x: (((2-len(x)) * \"0\") + x) if len(x) < 2 else x)\n ev_df[\"county_code\"] = ev_df[\"county_code\"].apply(lambda x: (((3-len(x)) * \"0\") + x) if len(x) < 3 else x)\n ev_df[\"tract_code\"] = ev_df[\"tract_code\"].apply(lambda x: (((6-len(x)) * \"0\") + x) if len(x) < 6 else x)\n # Finally, concat everything\n ev_df[\"GEOID\"] = (\n ev_df[\"state_code\"] + ev_df[\"county_code\"] + ev_df[\"tract_code\"]\n )\n \n return ev_df",
"def tuning_ae(num_epochs, encode_size, file_index, plot_loss, save_files):\n normal, abnormal, all = read_in(file_index, True, 2, 0.3)\n normal_train, normal_valid = train_test_split(normal, train_size=0.85, random_state=1)\n\n signal_shape = normal.shape[1:]\n batch_size = round(len(normal) * 0.15)\n\n encoder, decoder = build_model(encode_size)\n\n inp = Input(signal_shape)\n encode = encoder(inp)\n reconstruction = decoder(encode)\n\n autoencoder = Model(inp, reconstruction)\n opt = keras.optimizers.Adam(learning_rate=0.001)\n autoencoder.compile(optimizer=opt, loss='mse')\n\n early_stopping = EarlyStopping(patience=10, min_delta=0.001, mode='min')\n model = autoencoder.fit(x=normal_train, y=normal_train, epochs=num_epochs, batch_size=batch_size,\n validation_data=(normal_valid, normal_valid), callbacks=early_stopping)\n if plot_loss:\n SMALLER_SIZE = 10\n MED_SIZE = 12\n BIG_SIZE = 18\n plt.figure()\n # plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=MED_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MED_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALLER_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALLER_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=MED_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIG_SIZE) # fontsize of the figure title\n\n plt.plot(model.history['loss'])\n plt.plot(model.history['val_loss'])\n # plt.title('Example of Training and Validation Loss')\n plt.ylabel('Mean Squared Error')\n plt.xlabel('Epochs')\n plt.legend(['Train', 'Validation'], loc='upper right')\n plt.savefig(\"images/CDAE_\" + file_index + \"_loss.png\", dpi=500)\n plt.show()\n\n if save_files:\n # using autoencoder to encode all of the patient data\n encoded = encoder.predict(all)\n reconstruction = decoder.predict(encoded)\n\n # save reconstruction and encoded files\n reconstruction_save = \"Working_Data/reconstructed_tuning_10hb_cae_\" + str(file_index) + \".npy\"\n encoded_save = \"Working_Data/encoded_tuning_10hb_cae_\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstruction)\n np.save(encoded_save, encoded)",
"def load_event_name(self, exog_index, exog_cols):\n \n event_name = self.event_encoder.transform(self.calendar.loc[exog_index, 'event_name_1']).reshape((1,-1))\n event_name = np.repeat(event_name, repeats=exog_cols.shape[0], axis=0)\n \n return(event_name)",
"def add_ae(self, model, dataset, latent_options, model_paths, pre_process=None):\n ae = autoencoder(self.app, model, dataset, latent_options, model_paths, pre_process)\n self.body_children.append(ae)",
"def load_encoder_dataset(sentences, oracle=None):\n dataset = TFRecordDataset([path.join(TFREDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'e': tf.FixedLenSequenceFeature([NE],\n tf.float32,\n allow_missing=True),\n 'n': tf.FixedLenFeature([], tf.int64)\n }\n )\n )\n\n if oracle is None:\n return dataset.map(lambda feature: (feature['e'], feature['n']))\n else:\n indices = {s: n for n, s in enumerate(sentences)}\n return dataset.map(lambda feature: \\\n (feature['e'],\n feature['n'],\n tf.py_func(\n lambda s: oracle[indices[s.decode('ascii')],:].reshape(NC),\n [feature['s']],\n tf.float32\n ))\n )",
"def load(npz):\n e = np.load(npz, allow_pickle=True)\n return EOMap(\n e['dataStore'],\n e['etas'],\n e['etaEdges'],\n e['omegas'],\n e['omeEdges'],\n e['iHKLList'],\n plane_data(e)\n )",
"def emg_eventrelated(epochs, silent=False):\n # Sanity checks\n epochs = _eventrelated_sanitizeinput(epochs, what=\"emg\", silent=silent)\n\n # Extract features and build dataframe\n data = {} # Initialize an empty dict\n for i in epochs.keys():\n\n data[i] = {} # Initialize an empty dict for the current epoch\n\n # Activation following event\n if \"EMG_Onsets\" not in epochs[i]:\n warn(\n \"Input does not have an `EMG_Onsets` column.\" \" Unable to process EMG features.\",\n category=NeuroKitWarning,\n )\n data[i][\"EMG_Activation\"] = 0\n elif np.any(epochs[i][\"EMG_Onsets\"][epochs[i].index > 0] != 0):\n data[i][\"EMG_Activation\"] = 1\n else:\n data[i][\"EMG_Activation\"] = 0\n\n # Analyze features based on activation\n if data[i][\"EMG_Activation\"] == 1:\n data[i] = _emg_eventrelated_features(epochs[i], data[i])\n else:\n data[i][\"EMG_Amplitude_Mean\"] = np.nan\n data[i][\"EMG_Amplitude_Max\"] = np.nan\n data[i][\"EMG_Amplitude_SD\"] = np.nan\n data[i][\"EMG_Amplitude_Max_Time\"] = np.nan\n data[i][\"EMG_Bursts\"] = np.nan\n\n # Fill with more info\n data[i] = _eventrelated_addinfo(epochs[i], data[i])\n\n df = _eventrelated_sanitizeoutput(data)\n\n return df",
"def decode_onestep(enc_inp, enc_extended_inp, dec_inp, batch_oov_len):\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(enc_inp, dec_inp)\n outputs = model(enc_inp,\n enc_extended_inp,\n batch_oov_len,\n dec_inp,\n params['training'],\n enc_padding_mask,\n combined_mask,\n dec_padding_mask)\n final_dists = outputs[\"logits\"]\n attentions = outputs[\"attentions\"]\n\n # final_dists shape=(3, 1, 30000)\n # top_k_probs shape=(3, 6)\n # top_k_ids shape=(3, 6)\n top_k_probs, top_k_ids = tf.nn.top_k(tf.squeeze(final_dists), k=params[\"beam_size\"] * 2)\n top_k_log_probs = tf.math.log(top_k_probs)\n # dec_hidden shape = (3, 256)\n # attentions, shape = (3, 115)\n # p_gens shape = (3, 1)\n # coverages,shape = (3, 115, 1)\n results = {\"attention_vec\": attentions, # [batch_sz, max_len_x, 1]\n \"top_k_ids\": top_k_ids,\n \"top_k_log_probs\": top_k_log_probs}\n return results",
"def run_etl(self):\n self._replace_nans()\n self._add_is_null_column_df()\n self._extract_zipcode()\n self._one_hot_encode_df()\n self._build_feature_etl_df()",
"def load_ae(self, year):\n ae_paths = list(pathlib.Path(config.AE_DIR).glob(f'{year}*ae.txt'))\n assert len(ae_paths) == 1, (f'No AE files found.\\nae_dir={config.AE_DIR}, '\n f'year={year}, ae_paths={ae_paths}')\n ae_data = pd.read_csv(ae_paths[0], sep=' ', index_col=0, \n parse_dates=True, comment='#', \n names=['dateTime', 'AE'])\n return ae_data",
"def LoadEEGData(filename, EEGdevice):\n if EEGdevice == 7:\n x = 1\n elif EEGdevice == 8:\n # Read in the .easy file\n df = pd.read_csv(filename, delimiter='\\t', header=None)\n\n # Get metadata from the .info file\n fname = filename[:-5] + '.info'\n with open(fname) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n\n # Get the channel names\n channel_info = [x for x in content if 'Channel ' in x]\n channel_names = []\n for ch in range(len(channel_info)):\n channel_names.append(channel_info[ch].split(': ')[1])\n\n channel_names.append('X')\n channel_names.append('Y')\n channel_names.append('Z')\n channel_names.append('STI 014')\n channel_names.append('DateTime')\n\n # Get sampling rates\n sampling_rates = [x for x in content if 'sampling rate: ' in x]\n fs_all = []\n for freq in range(len(sampling_rates)):\n tmp = sampling_rates[freq].split(': ')[1].split(' ')[0]\n if tmp in ['N/A']:\n print('Skipping N/A')\n else:\n fs_all.append(float(sampling_rates[freq].split(': ')[1].split(' ')[0]))\n\n # Store sampling rates\n fs = fs_all[0]\n fs_accel = fs_all[1]\n\n # Assign the column names\n df.columns = channel_names\n \n # Return dataframe and sampling rates\n return df, fs, fs_accel",
"def test_enforce_exogenous_exo_data(load_uni_exo_data_target):\n data, target = load_uni_exo_data_target\n\n exp1 = TSForecastingExperiment()\n exp1.setup(data=data, target=target, seasonal_period=4, enforce_exogenous=True)\n num_models1 = len(exp1.models())\n\n exp2 = TSForecastingExperiment()\n exp2.setup(data=data, target=target, seasonal_period=4, enforce_exogenous=False)\n num_models2 = len(exp2.models())\n\n # We know that some models do not offer exogenous variables support, so the\n # following check is valid for now.\n assert num_models1 < num_models2",
"def eeg_eog_window(raw, duration=0.5):\n average_eog = mne.preprocessing.create_eog_epochs(raw).average()\n print('We found %i EOG events' % average_eog.nave)\n\n eog_events = mne.preprocessing.find_eog_events(raw)\n n_blinks = len(eog_events)\n onset = eog_events[:, 0] / raw.info['sfreq'] - (duration/2)\n duration = np.repeat(duration, n_blinks)\n raw.annotations = mne.Annotations(onset,\n duration,\n ['bad blink'] * n_blinks,\n orig_time=raw.info['meas_date'])\n return(raw)",
"def loadEdt(self):\n\n maxNumChannels = self._maxNumChannels # 4\n\n baseFilePath, ext = os.path.splitext(self.path)\n baseFilePath = baseFilePath.replace('_ch1', '')\n baseFilePath = baseFilePath.replace('_ch2', '')\n\n # load mask\n #labeledPath = dvMaskPath + '_mask.tif'\n #labeledData = tifffile.imread(labeledPath)\n\n maskFromLabelGreaterThan = 0\n\n edtMult = 3 # 3 because we have (raw==0, mask==1, skel==2, edt==3)\n\n # load labeled\n for channelIdx in range(maxNumChannels):\n channelNumber = channelIdx + 1 # for _ch1, _ch2, ...\n stackListIdx = maxNumChannels * edtMult + channelIdx # for index into self._stackList\n\n chStr = '_ch' + str(channelNumber)\n edtPath = baseFilePath + chStr + '_edt.tif'\n\n # if we find _labeeled.tif, load and make a mask\n # o.w. if we find _mask.tif then load that\n if os.path.isfile(edtPath):\n print(' bStack.loadEdt() loading channelNumber:', channelNumber,\n 'maxNumChannels:', maxNumChannels,\n 'stackListIdx:', stackListIdx,\n 'edtPath:', edtPath)\n edtData = tifffile.imread(edtPath)\n print(' edtData:', edtData.shape, edtData.dtype)\n self._stackList[stackListIdx] = edtData\n #print(' shape is:', self._stackList[stackListIdx].shape)",
"def create_ogse_db(args):\n if args.ref_diode:\n # read reference-diode data\n xds = read_ref_diode(args.ogse_dir, args.ref_diode, args.verbose)\n\n # create new database for reference-diode data\n xds.to_netcdf(args.ogse_dir / DB_REF_DIODE,\n mode='w', format='NETCDF4',\n group='/gse_data/ReferenceDiode')\n\n if args.wav_mon:\n # read reference-diode data\n xds = read_wav_mon(args.ogse_dir, args.wav_mon, args.verbose)\n # create new database for reference-diode data\n xds.to_netcdf(args.ogse_dir / DB_WAV_MON,\n mode='w', format='NETCDF4',\n group='/gse_data/WaveMonitor')",
"def get_eop(time, models=None, window=4, source=None):\n # Read the extended and the regular EOP data file (overlapping dates are overwritten by the latter)\n if not _EOP_DATA:\n source = config.tech.get(\"eop_source\", value=source).str\n for file_key in _EOP_FILE_KEYS[source]:\n _EOP_DATA.update(parsers.parse_key(file_key=file_key).as_dict())\n\n return Eop(_EOP_DATA, time, models=models, window=window)",
"def setup_encoder_initializer(self):\n if self.mode != \"inference\":\n # Restore inception variables only.\n saver = tf.train.Saver(self.autoencoder_variables)\n\n def restore_fn(sess):\n tf.logging.info(\"Restoring Autoencoder variables from checkpoint dir %s\",\n self.config.autoencoder_checkpoint_dir)\n saver.restore(sess, tf.train.latest_checkpoint(\n self.config.autoencoder_checkpoint_dir))\n\n if self.use_pretrained_ae:\n self.init_fn = restore_fn\n else:\n self.init_fn = None",
"def add_exon_annotations_to_db(c, exon, exon_id, annot_name):\n\n ignore = [\"gene_id\", \"gene_name\"]\n attributes = exon.annotations\n source = attributes['source']\n if \"exon_status\" not in attributes:\n attributes[\"exon_status\"] = \"KNOWN\"\n\n for att in attributes.keys():\n if (att in ignore) or (\"gene\" in att) or (\"transcript\" in att):\n continue\n value = attributes[att]\n cols = \" (\" + \", \".join([str_wrap_double(x) for x in [\"ID\",\"annot_name\",\n \"source\", \"attribute\", \"value\"]]) + \") \"\n vals = [exon_id, annot_name, source, att, value]\n\n command = 'INSERT OR IGNORE INTO \"exon_annotations\"' + cols + \"VALUES \" + \\\n '(?,?,?,?,?)'\n c.execute(command,vals)\n\n return",
"def test_bti_set_eog():\n raw = read_raw_bti(\n fname_sim, preload=False, eog_ch=(\"X65\", \"X67\", \"X69\", \"X66\", \"X68\")\n )\n assert_equal(len(pick_types(raw.info, eog=True)), 5)",
"def save_exact_samples(expt):\n if isinstance(expt, str):\n expt = get_experiment(expt)\n tr_expt = get_training_expt(expt)\n\n for it in tr_expt.save_after:\n for avg in AVG_VALS:\n print 'Iteration', it, avg\n try:\n rbm = load_rbm(expt, it, avg)\n except:\n continue\n\n states = tractable.exact_samples(rbm, expt.annealing.num_samples)\n storage.dump(states, expt.gibbs_states_file(it, avg))",
"def setupEmbeddings(self, path = \"awd_lm\"):\n try:\n data_lm = TextLMDataBunch.from_df(path, train_df=self.train, valid_df=self.valid,\\\n text_cols = \"text\", label_cols = \"label\")\n except:\n print(\"error creating LM\")\n return\n\n learn = language_model_learner(data_lm, arch=AWD_LSTM, drop_mult=.25)\n learn.fit_one_cycle(1, 1e-2)\n learn.save_encoder('ft_enc_1')\n\n learn.unfreeze()\n learn.fit_one_cycle(3, 1e-3)\n learn.save_encoder('ft_enc_1')\n\n learn.unfreeze()\n learn.fit_one_cycle(5, 5e-4)\n learn.save_encoder('ft_enc_1')\n\n print(\"feature encoding saved\")",
"def __init__(self, in_dimension, layer_1d, layer_2d, layer_3d,\n latent_dimension):\n super(VAEEncoder, self).__init__()\n self.latent_dimension = latent_dimension\n\n # Reduce dimension up to second last layer of Encoder\n self.encode_nn = nn.Sequential(\n nn.Linear(in_dimension, layer_1d),\n nn.ReLU(),\n nn.Linear(layer_1d, layer_2d),\n nn.ReLU(),\n nn.Linear(layer_2d, layer_3d),\n nn.ReLU()\n )\n\n # Latent space mean\n self.encode_mu = nn.Linear(layer_3d, latent_dimension)\n\n # Latent space variance\n self.encode_log_var = nn.Linear(layer_3d, latent_dimension)",
"def __init__(self, in_dimension, layer_1d, layer_2d, layer_3d,\n latent_dimension):\n super(VAEEncoder, self).__init__()\n self.latent_dimension = latent_dimension\n\n # Reduce dimension up to second last layer of Encoder\n self.encode_nn = nn.Sequential(\n nn.Linear(in_dimension, layer_1d),\n nn.ReLU(),\n nn.Linear(layer_1d, layer_2d),\n nn.ReLU(),\n nn.Linear(layer_2d, layer_3d),\n nn.ReLU()\n )\n\n # Latent space mean\n self.encode_mu = nn.Linear(layer_3d, latent_dimension)\n\n # Latent space variance\n self.encode_log_var = nn.Linear(layer_3d, latent_dimension)"
] | [
"0.75448066",
"0.61869884",
"0.50735664",
"0.50666195",
"0.5029381",
"0.4966422",
"0.49151808",
"0.48683658",
"0.48182648",
"0.47745952",
"0.47381002",
"0.47372207",
"0.47243765",
"0.47120556",
"0.46972522",
"0.4653973",
"0.4647424",
"0.46075404",
"0.46064758",
"0.4606467",
"0.4597736",
"0.45839357",
"0.45410728",
"0.45320866",
"0.45288715",
"0.44665632",
"0.44405258",
"0.44379017",
"0.44311005",
"0.44311005"
] | 0.7274047 | 1 |
Load decoder_exog at timestamps (exog_index) and dept_stores (exog_columns). decoder_exog has shape (n_ts, n_timestamp, n_features). | def load_decoder_exog(self, exog_index, exog_cols):
decoder_exog = np.repeat(np.array(self.calendar_exog.loc[exog_index]).reshape((1,exog_index.shape[0],-1)), repeats=exog_cols.shape[0], axis=0)
decoder_exog = np.concatenate([decoder_exog, np.array(self.snap_exog.loc[exog_index, exog_cols].T).reshape((exog_cols.shape[0], -1, 1))], axis=-1)
decoder_exog = np.concatenate([decoder_exog, np.array(self.price_discount.loc[exog_index, exog_cols].T).reshape((exog_cols.shape[0], -1, 1))], axis=-1)
#Append with id features
id_features = self.dept_store_encoder.transform(self.agg_sales_train.loc[exog_cols, ['dept_id', 'store_id']])
id_features = np.repeat(np.expand_dims(id_features, axis=1), repeats=exog_index.shape[0], axis=1)
decoder_exog = np.concatenate([decoder_exog, id_features], axis=-1)
return(decoder_exog.astype(np.float32)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_encoder_exog(self, exog_index, exog_cols):\n encoder_exog = np.zeros((exog_cols.shape[0], exog_index.shape[0], 1))\n return(encoder_exog.astype(np.float32))",
"def prepare_dataset_encoder(self):\n calendar, sales_train, prices_df = self.calendar, self.sales_train, self.prices_df\n agg_endog, agg_idx, agg_sales_train = self.agg_endog, self.agg_idx, self.agg_sales_train\n \n #Prepare exog dataset ---------------------------------------------------------------\n #Prepare calendar exog: event_type & wday on a date\n calendar_exog = pd.DataFrame(index=calendar.index)\n for event_type in ['Sporting', 'Cultural', 'National', 'Religious']:\n calendar_exog['is_{}'.format(event_type)] = np.where((calendar.loc[calendar_exog.index, ['event_type_1', 'event_type_2']] == event_type).any(axis=1), 1, 0)\n wday_encoder = OneHotEncoder(drop='first', sparse=False) #drop Sat.\n wday_df = pd.DataFrame(wday_encoder.fit_transform(calendar.loc[calendar_exog.index, ['wday']]), columns=['w7'] + ['w{}'.format(i) for i in range(1,6)])\n calendar_exog = pd.concat([calendar_exog, wday_df], axis=1)\n \n #Prepare snap_exog: if there is snap event on that date & dept_store ts\n snap_exog = pd.DataFrame(0., index=calendar.index, columns=agg_endog.columns)\n for idx in snap_exog.columns:\n state = sales_train[agg_idx == idx].state_id.unique()[0]\n snap_exog[idx] = calendar.loc[snap_exog.index, 'snap_{}'.format(state)]\n \n #Prepare price discount on that date & dept_store ts\n price_exog = pd.DataFrame(index=calendar.index, columns=agg_endog.columns) #mean price across item_store for a dept_store ts\n for idx in price_exog.columns:\n price_exog[idx] = prices_df.T.loc[agg_idx == idx].mean()\n price_discount = price_exog / price_exog.max() #normalized\n \n self.calendar_exog = calendar_exog\n self.snap_exog = snap_exog\n self.price_discount = price_discount\n \n #Prepare encoder ----------------------------------------------------------------------\n #Create encoder for dept_store_id\n dept_store_encoder = OneHotEncoder(drop='first', sparse=False).fit(agg_sales_train[['dept_id', 'store_id']])\n \n #Create encoder for event name\n calendar['event_name_1'].fillna('missing', inplace=True)\n event_encoder = LabelEncoder().fit(calendar['event_name_1'])\n \n self.dept_store_encoder = dept_store_encoder\n self.event_encoder = event_encoder",
"def prepare_training_dataset(self, train_endog, w, sliding_freq=1):\n\n input_window, output_window = self.input_window, self.output_window\n timestamp = np.arange(input_window, train_endog.shape[0]-output_window+1, sliding_freq)\n\n X_endog_f, Y_endog_f, X_decoder_f, encoder_exog_f, decoder_exog_f, decoder_event_f, w_f = [], [], [], [], [], [], []\n for t in timestamp:\n endog_slide = train_endog.iloc[(t-input_window):(t+output_window), :].T.dropna().copy()\n endog_slide = endog_slide - np.array(endog_slide.iloc[:,0]).reshape((-1,1)) #remove the first obs as to remove trend\n sample_batch_index = np.arange(endog_slide.shape[0])\n\n X_endog = endog_slide.iloc[sample_batch_index, :input_window]\n Y_endog = endog_slide.iloc[sample_batch_index, input_window:]\n\n X_decoder = np.zeros((*Y_endog.shape, 1)) #decoder endog input, which is always 0 as training unconditionally\n\n encoder_exog = self.dataset_generator.load_encoder_exog(X_endog.columns, X_endog.index)\n decoder_exog = self.dataset_generator.load_decoder_exog(Y_endog.columns, Y_endog.index)\n decoder_event = self.dataset_generator.load_event_name(Y_endog.columns, Y_endog.index)\n\n X_endog_f.append(np.array(X_endog).reshape((-1, input_window, 1)))\n Y_endog_f.append(np.array(Y_endog).reshape((-1, output_window, 1)))\n X_decoder_f.append(X_decoder)\n encoder_exog_f.append(encoder_exog)\n decoder_exog_f.append(decoder_exog)\n decoder_event_f.append(decoder_event)\n w_f.append(np.array(w[X_endog.index]))\n \n X_endog_f = np.concatenate(X_endog_f, axis=0)\n Y_endog_f = np.concatenate(Y_endog_f, axis=0)\n X_decoder_f = np.concatenate(X_decoder_f, axis=0)\n encoder_exog_f = np.concatenate(encoder_exog_f, axis=0)\n decoder_exog_f = np.concatenate(decoder_exog_f, axis=0)\n decoder_event_f = np.concatenate(decoder_event_f, axis=0)\n w_f = np.concatenate(w_f, axis=0)\n\n return([[X_endog_f, encoder_exog_f, X_decoder_f, decoder_exog_f, decoder_event_f], Y_endog_f, w_f])",
"def load_encoder_dataset(sentences, oracle=None):\n dataset = TFRecordDataset([path.join(TFREDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'e': tf.FixedLenSequenceFeature([NE],\n tf.float32,\n allow_missing=True),\n 'n': tf.FixedLenFeature([], tf.int64)\n }\n )\n )\n\n if oracle is None:\n return dataset.map(lambda feature: (feature['e'], feature['n']))\n else:\n indices = {s: n for n, s in enumerate(sentences)}\n return dataset.map(lambda feature: \\\n (feature['e'],\n feature['n'],\n tf.py_func(\n lambda s: oracle[indices[s.decode('ascii')],:].reshape(NC),\n [feature['s']],\n tf.float32\n ))\n )",
"def load(npz):\n e = np.load(npz, allow_pickle=True)\n return EOMap(\n e['dataStore'],\n e['etas'],\n e['etaEdges'],\n e['omegas'],\n e['omeEdges'],\n e['iHKLList'],\n plane_data(e)\n )",
"def loadEdt(self):\n\n maxNumChannels = self._maxNumChannels # 4\n\n baseFilePath, ext = os.path.splitext(self.path)\n baseFilePath = baseFilePath.replace('_ch1', '')\n baseFilePath = baseFilePath.replace('_ch2', '')\n\n # load mask\n #labeledPath = dvMaskPath + '_mask.tif'\n #labeledData = tifffile.imread(labeledPath)\n\n maskFromLabelGreaterThan = 0\n\n edtMult = 3 # 3 because we have (raw==0, mask==1, skel==2, edt==3)\n\n # load labeled\n for channelIdx in range(maxNumChannels):\n channelNumber = channelIdx + 1 # for _ch1, _ch2, ...\n stackListIdx = maxNumChannels * edtMult + channelIdx # for index into self._stackList\n\n chStr = '_ch' + str(channelNumber)\n edtPath = baseFilePath + chStr + '_edt.tif'\n\n # if we find _labeeled.tif, load and make a mask\n # o.w. if we find _mask.tif then load that\n if os.path.isfile(edtPath):\n print(' bStack.loadEdt() loading channelNumber:', channelNumber,\n 'maxNumChannels:', maxNumChannels,\n 'stackListIdx:', stackListIdx,\n 'edtPath:', edtPath)\n edtData = tifffile.imread(edtPath)\n print(' edtData:', edtData.shape, edtData.dtype)\n self._stackList[stackListIdx] = edtData\n #print(' shape is:', self._stackList[stackListIdx].shape)",
"def decode_onestep(enc_inp, enc_extended_inp, dec_inp, batch_oov_len):\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(enc_inp, dec_inp)\n outputs = model(enc_inp,\n enc_extended_inp,\n batch_oov_len,\n dec_inp,\n params['training'],\n enc_padding_mask,\n combined_mask,\n dec_padding_mask)\n final_dists = outputs[\"logits\"]\n attentions = outputs[\"attentions\"]\n\n # final_dists shape=(3, 1, 30000)\n # top_k_probs shape=(3, 6)\n # top_k_ids shape=(3, 6)\n top_k_probs, top_k_ids = tf.nn.top_k(tf.squeeze(final_dists), k=params[\"beam_size\"] * 2)\n top_k_log_probs = tf.math.log(top_k_probs)\n # dec_hidden shape = (3, 256)\n # attentions, shape = (3, 115)\n # p_gens shape = (3, 1)\n # coverages,shape = (3, 115, 1)\n results = {\"attention_vec\": attentions, # [batch_sz, max_len_x, 1]\n \"top_k_ids\": top_k_ids,\n \"top_k_log_probs\": top_k_log_probs}\n return results",
"def load_evictions_data(data_path, date_col, min_year, max_year, create_geoid = True):\n ev_raw = (\n pd.read_csv(\n data_path,\n parse_dates=[date_col],\n infer_datetime_format=True,\n )\n .dropna(how=\"all\")\n .drop_duplicates()\n )\n ev_raw[\"year\"] = ev_raw[date_col].dt.year\n ev_df = ev_raw[(ev_raw.year >= min_year) & (ev_raw.year <= max_year)]\n ev_df[\"year\"] = ev_df[\"year\"].astype(int)\n ev_df[\"month\"] = get_month_as_str_col(ev_df, date_col)\n \n if create_geoid == True:\n # Convert to strings\n ev_df[\"state_code\"] = ev_df[\"state_code\"].astype(str).replace(r'\\.0', '', regex = True) \n ev_df[\"county_code\"] = ev_df[\"county_code\"].astype(str).replace(r'\\.0', '', regex = True) \n ev_df[\"tract_code\"] = ev_df[\"tract_code\"].astype(str).replace(r'\\.0', '', regex = True)\n # Add zeroes as necessary\n ev_df[\"state_code\"] = ev_df[\"state_code\"].apply(lambda x: (((2-len(x)) * \"0\") + x) if len(x) < 2 else x)\n ev_df[\"county_code\"] = ev_df[\"county_code\"].apply(lambda x: (((3-len(x)) * \"0\") + x) if len(x) < 3 else x)\n ev_df[\"tract_code\"] = ev_df[\"tract_code\"].apply(lambda x: (((6-len(x)) * \"0\") + x) if len(x) < 6 else x)\n # Finally, concat everything\n ev_df[\"GEOID\"] = (\n ev_df[\"state_code\"] + ev_df[\"county_code\"] + ev_df[\"tract_code\"]\n )\n \n return ev_df",
"def load_ae(self, year):\n ae_paths = list(pathlib.Path(config.AE_DIR).glob(f'{year}*ae.txt'))\n assert len(ae_paths) == 1, (f'No AE files found.\\nae_dir={config.AE_DIR}, '\n f'year={year}, ae_paths={ae_paths}')\n ae_data = pd.read_csv(ae_paths[0], sep=' ', index_col=0, \n parse_dates=True, comment='#', \n names=['dateTime', 'AE'])\n return ae_data",
"def get_forecast(self, input_endog):\n output_window = self.output_window\n \n val_index = np.arange(input_endog.index[-1]+1, input_endog.index[-1]+1+output_window)\n\n X_endog = input_endog.T.dropna().copy()\n obs_items = X_endog.index #store_item pair that has complete input\n first_obs = np.array(X_endog.iloc[:,0]).reshape((-1,1)) #remove the first obs to remove trend\n X_endog -= first_obs\n \n encoder_exog = self.dataset_generator.load_encoder_exog(X_endog.columns, obs_items)\n decoder_exog = self.dataset_generator.load_decoder_exog(val_index, obs_items)\n decoder_event = self.dataset_generator.load_event_name(val_index, obs_items)\n\n X_decoder = np.zeros((X_endog.shape[0], output_window, 1))\n\n forecast = self.model.predict([np.array(X_endog).reshape((*X_endog.shape,1)), encoder_exog, X_decoder, decoder_exog, decoder_event]).reshape((-1, output_window))\n \n #Add back the first obs\n forecast += first_obs\n forecast_df = pd.DataFrame(np.NaN, index=val_index, columns=input_endog.columns)\n forecast_df[obs_items] = forecast.T\n return(forecast_df)",
"def load_event_name(self, exog_index, exog_cols):\n \n event_name = self.event_encoder.transform(self.calendar.loc[exog_index, 'event_name_1']).reshape((1,-1))\n event_name = np.repeat(event_name, repeats=exog_cols.shape[0], axis=0)\n \n return(event_name)",
"def decode_onestep(self, sess, batch, latest_tokens, enc_states, dec_init_states, prev_coverage):\n\n beam_size = len(dec_init_states)\n\n # Turn dec_init_states (a list of LSTMStateTuples) into a single LSTMStateTuple for the batch\n cells = [np.expand_dims(state.c, axis=0) for state in dec_init_states]\n hiddens = [np.expand_dims(state.h, axis=0) for state in dec_init_states]\n new_c = np.concatenate(cells, axis=0) # shape [batch_size,hidden_dim]\n new_h = np.concatenate(hiddens, axis=0) # shape [batch_size,hidden_dim]\n new_dec_in_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n\n feed = {\n self._enc_states: enc_states,\n self._enc_padding_mask: batch.enc_padding_mask,\n self._dec_in_state: new_dec_in_state,\n self._dec_batch: np.transpose(np.array([latest_tokens])),\n }\n\n to_return = {\n \"ids\": self._topk_ids,\n \"probs\": self._topk_log_probs,\n \"states\": self._dec_out_state,\n \"attn_dists\": self.attn_dists\n }\n \n if self._hps.hier:\n feed[self._enc_batch_sections] = batch.batch_sections # shape=[batch-size, num-sections, enc-seq-len]\n feed[self._batch_sections_len] = batch.batch_sections_len\n feed[self._doc_sec_lens] = batch.batch_doc_sec_lens\n feed[self._enc_section_padding_mask] = batch.enc_section_padding_mask\n feed[self._enc_lens] = batch.enc_lens\n to_return['attn_dists_sec'] = self.attn_dists_sec\n\n if FLAGS.pointer_gen:\n feed[self._enc_batch_extend_vocab] = batch.enc_batch_extend_vocab\n feed[self._max_art_oovs] = batch.max_art_oovs\n to_return['p_gens'] = self.p_gens\n\n if self._hps.coverage:\n feed[self.prev_coverage] = np.stack(prev_coverage, axis=0)\n to_return['coverage'] = self.coverage\n\n results = sess.run(to_return, feed_dict=feed) # run the decoder step\n\n # Convert results['states'] (a single LSTMStateTuple) into a list of LSTMStateTuple -- one for each hypothesis\n new_states = [tf.contrib.rnn.LSTMStateTuple(results['states'].c[i, :], results['states'].h[i, :]) for i in xrange(beam_size)]\n\n # Convert singleton list containing a tensor to a list of k arrays\n assert len(results['attn_dists'])==1\n attn_dists = results['attn_dists'][0].tolist()\n \n if 'attn_dists_sec' in results:\n if len(results['attn_dists_sec']) > 0:\n attn_dists_sec = results['attn_dists_sec'][0].tolist()\n else: attn_dists_sec = None\n else:\n attn_dists_sec = None\n\n if FLAGS.pointer_gen:\n # Convert singleton list containing a tensor to a list of k arrays\n assert len(results['p_gens'])==1\n p_gens = results['p_gens'][0].tolist()\n else:\n p_gens = [None for _ in xrange(beam_size)]\n\n # Convert the coverage tensor to a list length k containing the coverage vector for each hypothesis\n if FLAGS.coverage:\n new_coverage = results['coverage'].tolist()\n assert len(new_coverage) == beam_size\n else:\n new_coverage = [None for _ in xrange(beam_size)]\n\n return results['ids'], results['probs'], new_states, attn_dists, p_gens, new_coverage, attn_dists_sec",
"def load_expt_gaps():\n path = os.path.join(DATA_DIR, \"bandgap-zhuo-4604.csv\")\n df = pd.read_csv(path, index_col=False)\n return df",
"def load(self, **kwargs):\n if \"events\" in kwargs:\n events = kwargs[\"events\"] # type: pd.DataFrame\n # drop any invalid eegoffset events\n if np.any(events[\"eegoffset\"] < 0):\n warnings.warn(\"Some events have eegoffset < 0 and will be dropped.\")\n events = events[events[\"eegoffset\"] >= 0]\n else:\n if self.session is None:\n raise ValueError(\n \"A session must be specified to load an entire session of \"\n \"EEG data!\"\n )\n\n # Because of reasons, PS4 experiments may or may not end with a 5.\n if self.experiment.startswith(\"PS4\") and self.experiment.endswith(\"5\"):\n experiment = self.experiment[:-1]\n else:\n experiment = self.experiment\n\n finder = PathFinder(\n subject=self.subject,\n experiment=experiment,\n session=self.session,\n rootdir=self.rootdir,\n )\n\n events_file = finder.find(\"task_events\")\n all_events = EventReader.fromfile(\n events_file, self.subject, self.experiment, self.session\n )\n\n # Select only a single event with a valid eegfile just to get the\n # filename\n valid = all_events[\n (all_events[\"eegfile\"].notnull())\n & (all_events[\"eegfile\"].str.len() > 0)\n ]\n events = pd.DataFrame(valid.iloc[0]).T.reset_index(drop=True)\n\n # Set relative start and stop times if necessary. If they were\n # already specified, these will allow us to subset the session.\n if \"rel_start\" not in kwargs:\n kwargs[\"rel_start\"] = 0\n if \"rel_stop\" not in kwargs:\n kwargs[\"rel_stop\"] = -1\n\n if not len(events):\n raise ValueError(\n \"No events found! Hint: did filtering events \" \"result in at least one?\"\n )\n elif len(events[\"subject\"].unique()) > 1:\n raise ValueError(\n \"Loading multiple sessions of EEG data requires \"\n \"using events from only a single subject.\"\n )\n\n if \"rel_start\" not in kwargs or \"rel_stop\" not in kwargs:\n raise exc.IncompatibleParametersError(\n \"rel_start and rel_stop must be given with events\"\n )\n\n # info = EEGMetaReader.fromfile(path, subject=self.subject)\n # sample_rate = info[\"sample_rate\"]\n # dtype = info[\"data_format\"]\n\n self.clean = kwargs.get(\"clean\", None)\n self.scheme = kwargs.get(\"scheme\", None)\n\n events = self._eegfile_absolute(events.copy())\n return self.as_timeseries(events, kwargs[\"rel_start\"], kwargs[\"rel_stop\"])",
"def load(self, episode_name):\n with open(savefile, 'rb') as f:\n bytesin = f.read()\n\n tout = np.frombuffer(bytesin, dtype=self.datatype)\n tout = tout.reshape((-1, num_channels))\n self.all_read = tout",
"def load_dataset(sequence_length=10):\n train_x = []\n train_y = []\n notes_to_emotion = []\n song_index_to_notes = get_notes()\n song_index_to_emotion = get_emotions()\n\n for index, notes in song_index_to_notes.items():\n if index in song_index_to_emotion:\n notes_to_emotion.append((notes, song_index_to_emotion[index]))\n\n for notes, emotion in notes_to_emotion:\n # get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n for i in range(0, int(len(notes)) - sequence_length):\n music_in = notes[i: i + sequence_length]\n train_x.append([note_to_int[char] for char in music_in])\n train_y.append(emotion)\n\n print(\"train_x has shape: \", len(train_x))\n print(\"train_y has shape: \", len(train_y))\n\n return (np.asarray(train_x), np.asarray(train_y))",
"def training_ae(num_epochs, reduced_dim, file_index, save_model):\n normal, post_normal = read_in(file_index, 1, 3, 0.3)\n three, four, five, six = split(post_normal, 4)\n signal_shape = normal.shape[1:]\n batch_size = round(len(normal) * 0.15)\n\n encoder, decoder = build_model(reduced_dim)\n\n inp = Input(signal_shape)\n encode = encoder(inp)\n reconstruction = decoder(encode)\n\n autoencoder = Model(inp, reconstruction)\n opt = keras.optimizers.Adam(learning_rate=0.001)\n autoencoder.compile(optimizer=opt, loss='mse')\n\n autoencoder.fit(x=normal, y=normal, epochs=num_epochs, batch_size=batch_size)\n\n if save_model:\n # save out the model\n filename = 'Working_Data/CDAE_patient_' + str(file_index) + '_iter' + str(0) + '_model'\n autoencoder.save_weights(filename, save_format = \"tf\")\n print('Model saved for patient: ' + str(file_index))\n\n # using autoencoder to encode all of the patient data\n encoded = encoder.predict(three)\n reconstruction = decoder.predict(encoded)\n\n # save reconstruction and encoded files\n reconstruction_save = \"Working_Data/reconstructed_10hb_cae_\" + str(file_index) + \"_hour2_4\" + \".npy\"\n # encoded_save = \"Working_Data/encoded_10hb_cae_\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstruction)\n # np.save(encoded_save, encoded)",
"def etl_dataset(output_path, source_path=None, **kwargs):\r\n kwargs.setdefault('adjust_for_shapefile', False)\r\n kwargs.setdefault('clean_whitespace_field_names', ())\r\n kwargs.setdefault('etl_name', os.path.basename(output_path))\r\n kwargs.setdefault('dissolve_field_names')\r\n kwargs.setdefault('extract_where_sql')\r\n kwargs.setdefault('field_name_change_map', {})\r\n kwargs.setdefault('insert_dataset_paths', ())\r\n kwargs.setdefault('insert_dicts_kwargs', ())\r\n kwargs.setdefault('insert_iters_kwargs', ())\r\n kwargs.setdefault('unique_id_field_names', ())\r\n kwargs.setdefault('use_edit_session', False)\r\n kwargs.setdefault('xy_tolerance')\r\n import arcetl\r\n with arcetl.ArcETL(kwargs['etl_name']) as etl:\r\n # Init.\r\n if source_path:\r\n etl.extract(source_path,\r\n extract_where_sql=kwargs['extract_where_sql'])\r\n else:\r\n etl.init_schema(output_path)\r\n rename_fields(etl, kwargs['field_name_change_map'])\r\n # Insert features.\r\n for func, key in (\r\n (insert_features_from_paths, 'insert_dataset_paths'),\r\n (insert_features_from_dicts, 'insert_dicts_kwargs'),\r\n (insert_features_from_iters, 'insert_iters_kwargs'),\r\n ):\r\n func(etl, kwargs[key])\r\n # Alter attributes.\r\n clean_whitespace(etl, kwargs['clean_whitespace_field_names'])\r\n # Combine features.\r\n if kwargs['dissolve_field_names']:\r\n etl.transform(arcetl.features.dissolve,\r\n dissolve_field_names=kwargs['dissolve_field_names'],\r\n tolerance=kwargs['xy_tolerance'])\r\n # Finalize attributes.\r\n update_attributes_by_unique_ids(etl, kwargs['unique_id_field_names'])\r\n if kwargs['adjust_for_shapefile']:\r\n etl.transform(arcetl.combo.adjust_for_shapefile)\r\n feature_count = etl.load(output_path,\r\n use_edit_session=kwargs['use_edit_session'])\r\n # Loading shapefiles destroys spatial indexes: restore after load.\r\n if kwargs.get('adjust_for_shapefile'):\r\n arcetl.dataset.add_index(output_path, field_names=('shape',),\r\n fail_on_lock_ok=True)\r\n return feature_count",
"def LoadEEGData(filename, EEGdevice):\n if EEGdevice == 7:\n x = 1\n elif EEGdevice == 8:\n # Read in the .easy file\n df = pd.read_csv(filename, delimiter='\\t', header=None)\n\n # Get metadata from the .info file\n fname = filename[:-5] + '.info'\n with open(fname) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n\n # Get the channel names\n channel_info = [x for x in content if 'Channel ' in x]\n channel_names = []\n for ch in range(len(channel_info)):\n channel_names.append(channel_info[ch].split(': ')[1])\n\n channel_names.append('X')\n channel_names.append('Y')\n channel_names.append('Z')\n channel_names.append('STI 014')\n channel_names.append('DateTime')\n\n # Get sampling rates\n sampling_rates = [x for x in content if 'sampling rate: ' in x]\n fs_all = []\n for freq in range(len(sampling_rates)):\n tmp = sampling_rates[freq].split(': ')[1].split(' ')[0]\n if tmp in ['N/A']:\n print('Skipping N/A')\n else:\n fs_all.append(float(sampling_rates[freq].split(': ')[1].split(' ')[0]))\n\n # Store sampling rates\n fs = fs_all[0]\n fs_accel = fs_all[1]\n\n # Assign the column names\n df.columns = channel_names\n \n # Return dataframe and sampling rates\n return df, fs, fs_accel",
"def load_data(model_path):\n x_arrays = []\n y_arrays = []\n for partition in iter_embeddings(model_path):\n h5f = h5py.File(partition, 'r')\n X = h5f[\"embeddings\"][:]\n x_arrays.append(X)\n try:\n Y = h5f[\"labels\"][:]\n y_arrays.append(Y)\n except KeyError:\n print(\"Labels not defined\")\n if len(y_arrays) > 0:\n X = np.vstack(x_arrays)\n Y = np.hstack(y_arrays)\n return X, Y\n else:\n X = np.vstack(x_arrays)\n Y = np.zeros(len(X))\n return X, Y",
"def load(self, uri):\r\n self._encoder = load_model(uri+\"_lstm_encoder.hdf5\")\r\n self._autoencoder = load_model(uri+\"_lstm_autoencoder.hdf5\")\r\n\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)))\r\n dict_options = pf[os.path.basename(uri)+\"_options.json\"]\r\n\r\n self._latent_space = dict_options['latent_space']\r\n self._input_cells = dict_options['input_cells']",
"def load_models(encoder_dict, decoder_dict, epoch):\n for name, encoder in encoder_dict.items():\n encoder.load_state_dict(torch.load('saved_models/' + str(epoch) + '/encoders/' + str(name) + '.pt'))\n for name, decoder in decoder_dict.items():\n decoder.load_state_dict(torch.load('saved_models/' + str(epoch) + '/decoders/' + str(name) + '.pt'))",
"def import_file(self):\n\n t0 = datetime.now()\n\n print(\"\\n\" + \"Importing {}...\".format(self.filepath))\n\n file = pyedflib.EdfReader(self.filepath)\n\n self.sample_rate = file.getSampleFrequencies()[0]\n self.accel_sample_rate = file.getSampleFrequencies()[1]\n\n # READS IN ECG DATA ===========================================================================================\n if self.end_offset == 0:\n print(\"Importing file from index {} to the end...\".format(self.start_offset))\n self.raw = file.readSignal(chn=0, start=self.start_offset)\n\n if self.load_accel:\n self.x = file.readSignal(chn=1, start=int(self.start_offset *\n self.accel_sample_rate / self.sample_rate))\n self.y = file.readSignal(chn=2, start=int(self.start_offset *\n self.accel_sample_rate / self.sample_rate))\n self.z = file.readSignal(chn=3, start=int(self.start_offset *\n self.accel_sample_rate / self.sample_rate))\n\n if self.end_offset != 0:\n print(\"Importing file from index {} to {}...\".format(self.start_offset,\n self.start_offset + self.end_offset))\n self.raw = file.readSignal(chn=0, start=self.start_offset, n=self.end_offset)\n\n if self.load_accel:\n self.x = file.readSignal(chn=1,\n start=int(self.start_offset * self.accel_sample_rate / self.sample_rate),\n n=int(self.end_offset * self.accel_sample_rate / self.sample_rate))\n\n self.y = file.readSignal(chn=2,\n start=int(self.start_offset * self.accel_sample_rate / self.sample_rate),\n n=int(self.end_offset * self.accel_sample_rate / self.sample_rate))\n\n self.z = file.readSignal(chn=3,\n start=int(self.start_offset * self.accel_sample_rate / self.sample_rate),\n n=int(self.end_offset * self.accel_sample_rate / self.sample_rate))\n\n # Calculates gravity-subtracted vector magnitude. Converts from mg to G\n # Negative values become zero\n if self.load_accel:\n self.vm = (np.sqrt(np.square(np.array([self.x, self.y, self.z])).sum(axis=0)) - 1000) / 1000\n self.vm[self.vm < 0] = 0\n\n print(\"ECG data import complete.\")\n\n self.starttime = file.getStartdatetime() + timedelta(seconds=self.start_offset/self.sample_rate)\n self.file_dur = round(file.getFileDuration() / 3600, 3)\n\n # Data filtering\n self.filtered = Filtering.filter_signal(data=self.raw, low_f=self.low_f, high_f=self.high_f,\n filter_type=self.f_type, sample_f=self.sample_rate, filter_order=3)\n\n # TIMESTAMP GENERATION ========================================================================================\n t0_stamp = datetime.now()\n\n print(\"\\n\" + \"Creating timestamps...\")\n\n # Timestamps\n end_time = self.starttime + timedelta(seconds=len(self.raw)/self.sample_rate)\n self.timestamps = np.asarray(pd.date_range(start=self.starttime, end=end_time, periods=len(self.raw)))\n self.epoch_timestamps = self.timestamps[::self.epoch_len * self.sample_rate]\n\n t1_stamp = datetime.now()\n stamp_time = (t1_stamp - t0_stamp).seconds\n print(\"Complete ({} seconds).\".format(round(stamp_time, 2)))\n\n t1 = datetime.now()\n proc_time = (t1 - t0).seconds\n print(\"\\n\" + \"Import complete ({} seconds).\".format(round(proc_time, 2)))",
"def import_data(inp, picks=None, array_keys={'X': 'X', 'y': 'y'}):\n if isinstance(inp, (mne.epochs.EpochsFIF, mne.epochs.BaseEpochs)):\n print('processing epochs')\n # if isinstance(picks, dict):\n # picks = mne.pick_types(inp.info, include=picks)\n inp.load_data()\n data = inp.get_data()\n events = inp.events[:, 2]\n if isinstance(picks, dict):\n print(\"Converting picks\")\n picks = mne.pick_types(inp.info, **picks)\n\n elif isinstance(inp, tuple) and len(inp) == 2:\n print('importing from tuple')\n data, events = inp\n\n elif isinstance(inp, str):\n # TODO: ADD CASE FOR RAW FILE\n fname = inp\n if fname[-3:] == 'fif':\n epochs = mne.epochs.read_epochs(fname, preload=True,\n verbose='CRITICAL')\n print(np.unique(epochs.events[:, 2]))\n events = epochs.events[:, 2]\n epochs.crop(tmin=-1., tmax=1.)\n data = epochs.get_data()\n if isinstance(picks, dict):\n print(\"Converting picks\")\n picks = mne.pick_types(epochs.info, **picks)\n\n else:\n if fname[-3:] == 'mat':\n datafile = sio.loadmat(fname)\n\n if fname[-3:] == 'npz':\n print('Importing from npz')\n datafile = np.load(fname)\n\n data = datafile[array_keys['X']]\n events = datafile[array_keys['y']]\n print('Extracting target variables from {}'\n .format(array_keys['y']))\n else:\n print(\"Dataset not found\")\n return None, None\n\n data = data.astype(np.float32)\n\n # Make sure that X is 3d here\n while data.ndim < 3:\n # (x, ) -> (1, 1, x)\n # (x, y) -> (1, x, y)\n data = np.expand_dims(data, 0)\n\n if isinstance(picks, (np.ndarray, list, tuple)):\n picks = np.asarray(picks)\n if np.any(data.shape[1] <= picks):\n raise ValueError(\"Invalid picks {} for n_channels {} \".format(\n max(len(picks), max(picks)), data.shape[1]))\n data = data[:, picks, :]\n\n return data, events",
"def read_all_odbsql_stn_withfeedback(dataset, odbfile):\n columns, kinds, tdict = make_odb_header(odbfile, dataset) \n try: \n t=time.time() \n try:\n f=gzip.open(odbfile) \n except:\n print(odbfile, 'The zipped ODB file was not found !')\n return\n \n #d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','reportype','andate','antime',\n # 'obsvalue@body','fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','collection_identifier@conv','source@hdr']\n \n # had to remove 'collection_identifier@conv' to make it work with 1, 3188, 1759, 1761 \n \n tdict['sensor@hdr']=numpy.float32\n tdict['ppcode@conv_body']=numpy.float32\n \n '''\n d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','lon@hdr','lat@hdr','seqno@hdr',\n 'obsvalue@body','source@hdr' , 'vertco_type@body']\n \n if 'fg_depar@body' in columns: # creating the colkumns for era5fb \n d=d+['fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','reportype','andate','antime']\n '''\n \n# restrict feedback to certain columns \n #for c in columns:\n # if c not in d:\n # del tdict[c]\n \n #columns=d.copy()\n \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) #nrows=1000000)\n \n \"\"\" Case where erafb is not available \"\"\"\n if 'fg_depar@body' not in columns:\n alldict['fg_depar@body']=numpy.float32(numpy.NaN)\n alldict['an_depar@body']=numpy.float32(numpy.NaN)\n alldict['biascorr@body']=numpy.float32(numpy.NaN)\n alldict['sondetype@conv']=numpy.int32(-2147483648)\n alldict['reportype']=numpy.int32(-2147483648)\n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n idx=numpy.where(numpy.logical_or(alldict.reportype.values==16045,alldict.reportype.values==16068))[0]\n if len(idx)>0:\n \n #alldict.drop(index=alldict.index[idx],inplace=True)\n y=numpy.int64(alldict['date@hdr'].values)*1000000+alldict['time@hdr'].values\n x=numpy.unique(y)\n dropindex=[]\n for i in range(1,x.shape[0]):\n if x[i]-x[i-1]<60:\n idx=numpy.where(y==x[i-1])[0]\n if idx.shape[0]>0:\n dropindex.append(idx)\n else:\n print('empty index')\n if dropindex: \n dropindex = numpy.concatenate(dropindex).ravel()\n alldict.drop(index=alldict.index[dropindex],inplace=True)\n \n #print(time.time()-t) #,sys.getsizeof(alldict)//1024//1024)\n \n #idx=numpy.where(alldict.reportype.values==16045)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n #idx=numpy.where(alldict.reportype.values==16068)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n \n alldict['source_id'] = dataset.rjust(10)\n\n for c in alldict.columns:\n \n if type(alldict[c].iloc[0]) in [str,bytes]:\n l=alldict[c].shape[0]\n slen=len(alldict[c].values[0])\n alldict[c]=numpy.array(alldict.pop(c).values,dtype='S{}'.format(slen))\n #alldict[c]=numpy.string_(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.int64:\n alldict[c]=numpy.int32(alldict[c])\n \n if type(alldict[c].iloc[0]) is 
numpy.float64:\n alldict[c]=numpy.float32(alldict[c])\n \n #print('after odb:',time.time()-t)\n \n except MemoryError:\n print('Reading ODB failed ! ' + odbfile)\n return alldict\n \n #print(odbfile,time.time()-t)#, sys.getsizeof(alldict))\n\n \n return alldict",
"def _load_ludb(self, path):\n signal, info = wfdb.rdsamp(path)\n self.fs = 500\n self.lead_match = ['I', 'II', 'III', 'aVR', 'aVL', 'aVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']\n self.raw_data = np.transpose(np.array([signal]), (2, 0, 1))\n self.symbol = []\n self.coords = []\n for lead in ['i', 'ii', 'iii', 'avr', 'avl', 'avf', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6']:\n ann_ii = wfdb.rdann(path, extension='atr_{}'.format(lead))\n symbol_1 = ann_ii.symbol\n coords_1 = ann_ii.sample\n if list(np.unique(np.array(symbol_1))) != ['(', ')', 'N', 'p', 't'] and list(np.unique(np.array(symbol_1))) != ['(', ')', 'N', 'p', 't', 'u']:\n print(\"Invalid symbols in ECG annotations.\")\n raise ValueError\n self.symbol.append(symbol_1)\n self.coords.append(coords_1)\n self.label_name = ['(', 'p', ')', '(', 'N', ')', '(', 't', ')']\n self._generate_beatlabel_from_label()",
"def load_data(self, X, loss):\n\n self.X = X\n self.tags = pd.DataFrame(loss)\n\n self.index = [_ALL]\n\n self.X_all = pd.concat([self.X_all , self.X], axis = 0, ignore_index=True)\n self.tags_all = pd.concat([self.tags_all, self.tags], axis = 0, ignore_index=True)",
"def train_ev_ea(self):\n # Set data loader.\n data_loader = self.data_loader\n \n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device) # noise vector z\n \n start_iters = 0\n\n # Start training.\n print('Start encoder_a and encoder_v training...')\n start_time = time.time()\n \n ev_ea_c_iters = self.ev_ea_c_iters\n c_pre_iters = self.c_pre_iters\n \n C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(ev_ea_c_iters))\n \n encoder_a_path = os.path.join(self.model_save_dir, '{}-encoder_a.ckpt'.format(ev_ea_c_iters))\n \n encoder_v_path = os.path.join(self.model_save_dir, '{}-encoder_v.ckpt'.format(ev_ea_c_iters))\n \n \n if os.path.exists(C_path):\n self.C.load_state_dict(torch.load(C_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(C_path))\n \n self.encoder_a.load_state_dict(torch.load(encoder_a_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(encoder_a_path))\n \n self.encoder_v.load_state_dict(torch.load(encoder_v_path, map_location=lambda storage, loc: storage))\n print('Load model checkpoints from {}'.format(encoder_v_path))\n else:\n C_pre_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(c_pre_iters))\n if os.path.exists(C_pre_path):\n self.C.load_state_dict(torch.load(C_pre_path, map_location=lambda storage, loc: storage))\n print('Load model pretrained checkpoints from {}'.format(C_pre_path))\n else:\n for i in range(0, c_pre_iters):\n # Fetch real images, attributes and labels.\n x_real, wrong_images, attributes, _, label_org = data_loader.train.next_batch(self.batch_size,10)\n\n\n x_real = x_real.to(self.device) # Input images.\n attributes = attributes.to(self.device) # Input attributes\n label_org = label_org.to(self.device) # Labels for computing classification loss.\n \n ev_x = self.encoder_v(x_real)\n cls_x = self.C(ev_x.detach())\n # Classification loss from only images for C training\n c_loss_cls = self.classification_loss(cls_x, label_org) \n # Backward and optimize.\n self.c_optimizer.zero_grad()\n c_loss_cls.backward()\n self.c_optimizer.step()\n \n if (i+1) % self.log_step == 0:\n loss = {}\n loss['c_loss_cls'] = c_loss_cls.item()\n prec1, prec5 = accuracy(cls_x.data, label_org.data, topk=(1, 5))\n loss['prec1'] = prec1\n loss['prec5'] = prec5\n log = \"C pretraining iteration [{}/{}]\".format(i+1, c_pre_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n torch.save(self.C.state_dict(), C_pre_path)\n print('Saved model pretrained checkpoints into {}...'.format(C_pre_path))\n \n for i in range(c_pre_iters, ev_ea_c_iters):\n # Fetch real images, attributes and labels.\n x_real, wrong_images, attributes, _, label_org = data_loader.train.next_batch(self.batch_size,10)\n\n\n x_real = x_real.to(self.device) # Input images.\n attributes = attributes.to(self.device) # Input attributes\n label_org = label_org.to(self.device) # Labels for computing classification loss.\n \n\n # =================================================================================== #\n # Train the domain-specific features discriminator \n # =================================================================================== #\n \n noise.normal_(0, 1)\n # Compute embedding of both images and attributes\n ea_a = self.encoder_a(attributes, noise)\n ev_x = self.encoder_v(x_real)\n \n \n ev_x_real = self.D_s(ev_x, attributes)\n ds_loss_real = -torch.mean(ev_x_real)\n \n \n ea_a_fake = self.D_s(ea_a, 
attributes)\n ds_loss_fake = torch.mean(ea_a_fake)\n \n # Compute loss for gradient penalty.\n alpha = torch.rand(ev_x.size(0), 1).to(self.device)\n ebd_hat = (alpha * ev_x.data + (1 - alpha) * ea_a.data).requires_grad_(True)\n \n ebd_inter = self.D_s(ebd_hat, attributes)\n ds_loss_gp = self.gradient_penalty(ebd_inter, ebd_hat)\n \n ds_loss = ds_loss_real + ds_loss_fake + self.lambda_gp * ds_loss_gp #+ ds_loss_realw\n #self.reset_grad_eb()\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n\n ds_loss.backward()\n self.ds_optimizer.step()\n if (i+1) % self.n_critic == 0:\n # =================================================================================== #\n # Train the encoder_a and C \n # =================================================================================== #\n ev_x = self.encoder_v(x_real)\n ev_x_real = self.D_s(ev_x, attributes)\n ev_loss_real = torch.mean(ev_x_real)\n \n cls_x = self.C(ev_x)\n c_loss_cls = self.classification_loss(cls_x, label_org)\n\n # Backward and optimize.\n ev_c_loss = ev_loss_real + c_loss_cls\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n ev_c_loss.backward()\n self.ev_optimizer.step()\n \n # =================================================================================== #\n # Train the encoder_v #\n # =================================================================================== #\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes,noise)\n ea_a_fake = self.D_s(ea_a, attributes)\n ea_loss_fake = -torch.mean(ea_a_fake)\n \n cls_a = self.C(ea_a)\n ebn_loss_cls = self.classification_loss(cls_a, label_org)\n \n\n # Backward and optimize.\n ea_loss = ea_loss_fake + ebn_loss_cls\n self.ea_optimizer.zero_grad()\n self.ds_optimizer.zero_grad()\n self.ev_optimizer.zero_grad()\n ea_loss.backward()\n self.ea_optimizer.step()\n \n # Logging.\n loss = {}\n \n loss['ds/ds_loss_real'] = ds_loss_real.item()\n loss['ds/ds_loss_fake'] = ds_loss_fake.item()\n loss['ds/ds_loss_gp'] = ds_loss_gp.item()\n \n # Print out training information.\n if (i+1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n prec1, prec5 = accuracy(cls_x.data, label_org.data, topk=(1, 5))\n loss['prec1'] = prec1\n loss['prec5'] = prec5\n prec1e, prec5e = accuracy(cls_a.data, label_org.data, topk=(1, 5))\n loss['prec1e'] = prec1e\n loss['prec5e'] = prec5e\n log = \"Encoder_a and Encoder_v Training Elapsed [{}], Iteration [{}/{}]\".format(et, i+1, ev_ea_c_iters)\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n\n \n # Save model checkpoints.\n if (i+1) % self.model_save_step == 0:\n C_path = os.path.join(self.model_save_dir, '{}-C.ckpt'.format(i+1))\n torch.save(self.C.state_dict(), C_path)\n print('Saved model checkpoints into {}...'.format(C_path))\n \n encoder_a_path = os.path.join(self.model_save_dir, '{}-encoder_a.ckpt'.format(i+1))\n torch.save(self.encoder_a.state_dict(), encoder_a_path)\n print('Saved model checkpoints into {}...'.format(encoder_a_path))\n \n encoder_v_path = os.path.join(self.model_save_dir, '{}-encoder_v.ckpt'.format(i+1))\n torch.save(self.encoder_v.state_dict(), encoder_v_path)\n print('Saved model checkpoints into {}...'.format(encoder_v_path))",
"def data_loader(edges,features,y):\n\n\n edge_index = torch.tensor(edges, dtype=torch.long)\n edge_index = edge_index.t().contiguous()\n x = torch.tensor(features.todense(), dtype=torch.float)\n\n y = torch.tensor(y)\n\n data = Data(x=x, edge_index=edge_index, y = y)\n\n return data",
"def _parse_el_example(array_feats, array_feat_types, quant_feats):\n out_example = []\n d_keys = sorted(array_feats.keys())\n for k in d_keys:\n n_feat = quant_feats[k]\n point_feat = tf.decode_raw(array_feats[k], array_feat_types[k])\n point_feat = tf.reshape(point_feat, [quant_feats[k]])\n out_example.append(point_feat)\n return tuple(out_example)"
] | [
"0.6400031",
"0.549128",
"0.5342998",
"0.5310045",
"0.52799636",
"0.51366115",
"0.5101912",
"0.5084188",
"0.5063949",
"0.5024628",
"0.5011325",
"0.5000094",
"0.49920833",
"0.49706978",
"0.4955498",
"0.4926493",
"0.4926392",
"0.4908149",
"0.48982498",
"0.4852428",
"0.48472175",
"0.4819475",
"0.47981507",
"0.4736028",
"0.47312766",
"0.47295734",
"0.4687906",
"0.4675351",
"0.46672973",
"0.46571904"
] | 0.8127291 | 0 |
Load event_id at timestamps (exog_index) for dept_store series (exog_columns). Returns event_name with shape (n_ts, n_timestamp). | def load_event_name(self, exog_index, exog_cols):
event_name = self.event_encoder.transform(self.calendar.loc[exog_index, 'event_name_1']).reshape((1,-1))
event_name = np.repeat(event_name, repeats=exog_cols.shape[0], axis=0)
return(event_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_decoder_exog(self, exog_index, exog_cols):\n decoder_exog = np.repeat(np.array(self.calendar_exog.loc[exog_index]).reshape((1,exog_index.shape[0],-1)), repeats=exog_cols.shape[0], axis=0)\n decoder_exog = np.concatenate([decoder_exog, np.array(self.snap_exog.loc[exog_index, exog_cols].T).reshape((exog_cols.shape[0], -1, 1))], axis=-1)\n decoder_exog = np.concatenate([decoder_exog, np.array(self.price_discount.loc[exog_index, exog_cols].T).reshape((exog_cols.shape[0], -1, 1))], axis=-1)\n \n #Append with id features\n id_features = self.dept_store_encoder.transform(self.agg_sales_train.loc[exog_cols, ['dept_id', 'store_id']])\n id_features = np.repeat(np.expand_dims(id_features, axis=1), repeats=exog_index.shape[0], axis=1)\n decoder_exog = np.concatenate([decoder_exog, id_features], axis=-1)\n \n return(decoder_exog.astype(np.float32))",
"def load_events(event_filename):\n\n print(\"Events\")\n\n for i, row in enumerate(open(event_filename)):\n row = row.rstrip()\n event_id, host, category, title, start_str, end_str, created_str = row.split(\"|\")\n\n host = int(host)\n start_on = datetime.strptime(start_str, \"%m/%d/%Y %H:%M\")\n end_on = datetime.strptime(end_str, \"%m/%d/%Y %H:%M\")\n created_on = datetime.strptime(created_str, \"%m/%d/%Y %H:%M\")\n\n # Instantiate event\n event = Event(host=host,\n category=category,\n title=title,\n start_on=start_on,\n end_on=end_on,\n created_on=created_on)\n \n # Add event to session\n db.session.add(event)\n\n # Commit all event instances to DB\n db.session.commit()",
"def load_events(sc, file):\n events = sc.read.csv(config.GDELT_PATH + file, sep=\"\\t\", header=False,\n schema=config.EVENTS_SCHEMA, mode=\"DROPMALFORMED\")\n events = events.withColumn(\n \"DATE\", F.to_timestamp(events.Day_DATE, \"yyyyMMdd\"))\n events = events.select(\"GLOBALEVENTID\", \"DATE\", \"Actor1Code\", \"Actor1Name\", \"Actor1CountryCode\",\n \"Actor1Type1Code\", \"Actor1Type2Code\", \"Actor1Type3Code\",\n \"Actor2Code\", \"Actor2Name\", \"Actor2CountryCode\",\n \"Actor2Type1Code\", \"Actor2Type2Code\", \"Actor2Type3Code\",\n \"EventCode\", \"GoldsteinScale\",\n \"NumMentions\", \"NumSources\", \"NumArticles\", \"AvgTone\", \"Actor1Geo_Type\", \"Actor1Geo_FullName\",\n \"Actor1Geo_CountryCode\", \"Actor2Geo_Type\", \"Actor2Geo_FullName\", \"Actor2Geo_CountryCode\",\n \"ActionGeo_Type\", \"ActionGeo_FullName\", \"ActionGeo_CountryCode\")\n return events",
"def load_events(protests_data):\n\n print \"Events\"\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate events \n Event.query.delete()\n\n # Read through each protest event all need info\n for protest in protests_data:\n event_id = protest[0]\n full_date = protest[1]\n year = protest[3]\n event_code = protest[27]\n full_location = protest[36]\n latitude = protest[39]\n longitude = protest[40]\n url = protest[57]\n\n if latitude == \"\" or longitude == \"\":\n continue\n\n event = Event(event_id=event_id,\n full_date=full_date,\n year=year, \n event_code=event_code, \n full_location=full_location,\n latitude=latitude,\n longitude=longitude,\n url=url)\n\n # Add event to session\n db.session.add(event)\n\n # Commit to database\n db.session.commit()",
"def load(self, **kwargs):\n if \"events\" in kwargs:\n events = kwargs[\"events\"] # type: pd.DataFrame\n # drop any invalid eegoffset events\n if np.any(events[\"eegoffset\"] < 0):\n warnings.warn(\"Some events have eegoffset < 0 and will be dropped.\")\n events = events[events[\"eegoffset\"] >= 0]\n else:\n if self.session is None:\n raise ValueError(\n \"A session must be specified to load an entire session of \"\n \"EEG data!\"\n )\n\n # Because of reasons, PS4 experiments may or may not end with a 5.\n if self.experiment.startswith(\"PS4\") and self.experiment.endswith(\"5\"):\n experiment = self.experiment[:-1]\n else:\n experiment = self.experiment\n\n finder = PathFinder(\n subject=self.subject,\n experiment=experiment,\n session=self.session,\n rootdir=self.rootdir,\n )\n\n events_file = finder.find(\"task_events\")\n all_events = EventReader.fromfile(\n events_file, self.subject, self.experiment, self.session\n )\n\n # Select only a single event with a valid eegfile just to get the\n # filename\n valid = all_events[\n (all_events[\"eegfile\"].notnull())\n & (all_events[\"eegfile\"].str.len() > 0)\n ]\n events = pd.DataFrame(valid.iloc[0]).T.reset_index(drop=True)\n\n # Set relative start and stop times if necessary. If they were\n # already specified, these will allow us to subset the session.\n if \"rel_start\" not in kwargs:\n kwargs[\"rel_start\"] = 0\n if \"rel_stop\" not in kwargs:\n kwargs[\"rel_stop\"] = -1\n\n if not len(events):\n raise ValueError(\n \"No events found! Hint: did filtering events \" \"result in at least one?\"\n )\n elif len(events[\"subject\"].unique()) > 1:\n raise ValueError(\n \"Loading multiple sessions of EEG data requires \"\n \"using events from only a single subject.\"\n )\n\n if \"rel_start\" not in kwargs or \"rel_stop\" not in kwargs:\n raise exc.IncompatibleParametersError(\n \"rel_start and rel_stop must be given with events\"\n )\n\n # info = EEGMetaReader.fromfile(path, subject=self.subject)\n # sample_rate = info[\"sample_rate\"]\n # dtype = info[\"data_format\"]\n\n self.clean = kwargs.get(\"clean\", None)\n self.scheme = kwargs.get(\"scheme\", None)\n\n events = self._eegfile_absolute(events.copy())\n return self.as_timeseries(events, kwargs[\"rel_start\"], kwargs[\"rel_stop\"])",
"def load_series(country: str, event: str, \n source: str):\n tck = f\"OWID.{country.replace(' ','_')}_{event}\".upper()\n desc = f\"{event.replace('-', ' ')}\".upper()\n try:\n add_series(tck, desc, country.upper(), source)\n except:\n print(f\"Series {tck} failed to be added!\")",
"def load_events():\n\n print('load_events')\n\n Event.query.delete()\n\n for row in open(\"seed_data/events.csv\"):\n row = row.rstrip()\n private, \\\n host_id, \\\n venue, \\\n title, \\\n time_begin, \\\n time_end, \\\n max_cap, \\\n url = row.split(',')\n\n private = int(private)\n host_id = int(host_id)\n\n ven = Venue.query.filter_by(name=venue).first()\n\n begin_at = datetime.strptime(time_begin, \"%y-%m-%d %H:%M:%S\")\n\n end_at = datetime.strptime(time_end, \"%y-%m-%d %H:%M:%S\")\n\n evt = Event(private=private,\n host_id=host_id,\n venue_id=ven.id,\n title=title,\n begin_at=begin_at,\n end_at=end_at,\n max_cap=max_cap,\n url=url)\n\n db.session.add(evt)\n\n db.session.commit()",
"def load_evictions_data(data_path, date_col, min_year, max_year, create_geoid = True):\n ev_raw = (\n pd.read_csv(\n data_path,\n parse_dates=[date_col],\n infer_datetime_format=True,\n )\n .dropna(how=\"all\")\n .drop_duplicates()\n )\n ev_raw[\"year\"] = ev_raw[date_col].dt.year\n ev_df = ev_raw[(ev_raw.year >= min_year) & (ev_raw.year <= max_year)]\n ev_df[\"year\"] = ev_df[\"year\"].astype(int)\n ev_df[\"month\"] = get_month_as_str_col(ev_df, date_col)\n \n if create_geoid == True:\n # Convert to strings\n ev_df[\"state_code\"] = ev_df[\"state_code\"].astype(str).replace(r'\\.0', '', regex = True) \n ev_df[\"county_code\"] = ev_df[\"county_code\"].astype(str).replace(r'\\.0', '', regex = True) \n ev_df[\"tract_code\"] = ev_df[\"tract_code\"].astype(str).replace(r'\\.0', '', regex = True)\n # Add zeroes as necessary\n ev_df[\"state_code\"] = ev_df[\"state_code\"].apply(lambda x: (((2-len(x)) * \"0\") + x) if len(x) < 2 else x)\n ev_df[\"county_code\"] = ev_df[\"county_code\"].apply(lambda x: (((3-len(x)) * \"0\") + x) if len(x) < 3 else x)\n ev_df[\"tract_code\"] = ev_df[\"tract_code\"].apply(lambda x: (((6-len(x)) * \"0\") + x) if len(x) < 6 else x)\n # Finally, concat everything\n ev_df[\"GEOID\"] = (\n ev_df[\"state_code\"] + ev_df[\"county_code\"] + ev_df[\"tract_code\"]\n )\n \n return ev_df",
"def load_ae(self, year):\n ae_paths = list(pathlib.Path(config.AE_DIR).glob(f'{year}*ae.txt'))\n assert len(ae_paths) == 1, (f'No AE files found.\\nae_dir={config.AE_DIR}, '\n f'year={year}, ae_paths={ae_paths}')\n ae_data = pd.read_csv(ae_paths[0], sep=' ', index_col=0, \n parse_dates=True, comment='#', \n names=['dateTime', 'AE'])\n return ae_data",
"def events(path, samples=False, ds=None, trigger='trigger', t_edf='t_edf'):\n edf = Edf(path, samples=samples)\n if ds is None:\n ds = edf.get_triggers(trigger, t_edf)\n else:\n if 'edf' in ds.info:\n raise ValueError(\"ds.info already contains 'edf' entry.\")\n if trigger in ds:\n edf.assert_trigger_match(ds)\n else:\n ds[trigger] = Var(edf.triggers['Id'])\n ds.info['edf'] = edf\n edf.add_t_to(ds, trigger, t_edf)\n\n return ds",
"def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n \n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n #data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n #data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data",
"def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n\n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n # data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n # data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n #data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data",
"def import_events(self, data, timezone_offset, dataset_version=None):\n if self.dataset_id or dataset_version:\n if self.dataset_id and dataset_version:\n endpoint = 'import-events'\n base_url = self.BETA_IMPORT_API\n else:\n Mixpanel.LOGGER.warning('Must supply both, dataset_version and dataset_id in init, or neither!')\n return\n else:\n endpoint = 'import'\n base_url = self.IMPORT_API\n\n self._import_data(data, base_url, endpoint, timezone_offset=timezone_offset, dataset_id=self.dataset_id,\n dataset_version=dataset_version)",
"def test_load_events(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_testA.evt'))\n hen.read_events.main(command.split())\n new_filename = self.first_event_file\n ev = hen.io.load_events(new_filename)\n assert hasattr(ev, 'header')\n assert hasattr(ev, 'gti')",
"def etl(filename: str, schema_filename: str) -> None:\n raw_df = extract_data(filename, schema_filename)\n clean_df = transform_clean_data(raw_df)\n user_activities_df = transform_data_for_user_activities(clean_df)\n agg_events_df = transform_data_for_agg_events(clean_df)\n load_data(user_activities_df)\n load_data(agg_events_df)",
"def init_events():\n db = get_db()\n\n from emapp.db import init_db\n init_db()\n for event in current_app.config['PREDEFINED_EVENTS']:\n db.execute(('INSERT INTO event '\n '(name, location, start_time, end_time)'\n 'VALUES (?,?,?,?)'),\n (event['name'], event['location'],\n event['start_time'], event['end_time']))\n db.commit()",
"def load_dataset(path):\n if '.h5' in str(path):\n dataframe = pd.read_hdf(path)\n elif '.pkl' in str(path):\n dataframe = pd.read_pickle(path)\n else:\n print('Wrong file')\n sys.exit()\n\n # Make it multiindex\n dataframe['event'] = dataframe.index\n dataframe = dataframe.set_index(['sample_nr', 'event'])\n dataframe = dataframe.reset_index('event', drop=True)\n dataframe = dataframe.set_index(dataframe.groupby(level=0).cumcount().rename('event'), append=True)\n\n return dataframe",
"def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)",
"def load_segment(self):\n \n data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)\n \n # Check cycle length against 5 minute duration minimum\n cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()\n self.data = data\n \n diff = data.index.to_series().diff()[1:2]\n s_freq = 1000000/diff[0].microseconds\n\n self.metadata['file_info']['start_time'] = str(data.index[0])\n self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}\n self.s_freq = s_freq\n\n print('EEG successfully imported.')",
"def adbGetEvent( self, pars ):\n \n\t( id1, id2, id3, flag ) = pars\n\n\tif id3 == 1:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t if ny <= nx:\n\t\t\ty.append( e[j,2] )\n\t\t\tny\t+= 1\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t x.append( e[j,2] )\n\t\t nx\t+= 1\n\n\telif id3 == 2:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t if ny <= nx:\n\t\t\ty.append( e[j,2] )\n\t\t\tny\t+= 1\n\t\t else:\n\t\t\ty[-1]\t= e[j,2]\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t x.append( e[j,2] )\n\t\t nx\t+= 1\n\n\telif id3 == 3:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t y.append( e[j,2] )\n\t\t ny\t+= 1\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t n\t= ny - nx\n\t\t dx\t= 1. / n\n\t\t for i in range(1,n+1):\n\t\t\tx.append( e[j,2] - dx * (n-i) )\n\t\t nx\t+= n\n\n\telse:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= self.adb.get( 'steps' )\n\t y\t= []\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t y.append( e[j,2] )\n\n\tif flag == 1:\n\t y\t= numarray.array( y, 'd' )\n\t y\t= numarray.maximum( y, 1.e-20 )\n\n steps = self.adb.get( 'steps' )\n times = self.adb.get( 'times' )\n tIncs = self.adb.get( 'timeIncs' )\n nSteps = len( steps )\n xt = []\n i = 0\n for j in range(len(x)):\n while i < nSteps and steps[i] < x[j]:\n i += 1\n t = times[i] + (x[j] - steps[i]) * tIncs[i]\n xt.append( t )\n \n\tx \t= numarray.array( x ).flat\n\txt \t= numarray.array( xt ).flat\n\ty \t= numarray.array( y ).flat\n return( x, xt, y )",
"def get_db_data(selected_columns, app_id, y, m, d):\n # TODO edit string based on what query to run\n query_string = \"select {} from hive.dfs_prod.eo_custom_event where app_id={} and y={} and m={} and d={}\".format(\n ', '.join(selected_columns), app_id, y, m, d)\n return pd.read_sql(query_string, con=dfs)",
"def _gen_id(event):\n eid = np.sort(np.unique(event))\n iid = {}\n for i in xrange(len(eid)):\n iid[eid[i]] = i\n return len(eid), eid, iid",
"def load_data_part(fname):\n if \"_data\" not in fname:\n return None\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data', '_events')\n # read event file\n labels = pd.read_csv(events_fname)\n clean = data.drop(['id'], axis=1) # remove id\n labels = labels.drop(['id'], axis=1) # remove id\n return clean, labels",
"def fill_event(event):\n is_external = _inspect_descriptor(event.descriptor)\n for data_key, (value, timestamp) in event.data.items():\n if is_external[data_key]:\n # Retrieve a numpy array from filestore\n event.data[data_key][0] = retrieve(value)",
"def on_import(self, event=None):\n if event is not None:\n event.Skip()\n data_id, theory_id, state_id = self.set_data_helper()\n temp = data_id + state_id\n self.parent.set_data(data_id=temp, theory_id=theory_id)",
"def get_events(filename):\n\tdf = pd.read_csv(filename)\n\t#get date from first entry (row) of DateUTC column\n\tdf['date'] = df['DateUTC<br />'][0].split(' ')[0]\n\t\n\t\n\t#drop the following columns\n\tdropLabels = ['FullMetar', 'DateUTC<br />', \\\n\t'Wind Direction','Gust SpeedMPH', \\\n\t'WindDirDegrees', 'Sea Level PressureIn', 'Dew PointF', \\\n\t'TemperatureF', 'Humidity','VisibilityMPH', \\\n 'Wind SpeedMPH', 'PrecipitationIn']\n\n\tdf.drop(labels=dropLabels,axis=1,inplace=True)\n\t\n\t#add hour column\n\ttimeLabel = df.columns.values[0] \n\tdf['Hour'] = pd.to_datetime(df[timeLabel]).dt.hour\n\t#drop timelabel column since we don't use anything beyond hour\n\tdf.drop(labels=timeLabel,axis=1,inplace=True)\n\n\treturn df",
"def test_ingest_with_column_names():\n schema = [\"foo\", \"bar\"]\n\n data = [{\"foo\": 1, \"bar\": 2}, {\"foo\": 10, \"bar\": 20}]\n\n converted_data = client.ingest_data(data, schema)\n assert converted_data.to_pydict() == {'foo': [1, 10], 'bar': [2, 20]}",
"def load_dim_date(cur,table):\n logging.info(f\"loading {table} table\")\n cur.execute(etl.insert_dim_date)\n logging.info(f\"data loaded in table {table}\")",
"def event_key(event_name=DEFAULT_EVENT):\n return ndb.Key('EventModel', event_name)",
"def test_observation_info(dl1_file):\n from ctapipe.io.tableloader import TableLoader\n\n with TableLoader(dl1_file, load_observation_info=True) as table_loader:\n table = table_loader.read_telescope_events()\n assert \"subarray_pointing_lat\" in table.colnames"
] | [
"0.6103134",
"0.5990384",
"0.58281827",
"0.57115626",
"0.5672822",
"0.56140965",
"0.5546844",
"0.5339346",
"0.51344454",
"0.5069847",
"0.5052246",
"0.5048571",
"0.4947652",
"0.49333274",
"0.48763838",
"0.485143",
"0.4815527",
"0.480106",
"0.4793264",
"0.4761636",
"0.47536895",
"0.4752796",
"0.47420913",
"0.4729457",
"0.4727619",
"0.47245765",
"0.47155333",
"0.46947694",
"0.46849102",
"0.46839148"
] | 0.63991827 | 0 |
Build Vanilla LSTM encoderdecoder network. | def build_model(self):
num_layers, num_units, input_window, output_window, encoder_exog_size, decoder_exog_size, dropout_rate, l2_regu =\
self.num_layers, self.num_units, self.input_window, self.output_window, self.encoder_exog_size, self.decoder_exog_size, self.dropout_rate, self.l2_regu
        #Define the embedding layer for event_name here (an item_id embedding could be added the same way), in case the embeddings are shared by both encoder and decoder.
event_embed = Embedding(input_dim=31, output_dim=8, mask_zero=False, name='event_embed')
#Define encoder model
encoder_input = Input(shape=(input_window, 1)) #endog input for encoder
encoder_exog_input = Input(shape=(input_window, encoder_exog_size))
encoder_concat_input = Concatenate()([encoder_input, encoder_exog_input])
encoder_lstm_res = {}
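        #Stack num_layers LSTM layers; keep each layer's sequence outputs and final (h, c) states for the decoder.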
for i in range(num_layers):
encoder_lstm = LSTM(num_units[i], kernel_regularizer=l2_regu, recurrent_regularizer=l2_regu, dropout=dropout_rate, recurrent_dropout=0,
return_sequences=True, return_state=True, name='encoder_lstm_{}'.format(i))
if (i == 0):
encoder_lstm_outputs, encoder_lstm_state_h, encoder_lstm_state_c = encoder_lstm(encoder_concat_input)
else:
encoder_lstm_outputs, encoder_lstm_state_h, encoder_lstm_state_c = encoder_lstm(encoder_lstm_res[(i-1, 'outputs')])
encoder_lstm_res[(i, 'model')] = encoder_lstm
encoder_lstm_res[(i, 'outputs')] = encoder_lstm_outputs
encoder_lstm_res[(i, 'states')] = [encoder_lstm_state_h, encoder_lstm_state_c]
#Define decoder model
        #endog input for decoder. It is always a vector of 0s, meaning that the model is trained unconditionally, without using any forecast information.
decoder_input = Input(shape=(output_window, 1))
decoder_exog_input = Input(shape=(output_window, decoder_exog_size))
decoder_event_input = Input(shape=(output_window,))
decoder_event_embed = event_embed(decoder_event_input)
decoder_concat_input = Concatenate()([decoder_input, decoder_exog_input, decoder_event_embed])
decoder_lstm_res = {}
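        #Each decoder LSTM layer is initialized with the final (h, c) states of the corresponding encoder layer (seq2seq state hand-off).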
for i in range(num_layers):
decoder_lstm = LSTM(num_units[i], kernel_regularizer=l2_regu, recurrent_regularizer=l2_regu, dropout=dropout_rate, recurrent_dropout=0,
return_sequences=True, return_state=True, name='decoder_lstm_{}'.format(i))
if (i == 0):
decoder_lstm_outputs, _, _ = decoder_lstm(decoder_concat_input, initial_state=encoder_lstm_res[(i, 'states')])
else:
decoder_lstm_outputs, _, _ = decoder_lstm(decoder_lstm_res[(i-1, 'outputs')], initial_state=encoder_lstm_res[(i, 'states')])
decoder_lstm_res[(i, 'model')] = decoder_lstm
decoder_lstm_res[(i, 'outputs')] = decoder_lstm_outputs
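        #Project every decoder timestep to a single forecast value.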
decoder_output = Dense(1, activation=None, kernel_regularizer=l2_regu, name='decoder_output')(decoder_lstm_outputs)
        #Training mode of the model: encoder and decoder are trained jointly, end to end.
model = Model(inputs = [encoder_input, encoder_exog_input, decoder_input, decoder_exog_input, decoder_event_input], outputs = decoder_output)
adam = Adam(learning_rate=self.lr)
model.compile(optimizer=adam, loss='mse')
        model.summary()
self.model = model
return(model) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __build_mol_to_latent_model(self):\n\n # Input tensor (MANDATORY)\n encoder_inputs = Input(shape=self.input_shape, name=\"Encoder_Inputs\")\n\n x = encoder_inputs\n\n # The two encoder layers, number of cells are halved as Bidirectional\n encoder = Bidirectional(\n LSTM(\n self.lstm_dim // 2,\n return_sequences=True,\n return_state=True, # Return the states at end of the batch\n name=\"Encoder_LSTM_1\",\n )\n )\n\n x, state_h, state_c, state_h_reverse, state_c_reverse = encoder(x)\n\n if self.bn:\n x = BatchNormalization(momentum=self.bn_momentum, name=\"BN_1\")(x)\n\n encoder2 = Bidirectional(\n LSTM(\n self.lstm_dim // 2,\n return_state=True, # Return the states at end of the batch\n name=\"Encoder_LSTM_2\",\n )\n )\n\n _, state_h2, state_c2, state_h2_reverse, state_c2_reverse = encoder2(x)\n\n # Concatenate all states of the forward and the backward LSTM layers\n states = Concatenate(axis=-1, name=\"Concatenate_1\")(\n [\n state_h,\n state_c,\n state_h2,\n state_c2,\n state_h_reverse,\n state_c_reverse,\n state_h2_reverse,\n state_c2_reverse,\n ]\n )\n\n if self.bn:\n states = BatchNormalization(momentum=self.bn_momentum, name=\"BN_2\")(states)\n\n # A non-linear recombination\n neck_relu = Dense(\n self.codelayer_dim, activation=self.h_activation, name=\"Codelayer_Relu\"\n )\n neck_outputs = neck_relu(states)\n\n if self.bn:\n neck_outputs = BatchNormalization(\n momentum=self.bn_momentum, name=\"BN_Codelayer\"\n )(neck_outputs)\n\n # Add Gaussian noise to \"spread\" the distribution of the latent variables during training\n neck_outputs = GaussianNoise(self.noise_std, name=\"Gaussian_Noise\")(\n neck_outputs\n )\n\n # Define the model\n self.__mol_to_latent_model = Model(encoder_inputs, neck_outputs)\n\n # Name it!\n self.mol_to_latent_model.name = \"mol_to_latent_model\"",
"def build_train_network(self):\n # Inputs\n vid_input = tf.placeholder(tf.float32, [None, self.num_frame, self.feat_size])\n caption_input = tf.placeholder(tf.int32, [None, self.sent_len])\n caption_mask = tf.placeholder(tf.float32, [None, self.sent_len])\n\n batch_size = tf.shape(vid_input)[0]\n # State variables\n v_LSTM_states = (tf.zeros((batch_size, self.v_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.v_LSTM_cell.state_size[1])))\n t_LSTM_states = (tf.zeros((batch_size, self.t_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.t_LSTM_cell.state_size[1])))\n padding = tf.zeros([batch_size, self.state_size])\n\n loss = 0.0\n # Encoder network\n # To ensure reuse is False when calling Adam \n with tf.variable_scope(tf.get_variable_scope()):\n for idx in range(self.num_frame):\n if idx > 0:\n tf.get_variable_scope().reuse_variables()\n\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(vid_input[:,idx,:], v_LSTM_states)\n with tf.variable_scope('t_LSTM'):\n _, t_LSTM_states = self.t_LSTM_cell(tf.concat([padding, v_output], 1), t_LSTM_states)\n \n null_video = tf.zeros([batch_size, self.feat_size])\n for idx in range(self.sent_len):\n tf.get_variable_scope().reuse_variables()\n # pdb.set_trace() \n # Decoder network\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(null_video, v_LSTM_states) \n # Lookup word embedding for each word in current time frame\n caption_embed = tf.nn.embedding_lookup(self.word_embed, caption_input[:,idx])\n with tf.variable_scope('t_LSTM'):\n t_output, t_LSTM_states = self.t_LSTM_cell(tf.concat([v_output, caption_embed], 1), t_LSTM_states)\n logit_output = tf.nn.xw_plus_b(t_output, self.t_output_W, self.t_output_b)\n # Label processing\n caption_onehot = tf.one_hot(caption_input[:,idx], self.dict_size)\n # Calculate loss\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit_output, labels=caption_onehot)\n cross_entropy = cross_entropy * caption_mask[:,idx]\n\n loss += tf.reduce_mean(cross_entropy)\n # Average loss\n # loss = loss / tf.reduce_sum(caption_mask)\n # pdb.set_trace()\n train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)\n # train_op = None\n\n tf.add_to_collection('x', vid_input)\n tf.add_to_collection('y', caption_input)\n tf.add_to_collection('y_mask', caption_mask)\n tf.add_to_collection('loss', loss)\n tf.add_to_collection('train_op', train_op)\n \n return dict(\n x = vid_input,\n y = caption_input,\n y_mask = caption_mask,\n loss = loss,\n train_op = train_op\n )",
"def build_test_network(self):\n # Inputs\n vid_input = tf.placeholder(tf.float32, [None, self.num_frame, self.feat_size])\n batch_size = tf.shape(vid_input)[0]\n \n # State variables\n v_LSTM_states = (tf.zeros((batch_size, self.v_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.v_LSTM_cell.state_size[1])))\n t_LSTM_states = (tf.zeros((batch_size, self.t_LSTM_cell.state_size[0])),\n tf.zeros((batch_size, self.t_LSTM_cell.state_size[1])))\n padding = tf.zeros([batch_size, self.state_size])\n\n outputs = [] \n loss = 0.0\n # Encoder network\n # vid_input_list = tf.split(vid_input, self.num_frame, 1)\n with tf.variable_scope(tf.get_variable_scope()):\n for idx in range(self.num_frame):\n if idx > 0:\n tf.get_variable_scope().reuse_variables()\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(vid_input[:,idx,:], v_LSTM_states)\n with tf.variable_scope('t_LSTM'):\n _, t_LSTM_states = self.t_LSTM_cell(tf.concat([padding, v_output], 1), t_LSTM_states)\n \n null_video = tf.zeros([batch_size, self.feat_size])\n for idx in range(self.sent_len):\n tf.get_variable_scope().reuse_variables()\n if idx == 0:\n caption_embed = tf.nn.embedding_lookup(self.word_embed, tf.ones([batch_size], dtype=tf.int64))\n # Decoder network\n with tf.variable_scope('v_LSTM'):\n v_output, v_LSTM_states = self.v_LSTM_cell(null_video, v_LSTM_states) \n # pdb.set_trace()\n with tf.variable_scope('t_LSTM'):\n t_output, t_LSTM_states = self.t_LSTM_cell(tf.concat([caption_embed, v_output], 1), t_LSTM_states)\n logit_output = tf.nn.xw_plus_b(t_output, self.t_output_W, self.t_output_b)\n \n # Produce output\n # pdb.set_trace()\n max_prob_index = tf.argmax(logit_output, 1)\n outputs.append(max_prob_index)\n\n caption_embed = tf.nn.embedding_lookup(self.word_embed, max_prob_index)\n # caption_embed = tf.expand_dims(caption_embed, 0)\n \n return dict(\n x = vid_input,\n outputs = outputs\n )",
"def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 64*64*3, bias=False), nn.ReLU(),\n View((-1, 3, 64, 64)),\n )",
"def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.SELU(),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.SELU(),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.SELU(),\n nn.ConvTranspose2d(32, 3, 4, 2, 1), # B, nc, 64, 64\n nn.ReLU()\n )",
"def build_model(self):\n # Define model inputs for the encoder/decoder stack\n x_enc = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\"x_enc\")\n x_dec = Input(shape=(self.seq_len_out, self.output_feature_amount), name=\"x_dec\")\n\n # Add noise\n x_dec_t = GaussianNoise(0.2)(x_dec)\n\n input_conv2 = Conv1D(filters=64, kernel_size=5, strides=2, activation='relu', padding='same')\n input_conv1 = Conv1D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same', name=\"last_conv_layer\")\n\n input_conv2_out = input_conv2(x_enc)\n input_conv1_out = input_conv1(input_conv2_out)\n\n # Define the encoder GRU, which only has to return a state\n encoder_gru = GRU(self.state_size, return_sequences=True, return_state=True, name=\"encoder_gru\")\n encoder_out, encoder_state = encoder_gru(input_conv1_out)\n\n # Decoder GRU\n decoder_gru = GRU(self.state_size, return_state=True, return_sequences=True,\n name=\"decoder_gru\")\n # Use these definitions to calculate the outputs of out encoder/decoder stack\n dec_intermediates, decoder_state = decoder_gru(x_dec_t, initial_state=encoder_state)\n\n # Define the attention layer\n attn_layer = AttentionLayer(name=\"attention_layer\")\n attn_out, attn_states = attn_layer([encoder_out, dec_intermediates])\n\n # Concatenate decoder and attn out\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([dec_intermediates, attn_out])\n\n # Define the dense layer\n dense = Dense(self.output_feature_amount, activation='linear', name='output_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Define the encoder/decoder stack model\n encdecmodel = tsModel(inputs=[x_enc, x_dec], outputs=decoder_pred)\n\n # Define the separate encoder model for inferencing\n encoder_inf_inputs = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\"encoder_inf_inputs\")\n\n input_conv2_inf = input_conv2(encoder_inf_inputs)\n input_conv1_inf_out = input_conv1(input_conv2_inf)\n\n encoder_inf_out, encoder_inf_state = encoder_gru(input_conv1_inf_out)\n encoder_model = tsModel(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n # Define the separate encoder model for inferencing\n decoder_inf_inputs = Input(shape=(1, self.output_feature_amount), name=\"decoder_inputs\")\n encoder_inf_states = Input(shape=(encdecmodel.get_layer('last_conv_layer').output_shape[1], self.state_size), name=\"decoder_inf_states\")\n decoder_init_state = Input(shape=(self.state_size,), name=\"decoder_init\")\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = tsModel(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return encoder_model, decoder_model, encdecmodel",
"def _configure_network(self):\r\n def repeat_vector(args):\r\n [layer_to_repeat, sequence_layer] = args\r\n return RepeatVector(K.shape(sequence_layer)[1])(layer_to_repeat)\r\n\r\n encoder_input = Input(shape=(None, self._input_cells))\r\n encoder_output = LSTM(self._latent_space)(encoder_input)\r\n\r\n # Before feeding the decoder, the encoded data must be repeated as many times as time steps in the input data,\r\n # but the decoder does not know beforehand how many timesteps are fed into the autoencoder.\r\n # Check https://github.com/keras-team/keras/issues/7949 for the solution to this. Basically we take it\r\n # dynamically from the input shape with a Lambda layer for the repeat vector.\r\n # The input shape may vary per sample.\r\n\r\n decoder_input = Lambda(repeat_vector, output_shape=(None, self._latent_space))([encoder_output, encoder_input])\r\n\r\n decoder_output = LSTM(self._input_cells, return_sequences=True)(decoder_input)\r\n\r\n self._autoencoder = Model(encoder_input, decoder_output)\r\n self._encoder = Model(encoder_input, encoder_output)\r\n\r\n self._autoencoder.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"accuracy\"])",
"def add_model(self):\n\n b_sz = tf.shape(self.encoder_input)[0]\n tstp_en = tf.shape(self.encoder_input)[1]\n tstp_de = tf.shape(self.decoder_input)[1]\n\n encoder_dropout_input = tf.nn.dropout(self.encoder_input, self.ph_dropout, name='encoder_Dropout')\n decoder_dropout_input = tf.nn.dropout(self.decoder_input, self.ph_dropout, name='decoder_Dropout')\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size)\n \"\"\"#(batch_size, num_sentence, hidden_size)\"\"\"\n encoder_outputs, state = tf.nn.dynamic_rnn(lstm_cell, encoder_dropout_input, self.encoder_tstps, \n dtype=tf.float32, swap_memory=True, time_major=False, scope = 'rnn_encode')\n self.state=state\n with tf.variable_scope('decoder') as vscope:\n decoder_outputs, _ = tf.nn.dynamic_rnn(lstm_cell, decoder_dropout_input, self.decoder_tstps, #(batch_size, time_steps, hidden_size)\n initial_state=state, dtype=tf.float32, swap_memory=True, time_major=False, scope='rnn_decode')\n \n with tf.variable_scope('rnn_decode'):\n #tf.reshape(self.ph_decoder_label, shape=(-1, 1)) #(batch_size*time_steps, 1)\n encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_0') #(batch_size*time_steps, hidden_size)\n decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_1') #(batch_size*time_steps_1, hidden_size)\n encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size, #(#(batch_size*time_steps, hidden_size))\n bias=False, scope='Ptr_W1')\n decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size, #(#(batch_size*time_steps, hidden_size))\n bias=False, scope='Ptr_W2')\n encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs), name='add_model_reshape_2')\n decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs), name='add_model_reshape_3')\n \n encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1) #(b_sz, 1, tstp_en, h_sz)\n decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2) #(b_sz, tstp_de, 1, h_sz)\n \n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, tstp_de, tstp_en, h_sz)\n \n after_add_reshape = tf.reshape(after_add, shape=(-1, self.config.hidden_size), name='add_model_reshape_4')\n \n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*tstp_de*tstp_en, 1)\n bias=False, scope='Ptr_v')\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=tf.shape(after_add)[:3], name='add_model_reshape_5') #(b_sz, tstp_de, tstp_en)\n\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\n maxlen=tf.shape(after_add_linear)[-1], dtype=tf.bool)\n en_length_mask = tf.expand_dims(en_length_mask, 1) #(b_sz, 1, tstp_en)\n en_length_mask = tf.tile(en_length_mask, [1, tstp_de, 1])\n\n logits = tf.select(en_length_mask, after_add_linear,\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_de, tstp_en)\n \n flat_logits = tf.reshape(logits, shape=[b_sz * tstp_de, tstp_en])\n\n vscope.reuse_variables()\n outputs_ta, _, _ = self.decoder(lstm_cell, state, encoder_outputs, encoder_dropout_input, scope='rnn_decode')\n outputs = outputs_ta.pack() #(time_steps, batch_size)\n outputs = tf.transpose(outputs, [1, 0]) #(batch_size, time_steps)\n \n state = tf.concat(1, state)\n lstm_cell = 
tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size, state_is_tuple=False)\n beam_outputs, beam_seq, beam_prob = self.beam_decoder(lstm_cell, state, encoder_outputs, \n encoder_dropout_input, beam_size=self.config.beam_size, scope='rnn_decode')\n \n self.logits = logits\n self.encoder_outputs = encoder_outputs\n self.beam_seq = beam_seq\n self.beam_prob = beam_prob\n return flat_logits, outputs, beam_outputs",
"def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)",
"def _build_model(self):\n\n # Build Encoder\n inputs = Input(shape=(self.n_features_,))\n # Input layer\n layer = Dense(self.n_features_, activation=self.hidden_activation)(\n inputs)\n # Hidden layers\n for neurons in self.encoder_neurons:\n layer = Dense(neurons, activation=self.hidden_activation,\n activity_regularizer=l2(self.l2_regularizer))(layer)\n layer = Dropout(self.dropout_rate)(layer)\n # Create mu and sigma of latent variables\n z_mean = Dense(self.latent_dim)(layer)\n z_log = Dense(self.latent_dim)(layer)\n # Use parametrisation sampling\n z = Lambda(self.sampling, output_shape=(self.latent_dim,))(\n [z_mean, z_log])\n # Instantiate encoder\n encoder = Model(inputs, [z_mean, z_log, z])\n if self.verbosity >= 1:\n encoder.summary()\n\n # Build Decoder\n latent_inputs = Input(shape=(self.latent_dim,))\n # Latent input layer\n layer = Dense(self.latent_dim, activation=self.hidden_activation)(\n latent_inputs)\n # Hidden layers\n for neurons in self.decoder_neurons:\n layer = Dense(neurons, activation=self.hidden_activation)(layer)\n layer = Dropout(self.dropout_rate)(layer)\n # Output layer\n outputs = Dense(self.n_features_, activation=self.output_activation)(\n layer)\n # Instatiate decoder\n decoder = Model(latent_inputs, outputs)\n if self.verbosity >= 1:\n decoder.summary()\n # Generate outputs\n outputs = decoder(encoder(inputs)[2])\n\n # Instantiate VAE\n vae = Model(inputs, outputs)\n vae.add_loss(self.vae_loss(inputs, outputs, z_mean, z_log))\n vae.compile(optimizer=self.optimizer)\n if self.verbosity >= 1:\n vae.summary()\n return vae",
"def build_decoder(opt, embeddings):\n return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings)",
"def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')",
"def runLSTMEncoder(self, encoder, num_layers):\n inputs_ph = tf.placeholder(\n dtype=tf.float32,\n shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))\n inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))\n\n outputs, states = encoder.encode(\n mode=tf.estimator.ModeKeys.TRAIN,\n sequence_inputs=inputs_ph,\n sequence_length=inputs_length_ph)\n\n num_bi_layers = 1\n num_uni_layers = num_layers - num_bi_layers\n\n if num_uni_layers == 1:\n # states is a tuple of (states_bi_bw, states_uni)\n # states_bi_bw is a tuple (states_bi_bw)\n # states_uni is a tuple of length num_uni_layers\n states_bi_bw, states_uni = states\n self.assertEqual(1, len(states_bi_bw))\n self.assertEqual(num_uni_layers, len(states_uni))\n # states_bi_bw[0] is a tuple of (states_c, states_h)\n self.assertEqual(2, len(states_bi_bw[0]))\n\n # convert states from tuple to tensor\n states_list = [states_bi_bw[0]]\n for i in range(num_uni_layers):\n states_list.append(states_uni[i])\n states = tf.convert_to_tensor(states_list)\n else:\n # states is a tuple of (states_uni) of length num_uni_layers\n states_uni = states\n self.assertEqual(num_uni_layers, len(states_uni))\n states_list = []\n for i in range(num_uni_layers):\n states_list.append(states_uni[i])\n states = tf.convert_to_tensor(states_list)\n\n inputs, inputs_length = common_utils.get_encoder_test_inputs()\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n outputs, states = sess.run(\n [outputs, states],\n feed_dict={\n inputs_ph: inputs,\n inputs_length_ph: inputs_length\n })\n\n self.assertAllEqual(\n [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],\n outputs.shape)\n\n if num_uni_layers == 1:\n self.assertEqual(num_layers, len(states))\n # 2 in second dimension means states_c and states_h\n self.assertAllEqual(\n [num_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH],\n states.shape)\n else:\n self.assertEqual(num_uni_layers, len(states))\n self.assertAllEqual(\n [num_uni_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH],\n states.shape)",
"def build_model(hparams: dict) -> tf.keras.models.Model:\n\n s = hparams['layer_size']\n ratio = hparams['input_seq_length'] // hparams['output_seq_length']\n name = 'enc{}{}_dec{}{}'.format(hparams['num_encoder_layers'], hparams['encoder_type'],\n hparams['num_decoder_layers'], hparams['decoder_type'])\n\n if hparams['encoder_type'] == 'uni':\n make_enc_layer = lambda x: tf.keras.layers.LSTM(s, return_sequences=True)(x)\n elif hparams['encoder_type'] == 'bi':\n make_enc_layer = lambda x: tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(s // 2, return_sequences=True))(x)\n else:\n raise ValueError(\"encoder_type must be either 'uni' or 'bi'\")\n\n if hparams['decoder_type'] == 'uni':\n make_dec_layer = lambda x: tf.keras.layers.LSTM(s, return_sequences=True)(x)\n elif hparams['decoder_type'] == 'bi':\n make_dec_layer = lambda x: tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(s // 2, return_sequences=True))(x)\n else:\n raise ValueError(\"decoder_type must be either 'uni' or 'bi'\")\n\n inp = tf.keras.layers.Input(shape=(hparams['input_seq_length'], 1))\n\n enc = inp\n for _ in range(hparams['num_encoder_layers']):\n enc = make_enc_layer(enc)\n\n dec = tf.keras.layers.Reshape((hparams['output_seq_length'], ratio * s))(enc)\n\n if hparams['attention_type'] == 'mul':\n dec = make_dec_layer(dec)\n dec = tf.keras.layers.Attention(use_scale=hparams['attention_scale'],\n dropout=hparams['attention_dropout'],\n causal=hparams['attention_causal'])([dec, enc])\n hparams['num_decoder_layers'] -= 1\n name += '_mulattn'\n\n elif hparams['attention_type'] == 'add':\n dec = make_dec_layer(dec)\n dec = tf.keras.layers.AdditiveAttention(use_scale=hparams['attention_scale'],\n dropout=hparams['attention_dropout'],\n causal=hparams['attention_causal'])([dec, enc])\n\n hparams['num_decoder_layers'] -= 1\n name += '_addattn'\n\n elif hparams['attention_type'] == 'self-br':\n # apply self-attention before reshapre\n\n dec = tf.keras.layers.Attention(use_scale=hparams['attention_scale'],\n dropout=hparams['attention_dropout'],\n causal=hparams['attention_causal'])([enc, enc])\n\n dec = tf.keras.layers.Reshape((hparams['output_seq_length'], ratio * s))(dec)\n\n elif hparams['attention_type'] == 'self-ar' or hparams['attention_type'] == 'self':\n # apply self-attention after reshape\n\n dec = tf.keras.layers.Reshape((hparams['output_seq_length'], ratio * s))(enc)\n\n dec = tf.keras.layers.Attention(use_scale=hparams['attention_scale'],\n dropout=hparams['attention_dropout'],\n causal=hparams['attention_causal'])([dec, dec])\n\n for _ in range(hparams['num_decoder_layers']):\n dec = make_dec_layer(dec)\n\n out = tf.keras.layers.LSTM(1, return_sequences=True)(dec)\n\n model = tf.keras.models.Model(inp, out, name=name)\n\n model.compile(loss='mae', optimizer='adam', metrics=['mae', 'mse'])\n\n return model",
"def _build_decoder(self, hparams, inputs, initial_state, is_training):\n ## Decoder.\n with tf.variable_scope(\"trajectory_decoder\"):\n if hparams.decoder_type == \"fc\":\n regression = self._build_fc_decoder(hparams, inputs, is_training)\n final_states = None\n \n elif hparams.decoder_type == \"rnn\":\n list_dummy_input = []\n with tf.name_scope(\"dummy_input\"):\n for gpu_idx in range(self.num_gpu):\n with tf.device(tf.DeviceSpec(device_type=\"GPU\", device_index=gpu_idx)), tf.name_scope(\"tower_{:d}\".format(gpu_idx)):\n list_dummy_input.append(tf.zeros(tf.stack([self.target_length, self.batch_size[gpu_idx], 1])))\n \n with tf.variable_scope(\"rnn\"):\n if hparams.encoder_type == \"cnn\":\n with tf.variable_scope(\"rnn_initial_state\"):\n initial_state = self._make_initial_states(hparams, inputs)\n\n net, final_states = self._build_rnn_decoder(hparams, list_dummy_input, initial_state, is_training)\n\n with tf.name_scope(\"time_batch_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n \n with tf.variable_scope(\"projection\"):\n regression = self._build_output_projection(hparams, net, is_training)\n\n else:\n raise ValueError(\"Unknown decoder type {:s}.\".format(hparams.decoder_type))\n\n return regression, final_states",
"def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')",
"def build_decoder(opt, embeddings):\n dec_type = \"ifrnn\" if opt.decoder_type == \"rnn\" and opt.input_feed \\\n else opt.decoder_type\n return str2dec[dec_type].from_opt(opt, embeddings)",
"def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded",
"def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\n with tf.variable_scope(\"decoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\n\n return fw_states, bw_states",
"def dis_encoder_seq2seq(hparams):\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n ## Encoder forward variables.\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n return variable_mapping",
"def __init__(self, ntoken, emb_sz, n_hid, n_layers, pad_token, bidir=False,\n dropouth=0.3, dropouti=0.65, dropoute=0.1, wdrop=0.5, qrnn=False):\n\n super().__init__()\n self.ndir = 2 if bidir else 1\n self.bs, self.qrnn = 1, qrnn\n self.encoder = nn.Embedding(ntoken, emb_sz, padding_idx=pad_token)\n self.encoder_with_dropout = EmbeddingDropout(self.encoder)\n if self.qrnn:\n self.rnns = [QRNNLayer(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz) // self.ndir,\n save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True) for l in range(n_layers)]\n if wdrop:\n for rnn in self.rnns:\n rnn.linear = WeightDrop(rnn.linear, wdrop, weights=['weight'])\n else:\n self.rnns = [nn.LSTM(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.ndir,\n 1, bidirectional=bidir) for l in range(n_layers)]\n if wdrop: self.rnns = [WeightDrop(rnn, wdrop) for rnn in self.rnns]\n self.rnns = torch.nn.ModuleList(self.rnns)\n self.encoder.weight.data.uniform_(-self.initrange, self.initrange)\n\n self.emb_sz,self.n_hid,self.n_layers,self.dropoute = emb_sz,n_hid,n_layers,dropoute\n self.dropouti = LockedDropout(dropouti)\n self.dropouths = nn.ModuleList([LockedDropout(dropouth) for l in range(n_layers)])",
"def _build_encoder(self, hparams):\n\t\tnum_layers = self.num_encoder_layers\n\t\tnum_redisual_layers = self.num_encoder_residual_layers\n\n\t\twith tf.variable_scope('encoder') as _:\n\t\t\tself.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)\n\n\t\t\tif hparams.encoder_type == 'uni':\n\t\t\t\t_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_redisual_layers))\n\t\t\t\t# 1. build a list of cells\n\t\t\t\tcell = self._build_encoder_cell(hparams, num_layers, num_redisual_layers)\n\t\t\t\t# 2. forward\n\t\t\t\t# encoder_outputs: [batch, time, hidden]\n\t\t\t\t# encoder_state: ([batch, hidden] for _ in range(layers))\n\t\t\t\tencoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n\t\t\t\t\tcell,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\t\t\telif hparams.encoder_type == 'bi':\n\t\t\t\tif not num_layers % 2 == 0:\n\t\t\t\t\t_error('Bi-directional requires num_layers={} should be divided by 2'.format(num_layers))\n\t\t\t\t\traise ValueError\n\t\t\t\tnum_bi_layers = int(num_layers / 2)\n\t\t\t\tnum_bi_residual_layers = num_bi_layers - 1\n\t\t\t\t_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))\n\n\t\t\t\tcell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\t\t\t\tcell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\n\t\t\t\t# bi_outputs: (fw, bw): fw: [batch, seq, hidden]\n\t\t\t\t# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]\n\t\t\t\tbi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\t\tcell_fw,\n\t\t\t\t\tcell_bw,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\n\t\t\t\tif num_bi_layers == 1:\n\t\t\t\t\tencoder_state = bi_state\n\t\t\t\telse:\n\t\t\t\t\tencoder_state = []\n\t\t\t\t\tfor layer_id in range(num_bi_layers):\n\t\t\t\t\t\tencoder_state.append(bi_state[0][layer_id])\t\t# fw state in layer id\n\t\t\t\t\t\tencoder_state.append(bi_state[1][layer_id])\t\t# bw state in layer id\n\t\t\t\t\tencoder_state = tuple(encoder_state)\n\t\t\t\tencoder_outputs = tf.concat(bi_outputs, -1)\t\t# [batch, seq, hidden * 2]\n\t\t\telse:\n\t\t\t\t_error('Unknow encoder type: {}'.format(hparams.encoder_type))\n\t\t\t\traise ValueError\n\t\t\n\t\treturn encoder_outputs, encoder_state",
"def build_encoder(self):\n \n # some general variables concerning the current processed batch\n batch_size=self.image_embeddings.get_shape()[0]\n sentence_length = self.config.sentence_length # == self.seq_embeddings.get_shape()[2]\n max_text_length = tf.shape(self.seq_embeddings)[1] # maximum text length for this batch\n \n # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the\n # modified LSTM in the \"Show and Tell\" paper has no biases and outputs\n # new_c * sigmoid(o).\n \n # create an lstm cell that will process a sentence (a sequence of tokens)\n lstm_cell_sentences = tf.nn.rnn_cell.BasicLSTMCell(\n num_units=self.config.sentence_embedding_size, state_is_tuple=True) # num_units describes the size of the internal memory cell (but it is also the output size)\n \n # we also need an lstm cell that will process a sequence of sentences (a text)\n lstm_cell_text = tf.nn.rnn_cell.BasicLSTMCell(\n num_units=self.config.article_embedding_size, state_is_tuple=True)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all lstm cells\n lstm_cell_sentences = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell_sentences,\n input_keep_prob=self.config.dropout_keep_prob_encoder,\n output_keep_prob=self.config.dropout_keep_prob_encoder)\n lstm_cell_text = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell_text,\n input_keep_prob=self.config.dropout_keep_prob_encoder,\n output_keep_prob=self.config.dropout_keep_prob_encoder)\n\n with tf.variable_scope(\"lstm_sentence_encode\", initializer=self.initializer) as lstm_scope:\n # we use the image embedding only to feed the text lstm with image information\n # The sentences are initialized with a zero state\n \n # Set the initial LSTM state.\n initial_state_sentences = lstm_cell_sentences.zero_state(\n batch_size=batch_size, dtype=tf.float32)\n\n # At first, generate a mask for all sentences. \n # This will allow us to specify the individual length of each sentence \n # This lengths are fed into tf.nn.dynamic_rnn, which will produce zero outputs for \n # all padded tokens.\n # Note, that self.input_seqs contains a zero for each padded token (zero is not in the vocabulary)\n zeros = tf.zeros_like(self.input_seqs)\n self.sentence_mask = tf.select(tf.greater(self.input_seqs, zeros) , tf.ones_like(self.input_seqs), zeros) # type int64\n\n #self.sentence_mask = tf.cast(self.sentence_mask, tf.int32)\n \n # In the following, we run a hierarchical approach:\n # Tokens of a sentence are mapped onto an embedding vector through lstm_cell_sentences\n # The resulting sentence embeddings are passed though lstm_cell_text to gather text embeddings\n \n # Since we have to generate an embedding for each sentence in a text, we need a loop somehow.\n # But the number of sentences in a text is dynamically determined for each batch (max_text_length).\n # Therefore, we cannot use unpack and a python loop. 
Instead we use the while_loop control method of TF.\n \n \n # The output of lstm_cell_sentences will be stored in this matrix, but only \n # the lstm output of the last not padded word in a sentence\n lstm_outputs_sentences = tf.zeros(tf.pack([batch_size, max_text_length, self.config.sentence_embedding_size])) # tf.pack is a hotfix, since a normal array passing would not work as max_text_length is a tensor\n #lstm_outputs_sentences = tf.zeros([batch_size, max_text_length, self.config.embedding_size])\n \n # Allow the LSTM variables to be reused.\n #lstm_scope.reuse_variables()\n\n # now we compute the lstm outputs for each token sequence (sentence) in the while loop body\n def body(i,n,los):\n \"\"\"Compute lstm outputs for sentences i (sentences with index i in text) of current batch.\n\n Inputs:\n i: control variable of loop (runs from 0 to n-1)\n n: max_text_length\n los: lstm_outputs_sentences\n\n Outputs:\n i: incremented\n n: unchanged\n los: input with updated values in index i of second dimension\n \"\"\"\n # extract correct lstm input (i-th sentence from each batch)\n #es = tf.slice(self.seq_embeddings,[0,i,0,0],[batch_size, 1, sentence_length, self.config.word_embedding_size])\n es = tf.slice(self.seq_embeddings,tf.pack([0,i,0,0]),tf.pack([batch_size, 1, sentence_length, self.config.word_embedding_size]))\n es = tf.squeeze(es, axis=1) # get rid of sentence index dimension\n es = tf.reshape(es, tf.pack([batch_size, sentence_length, self.config.word_embedding_size])) # dirty hack, to ensure that shape is known (needed by further methods)\n\n # extract masks of sentences i\n sm = tf.slice(self.sentence_mask,tf.pack([0,i,0]),tf.pack([batch_size, 1, sentence_length]))\n sm = tf.squeeze(sm, axis=1)\n # compute sentence lengths\n sm = tf.reduce_sum(sm, 1)\n sm = tf.reshape(sm, tf.pack([batch_size])) # dirty hack, to ensure that shape is known\n\n # feed i-th sentences through lstm\n lstm_outputs_sentences_tmp, _ = tf.nn.dynamic_rnn(cell=lstm_cell_sentences,\n inputs=es,\n sequence_length=sm,\n initial_state=initial_state_sentences,\n dtype=tf.float32,\n scope=lstm_scope)\n # lstm_outputs_sentences_tmp has shape (batch_size, sentence_length, sentence_embedding_size\n # lstm_outputs_sentences_tmp contains an output for each token in the sentences, but we are only interested in the \n # output of the last token of a sentence\n \n # Now we extract only those outputs (output of last token, which is not a padded token) from lstm_outputs_sentences_tmp\n\n # sm contains the length of each sentence, meaning we can access the right output with the index (length - 1)\n # Note, that the actual masks where reduced to lengths in the above statements.\n sm = tf.sub(sm, 1) # sentence mask contains now the index of the last token in each sentence\n # Those sentence, that have zero tokens (padded sentences) have now an index of -1. 
We have to set them back to 0\n # which are simply zero outputs of the lstm\n zeros = tf.zeros_like(sm)\n sm = tf.select(tf.less(sm, zeros) , zeros, sm)\n\n # We use tf.gather_nd to extract the desired outputs from lstm_outputs_sentences_tmp.\n # Therefore, we have to produce the \"indices\" parameter of this method first.\n # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_sentences\n # Hence the innermost dimension must be a 2D vector: (batch, token) <- index of desired embedding in lstm_outputs_sentences\n # for sentence with index (batch, i) in self.seq_embeddings\n\n # We generate for each of the two indices a seperate matrix and concatenate them at the end\n sm = tf.expand_dims(sm, 1)\n sm = tf.cast(sm, dtype=tf.int32)\n\n # use tf.range to generate the equivalence of sm for batch indices\n #batch_indices = tf.range(0, batch_size)\n batch_indices = tf.constant(np.arange(int(batch_size)), dtype=tf.int32)\n batch_indices = tf.expand_dims(batch_indices, 1) \n\n # then use tf.concat to generate the actual tensor, that can be used to gather the right outputs from lstm_outputs_sentences_tmp\n gather_indices = tf.concat(1, [batch_indices, sm])\n\n # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct ouput\n lstm_outputs_sentences_tmp = tf.gather_nd(lstm_outputs_sentences_tmp, gather_indices)\n lstm_outputs_sentences_tmp = tf.expand_dims(lstm_outputs_sentences_tmp, 1) \n\n # add the current output to our list of outputs\n los = tf.concat(1, [tf.slice(los, tf.pack([0,0,0]), tf.pack([batch_size, i, self.config.sentence_embedding_size])),\n lstm_outputs_sentences_tmp,\n tf.slice(los, tf.pack([0,i+1,0]), tf.pack([batch_size,n-i-1,self.config.sentence_embedding_size]))])\n \n return i+1,n,los\n\n def condition(i,n,los):\n \"\"\"Break condition for while loop\n\n Inputs:\n i: control variable of loop (runs from 0 to n-1)\n n: max_text_length\n los: lstm_outputs_sentences\n\n Outputs:\n Ture, if body should be run.\n \"\"\"\n\n return i < n\n\n result = tf.while_loop(condition, body, loop_vars=[0, max_text_length, lstm_outputs_sentences])\n lstm_outputs_sentences = result[2] \n \n with tf.variable_scope(\"lstm_text_encode\", initializer=self.initializer) as lstm_scope: \n \n # Feed the image embeddings to set the initial LSTM state.\n zero_state_text = lstm_cell_text.zero_state(\n batch_size=batch_size, dtype=tf.float32)\n _, initial_state_text = lstm_cell_text(self.image_embeddings, zero_state_text)\n \n # Allow the LSTM variables to be reused.\n lstm_scope.reuse_variables()\n \n # lstm_outputs_sentences has now the last lstm output for each sentence in the batch (output of last unpadded token)\n # Its shape is (batch_size, max_text_length, sentence_embedding_size)\n \n # Now we use the sentence embeddings to generate text embeddings\n # Run the batch of sentence embeddings through the LSTM.\n self.sentence_sequence_length = tf.reduce_sum(self.input_mask, 1)\n lstm_outputs_text, _ = tf.nn.dynamic_rnn(cell=lstm_cell_text,\n inputs=lstm_outputs_sentences,\n sequence_length=self.sentence_sequence_length,\n initial_state=initial_state_text,\n dtype=tf.float32,\n scope=lstm_scope)\n # lstm_outputs_text has now the lstm output of each sentence_embedding,\n # where the output of the last unpadded sentence_embedding is considered as the text embedding.\n # Note, that we could also call it article embedding, since it comprises the information of the \n # text and the image.\n # Its shape is 
(batch_size, max_text_length, article_embedding_size)\n\n # extract the text embedding from lstm_outputs_text\n \n # sequence_length contains the length of each text, meaning we can access the right output with the index (length - 1)\n last_sentence = tf.sub(self.sentence_sequence_length, 1) # sentence mask contains now the index of the last unpadded sentence in each text\n\n # We use tf.gather_nd to extract the desired outputs from lstm_outputs_text.\n # Therefore, we have to produce the \"indices\" parameter of this method first.\n # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_text\n # Hence the innermost dimension must be a 2D vector: (batch, sentence)\n\n # We generate for each of the two indices a seperate matrix and concatenate them at the end\n last_sentence = tf.expand_dims(last_sentence, 1)\n\n # use tf.range to generate the equivalence of sm for batch indices\n batch_indices = tf.range(0, batch_size)\n batch_indices = tf.expand_dims(batch_indices, 1) \n\n # then use tf.concat to generate the actual tensor, that can be used to gather the right outputs from lstm_outputs_text\n gather_indices = tf.concat(1, [batch_indices, last_sentence])\n \n # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct ouput\n self.article_embeddings = tf.gather_nd(lstm_outputs_text, gather_indices)\n \n # As the image information might have gone lost in the hierarchical rnn, the reader might reconsider it.\n if self.config.reconsider_image:\n with tf.variable_scope(\"reconsider_image\", initializer=self.initializer, reuse=None) as reconsider_image_scope: \n # concat current article embedding with image_embedding and map them through an fully connected layer onto a new embedding\n article_image_concat = tf.concat(1, [self.article_embeddings, self.image_embeddings])\n \n self.article_embeddings = tf.contrib.layers.fully_connected(\n inputs=article_image_concat,\n num_outputs=self.config.article_embedding_size,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=reconsider_image_scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n self.article_embeddings = tf.nn.dropout(self.article_embeddings, self.config.dropout_keep_prob_encoder)\n \n # self.article_embeddings contains now the text/article embedding for each article in the batch\n # Its shape is (batch_size, article_embedding_size)\n \n # All variables up until this point are shared with the autoencoder. So these are the variables\n # (the whole encoder network) that we want to restore/share.\n self.autoencoder_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)",
"def make_decoder(self, latent_size: int, output_size: int) -> nn.Module:\n pass",
"def build_encoder(self):\n with tf.variable_scope(\"encoder\") as scope:\n length1 = tf.to_int32(tf.reduce_sum(self.encode_mask1, 1), name=\"length1\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n thought_vectors1 = tf.concat(states, 1, name=\"thought_vectors1\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors1 = tf.identity(state, name=\"thought_vectors1\")\n \n scope.reuse_variables()\n\n length2 = tf.to_int32(tf.reduce_sum(self.encode_mask2, 1), name=\"length2\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n thought_vectors2 = tf.concat(states, 1, name=\"thought_vectors2\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors2 = tf.identity(state, name=\"thought_vectors2\")\n\n self.thought_vectors1 = thought_vectors1\n self.thought_vectors2 = thought_vectors2",
"def build_model(encode_size):\n # Build the encoder\n encoder = Sequential()\n encoder.add(InputLayer((1000,4)))\n encoder.add(Conv1D(5, 11, activation=\"tanh\", padding=\"same\"))\n encoder.add(Conv1D(7, 7, activation=\"relu\", padding=\"same\"))\n encoder.add(MaxPooling1D(2))\n encoder.add(Conv1D(11, 5, activation=\"tanh\", padding=\"same\"))\n encoder.add(Conv1D(11, 3, activation=\"tanh\", padding=\"same\"))\n encoder.add(MaxPooling1D(2))\n encoder.add(Flatten())\n encoder.add(Dense(750, activation = 'tanh', kernel_initializer='glorot_normal'))\n encoder.add(Dense(400, activation = 'tanh', kernel_initializer='glorot_normal'))\n encoder.add(Dense(200, activation = 'tanh', kernel_initializer='glorot_normal'))\n encoder.add(Dense(encode_size))\n\n # Build the decoder\n decoder = Sequential()\n decoder.add(InputLayer((encode_size,)))\n decoder.add(Dense(200, activation='tanh', kernel_initializer='glorot_normal'))\n decoder.add(Dense(400, activation='tanh', kernel_initializer='glorot_normal'))\n decoder.add(Dense(750, activation='tanh', kernel_initializer='glorot_normal'))\n decoder.add(Dense(10000, activation='tanh', kernel_initializer='glorot_normal'))\n decoder.add(Reshape((1000, 10)))\n decoder.add(Conv1DTranspose(8, 11, activation=\"relu\", padding=\"same\"))\n decoder.add(Conv1DTranspose(4, 5, activation=\"linear\", padding=\"same\"))\n\n return encoder, decoder\n\n # encoder = Sequential()\n # encoder.add(InputLayer((100, 4)))\n # encoder.add(Conv1D(5, 11, activation=\"tanh\", padding=\"same\"))\n # encoder.add(Conv1D(7, 7, activation=\"relu\", padding=\"same\"))\n # encoder.add(MaxPooling1D(2))\n # encoder.add(Conv1D(11, 5, activation=\"tanh\", padding=\"same\"))\n # encoder.add(Conv1D(11, 3, activation=\"tanh\", padding=\"same\"))\n # encoder.add(MaxPooling1D(2))\n # encoder.add(Flatten())\n # encoder.add(Dense(75, activation='tanh', kernel_initializer='glorot_normal'))\n # encoder.add(Dense(40, activation='tanh', kernel_initializer='glorot_normal'))\n # encoder.add(Dense(20, activation='tanh', kernel_initializer='glorot_normal'))\n # encoder.add(Dense(encode_size))\n #\n # # Build the decoder\n # decoder = Sequential()\n # decoder.add(InputLayer((encode_size,)))\n # decoder.add(Dense(20, activation='tanh', kernel_initializer='glorot_normal'))\n # decoder.add(Dense(40, activation='tanh', kernel_initializer='glorot_normal'))\n # decoder.add(Dense(75, activation='tanh', kernel_initializer='glorot_normal'))\n # decoder.add(Dense(1000, activation='tanh', kernel_initializer='glorot_normal'))\n # decoder.add(Reshape((100, 10)))\n # decoder.add(Conv1DTranspose(8, 11, activation=\"relu\", padding=\"same\"))\n # decoder.add(Conv1DTranspose(4, 5, activation=\"linear\", padding=\"same\"))\n # # print(encoder.summary())\n # # print(decoder.summary())\n # return encoder, decoder",
"def _build_decoder(self, encoder_outputs, encoder_state, hparams):\n\t\ttgt_sos_id = tf.cast(tf.constant(hparams.sos_id), tf.int32)\n\t\ttgt_eos_id = tf.cast(tf.constant(hparams.eos_id), tf.int32)\n\n\t\tmaximum_iterations = self._get_infer_maximum_iterations(hparams)\n\n\t\t# Decoder\n\t\twith tf.variable_scope('decoder') as decoder_scope:\n\t\t\tcell, decoder_initial_state = self._build_decoder_cell(hparams, encoder_state)\n\t\t\t\n\t\t\tlogits = tf.no_op()\n\t\t\tdecoder_outputs = None\n\n\t\t\t# Train or Eval\n\t\t\tif self.mode != 'infer':\n\t\t\t\tdecoder_emb_input = tf.nn.embedding_lookup(self.embedding_decoder, self.decoder_input_data)\n\n\t\t\t\t# helper\n\t\t\t\thelper = tf.contrib.seq2seq.TrainingHelper(\n\t\t\t\t\tdecoder_emb_input, self.seq_length_decoder_input_data)\n\t\t\t\t\n\t\t\t\t# decoder\n\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\tcell,\n\t\t\t\t\thelper,\n\t\t\t\t\tdecoder_initial_state)\n\t\t\t\t\n\t\t\t\t# dynamic decoding\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\t\n\t\t\t\tsample_id = outputs.sample_id\n\t\t\t\tlogits = self.output_layer(outputs.rnn_output)\n\t\t\telse:\n\t\t\t\tinfer_mode = hparams.infer_mode\n\t\t\t\tstart_tokens = tf.fill([self.batch_size], tgt_sos_id)\n\t\t\t\tend_token = tgt_eos_id\n\t\t\t\t_info(' decoder by infer_mode={} beam_width={}'.format(infer_mode, hparams.beam_width))\n\n\t\t\t\tif infer_mode == 'greedy':\n\t\t\t\t\thelper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n\t\t\t\t\t\tself.embedding_decoder, start_tokens, end_token)\n\t\t\t\telif infer_mode == 'beam_search':\n\t\t\t\t\tbeam_width = hparams.beam_width\n\t\t\t\t\tlength_penalty_weight = hparams.length_penalty_weight\n\t\t\t\t\tcoverage_penalty_weight = hparams.coverage_penalty_weight\n\n\t\t\t\t\t# beam search do not require helper\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n\t\t\t\t\t\tcell=cell,\n\t\t\t\t\t\tembedding=self.embedding_decoder,\n\t\t\t\t\t\tstart_tokens=start_tokens,\n\t\t\t\t\t\tend_token=end_token,\n\t\t\t\t\t\tinitial_state=decoder_initial_state,\n\t\t\t\t\t\tbeam_width=beam_width,\n\t\t\t\t\t\toutput_layer=self.output_layer,\n\t\t\t\t\t\tlength_penalty_weight=length_penalty_weight,\n\t\t\t\t\t\tcoverage_penalty_weight=coverage_penalty_weight)\n\t\t\t\telse:\n\t\t\t\t\t_error('Unknown infer_mode {}'.format(infer_mode))\n\t\t\t\t\traise ValueError\n\t\t\t\t\n\t\t\t\tif infer_mode != 'beam_search':\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\t\tcell,\n\t\t\t\t\t\thelper,\n\t\t\t\t\t\tdecoder_initial_state,\n\t\t\t\t\t\toutput_layer=self.output_layer)\t\t# apply to the RNN output prior to storing the result or sampling\n\t\t\t\t\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tmaximum_iterations=maximum_iterations,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\n\t\t\t\tif infer_mode == 'beam_search':\n\t\t\t\t\tsample_id = outputs.predicted_ids\n\t\t\t\telse:\n\t\t\t\t\tlogits = outputs.rnn_output\n\t\t\t\t\tsample_id = outputs.sample_id\n\n\t\treturn logits, sample_id, final_context_state",
"def _build_encoder(self, hparams, is_training):\n with tf.variable_scope(\"trajectory_encoder\"):\n with tf.name_scope(\"source_placeholder\"):\n input_phs = list_ops.list_placeholder(self.num_gpu, (None, self.input_length, self.input_dims), tf.float32)\n for ph in input_phs:\n tf.add_to_collection('placeholder', ph)\n \n if hparams.encoder_type == \"rnn\":\n net = input_phs\n with tf.variable_scope(\"projection\"):\n net = self._build_input_projection(hparams, net, is_training)\n\n with tf.name_scope(\"batch_time_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n\n with tf.variable_scope(\"rnn\"):\n net, state = self._build_rnn_encoder(hparams, net, is_training)\n\n if hparams.relu_reconfiguration:\n with tf.variable_scope(\"reconfiguration\"):\n net = list_ops.list_dense_with_bn(net,\n hparams.cnn_input_projector_filters[-1],\n is_training,\n self.bn_decay,\n seed=self.random_seed)\n\n elif hparams.encoder_type == \"cnn\":\n net = self._build_cnn_encoder(hparams, input_phs, is_training)\n state = None\n \n else:\n raise ValueError(\"Unknown encoder type {:s}.\".format(hparams.encoder_type))\n\n return net, state",
"def _build_graph(self, hparams, scope=None):\n\n sample = self.iterator.get_next()\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n enc_inputs, dec_inputs, dec_outputs, seq_len = sample\n else:\n # At inference, only two inputs are given\n enc_inputs, seq_len, dec_start = sample\n #indices = (hparams.num_labels-1)*tf.ones([enc_inputs.shape[0]], tf.int32)\n #depth = hparams.num_labels\n #dec_start = tf.one_hot(indices, depth, axis=-1)\n\n with tf.variable_scope(scope or \"dynamic_seq2seq\", dtype=tf.float32):\n # create encoder\n dense_input_layer = tf.layers.Dense(hparams.num_units)\n\n if hparams.dense_input:\n enc_inputs = dense_input_layer(enc_inputs)\n\n enc_cells = mdl_help.create_rnn_cell(unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n depth=hparams.depth,\n num_residual_layers=hparams.num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n mode=self.mode)\n\n # run encoder\n enc_outputs, enc_state = tf.nn.dynamic_rnn(cell=enc_cells,\n inputs=enc_inputs,\n sequence_length=seq_len,\n dtype=tf.float32,\n scope=\"encoder\")\n\n tgt_seq_len = tf.add(seq_len, tf.constant(1, tf.int32))\n\n # create decoder\n dec_cells = mdl_help.create_rnn_cell(unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=hparams.num_layers,\n depth=hparams.depth,\n num_residual_layers=hparams.num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n mode=self.mode)\n\n # output project layer\n projection_layer = tf.layers.Dense(hparams.num_labels, use_bias=False)\n\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n if hparams.train_helper == \"teacher\":\n # teacher forcing\n helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_inputs,\n sequence_length=tgt_seq_len)\n elif hparams.train_helper == \"sched\":\n # scheduled sampling\n helper = tf.contrib.seq2seq.\\\n ScheduledOutputTrainingHelper(inputs=dec_inputs,\n sequence_length=tgt_seq_len,\n sampling_probability=self.sample_probability,\n next_inputs_fn=lambda x: mdl_help.multiclass_sample(x),\n )\n elif self.mode == tf.contrib.learn.ModeKeys.EVAL:\n helper = tf.contrib.seq2seq.\\\n ScheduledOutputTrainingHelper(inputs=dec_inputs,\n sequence_length=tgt_seq_len,\n sampling_probability=tf.constant(1.0),\n next_inputs_fn=lambda x: mdl_help.multiclass_sample(x))\n\n else: # running inference\n def end_fn(sample_ids):\n are_eq = tf.equal(dec_start, sample_ids)\n reduce_eq = tf.reduce_all(are_eq, axis=-1)\n return reduce_eq\n helper = tf.contrib.seq2seq.\\\n InferenceHelper(sample_fn=lambda x: mdl_help.multiclass_sample(x),\n sample_shape=[hparams.num_labels],\n sample_dtype=tf.float32,\n start_inputs=dec_start,\n end_fn=lambda x: end_fn(x))\n\n max_len = tf.reduce_max(tgt_seq_len)\n\n decoder = tf.contrib.seq2seq.BasicDecoder(cell=dec_cells,\n helper=helper,\n initial_state=enc_state,\n output_layer=projection_layer)\n\n # run decoder\n final_outputs, final_states, final_lengths = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n impute_finished=True,\n maximum_iterations=tf.constant(2)*max_len,\n scope=\"decoder\")\n\n logits = final_outputs.rnn_output\n sample_ids = final_outputs.sample_id\n\n if self.mode == tf.contrib.learn.ModeKeys.INFER:\n return enc_inputs, sample_ids\n\n # mask out entries longer than target sequence length\n mask = tf.expand_dims(tf.sequence_mask(tgt_seq_len, dtype=tf.float32), axis=-1)\n\n #stop gradient thru labels by crossent op\n labels = tf.stop_gradient(dec_outputs)\n\n crossent = 
tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=labels,\n name=\"crossent\")\n\n\n loss = tf.reduce_sum((crossent * mask) / tf.expand_dims(\n tf.expand_dims(tf.cast(tgt_seq_len, tf.float32), -1), -1))/hparams.batch_size\n\n# loss = tf.reduce_sum(crossent*mask)/(hparams.batch_size*tf.reduce_mean(tf.cast(tgt_seq_len,\n# tf.float32)))\n\n metrics = []\n update_ops = []\n if self.mode == tf.contrib.learn.ModeKeys.EVAL:\n # for predictions, we will scale the logits and then count each class as\n # active if it is over .5\n predictions = mdl_help.multiclass_prediction(logits)\n targets = dec_outputs\n acc, acc_update = tf.metrics.accuracy(predictions=predictions,\n labels=targets,\n weights=mask)\n metrics = [acc]\n update_ops = [acc_update]\n\n return logits, loss, metrics, update_ops",
"def _construct_encoders_decoders(self):\n self.enc_inp = {}\n self.dec_out = {}\n if self.encode_hints:\n self.enc_hint = {}\n if self.decode_diffs:\n self.node_dec_diff = hk.Linear(1)\n self.edge_dec_diff = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n self.graph_dec_diff = (hk.Linear(1), hk.Linear(1))\n if self.decode_hints:\n self.dec_hint = {}\n\n for name in self.spec:\n stage, loc, t = self.spec[name]\n if stage == _Stage.INPUT:\n self.enc_inp[name] = [hk.Linear(self.hidden_dim)]\n if loc == _Location.EDGE and t == _Type.POINTER:\n # Edge pointers need two-way encoders\n self.enc_inp[name].append(hk.Linear(self.hidden_dim))\n\n elif stage == _Stage.OUTPUT:\n if loc == _Location.NODE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1),)\n elif t == _Type.CATEGORICAL:\n self.dec_out[name] = (hk.Linear(self.nb_dims[name]),)\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.EDGE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_out[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims),\n hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.GRAPH:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_out[name] = (hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_out[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_out[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n else:\n raise ValueError('Incorrect location')\n\n elif stage == _Stage.HINT:\n if self.encode_hints:\n self.enc_hint[name] = [hk.Linear(self.hidden_dim)]\n if loc == _Location.EDGE and t == _Type.POINTER:\n # Edge pointers need two-way encoders\n self.enc_hint[name].append(hk.Linear(self.hidden_dim))\n\n if self.decode_hints:\n if loc == _Location.NODE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1),)\n elif t == _Type.CATEGORICAL:\n self.dec_hint[name] = (hk.Linear(self.nb_dims[name]),)\n elif t == _Type.POINTER:\n self.dec_hint[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.EDGE:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1), hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_hint[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims),\n hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_hint[name] = (hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n elif loc == _Location.GRAPH:\n if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]:\n self.dec_hint[name] = (hk.Linear(1), hk.Linear(1))\n elif t == _Type.CATEGORICAL:\n cat_dims = self.nb_dims[name]\n self.dec_hint[name] = (hk.Linear(cat_dims), hk.Linear(cat_dims))\n elif t == _Type.POINTER:\n self.dec_hint[name] = 
(hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim),\n hk.Linear(self.hidden_dim))\n else:\n raise ValueError('Incorrect type')\n else:\n raise ValueError('Incorrect location')"
] | [
"0.6820037",
"0.67112243",
"0.6623604",
"0.65783787",
"0.6512118",
"0.65099704",
"0.64977014",
"0.63607395",
"0.634724",
"0.6332607",
"0.63037586",
"0.62852144",
"0.6256251",
"0.6255645",
"0.6220524",
"0.62135637",
"0.620888",
"0.62087196",
"0.6197982",
"0.6101629",
"0.6096858",
"0.60288304",
"0.60238224",
"0.60229355",
"0.60088533",
"0.5991207",
"0.59705514",
"0.59705514",
"0.5959584",
"0.5950674"
] | 0.6713481 | 1 |
Return forecast using input_endog. | def get_forecast(self, input_endog):
    output_window = self.output_window
    val_index = np.arange(input_endog.index[-1]+1, input_endog.index[-1]+1+output_window)
    X_endog = input_endog.T.dropna().copy()
    obs_items = X_endog.index  # store-item pairs that have a complete input window
    first_obs = np.array(X_endog.iloc[:, 0]).reshape((-1, 1))  # first observation per series, used to detrend
    X_endog -= first_obs
    encoder_exog = self.dataset_generator.load_encoder_exog(X_endog.columns, obs_items)
    decoder_exog = self.dataset_generator.load_decoder_exog(val_index, obs_items)
    decoder_event = self.dataset_generator.load_event_name(val_index, obs_items)
    X_decoder = np.zeros((X_endog.shape[0], output_window, 1))
    forecast = self.model.predict(
        [np.array(X_endog).reshape((*X_endog.shape, 1)), encoder_exog,
         X_decoder, decoder_exog, decoder_event]).reshape((-1, output_window))
    # add the first observation back to restore each series' level
    forecast += first_obs
    forecast_df = pd.DataFrame(np.nan, index=val_index, columns=input_endog.columns)
    forecast_df[obs_items] = forecast.T
    return forecast_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_predict_end(self, end):\n\n out_of_sample = 0 # will be overwritten if needed\n if end is None: # use data for ARIMA - endog changes\n end = len(self.data.endog) - 1\n\n dates = self.data.dates\n freq = self.data.freq\n\n if isinstance(end, str):\n if dates is None:\n raise ValueError(\"Got a string for end and dates is None\")\n try:\n dtend = self._str_to_date(end)\n self.data.predict_end = dtend\n end = self._get_dates_loc(dates, dtend)\n except KeyError as err: # end is greater than dates[-1]...probably\n if dtend > self.data.dates[-1]:\n end = len(self.data.endog) - 1\n freq = self.data.freq\n out_of_sample = datetools._idx_from_dates(dates[-1], dtend,\n freq)\n else:\n if freq is None:\n raise ValueError(\"There is no frequency for these \"\n \"dates and date %s is not in dates \"\n \"index. Try giving a date that is in \"\n \"the dates index or use an integer.\"\n % dtend)\n else: #pragma: no cover\n raise err # should never get here\n self._make_predict_dates() # attaches self.data.predict_dates\n\n elif isinstance(end, int) and dates is not None:\n try:\n self.data.predict_end = dates[end]\n except IndexError as err:\n nobs = len(self.data.endog) - 1 # as an index\n out_of_sample = end - nobs\n end = nobs\n if freq is not None:\n self.data.predict_end = datetools._date_from_idx(dates[-1],\n out_of_sample, freq)\n elif out_of_sample <= 0: # have no frequency but are in sample\n #TODO: what error to catch here to make sure dates is\n #on the index?\n try:\n self.data.predict_end = self._get_dates_loc(dates,\n end)\n except KeyError:\n raise\n else:\n self.data.predict_end = end + out_of_sample\n self.data.predict_start = self._get_dates_loc(dates,\n self.data.predict_start)\n\n self._make_predict_dates()\n\n elif isinstance(end, int):\n nobs = len(self.data.endog) - 1 # is an index\n if end > nobs:\n out_of_sample = end - nobs\n end = nobs\n\n elif freq is None: # should have a date with freq = None\n raise ValueError(\"When freq is None, you must give an integer \"\n \"index for end.\")\n\n return end, out_of_sample",
"def predict_end():\n data = request.json\n\n if data:\n predictor.pred_dict[\"end_date\"] = data[\"end_date\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'",
"def forecast(self) -> TSDataset:\n future = self.ts.make_future(self.horizon)\n predictions = self.model.forecast(future)\n return predictions",
"def h_obs(self, cur_time, e_id):\n end_time = cur_time\n\n y_f = np.matrix(np.zeros((self.dim_obs, 1), float))\n\n # the predicted value should be the aggregated value during the evolution process.\n # the forecast flow\n if 'flow' in self.y_index.keys():\n for s_id in self.y_index['flow'].keys():\n\n # if it is on freeway, use self.f_flow\n if s_id in self.sensors['freeway']:\n cell_id = self.sensors[s_id]['cell']\n\n # forecast flow data during time interval\n # avoid doing matrix transformation to speed code up.\n # current length of data\n len_window = len(self.__f_flow['data'][e_id])\n\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n\n # get the index of the data instances to aggregate\n index_start = np.searchsorted(self.__f_flow['time'][0:len_window], start_time, side='right')\n index_end = np.searchsorted(self.__f_flow['time'][0:len_window], end_time, side='left')\n\n # time_index = (start_time < self.__f_flow['time'][0:len_window]) & \\\n # (self.__f_flow['time'][0:len_window]<= end_time)\n # # get the index\n # ele_index = [i for i,b in enumerate(time_index) if b]\n\n flow_sum = 0.0\n flow_num = 0\n for i in range(index_start, index_end + 1):\n flow_sum += self.__f_flow['data'][e_id][i][cell_id]\n flow_num += 1\n\n # tmp_flow = np.matrix( self.__f_flow['data'][e_id] ).T\n # data_interval = tmp_flow[cell_id, time_index]\n\n # save the average data as the forecast data\n if flow_num != 0:\n y_f[self.y_index['flow'][s_id]] = flow_sum / flow_num\n else:\n raise Exception('Error: take mean over empty array.')\n\n # if it is on onramp, use est_state_all[ x_index['onramp'][cell] ]\n elif s_id in self.sensors['onramp']:\n cell_id = self.sensors[s_id]['cell']\n # we can just reuse the same time grid to find the index in est_state_all\n # __f_flow['time'] include all th time points. 
Extract the times point till now\n\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n time_index = (start_time < self.__f_flow['time']) & (self.__f_flow['time'] <= end_time)\n\n # forecast onramp data during the time interval as the mean estimated value\n # TODO, it should be depending on each ensemble, but here we are just using the mean estimated state.\n data_interval = self.est_state_all[self.x_index['onramp'][cell_id], time_index]\n\n # save the average data as the forecast data\n y_f[self.y_index['flow'][s_id]] = data_interval.mean(1)\n\n # if it is on onramp, use est_state_all[ x_index['onramp'][cell] ]\n elif s_id in self.sensors['offramp']:\n cell_id = self.sensors[s_id]['cell']\n # we can just reuse the same time grid to find the index in est_state_all\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n time_index = (start_time < self.__f_flow['time']) & (self.__f_flow['time'] <= end_time)\n\n # forecast onramp data during the time interval\n # TODO, it should be depending on each ensemble\n data_interval = self.est_state_all[self.x_index['offramp'][cell_id], time_index]\n\n # save the average data as the forecast data\n y_f[self.y_index['flow'][s_id]] = data_interval.mean(1)\n\n else:\n raise Exception('Error: Flow sensors must locate on freeway grid or on/off ramps.')\n\n # the forecast velocity\n if 'speed' in self.y_index.keys():\n for s_id in self.y_index['speed'].keys():\n # the speed sensor must be on the freeway\n if s_id in self.sensors['freeway']:\n cell_id = self.sensors[s_id]['cell']\n\n len_window = len(self.__f_speed['data'][e_id])\n\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n\n # get the index of the data instances to aggregate\n index_start = np.searchsorted(self.__f_speed['time'][0:len_window], start_time, side='right')\n index_end = np.searchsorted(self.__f_speed['time'][0:len_window], end_time, side='left')\n\n # time_index = (start_time < self.__f_speed['time'][0:len_window]) & \\\n # (self.__f_speed['time'][0:len_window]<= end_time)\n # ele_index = [i for i,b in enumerate(time_index) if b]\n\n speed_sum = 0.0\n speed_num = 0\n for i in range(index_start, index_end + 1):\n speed_sum += self.__f_speed['data'][e_id][i][cell_id]\n speed_num += 1\n\n # get the data during the interval\n # tmp_speed = np.matrix( self.__f_speed['data'][e_id] ).T\n # len_window = tmp_speed.shape[1]\n # data_interval = tmp_speed[cell_id, time_index]\n\n # the forecast velocity is the average value\n if speed_num != 0:\n y_f[self.y_index['speed'][s_id]] = speed_sum / speed_num\n else:\n raise Exception('Error: take mean over empty array')\n\n else:\n raise Exception('Error: Speed sensors must locate on the freeway grid.')\n\n # the forecast travel time\n # TODO: need to update the bluetooth observation equation, now it is an average of the snap shot travel time during the interval\n if 'travel_time' in self.y_index.keys():\n for s_id in self.y_index['travel_time'].keys():\n\n start_cell, end_cell = self.sensors[s_id]['cell']\n # still use the __f_flow to get the time grid index\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n time_index = (start_time < self.__f_flow['time']) & (self.__f_flow['time'] <= end_time)\n\n # get the traffic density data from the interval\n data_interval = self.est_state_all[\n self.x_index['density'][start_cell]: self.x_index['density'][start_cell],\n time_index]\n\n travel_time = []\n speed_cells = []\n # compute a snap-shot travel time using each density profile\n for j in range(0, 
data_interval.shape[1]):\n for i in range(0, data_interval.shape[0]):\n cell_id = start_cell + i\n vel = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n data_interval[i, j])\n if vel == 0:\n raise Exception('Error: Got negative speed.')\n else:\n speed_cells.append(float(vel))\n\n speed_cells = np.array(speed_cells)\n\n # get the length of cells\n L_cells = self.len_cells[start_cell: end_cell]\n\n # append this snap shot travel time\n travel_time.append(np.sum(L_cells / speed_cells))\n\n # compute the mean travel time as the forecast travel time\n travel_time = np.array(travel_time)\n y_f[self.y_index['travel_time'][s_id]] = travel_time.mean()\n\n return y_f",
"def forecast_weather(self):\n pass",
"async def _forecast_single(\n self,\n model: Prophet\n ) -> pd.DataFrame:\n future = model.make_future_dataframe(self._periods, 'H', False)\n return model.predict(future)",
"def fit(self, train):\n model = ARIMA(train, order=(self.p, self.d, self.q))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n nextDaysPred = output[0][0]\n return nextDaysPred",
"def _fit(self, endog, endog_end, min_ts_mean, min_ts_mean_window, include_holidays=False,\n min_ts_length=None, max_ft_freq=None, exog_data=None, optimize=None):\n import numpy as np\n from pykalman import KalmanFilter\n import warnings\n warnings.filterwarnings('ignore')\n\n p, q = self._params['p'], self._params['q']\n freq = self._params['freq']\n pred_len = self.max_scoring_length\n x_matrix_train = None\n x_matrix_score = None\n\n # set exogenous (holiday) variables for input data\n if include_holidays and len(endog) + pred_len > 385:\n exog = exog_data.loc[endog.index.min():endog_end]\n else:\n include_holidays = False\n exog = None\n\n if min_ts_length is not None and len(endog) < min_ts_length:\n raise ValueError('TimeSeries length less than minimum length specified')\n\n if min_ts_mean is not None:\n if (min_ts_mean_window is not None and endog[-min_ts_mean_window:].fillna(0).mean() < min_ts_mean) or \\\n (min_ts_mean_window is None and endog.fillna(0).mean() < min_ts_mean):\n raise ValueError('Metric values too small to model.')\n\n # Smoothing the given time series as a pre-processing for modeling seasonalities through Fourier\n # transformation\n kf = KalmanFilter()\n endog_smoothed, filtered_state_covariances = kf.em(endog).smooth(endog)\n endog_smoothed = endog_smoothed[:, 0]\n\n endog, diff_order, actual_previous_per_diff = DataExploration._stationarizer(endog=pd.Series(endog),\n diff_min=0,\n diff_max=1,\n obs_incl=False)\n if diff_order:\n endog_smoothed = np.diff(endog_smoothed)\n\n if freq == 'D':\n complete_cycle = int(len(endog) / 7)\n endog = endog[- (complete_cycle * 7):]\n endog_smoothed = endog_smoothed[- (complete_cycle * 7):]\n elif freq == 'H':\n complete_cycle = int(len(endog) / 24)\n endog = endog[- (complete_cycle * 24):]\n endog_smoothed = endog_smoothed[- (complete_cycle * 24):]\n\n exog = exog.iloc[-len(endog):] if exog is not None else None\n\n if include_holidays:\n exog = exog.loc[:, (exog != 0).any(axis=0)]\n ext_training_features = list(exog.columns)\n else:\n ext_training_features = None\n\n stepwise_fit = []\n\n # Updating the user specified maximum number of frequencies to consider for the Fourier transformation\n # based on the length of the smoothed endogenous variable\n max_ft_freq = int(min(max_ft_freq, len(endog_smoothed) / 4))\n\n # Running the Fourier transformation extrapolating one point ahead in future that is going to be used\n # for predicting\n\n if max_ft_freq > 0:\n x_matrix = self._fourier_extp(series=endog_smoothed, max_trun=(2 * max_ft_freq),\n forecast_period=pred_len)\n if not optimize and np.all(x_matrix[0] == x_matrix[0][0]):\n x_matrix_train = None\n x_matrix_score = None\n max_ft_freq = 0\n else:\n x_matrix_train = x_matrix[:, :(x_matrix.shape[1] - pred_len)]\n x_matrix_score = x_matrix[:, (x_matrix.shape[1] - pred_len):]\n\n\n self._seasonal_arima(endog=endog, exog=exog, p=p, d=0, q=q, imodels=max_ft_freq,\n include_holidays=include_holidays, ift_matrix=x_matrix_train,\n stepwise_fit=stepwise_fit, optimize=optimize)\n model = stepwise_fit[0]\n\n seasonal_feature_scoring = x_matrix_score[0, :].tolist() if not x_matrix_score is None else None\n\n result = {\n 'model': model,\n 'diff_order': diff_order,\n 'seasonal_feature_scoring': seasonal_feature_scoring,\n 'ext_training_features': ext_training_features,\n }\n\n p_selected = model.k_ar if hasattr(model, 'k_ar') else 0\n d_selected = diff_order\n q_selected = model.k_ma if hasattr(model, 'k_ma') else 0\n order = (p_selected, d_selected, q_selected)\n\n return result, order",
"def execute(train_ts, train_inputs, test_inputs, op_exec):\n if op_exec == \"mlp\":\n model = mod.mlp_model()\n model.fit(train_inputs, train_ts)\n forecast = model.forecast(test_inputs)\n return forecast\n elif op_exec == \"rf\":\n model = mod.rf_model()\n model.fit(train_inputs, train_ts)\n forecast = model.forecast(test_inputs)\n return forecast",
"def _predict(cls, model, is_log_transformed,\n raw_actual, interpolated_actual,\n training_end=None, seasonal_feature_scoring=None, pred_date=None, order_of_diff=None,\n training_tail=None, ext_training_features=None, pred_len=None, freq=None,\n include_holidays_exog=None):\n\n import numpy as np\n import pandas as pd\n import scipy.stats as st\n from numpy.linalg import LinAlgError\n import math\n\n alpha = cls._sig_level\n alpha_extreme = cls._sig_level_extreme\n\n include_holidays_exog = include_holidays_exog if ext_training_features else 0\n\n index = pd.date_range(start=training_end, end=pred_date, freq=freq)[1:] # Holidays are always daily.\n\n de_obj = DataExploration()\n pred_exog = de_obj._get_exog_data(pred_date, pred_date, index) if include_holidays_exog else None\n\n if pred_exog is not None and set(pred_exog.columns.values) != set(ext_training_features):\n missing_col_list = list(set(ext_training_features) - set(pred_exog.columns.values))\n common_cols = list(set(ext_training_features).intersection(set(pred_exog.columns.values)))\n temp_df = pred_exog[common_cols]\n missing_feat_df = pd.DataFrame(np.zeros([len(pred_exog), len(missing_col_list)]),\n columns=missing_col_list, index=pred_exog.index.values)\n pred_exog = pd.concat([temp_df, missing_feat_df], axis=1)\n pred_exog = pred_exog[ext_training_features]\n\n freq = \"1\" + freq if not any(char.isdigit() for char in freq) else freq\n\n forecast_ndays = int((pred_date - pd.Timestamp(training_end)) / pd.Timedelta(freq))\n model_freshness = forecast_ndays / float(pred_len)\n\n try:\n if forecast_ndays > pred_len:\n raise ValueError('Current trained model object expired')\n\n float_min = 1e-10\n\n # set exogenous (holiday) variables for input data\n if include_holidays_exog:\n pred_exog = pred_exog.loc[pd.Timestamp(training_end) + pd.Timedelta(freq): pred_date]\n else:\n pred_exog = None\n\n if seasonal_feature_scoring:\n if not include_holidays_exog:\n pred_exog = seasonal_feature_scoring[:forecast_ndays]\n else:\n pred_exog['fourier_feature'] = seasonal_feature_scoring[:forecast_ndays]\n\n forecast = list(model.forecast(steps=forecast_ndays, alpha=alpha, exog=pred_exog))\n interpolated_training_data = list(zip(*training_tail))[1]\n\n for order in list(reversed(range(order_of_diff))):\n training_data_diff = np.diff(interpolated_training_data,\n order) if order > 0 else interpolated_training_data\n\n forecast_diff_mean = [training_data_diff[-1]]\n forecast_diff_ci = []\n\n for i in range(forecast_ndays):\n forecast_diff_mean.append(forecast_diff_mean[-1] + forecast[0][i])\n forecast_diff_ci.append([forecast_diff_mean[-1] -\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i]),\n forecast_diff_mean[-1] +\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i])])\n forecast[0] = forecast_diff_mean[1:]\n forecast[2] = forecast_diff_ci\n\n if is_log_transformed:\n transformed_back_forecast = np.exp(forecast[0][-1] + ((forecast[1][-1] ** 2) / 2.0)) - 1\n transformed_back_std_err = np.sqrt((np.exp(forecast[1][-1] ** 2) - 1) * (np.exp((2 * forecast[0][-1]) +\n (forecast[1][\n -1] ** 2))))\n transformed_back_CILower = transformed_back_forecast - \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_CIUpper = transformed_back_forecast + \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_interpolated_actual = float(np.exp(interpolated_actual) - 
1)\n if np.sum(np.isnan(forecast[0][-1])) or np.isnan(forecast[1][-1]):\n raise ValueError('Predicted null value')\n\n if is_log_transformed:\n zscore = (transformed_back_interpolated_actual -\n transformed_back_forecast) / max(float(transformed_back_std_err), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(transformed_back_CILower) \\\n or math.isnan(transformed_back_CIUpper):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN under log transform')\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'LogTransformedAdjustedActual': interpolated_actual,\n 'LogTransformedPrediction': float(forecast[0][-1]),\n 'LogTransformedStdErr': float(forecast[1][-1]),\n 'LogTransformedCILower': float(forecast[2][-1][0]),\n 'LogTransformedCIUpper': float(forecast[2][-1][1]),\n 'AdjustedActual': transformed_back_interpolated_actual,\n 'Prediction': float(transformed_back_forecast) if not float(\n transformed_back_forecast) == float('inf') else 0.0,\n 'StdErr': float(transformed_back_std_err) if not float(\n transformed_back_std_err) == float('inf') else 0.0,\n 'CILower': float(transformed_back_CILower) if not float(\n transformed_back_CILower) == float('-inf') else 0.0,\n 'CIUpper': float(transformed_back_CIUpper) if not float(\n transformed_back_CIUpper) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n else:\n zscore = (interpolated_actual - forecast[0][-1]) / max(float(forecast[1][-1]), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(forecast[2][-1][0]) or math.isnan(forecast[2][-1][1]):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN')\n\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'AdjustedActual': interpolated_actual,\n 'Prediction': float(forecast[0][-1]) if not float(\n forecast[0][-1]) == float('inf') else 0.0,\n 'StdErr': float(forecast[1][-1]) if not float(\n forecast[1][-1]) == float('inf') else 0.0,\n 'CILower': float(forecast[2][-1][0]) if not float(\n forecast[2][-1][0]) == float('-inf') else 0.0,\n 'CIUpper': float(forecast[2][-1][1]) if not float(\n forecast[2][-1][1]) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n except (LinAlgError, ValueError, 
LADStructuralError) as e:\n result = {'Success': False,\n 'AdjustedActual': interpolated_actual,\n 'ErrorMessage': str(e)}\n\n return result",
"def forecast(dataloader):\n model = NBeats.load_from_checkpoint(os.path.join(DIR, 'model/nbeats.ckpt'))\n raw_predictions = model.predict(dataloader, mode=\"raw\", return_x=False)\n y = np.array(raw_predictions['prediction'].reshape(-1, 1))\n df = pd.DataFrame(data=y, columns=['prediction'])\n df.to_csv(os.path.join(DIR, 'data/nbeats_forecast.csv'), index=False)\n return y",
"def create_forecast_dataset(self):\n pass",
"def predict(model, up_to_days=DEFAULT_UP_TO_DAYS, start_time=None, end_time=None, plot=False):\n if start_time is None and end_time is None:\n df_future = model.make_future_dataframe(periods=24 * up_to_days, freq='H')\n start_future = df_future.iloc[-1, :]['ds'] - datetime.timedelta(days=up_to_days)\n df_future = df_future[df_future['ds'] >= start_future]\n elif start_time is not None and end_time is not None:\n indices = pd.date_range(start_time, end_time, freq='H')\n assert len(indices) > 0, \"Indices should not be empty\"\n df_future = pd.DataFrame(columns=['ds', 'y'])\n df_future['ds'] = indices\n else:\n raise ValueError(\"Either up_to_days or start_time and end_time must be set appropriately.\")\n df_forecast = model.predict(df_future)\n if plot:\n model.plot(df_forecast)\n model.plot_components(df_forecast)\n return df_forecast",
"def make_forecast(chain, train_data, len_forecast: int, max_window_size: int):\n\n # Here we define which task should we use, here we also define two main\n # hyperparameters: forecast_length and max_window_size\n task = Task(TaskTypesEnum.ts_forecasting,\n TsForecastingParams(forecast_length=len_forecast,\n max_window_size=max_window_size,\n return_all_steps=False,\n make_future_prediction=True))\n\n # Prepare data to train the model\n train_input = InputData(idx=np.arange(0, len(train_data)),\n features=None,\n target=train_data,\n task=task,\n data_type=DataTypesEnum.ts)\n\n # Make a \"blank\", here we need just help FEDOT understand that the\n # forecast should be made exactly the \"len_forecast\" length\n predict_input = InputData(idx=np.arange(0, len_forecast),\n features=None,\n target=None,\n task=task,\n data_type=DataTypesEnum.ts)\n\n available_model_types_primary = ['linear', 'ridge', 'lasso',\n 'dtreg', 'knnreg']\n\n available_model_types_secondary = ['linear', 'ridge', 'lasso', 'rfr',\n 'dtreg', 'knnreg', 'svr']\n\n composer_requirements = GPComposerRequirements(\n primary=available_model_types_primary,\n secondary=available_model_types_secondary, max_arity=5,\n max_depth=3, pop_size=10, num_of_generations=12,\n crossover_prob=0.8, mutation_prob=0.8,\n max_lead_time=datetime.timedelta(minutes=5),\n add_single_model_chains=True)\n\n metric_function = MetricsRepository().metric_by_id(\n RegressionMetricsEnum.RMSE)\n builder = GPComposerBuilder(task=task).with_requirements(\n composer_requirements).with_metrics(metric_function).with_initial_chain(\n chain)\n composer = builder.build()\n\n obtained_chain = composer.compose_chain(data=train_input,\n is_visualise=False)\n obtained_chain.__class__ = TsForecastingChain\n\n print('Obtained chain')\n obtained_models = []\n for node in obtained_chain.nodes:\n print(str(node))\n obtained_models.append(str(node))\n depth = int(obtained_chain.depth)\n print(f'Глубина цепочки {depth}')\n\n # Fit it\n obtained_chain.fit_from_scratch(train_input)\n\n # Predict\n predicted_values = obtained_chain.forecast(initial_data=train_input,\n supplementary_data=predict_input).predict\n\n return predicted_values, obtained_models, depth",
"def forecast(run_metadata,\n specification,\n output_root, mark_best, production_tag,\n preprocess_only,\n verbose, with_debugger,\n **input_versions):\n cli_tools.configure_logging_to_terminal(verbose)\n\n do_forecast(\n run_metadata=run_metadata,\n specification=specification,\n output_root=output_root,\n mark_best=mark_best,\n production_tag=production_tag,\n preprocess_only=preprocess_only,\n with_debugger=with_debugger,\n input_versions=input_versions,\n )\n\n logger.info('**Done**')",
"def run_forecast(data_start_date='1980-01-01', forecast_end_date='2015-12-01',\n test_date_start='2010-01-01', test_model=True):\n \n # 1. Prepare dataframe.\n df = read_and_prepare_dataframe(start_date=data_start_date)\n \n # 2. Split dataframe into train and test.\n y, y_train, y_test = time_series_train_test_split_y(df=df, test_date_start=test_date_start)\n \n # 3. Optimize parameters.\n opt_order, opt_seasonal_order = optimize_sarima_parameters(y,\n trend='c', start_p=0, start_q=0, test='adf', \n max_order=6, stepwise=True, trace=False)\n \n # 4. Train SARIMAX model.\n results = train_sarima_model(y_train=y_train, order=opt_order, seasonal_order=opt_seasonal_order, \n plot_diagnostics=False,\n trend='c', enforce_stationarity=False, enforce_invertibility=False)\n \n # 5. Test the model (optional).\n if test_model:\n test_sarima_model(y=y, y_test=y_test, results=results, dynamic=True)\n \n # 6. Make the forecast with the model and return the table containing the results.\n table = forecast_temperature(y=y, results=results, end_date=forecast_end_date)\n return table",
"def predict(self, params, start=None, end=None):\n if start is None:\n freq = getattr(self._index, 'freq', 1)\n start = self._index[-1] + freq\n start, end, out_of_sample, prediction_index = self._get_prediction_index(\n start=start, end=end)\n if out_of_sample > 0:\n res = self._predict(h=out_of_sample, **params)\n else:\n res = self._predict(h=0, **params)\n return res.fittedfcast[start:end + out_of_sample + 1]",
"def forecast(self, stock):\n # Load the trained model\n model_handler = ModelHandler()\n model = model_handler.load_json_model(stock)\n\n # Importing the training set\n dataset = pd.read_csv(stock.csv_name)\n dates = dataset.iloc[len(dataset)-31:len(dataset)-1, 0].values\n dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]\n\n # Create the test dataset\n dataset_test = dataset[len(dataset) - 30:]\n real_stock_price = dataset_test.iloc[:, 1:2].values\n dataset = dataset['Open']\n inputs = dataset[len(dataset) - len(dataset_test) - 60:].values\n inputs = inputs.reshape(-1, 1)\n\n # Feature Scaling\n sc = MinMaxScaler(feature_range=(0, 1))\n inputs = sc.fit_transform(inputs)\n\n x_test = []\n x_test.append(inputs[0:60, 0])\n predicted_values = []\n for i in range(1, 31):\n x_test_np = np.array(x_test)\n x_test_np = np.reshape(x_test_np, (x_test_np.shape[0], x_test_np.shape[1], 1))\n new_data = model.predict(x_test_np)\n predicted_values.append(new_data[0])\n x_test[0] = np.delete(x_test[0], 0)\n x_test[0] = np.concatenate([x_test[0], new_data[0]])\n\n predicted_values = sc.inverse_transform(predicted_values)\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n plt.gca().xaxis.set_major_locator(mdates.DayLocator())\n plt.plot(dates, real_stock_price, color='red', label=f'Actual {stock.ticker} Stock Price')\n plt.plot(dates, predicted_values, color='blue', label=f'Predicted {stock.ticker} Stock Price')\n plt.gcf().autofmt_xdate()\n plt.title(f'{stock.ticker} Stock Price Prediction')\n plt.xlabel('Time')\n plt.ylabel(f'{stock.ticker} Stock Price')\n plt.legend()\n plt.show()",
"def rungetorf(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # check if orfs already predicted (in self.input data structure)\n if not self.input['orfs']:\n # predict ORFs on the dna sequence\n _retstruct = rungetorf( { self._create_auto_key(): self.input } )\n else:\n # ORFs already available in self.input['orfs']\n pass",
"def get_idx_forecast(idx_start, ar_iteration, forecast_cycle, output_k):\n return idx_start + (forecast_cycle * ar_iteration) + output_k",
"def serving_input_fn():\n inputs = {\n \"dayofweek\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.int64, shape=[None], name=\"dayofweek\"),\n \"hourofday\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.int64, shape=[None], name=\"hourofday\"),\n \"pickuplon\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.float32, shape=[None], name=\"pickuplon\"),\n \"pickuplat\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.float32, shape=[None], name=\"pickuplat\"),\n \"dropofflon\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.float32, shape=[None], name=\"dropofflon\"),\n \"dropofflat\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.float32, shape=[None], name=\"dropofflat\"),\n \"passengers\": tf.compat.v1.placeholder(\n dtype=tf.dtypes.float32, shape=[None], name=\"passengers\")\n }\n\n features = add_engineered(inputs)\n\n return tf.estimator.export.ServingInputReceiver(\n features=features, receiver_tensors=inputs)",
"def get_expo_data(\n data: Union[pd.Series, pd.DataFrame],\n target_column: str = \"close\",\n trend: str = \"A\",\n seasonal: str = \"A\",\n seasonal_periods: int = 7,\n dampen: str = \"F\",\n n_predict: int = 5,\n start_window: float = 0.85,\n forecast_horizon: int = 5,\n metric: str = \"mape\",\n) -> Tuple[\n List[TimeSeries],\n List[TimeSeries],\n List[TimeSeries],\n Optional[Union[float, ndarray]],\n ExponentialSmoothing,\n]:\n\n use_scalers = False\n _, ticker_series = helpers.get_series(data, target_column, is_scaler=use_scalers)\n\n if trend == \"M\":\n trend_model = ModelMode.MULTIPLICATIVE\n elif trend == \"N\":\n trend_model = ModelMode.NONE\n else: # Default\n trend_model = ModelMode.ADDITIVE\n\n if seasonal == \"M\":\n seasonal_model = SeasonalityMode.MULTIPLICATIVE\n elif seasonal == \"N\":\n seasonal_model = SeasonalityMode.NONE\n else: # Default\n seasonal_model = SeasonalityMode.ADDITIVE\n\n damped = True\n if dampen == \"F\":\n damped = False\n\n # Model Init\n model_es = ExponentialSmoothing(\n trend=trend_model,\n seasonal=seasonal_model,\n seasonal_periods=int(seasonal_periods),\n damped=damped,\n random_state=42,\n )\n\n try:\n # Historical backtesting\n historical_fcast_es = model_es.historical_forecasts(\n ticker_series, # backtest on entire ts\n start=float(start_window),\n forecast_horizon=int(forecast_horizon),\n verbose=True,\n )\n except Exception as e: # noqa\n error = str(e)\n # lets translate this to something everyone understands\n if \"with`overlap_end` set to `False`.\" in error:\n console.print(\n \"[red]Dataset too small.[/red]\"\n \"[red] Please increase size to at least 100 data points.[/red]\"\n )\n else:\n console.print(f\"[red]{error}[/red]\")\n return [], [], [], None, None\n\n # train new model on entire timeseries to provide best current forecast\n best_model = ExponentialSmoothing(\n trend=trend_model,\n seasonal=seasonal_model,\n seasonal_periods=int(seasonal_periods),\n damped=damped,\n random_state=42,\n )\n\n # we have the historical fcast, now lets train on entire set and predict.\n best_model.fit(ticker_series)\n probabilistic_forecast = best_model.predict(int(n_predict), num_samples=500)\n\n if metric == \"rmse\":\n precision = rmse(actual_series=ticker_series, pred_series=historical_fcast_es)\n elif metric == \"mse\":\n precision = mse(actual_series=ticker_series, pred_series=historical_fcast_es)\n elif metric == \"mape\":\n precision = mape(actual_series=ticker_series, pred_series=historical_fcast_es)\n elif metric == \"smape\":\n precision = smape(actual_series=ticker_series, pred_series=historical_fcast_es)\n\n console.print(\n f\"Exponential smoothing obtains {metric.upper()}: {precision:.2f}% \\n\"\n )\n\n return (\n ticker_series,\n historical_fcast_es,\n probabilistic_forecast,\n precision,\n best_model,\n )",
"def rolling_arima(train, test, endog, order, exog=None, dates=None, freq=None, missing='none'):\n history = list(train)\n predictions = []\n for row in test:\n model = sm.tsa.ARIMA(endog=history, order=order, exog=exog, dates=dates, freq=freq, missing=missing)\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0][0]\n predictions.append(yhat)\n history.append(row)\n\n return predictions",
"async def forecast(self) -> Optional[Report]:\n pass",
"def update(self, y_true: list[Number], y_pred: list[Number]) -> ForecastingMetric:",
"def _predict(self, fh, X):\n names, _ = self._check_forecasters()\n y_pred = pd.concat(self._predict_forecasters(fh, X), axis=1, keys=names)\n y_pred = y_pred.groupby(level=1, axis=1).agg(\n _aggregate, self.aggfunc, self.weights\n )\n return y_pred",
"def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction",
"def run(self, action: str = 'evaluate') -> Union[\n pd.DataFrame, List[Dict[str, Dict[str, Union[Union[float, str], Any]]]]]:\n model = VAR(endog=self.train)\n model_fit = model.fit()\n if action == 'predict':\n # Use full dataset to get prediction\n model_full = VAR(endog=self.df)\n model_fit_full = model_full.fit()\n return pd.DataFrame(model_fit.forecast(model_fit_full.y, steps=2), index=['2008', '2009'],\n columns=[self.df.columns])\n else:\n tmp = []\n for col in self.df.columns:\n tmp.append({col: {'rmse_val': sqrt(\n mean_squared_error(self.valid[col], self._prediction(model_fit, self.valid)[[col]])),\n 'mae_val': mean_absolute_error(self.valid[col],\n self._prediction(model_fit, self.valid)[[col]]),\n 'mape_val': f'{self.mean_absolute_percentage_error(self.valid[col], self._prediction(model_fit, self.valid)[[col]])} %'}})\n\n return tmp",
"def _predict(self, fh, X):\n y_pred_df = pd.concat(self._predict_forecasters(fh, X), axis=1)\n # apply weights\n y_pred = y_pred_df.apply(lambda x: np.average(x, weights=self.weights_), axis=1)\n y_pred.name = self._y.name\n return y_pred",
"def forecast(self, noutput_items, ninputs):\n # create an integer array of zeros noutput_items long\n ninputs_needed = [0] * ninputs\n for i in range(ninputs):\n ninputs_needed[i] = (self.vdecimate*noutput_items) + \\\n self.gateway.history() - 1\n \n return ninputs_needed"
] | [
"0.62966007",
"0.5987943",
"0.571518",
"0.5691806",
"0.5684179",
"0.56726193",
"0.5672091",
"0.5671584",
"0.563833",
"0.55245525",
"0.53885925",
"0.5387731",
"0.5384682",
"0.5375867",
"0.53200275",
"0.5293495",
"0.5273004",
"0.52590513",
"0.52562207",
"0.5252408",
"0.5236556",
"0.5233061",
"0.5212854",
"0.5205837",
"0.51832426",
"0.51785153",
"0.51714987",
"0.51621586",
"0.51507556",
"0.51405996"
] | 0.8307179 | 0 |
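For orientation, the detrend-and-restore bookkeeping used by get_forecast above can be exercised on its own. The sketch below is illustrative only: naive_detrended_forecast and its last-value dummy predictor are hypothetical stand-ins for the trained seq2seq model and its data generators, and only NumPy/pandas are assumed.

import numpy as np
import pandas as pd

def naive_detrended_forecast(input_endog, output_window=3):
    # hypothetical stand-in for get_forecast: same subtract-first-obs /
    # add-it-back pattern, with the model call replaced by a last-value carry
    val_index = np.arange(input_endog.index[-1] + 1,
                          input_endog.index[-1] + 1 + output_window)
    X_endog = input_endog.T.dropna()                     # keep only complete series
    first_obs = np.array(X_endog.iloc[:, 0]).reshape((-1, 1))
    detrended = np.array(X_endog) - first_obs            # remove each series' starting level
    forecast = np.repeat(detrended[:, -1:], output_window, axis=1)  # dummy "model"
    forecast += first_obs                                # restore the level
    forecast_df = pd.DataFrame(np.nan, index=val_index, columns=input_endog.columns)
    forecast_df[list(X_endog.index)] = forecast.T
    return forecast_df

demo = pd.DataFrame({"item_a": [10., 11., 12.], "item_b": [5., 5., 6.]})
print(naive_detrended_forecast(demo))

On the two-series demo frame this prints a flat forecast at each series' last observed value, which makes the subtract-then-add-back step easy to verify by hand.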
Detect collision between a rectangle and circle. | def collision(rleft, rtop, width, height, # rectangle definition
              center_x, center_y, radius):  # circle definition
    # requires a module-level "from math import hypot"
    global intX, intY
    # complete bounding box of the rectangle
    rright, rbottom = rleft + width, rtop + height
    # bounding box of the circle
    cleft, ctop = center_x-radius, center_y-radius
    cright, cbottom = center_x+radius, center_y+radius
    # trivial reject if the bounding boxes do not intersect
    if rright < cleft or rleft > cright or rbottom < ctop or rtop > cbottom:
        return False  # no collision possible
    # check whether any corner of the rectangle lies within the circle
    for x in (rleft, rleft+width):
        for y in (rtop, rtop+height):
            # compare the distance between the circle's center and each
            # corner of the rectangle with the circle's radius
            if hypot(x-center_x, y-center_y) <= radius:
                intX = x
                intY = y
                return True  # collision detected
    # clamp the circle's center to the rectangle and test the clamped point;
    # this covers the center-inside case and edge overlaps where no corner is inside
    nearest_x = min(max(center_x, rleft), rright)
    nearest_y = min(max(center_y, rtop), rbottom)
    if hypot(nearest_x-center_x, nearest_y-center_y) <= radius:
        return True  # overlaid or overlapping an edge
    return False  # no collision detected
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __circle_collision(self, circle):\n raise Exception('--- This methods have not been implemented yet! Use circle_collider instead ---')",
"def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2",
"def circles_overlapping(x1, y1, x2, y2, r):\n # print(abs((x2-x1)**2 + (y2-y1)**2))\n # print((2*r)**2)\n if (abs((x2-x1)**2 + (y2-y1)**2) > (2*r)**2):\n return False\n else: return True",
"def intersect_rectangle_circle(rec_pos, sx, sy, circle_pos, circle_radius, circle_speed):\n\n # Position of the walls relative to the ball\n top = (rec_pos.y ) - circle_pos.y\n bottom = (rec_pos.y + sy) - circle_pos.y \n left = (rec_pos.x ) - circle_pos.x\n right = (rec_pos.x + sx) - circle_pos.x\n\n r = circle_radius \n intersecting = left <= r and top <= r and right >= -r and bottom >= -r\n\n if intersecting:\n # Now need to figure out the vector to return.\n # should be just a matter of flipping x and y of the ball?\n\n impulse = circle_speed.normalized()\n\n if abs(left) <= r and impulse.x > 0:\n impulse.x = -impulse.x\n if abs(right) <= r and impulse.x < 0:\n impulse.x = -impulse.x\n if abs(top) <= r and impulse.y > 0:\n impulse.y = -impulse.y\n if abs(bottom) <= r and impulse.y < 0:\n impulse.y = -impulse.y\n \n #print(\"Impact\", circle_speed, impulse.normalized())\n\n return impulse.normalized()\n raise Exception(\"No intersection\")",
"def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)",
"def CCD_circle_collision(self, old_pos1, old_pos2, old_v1, old_v2, r1, r2, m1, m2, return_t):\n\n relative_pos = [old_pos1[0]-old_pos2[0], old_pos1[1]-old_pos2[1]]\n relative_v = [old_v1[0]-old_v2[0], old_v1[1]-old_v2[1]]\n if (relative_v[0]**2+relative_v[1]**2) == 0:\n return -1\n\n pos_v = relative_pos[0]*relative_v[0] + relative_pos[1]*relative_v[1]\n K = pos_v/(relative_v[0]**2+relative_v[1]**2)\n l = (relative_pos[0]**2 + relative_pos[1]**2 - (r1+r2)**2)/(relative_v[0]**2+relative_v[1]**2)\n\n sqrt = (K**2 - l)\n if sqrt <0 and return_t:\n #print('CCD circle no solution')\n return -1\n\n sqrt = math.sqrt(sqrt)\n t1 = -K - sqrt\n t2 = -K + sqrt\n t = min(t1, t2)\n\n if return_t:\n return t\n\n x1,y1 = old_pos1\n x2,y2 = old_pos2\n x1_col = x1 + old_v1[0]*t\n y1_col = y1 + old_v1[1]*t\n x2_col = x2 + old_v2[0]*t\n y2_col = y2 + old_v2[1]*t\n pos_col1, pos_col2 = [x1_col, y1_col], [x2_col, y2_col]\n\n #handle collision\n v1_col, v2_col = self._circle_collision_response(pos_col1, pos_col2, old_v1, old_v2, m1, m2) #the position and v at the collision time\n\n return pos_col1, v1_col, pos_col2, v2_col",
"def is_intersects(self, circle):\n if self.distance_to(circle) < self.radius + circle.radius:\n return True\n return False",
"def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True",
"def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2",
"def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False",
"def collide(self):\n dist = distance.cdist(self.object_position, self.object_position, \"euclidean\")\n collision = ((dist - self.object_radius) <= 0) * 1\n np.fill_diagonal(collision, 0)\n collision = np.sum(collision, axis=1)\n print(dist)\n print(collision)\n return collision",
"def detect_collision():\n # with the top and bottom of screen\n if ball.ycor() > GAME_Y_BARRIER or ball.ycor() < -GAME_Y_BARRIER:\n ball.bounce_y()\n # with the paddles\n if ball.distance(paddle_right) < 50 and ball.xcor() > GAME_X_BARRIER \\\n or ball.distance(paddle_left) < 50 and ball.xcor() < -GAME_X_BARRIER:\n ball.bounce_x()",
"def collides(a, b):\n # Current locations.\n xa = a.x\n xb = b.x\n ya = a.y\n yb = b.y\n\n # Treat b as a point, we only need one radius.\n try:\n radius = a.radius + b.radius\n except AttributeError:\n radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)\n\n # Previous frame locations.\n try: pxa = a.px\n except KeyError: pxa = xa\n try: pya = a.py\n except KeyError: pya = ya\n try: pxb = b.px\n except KeyError: pxb = xb\n try: pyb = b.py\n except KeyError: pyb = yb\n\n # Translate b's final position to be relative to a's start.\n # And now, circle/line collision.\n dir_x = pxa + (xb - xa) - pxb\n dir_y = pya + (yb - ya) - pyb\n\n diff_x = pxa - pxb\n diff_y = pya - pyb\n if (dir_x < 0.0001 and dir_x > -0.0001\n and dir_y < 0.0001 and dir_y > -0.0001):\n # b did not move relative to a, so do point/circle.\n return diff_x * diff_x + diff_y * diff_y < radius * radius\n\n # dot(diff, dir) / dot(dir, dir)\n t = (diff_x * dir_x + diff_y * dir_y) / (dir_x * dir_x + dir_y * dir_y)\n if t < 0:\n t = 0\n elif t > 1:\n t = 1\n\n dist_x = pxa - (pxb + dir_x * t)\n dist_y = pya - (pyb + dir_y * t)\n\n # dist_sq < radius_sq\n return dist_x * dist_x + dist_y * dist_y <= radius * radius",
"def collide(self, x, y):\n return self._rect.collidepoint(x, y)",
"def cylinder_collision_detection(\n point_a1, point_a2, radius_a, point_b1, point_b2, radius_b, bbox_a=None, bbox_b=None\n):\n\n if bbox_a is None:\n bbox_a = get_bbox([point_a1, point_a2], margin=radius_a)\n if bbox_b is None:\n bbox_b = get_bbox([point_b1, point_b2], margin=radius_b)",
"def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly",
"def _circle_collision_response(self, coord1, coord2, v1, v2, m1, m2):\n n_x = coord1[0] - coord2[0]\n n_y = coord1[1] - coord2[1]\n\n vdiff_x = (v1[0] - v2[0])\n vdiff_y = (v1[1] - v2[1])\n\n n_vdiff = n_x * vdiff_x + n_y * vdiff_y\n nn = n_x * n_x + n_y * n_y\n b = n_vdiff/nn\n\n #object 1\n u1_x = v1[0] - 2*(m2/(m1+m2)) * b * n_x\n u1_y = v1[1] - 2*(m2/(m1+m2)) * b * n_y\n\n #object 2\n u2_x = v2[0] + 2*(m1/(m1+m2)) * b * n_x\n u2_y = v2[1] + 2*(m1/(m1+m2)) * b * n_y\n\n return [u1_x*self.restitution, u1_y*self.restitution], [u2_x*self.restitution, u2_y*self.restitution]",
"def overlap(cir1x, cir1y, rad1, cir2x, cir2y, rad2):\n radius = rad1 + rad2\n compare = ((cir2y - cir1y)**2 + (cir2x - cir1x)**2)**0.5\n if compare > radius:\n print \"no overlapping\"\n else:\n print \"overlapping\"",
"def is_in_collision_point(self, pos):\n x, y = pos\n return sqrt((self.x - x)**2 + (self.y - y)**2) < self.r",
"def collision(self, block):\n if self.pos_x == block.pos_x and self.pos_y+self.height == block.pos_y:\n self.col_d = True\n if self.pos_x == block.pos_x+block.width and self.pos_y == block.pos_y:\n self.col_l = True\n if self.pos_x == block.pos_x-self.width and self.pos_y == block.pos_y:\n self.col_r = True",
"def collides(pos1, rad1, pos2, rad2):\n x1 = pos1[0]\n y1 = pos1[1]\n\n x2 = pos2[0]\n y2 = pos2[1]\n\n # Right edge is over the left edge and\n # Left edge is not over the right edge\n right_horizontal_colliding = (\n ((x1 + rad1) > (x2 - rad2)) and ((x1 - rad1) < (x2 + rad2))\n )\n left_horizontal_colliding = (\n ((x1 - rad1) < (x2 + rad2)) and ((x1 + rad1) > (x2 - rad2))\n )\n vertical_colliding = (\n ((y1 + rad1) > (y2 - rad2)) and ((y1 - rad1) < (y2 + rad2))\n )\n\n horizontal_colliding = \\\n right_horizontal_colliding or left_horizontal_colliding\n\n return horizontal_colliding and vertical_colliding",
"def is_rectangle_colliding(self, rectangle):\n for obstacle in self.obstacle_iterator():\n if rectangle.colliderect(obstacle.rect):\n return True\n return False",
"def collision_detect(self):\n\n # Check if the collision was with a map\n # Rect-based collision code\n for map_rect in Map.current_map.collision_rects:\n collision_time, norm_x, norm_y = collision.aabb_swept_collision(self.rect, (self.vx, self.vy), map_rect)\n if collision_time != 1:\n if DEBUG: print(\"[collision]\", collision_time)\n break\n self.x += self.vx * collision_time\n self.y += self.vy * collision_time\n\n remaining_time = 1 - collision_time\n \"\"\"\n if remaining_time > 0:\n self.vx *= remaining_time;\n self.vy *= remaining_time;\n \"\"\"\n if collision_time != 1:\n if abs(norm_x) > .0001:\n self.vx = -self.vx * COLLISION_DAMPING\n if abs(norm_y) > .0001:\n self.vy = -self.vy * COLLISION_DAMPING\n self.collision_counter += 1\n return True\n return False\n\n # Old, mask-based collision code\n \"\"\"\n self.mask = pygame.mask.from_surface(self.image)\n point = pygame.sprite.collide_mask(Map.current_map, self)\n if point:\n if COLLISION_ALGORITHM_EXPERIMENTAL:\n self.vx, self.vy = collision.calculate_reflection_angle(Map.current_map.mask, point, (self.vx, self.vy))\n else: \n self.vx, self.vy = collision.simple_collision(Map.current_map.mask, point, (self.vx, self.vy))\n self.vx, self.vy = self.vx * COLLISION_DAMPING, self.vy * COLLISION_DAMPING\n \n self.collision_counter += 1\n return True\n return False\n \"\"\"",
"def draw(self, window):\r\n # sauvegarde rectangle pour les collisions\r\n self.rectangle = pygame.draw.circle(window, self.color, (self.center_x, self.center_y), self.radius)\r\n\r\n # pour afficher la hitbox de la balle\r\n # pygame.draw.rect(window, \"red\", (self.rectangle.x, self.rectangle.y, self.rectangle.width, self.rectangle.height), 2)\r",
"def collision(self):\n # Check collision with walls\n (x_coord, y_coord) = (self.x_coord[0], self.y_coord[0])\n if x_coord <= EDGE or x_coord >= SCREEN_X - self.size - EDGE or \\\n y_coord <= EDGE or y_coord >= SCREEN_Y - self.size - EDGE:\n return True\n # Check collision with self\n corners = self.get_corners()\n if self.heading == \"right\":\n (frontleft_x, frontleft_y) = (corners[1][0], corners[1][1])\n (frontright_x, frontright_y) = (corners[2][0], corners[2][1])\n elif self.heading == \"left\":\n (frontleft_x, frontleft_y) = (corners[3][0], corners[3][1])\n (frontright_x, frontright_y) = (corners[0][0], corners[0][1])\n elif self.heading == \"up\":\n (frontleft_x, frontleft_y) = (corners[0][0], corners[0][1])\n (frontright_x, frontright_y) = (corners[1][0], corners[1][1])\n elif self.heading == \"down\":\n (frontleft_x, frontleft_y) = (corners[2][0], corners[2][1])\n (frontright_x, frontright_y) = (corners[3][0], corners[3][1])\n for i in range(len(self.x_coord)):\n if self.x_coord[i] < frontleft_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontleft_y < self.y_coord[i] + self.size:\n return True\n if self.x_coord[i] < frontright_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontright_y < self.y_coord[i] + self.size:\n return True\n return False",
"def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)",
"def inside_rectangle(self, x, y):\n if (self.pos.x - self.width < x < self.pos.x + self.width and\n self.pos.y - self.height < y < self.pos.y + self.height):\n return True",
"def check_collision(self, a, b):\n\n dis_x = abs((a.x+a.r + a.dx)-(b.x+b.r + b.dx))\n dis_y = abs((a.y+a.r + a.dy)-(b.y+b.r + b.dy))\n distance = math.sqrt(dis_x*dis_x + dis_y*dis_y)\n\n if distance <= (b.r + a.r) and (a.colliding == False or b.colliding == False):\n\n return True",
"def isCrossingCircle(self, other):\n vector = Vector.createFromTwoPoints(self.center, other.center)\n return vector.norm < self.radius + other.radius",
"def has_collide(self, obj):\n rect1 = self.anim.getRect()\n rect2 = obj.anim.getRect()\n \n rect1.move_ip(self.pos)\n rect2.move_ip(obj.pos)\n \n return rect1.colliderect(rect2)"
] | [
"0.77753234",
"0.7718585",
"0.72119",
"0.71962464",
"0.71542555",
"0.71298283",
"0.71152836",
"0.70446295",
"0.691543",
"0.6855141",
"0.6847333",
"0.67994976",
"0.6798189",
"0.6795586",
"0.66961086",
"0.66861546",
"0.6671946",
"0.65710783",
"0.6566224",
"0.65465003",
"0.65379095",
"0.6510779",
"0.64749473",
"0.645877",
"0.64249516",
"0.64206254",
"0.63958997",
"0.639309",
"0.63679975",
"0.6360075"
] | 0.83668697 | 0 |
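The negative snippets in the record above all reduce collision detection to either an axis-interval overlap test (collides) or a center-distance test (isCrossingCircle, check_collision). A minimal self-contained sketch of both variants, with illustrative names and plain (x, y) tuples rather than any of the quoted classes:

import math

def circles_collide(pos1, rad1, pos2, rad2):
    # Distance-based test, as in isCrossingCircle: the circles overlap when
    # their centers are closer than the sum of the radii.
    return math.hypot(pos2[0] - pos1[0], pos2[1] - pos1[1]) < rad1 + rad2

def boxes_collide(pos1, half1, pos2, half2):
    # Interval-overlap test, as in collides: the boxes overlap when their
    # x intervals and y intervals overlap simultaneously.
    return (abs(pos1[0] - pos2[0]) < half1 + half2
            and abs(pos1[1] - pos2[1]) < half1 + half2)

print(circles_collide((0, 0), 2, (2, 2), 1))  # True: hypot(2, 2) ~ 2.83 < 3
print(boxes_collide((0, 0), 2, (2, 2), 1))    # True: |2| < 3 on both axes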
Send an order/modif/cancel to the market | def _send_to_market(self, order, is_mine):
ord_type = order[self.col_idx['ordtype']]
if ord_type == "new":
self.mkt.send(is_buy=order[self.col_idx['is_buy']],
qty=order[self.col_idx['qty']],
price=order[self.col_idx['price']],
uid=order[self.col_idx['uid']],
is_mine=is_mine,
timestamp=order[self.col_idx['timestamp']])
elif ord_type == "cancel":
self.mkt.cancel(uid=order[self.col_idx['uid']])
elif ord_type == "modif":
self.mkt.modif(uid=order[self.col_idx['uid']],
new_qty=order[self.col_idx['qty']])
else:
raise ValueError(f'Unexpected ordtype: {ord_type}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def order_send(self, symbol, style, volume, price_stop_loss, price_take_profit, comment, magic, features=None): # =MarketOrder()\n \n dt = get_datetime()\n price = self.data[symbol].price\n \n log.info(\"bo_blotter: order_send %s %s price=%s SL=%s TP=%s %s\" % (volume, symbol, price, price_stop_loss, price_take_profit, repr_execution_style(style))) # repr_execution_style ToFix\n \n if volume == 0:\n log.error(\"Don't bother placing orders for 0 shares.\")\n return\n\n bo = BracketOrder(self, symbol, style, volume, price_stop_loss, price_take_profit, comment, magic, features) \n #order_id_master = bo.send(price, dt)\n bo.send(price, dt)\n\n #log.info(\"bo_blotter: bo#%s order_send %s %s price=%s SL=%s TP=%s %s\" % (bo.ticket, volume, symbol, price, price_stop_loss, price_take_profit, repr_execution_style(style))) # repr_execution_style ToFix\n\n self._d_orders['trades'][bo.ticket] = bo\n\n return(bo.ticket)",
"def SendOrderConfirmation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def order(self, typ, price, volume):\r\n self.count_submitted += 1\r\n self.client.send_order_add(typ, price, volume)",
"def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")",
"def send_quote(self, p_quote, p_ask_out, p_bid_out, count):\n pass",
"def trade_action(self, BUY_QTY):\n BUY_QTY = 4500\n self.trade(BUY_QTY)\n #self.show()",
"async def on_trade_send(self, trade: \"steam.TradeOffer\") -> None:",
"def send_order(self, \n userid:int, percentage:float, order_type:str,\n base:str, trade:str, price:float, exchange=\"baseexchange\", \n is_live=False, episode=None, timestamp=time.time()):\n create_call = \"/order/send\"\n call_loc = f\"{self.address}{create_call}\"\n call_params = {\n \"userid\": userid,\n \"base\": base,\n \"trade\": trade,\n \"exchange\": exchange,\n \"pct\": percentage,\n \"action\": order_type,\n \"price\": price,\n \"spec\": {\n \"episodeid\": episode,\n \"live\": is_live\n },\n \"timestamp\": timestamp\n }\n call_params_json = json.dumps(call_params)\n self.session.post(call_loc, data=call_params_json)",
"def sell(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.sell(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || SELL %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))",
"def buy(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.buy(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || BUY %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))",
"def place_buy_order(self):\n price = request.form[\"price\"]\n stocks = request.form[\"stocks\"]\n trader_id = request.form[\"trader_id\"]\n self.market.place_buy_order(trader_id, price, stocks)\n return \"\"",
"async def new_limit_order(side):\n symbol = App.config[\"symbol\"]\n now_ts = now_timestamp()\n\n #\n # Find limit price (from signal, last kline and adjustment parameters)\n #\n last_kline = App.analyzer.get_last_kline(symbol)\n last_close_price = to_decimal(last_kline[4]) # Close price of kline has index 4 in the list\n if not last_close_price:\n log.error(f\"Cannot determine last close price in order to create a market buy order.\")\n return None\n\n price_adjustment = App.config[\"trader\"][\"limit_price_adjustment\"]\n if side == SIDE_BUY:\n price = last_close_price * Decimal(1.0 - price_adjustment) # Adjust price slightly lower\n elif side == SIDE_SELL:\n price = last_close_price * Decimal(1.0 + price_adjustment) # Adjust price slightly higher\n\n price_str = round_str(price, 2)\n price = Decimal(price_str) # We will use the adjusted price for computing quantity\n\n #\n # Find quantity\n #\n if side == SIDE_BUY:\n # Find how much quantity we can buy for all available USD using the computed price\n quantity = App.quote_quantity # USD\n percentage_used_for_trade = App.config[\"trader\"][\"percentage_used_for_trade\"]\n quantity = (quantity * percentage_used_for_trade) / Decimal(100.0) # Available for trade\n quantity = quantity / price # BTC to buy\n # Alternatively, we can pass quoteOrderQty in USDT (how much I want to spend)\n elif side == SIDE_SELL:\n # All available BTCs\n quantity = App.base_quantity # BTC\n\n quantity_str = round_down_str(quantity, 6)\n\n #\n # Execute order\n #\n order_spec = dict(\n symbol=symbol,\n side=side,\n type=ORDER_TYPE_LIMIT, # Alternatively, ORDER_TYPE_LIMIT_MAKER\n timeInForce=TIME_IN_FORCE_GTC,\n quantity=quantity_str,\n price=price_str,\n )\n\n if App.config[\"trader\"][\"no_trades_only_data_processing\"]:\n print(f\"NOT executed order spec: {order_spec}\")\n else:\n order = execute_order(order_spec)\n\n #\n # Store/log order object in our records (only after confirmation of success)\n #\n App.order = order\n App.order_time = now_ts\n\n return order",
"def _send_market_price_request(self, ric_name):\n mp_req_json = {\n 'ID': 2,\n 'Key': {\n 'Name': ric_name,\n 'Service': service\n },\n }\n self.web_socket_app.send(json.dumps(mp_req_json))\n print(\"SENT on \" + self.session_name + \":\")\n print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))",
"def buy_order(self, market_id, quan, direction):\n trading_accout_id = self.user_info()[TRADING_ACCOUNT_ID]\n # Get current rate of this market\n rate = self.get_current_rate(market_id)\n if rate is None:\n print(\"Error occured in Get market rate!\")\n return None\n\n null = None\n false = False\n true = True\n\n request_body = {\n # \"OcoOrder\": null,\n # \"Type\":null,\n # \"LastChangedDateTimeUTCDate\": null,\n # \"ExpiryDateTimeUTC\": null,\n # \"Applicability\": null,\n \"Direction\": direction,\n # \"ExpiryDateTimeUTCDate\": null,\n # \"TriggerPrice\": null,\n \"BidPrice\": rate,\n # \"AuditId\": \"8049808-0-0-0-R\",\n \"AutoRollover\": false,\n \"MarketId\": market_id,\n \"isTrade\": true,\n \"OfferPrice\": rate,\n \"OrderId\": 0,\n # \"LastChangedDateTimeUTC\": null,\n # \"Currency\": null,\n \"Quantity\": quan,\n # \"QuoteId\": null,\n \"TradingAccountId\": trading_accout_id, #402043148,\n #\"MarketName\": market_name,\n \"PositionMethodId\": 1,\n \"Status\": null,\n \"IfDone\": []\n }\n\n parameters = {SESSION: self.auth_token, USERNAME: self.uname}\n\n try:\n res = requests.post(URL_BUY_SELL, json=request_body, params=parameters)\n res_data_json = res.json()\n print(\"Buy order data************\\n\", res_data_json)\n\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(e.strerror)\n\n if res.status_code == 200:\n print(\"Trade Order successful, OrderId is\", res_data_json['OrderId'])\n return res_data_json['OrderId']\n\n return res_data_json['OrderId']",
"def send_quote(self, quote, ask_out, bid_out, count):\n self.invoke_log('on_invoke_send_quote', quote=quote, ask_out=ask_out, bid_out=bid_out, count=count)\n if self._xapi:\n func = self._xapi.X_SendQuote\n func.restype = None\n func.argtypes = [c_void_p, c_void_p, POINTER(QuoteField), POINTER(OrderIDType), POINTER(OrderIDType), c_int]\n func(self.p_fun, self.p_api, byref(quote), byref(ask_out), byref(bid_out), count)",
"def market_cancel(self, orderid):\n return self.delete(f'orders/{orderid}', auth=True)",
"def on_send_order(self, data, request):\n self.update_rate_limit(request)",
"async def post_submit_order(self, symbol, amount, price):\n order = await self.client.submit_order(\n symbol=symbol,\n market_type=Order.Type.EXCHANGE_MARKET,\n amount=amount,\n price=price\n )\n return order",
"def send(ctx, address, amount, denomination, use_unconfirmed, verbose):\n if denomination == '':\n confirmed = click.confirm(uxstring.UxString.default_price_denomination, default=True)\n if not confirmed:\n raise exceptions.Two1Error(uxstring.UxString.cancel_command)\n denomination = currency.Price.SAT\n price = currency.Price(amount, denomination)\n return _send(ctx.obj['wallet'], address, price.satoshis, verbose, use_unconfirmed)",
"def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)",
"def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")",
"async def trade(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if len(args) < 4:\n await ctx.send('Arguments missing. '\n 'Syntax is `~trade [name] [number] [item] [offer]`.')\n return\n\n try:\n trade = {'user1': ctx.author.id,\n 'user2': args[0],\n 'amount1': args[1],\n 'amount2': args[-1],\n 'item1': ' '.join(args[2:-1]),\n 'item2': 'coins'}\n ctx.bot.trade_manager.add_trade(ctx, trade)\n except TradeError as e:\n await ctx.send(e.msg)\n return\n\n name = args[0]\n for member in ctx.guild.members:\n if name.lower() in member.name.lower():\n name_member = member\n break\n\n offer = users.parse_int(args[-1])\n number = users.parse_int(args[1])\n itemid = items.find_by_name(' '.join(args[2:-1]))\n name = get_display_name(ctx.author)\n offer_formatted = '{:,}'.format(offer)\n out = (f'{items.SHOP_HEADER}{name.title()} wants to sell {name_member.mention} '\n f'{items.add_plural(number, itemid)} for {offer_formatted} coins. '\n f'To accept this offer, reply to this post with a :thumbsup:. '\n f'Otherwise, this offer will expire in one minute.')\n msg = await ctx.send(out)\n\n if await self.confirm(ctx, msg, out, timeout=60):\n price = {\"0\": offer}\n users.update_inventory(name_member.id, price, remove=True)\n users.update_inventory(ctx.author.id, price)\n loot = {itemid: number}\n users.update_inventory(ctx.author.id, loot, remove=True)\n users.update_inventory(name_member.id, loot)\n\n buyer_name = get_display_name(name_member)\n await ctx.send(f'{items.SHOP_HEADER}{name.title()} successfully sold '\n f'{items.add_plural(number, itemid)} to {buyer_name} for '\n f'{offer_formatted} coins!')\n ctx.bot.trade_manager.reset_trade(trade, ctx.author.id, name_member.id)",
"def _on_op_private_trade(self, msg):\r\n if msg[\"trade\"][\"price_currency\"] != self.curr_quote:\r\n return\r\n if msg[\"trade\"][\"item\"] != self.curr_base:\r\n return\r\n if msg[\"channel\"] == CHANNELS[\"trade.%s\" % self.curr_base]:\r\n own = False\r\n else:\r\n own = True\r\n date = int(msg[\"trade\"][\"date\"])\r\n price = int(msg[\"trade\"][\"price_int\"])\r\n volume = int(msg[\"trade\"][\"amount_int\"])\r\n typ = msg[\"trade\"][\"trade_type\"]\r\n\r\n if own:\r\n self.debug(\"trade: %s: %s @ %s (own order filled)\" % (\r\n typ,\r\n self.base2str(volume),\r\n self.quote2str(price)\r\n ))\r\n # send another private/info request because the fee might have\r\n # changed. We request it a minute later because the server\r\n # seems to need some time until the new values are available.\r\n self.client.request_info_later(60)\r\n else:\r\n self.debug(\"trade: %s: %s @ %s\" % (\r\n typ,\r\n self.base2str(volume),\r\n self.quote2str(price)\r\n ))\r\n\r\n self.signal_trade(self, (date, price, volume, typ, own))",
"async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)",
"async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n # order types \"limit\" and \"market\" immediatley parsed \"EXCHANGE LIMIT\" and \"EXCHANGE MARKET\"\n # note: same order types exist for margin orders without the EXCHANGE prefix\n orderTypes = self.safe_value(self.options, 'orderTypes', {})\n orderType = type.upper()\n if market['spot']:\n # although they claim that type needs to be 'exchange limit' or 'exchange market'\n # in fact that's not the case for swap markets\n orderType = self.safe_string_upper(orderTypes, type, type)\n stopPrice = self.safe_string_2(params, 'stopPrice', 'triggerPrice')\n timeInForce = self.safe_string(params, 'timeInForce')\n postOnlyParam = self.safe_value(params, 'postOnly', False)\n reduceOnly = self.safe_value(params, 'reduceOnly', False)\n clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')\n params = self.omit(params, ['triggerPrice', 'stopPrice', 'timeInForce', 'postOnly', 'reduceOnly', 'price_aux_limit'])\n amountString = self.amount_to_precision(symbol, amount)\n amountString = amountString if (side == 'buy') else Precise.string_neg(amountString)\n request = {\n # 'gid': 0123456789, # int32, optional group id for the order\n # 'cid': 0123456789, # int32 client order id\n 'type': orderType,\n 'symbol': market['id'],\n # 'price': self.number_to_string(price),\n 'amount': amountString,\n # 'flags': 0, # int32, https://docs.bitfinex.com/v2/docs/flag-values\n # 'lev': 10, # leverage for a derivative orders, the value should be between 1 and 100 inclusive, optional, 10 by default\n # 'price_trailing': self.number_to_string(priceTrailing),\n # 'price_aux_limit': self.number_to_string(stopPrice),\n # 'price_oco_stop': self.number_to_string(ocoStopPrice),\n # 'tif': '2020-01-01 10:45:23', # datetime for automatic order cancellation\n # 'meta': {\n # 'aff_code': 'AFF_CODE_HERE'\n # },\n }\n stopLimit = ((orderType == 'EXCHANGE STOP LIMIT') or ((orderType == 'EXCHANGE LIMIT') and (stopPrice is not None)))\n exchangeStop = (orderType == 'EXCHANGE STOP')\n exchangeMarket = (orderType == 'EXCHANGE MARKET')\n stopMarket = (exchangeStop or (exchangeMarket and (stopPrice is not None)))\n ioc = ((orderType == 'EXCHANGE IOC') or (timeInForce == 'IOC'))\n fok = ((orderType == 'EXCHANGE FOK') or (timeInForce == 'FOK'))\n postOnly = (postOnlyParam or (timeInForce == 'PO'))\n if (ioc or fok) and (price is None):\n raise InvalidOrder(self.id + ' createOrder() requires a price argument with IOC and FOK orders')\n if (ioc or fok) and exchangeMarket:\n raise InvalidOrder(self.id + ' createOrder() does not allow market IOC and FOK orders')\n if (orderType != 'MARKET') and (not exchangeMarket) and (not exchangeStop):\n request['price'] = self.price_to_precision(symbol, price)\n if stopLimit or stopMarket:\n # request['price'] is taken for stop orders\n request['price'] = self.price_to_precision(symbol, stopPrice)\n if stopMarket:\n request['type'] = 'EXCHANGE STOP'\n elif stopLimit:\n request['type'] = 'EXCHANGE STOP LIMIT'\n request['price_aux_limit'] = self.price_to_precision(symbol, price)\n if ioc:\n request['type'] = 'EXCHANGE IOC'\n elif fok:\n request['type'] = 'EXCHANGE FOK'\n # flag values may be summed to combine flags\n flags = 0\n if postOnly:\n flags = self.sum(flags, 4096)\n if reduceOnly:\n flags = self.sum(flags, 1024)\n if flags != 0:\n request['flags'] = flags\n if clientOrderId is not None:\n request['cid'] = clientOrderId\n 
params = self.omit(params, ['cid', 'clientOrderId'])\n response = await self.privatePostAuthWOrderSubmit(self.extend(request, params))\n #\n # [\n # 1653325121, # Timestamp in milliseconds\n # \"on-req\", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')\n # null, # unique ID of the message\n # null,\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325121798, # Millisecond timestamp of creation\n # 1653325121798, # Millisecond timestamp of update\n # -10, # Amount(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Type of the order: LIMIT, EXCHANGE LIMIT, MARKET, EXCHANGE MARKET, STOP, EXCHANGE STOP, STOP LIMIT, EXCHANGE STOP LIMIT, TRAILING STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC.\n # null, # Previous order type(stop-limit orders are converted to limit orders so for them previous type is always STOP)\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # 4096, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price(Stop Price for stop-limit orders, Limit Price for limit orders)\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ]\n # ],\n # null, # CODE(work in progress)\n # \"SUCCESS\", # Status of the request\n # \"Submitting 1 orders.\" # Message\n # ]\n #\n status = self.safe_string(response, 6)\n if status != 'SUCCESS':\n errorCode = response[5]\n errorText = response[7]\n raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')\n orders = self.safe_value(response, 4, [])\n order = self.safe_value(orders, 0)\n return self.parse_order(order, market)",
"def slot_trade(self, dummy_sender, data):\r\n (dummy_date, price, volume, typ, own) = data\r\n if own:\r\n # nothing special to do here (yet), there will also be\r\n # separate user_order messages to update my owns list\r\n # and a copy of this trade message in the public channel\r\n pass\r\n else:\r\n # we update the orderbook. We could also wait for the depth\r\n # message but we update the orderbook immediately.\r\n voldiff = -volume\r\n if typ == \"bid\": # tryde_type=bid means an ask order was filled\r\n self._repair_crossed_asks(price)\r\n if len(self.asks):\r\n if self.asks[0].price == price:\r\n self.asks[0].volume -= volume\r\n if self.asks[0].volume <= 0:\r\n voldiff -= self.asks[0].volume\r\n self.asks.pop(0)\r\n self.last_change_type = \"ask\" #the asks have changed\r\n self.last_change_price = price\r\n self.last_change_volume = voldiff\r\n self._update_total_ask(voldiff)\r\n self._valid_ask_cache = -1\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n\r\n if typ == \"ask\": # trade_type=ask means a bid order was filled\r\n self._repair_crossed_bids(price)\r\n if len(self.bids):\r\n if self.bids[0].price == price:\r\n self.bids[0].volume -= volume\r\n if self.bids[0].volume <= 0:\r\n voldiff -= self.bids[0].volume\r\n self.bids.pop(0)\r\n self.last_change_type = \"bid\" #the bids have changed\r\n self.last_change_price = price\r\n self.last_change_volume = voldiff\r\n self._update_total_bid(voldiff, price)\r\n self._valid_bid_cache = -1\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n\r\n self.signal_changed(self, None)",
"def run(self):\n # client -> server\n self.client.send_message(\n new_order_message(self.client,\n symbol='abc',\n side='0',\n order_type='1',\n extra_tags=[(38, 100), # orderQty\n (44, 10), ])) # price\n\n # server <- client\n message = self.server.wait_for_message('waiting for new order')\n assert_is_not_none(message)\n\n # server -> client\n self.server.send_message(\n execution_report(self.server,\n message,\n exec_trans_type='0',\n exec_type='0',\n ord_status='0',\n symbol='abc',\n side='0',\n leaves_qty='100',\n cum_qty='0',\n avg_px='0'))\n\n # client <- server\n message = self.client.wait_for_message('waiting for new order ack')\n assert_is_not_none(message)",
"async def handle_cancel_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:",
"def notify_remote_orders(connection, orders):\r\n\r\n # deal with null orders (empty string)\r\n if orders == '':\r\n orders = 'null'\r\n \r\n # send orders\r\n try:\r\n connection['out'].sendall(orders.encode())\r\n except:\r\n raise IOError('remote player cannot be reached')",
"def execute_order(order: dict):\n\n # TODO: Check validity, e.g., against filters (min, max) and our own limits\n\n if App.config[\"trader\"][\"test_order_before_submit\"]:\n try:\n log.info(f\"Submitting test order: {order}\")\n test_response = App.client.create_test_order(**order) # Returns {} if ok. Does not check available balances - only trade rules\n except Exception as e:\n log.error(f\"Binance exception in 'create_test_order' {e}\")\n # TODO: Reset/resync whole account\n return\n\n if App.config[\"trader\"][\"simulate_order_execution\"]:\n # TODO: Simply store order so that later we can check conditions of its execution\n print(order)\n print(App.signal)\n pass\n else:\n # -----\n # Submit order\n try:\n log.info(f\"Submitting order: {order}\")\n order = App.client.create_order(**order)\n except Exception as e:\n log.error(f\"Binance exception in 'create_order' {e}\")\n return\n\n if not order or not order.get(\"status\"):\n return None\n\n return order"
] | [
"0.6762728",
"0.647902",
"0.6422314",
"0.63938046",
"0.6357052",
"0.63393205",
"0.6320198",
"0.62730086",
"0.61724967",
"0.6157658",
"0.61026967",
"0.6090159",
"0.6065455",
"0.6054261",
"0.60485345",
"0.60345185",
"0.6028087",
"0.59672505",
"0.59616095",
"0.5960345",
"0.5948733",
"0.5917711",
"0.591128",
"0.59111553",
"0.5885642",
"0.5883415",
"0.5878363",
"0.5875147",
"0.5869092",
"0.5857552"
] | 0.7360399 | 0 |
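The _send_to_market document in the record above is a three-way dispatch on the ordtype field of an order tuple. A minimal sketch of that routing, assuming a stub market object with send/cancel/modif methods (the stub and the dispatch helper below are illustrative, not part of the quoted project):

class StubMarket:
    # Illustrative stand-in for the mkt object targeted by _send_to_market.
    def send(self, is_buy, qty, price, uid, is_mine, timestamp):
        print(f"new    uid={uid} {'buy' if is_buy else 'sell'} {qty} @ {price}")

    def cancel(self, uid):
        print(f"cancel uid={uid}")

    def modif(self, uid, new_qty):
        print(f"modif  uid={uid} new_qty={new_qty}")

def dispatch(mkt, ordtype, **fields):
    # Same routing as above: 'new' places an order, 'cancel' pulls it,
    # 'modif' resizes it, and anything else is rejected loudly.
    if ordtype == "new":
        mkt.send(**fields)
    elif ordtype == "cancel":
        mkt.cancel(uid=fields["uid"])
    elif ordtype == "modif":
        mkt.modif(uid=fields["uid"], new_qty=fields["new_qty"])
    else:
        raise ValueError(f"Unexpected ordtype: {ordtype}")

mkt = StubMarket()
dispatch(mkt, "new", is_buy=True, qty=10, price=100.5, uid=-1, is_mine=True, timestamp=0.0)
dispatch(mkt, "modif", uid=-1, new_qty=5)
dispatch(mkt, "cancel", uid=-1)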
Move the market forward one tick (process next order) If the user has messages (new/cancel/modif) queued, it will decide whether to send a user or historical order based on their theoretical arrival time (timestamp) | def tick(self):
# next historical order to be sent
mktorder = self.hist_orders[self.mkt_idx+1]
# if I have queued orders
if self.my_queue:
# if my order reaches the market before the next historical order
if self.my_queue[0].timestamp < mktorder[self.col_idx['timestamp']]:
my_order = self.my_queue.popleft()
self._send_to_market(my_order, is_mine=True)
self.mkt_time = my_order[self.col_idx['timestamp']]
return
        # otherwise send the next historical order
self._send_historical_order(mktorder) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_tick(self, msg):\n message_type = msg['type']\n if 'sequence' not in msg:\n if message_type == 'subscriptions':\n # request an order book snapshot after the websocket feed is established\n print('Coinbase Subscriptions successful for : %s' % self.sym)\n self.load_book()\n return True\n elif np.isnan(msg['sequence']):\n # this situation appears during data replays (and not in live data feeds)\n print('\\n%s found a nan in the sequence' % self.sym)\n return True\n\n # check the incoming message sequence to verify if there\n # is a dropped/missed message.\n # If so, request a new orderbook snapshot from Coinbase Pro.\n new_sequence = int(msg['sequence'])\n self.diff = new_sequence - self.sequence\n\n if self.diff == 1:\n # tick sequences increase by an increment of one\n self.sequence = new_sequence\n elif message_type in ['load_book', 'book_loaded', 'preload']:\n # message types used for data replays\n self.sequence = new_sequence\n elif self.diff <= 0:\n if message_type in ['received', 'open', 'done', 'match', 'change']:\n print('%s [%s] has a stale tick: current %i | incoming %i' % (\n self.sym, message_type, self.sequence, new_sequence))\n return True\n else:\n print('UNKNOWN-%s %s has a stale tick: current %i | incoming %i' % (\n self.sym, message_type, self.sequence, new_sequence))\n return True\n else: # when the tick sequence difference is greater than 1\n print('sequence gap: %s missing %i messages. new_sequence: %i [%s]\\n' %\n (self.sym, self.diff, new_sequence, message_type))\n self.sequence = new_sequence\n return False\n\n # persist data to Arctic Tick Store\n self.db.new_tick(msg) # make sure CONFIGS.RECORDING is false when replaying data\n\n side = msg['side']\n if message_type == 'received':\n return True\n\n elif message_type == 'open':\n if side == 'buy':\n self.bids.insert_order(msg)\n return True\n else:\n self.asks.insert_order(msg)\n return True\n\n elif message_type == 'done':\n if side == 'buy':\n self.bids.remove_order(msg)\n return True\n else:\n self.asks.remove_order(msg)\n return True\n\n elif message_type == 'match':\n trade_notional = float(msg['price']) * float(msg['size'])\n if side == 'buy': # trades matched on the bids book are considered sells\n self.trade_tracker['sells'] += trade_notional\n self.bids.match(msg)\n return True\n else: # trades matched on the asks book are considered buys\n self.trade_tracker['buys'] += trade_notional\n self.asks.match(msg)\n return True\n\n elif message_type == 'change':\n if side == 'buy':\n self.bids.change(msg)\n return True\n else:\n self.asks.change(msg)\n return True\n\n elif message_type == 'preload':\n if side == 'buy':\n self.bids.insert_order(msg)\n return True\n else:\n self.asks.insert_order(msg)\n return True\n\n elif message_type == 'load_book':\n self.clear_book()\n return True\n\n elif message_type == 'book_loaded':\n self.bids.warming_up = False\n self.asks.warming_up = False\n return True\n\n else:\n print('\\n\\n\\nunhandled message type\\n%s\\n\\n' % str(msg))\n return False",
"def receiveMessage(self, currentTime, msg):\n super().receiveMessage(currentTime, msg)\n if self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':\n bid, _, ask, _ = self.getKnownBidAsk(self.symbol)\n if bid and ask:\n self.mid_list.append((bid + ask) / 2)\n if len(self.mid_list) > self.window1: self.avg_win1_list.append(pd.Series(self.mid_list).ewm(span=self.window1).mean().values[-1].round(2))\n if len(self.mid_list) > self.window2: self.avg_win2_list.append(pd.Series(self.mid_list).ewm(span=self.window2).mean().values[-1].round(2))\n if len(self.avg_win1_list) > 0 and len(self.avg_win2_list) > 0:\n if self.avg_win1_list[-1] >= self.avg_win2_list[-1]:\n # Check that we have enough cash to place the order\n if self.holdings['CASH'] >= (self.size * ask):\n self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=True, limit_price=ask)\n else:\n if self.symbol in self.holdings and self.holdings[self.symbol] > 0:\n self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=False, limit_price=bid)\n self.setWakeup(currentTime + self.getWakeFrequency())\n self.state = 'AWAITING_WAKEUP'",
"def message_event(self, market_processor, timestamp, message):\n lob = market_processor.current_lob\n if (isinstance(message, AddOrderMessage) or\n isinstance(message, AddOrderMPIDMessage)):\n # Detect if top of book is affected; if so record the message\n if message.bsindicator == b'B':\n if (lob is None or 0 == len(lob.bid_levels) or\n message.price >= lob.bid_levels[0].price):\n record = {\"MessageType\": \"Add\",\n \"Queue\": \"Bid\",\n \"Price\": message.price,\n \"Volume\": message.shares,\n \"OrderID\": message.orderRefNum}\n self.records.append((timestamp, record))\n elif message.bsindicator == b'S':\n if (lob is None or 0 == len(lob.ask_levels) or\n message.price <= lob.ask_levels[0].price):\n record = {\"MessageType\": \"Add\",\n \"Queue\": \"Ask\",\n \"Price\": message.price,\n \"Volume\": message.shares,\n \"OrderID\": message.orderRefNum}\n self.records.append((timestamp, record))\n elif isinstance(message, OrderExecutedMessage):\n # An executed order will ALWAYS be against top of book\n # because of price priority, so record.\n if lob.ask_order_on_book(message.orderRefNum):\n record = {\"MessageType\": \"Exec\",\n \"Volume\": message.shares,\n \"OrderID\": message.orderRefNum}\n record[\"Queue\"] = \"Ask\"\n record[\"Price\"] = lob.ask_levels[0].price\n self.records.append((timestamp, record))\n elif lob.bid_order_on_book(message.orderRefNum):\n record = {\"MessageType\": \"Exec\",\n \"Volume\": message.shares,\n \"OrderID\": message.orderRefNum}\n record[\"Queue\"] = \"Bid\"\n record[\"Price\"] = lob.bid_levels[0].price\n self.records.append((timestamp, record))\n elif isinstance(message, TradeMessage):\n if message.bsindicator == b'S':\n record = {\"MessageType\": \"ExecHid\", \"Volume\": message.shares, \"OrderID\":-1}\n record[\"Queue\"] = \"Ask\"\n record[\"Price\"] = message.price\n self.records.append((timestamp, record))\n elif message.bsindicator == b'B':\n record = {\"MessageType\": \"ExecHid\", \"Volume\": message.shares, \"OrderID\":-1}\n record[\"Queue\"] = \"Bid\"\n record[\"Price\"] = message.price\n self.records.append((timestamp, record))\n elif isinstance(message, OrderExecutedPriceMessage):\n if (len(lob.ask_levels) > 0 and\n lob.ask_levels[0].order_on_book(message.orderRefNum)):\n record = {\"MessageType\": \"ExecPrice\",\n \"Queue\": \"Ask\",\n \"Volume\": message.shares,\n \"OrderID\": message.orderRefNum,\n \"Price\": message.price}\n self.records.append((timestamp, record))\n elif (len(lob.bid_levels) > 0 and\n lob.bid_levels[0].order_on_book(message.orderRefNum)):\n record = {\"MessageType\": \"ExecPrice\",\n \"Queue\": \"Bid\",\n \"Volume\": message.shares,\n \"OrderID\": message.orderRefNum,\n \"Price\": message.price}\n self.records.append((timestamp, record))\n elif isinstance(message, OrderCancelMessage):\n if (len(lob.ask_levels) > 0 and\n lob.ask_levels[0].order_on_book(message.orderRefNum)):\n record = {\"MessageType\": \"Cancel\",\n \"Queue\": \"Ask\",\n \"Volume\": message.cancelShares,\n \"OrderID\": message.orderRefNum,\n \"Price\": lob.ask_levels[0].price}\n self.records.append((timestamp, record))\n elif (len(lob.bid_levels) > 0 and\n lob.bid_levels[0].order_on_book(message.orderRefNum)):\n record = {\"MessageType\": \"Cancel\",\n \"Queue\": \"Bid\",\n \"Volume\": message.cancelShares,\n \"OrderID\": message.orderRefNum,\n \"Price\": lob.bid_levels[0].price}\n self.records.append((timestamp, record))\n elif isinstance(message, OrderDeleteMessage):\n if (len(lob.ask_levels) > 0 and\n 
lob.ask_levels[0].order_on_book(message.orderRefNum)):\n volume = lob.ask_levels[0].queue[\n lob.ask_levels[0].find_order_on_book(\n message.orderRefNum)].volume\n record = {\"MessageType\": \"Delete\",\n \"Queue\": \"Ask\",\n \"Volume\": volume,\n \"OrderID\": message.orderRefNum,\n \"Price\": lob.ask_levels[0].price}\n self.records.append((timestamp, record))\n elif (len(lob.bid_levels) > 0 and\n lob.bid_levels[0].order_on_book(message.orderRefNum)):\n volume = lob.bid_levels[0].queue[\n lob.bid_levels[0].find_order_on_book(\n message.orderRefNum)].volume\n record = {\"MessageType\": \"Delete\",\n \"Queue\": \"Bid\",\n \"Volume\": volume,\n \"OrderID\": message.orderRefNum,\n \"Price\": lob.bid_levels[0].price}\n self.records.append((timestamp, record))\n elif isinstance(message, OrderReplaceMessage):\n if lob.ask_order_on_book(message.origOrderRefNum): #change to the top at same price\n if (lob.ask_levels[0].order_on_book(message.origOrderRefNum) and message.price==lob.ask_levels[0].price):\n (queue, i, j)=lob.find_order(message.origOrderRefNum,0)\n old_volume=queue[i].volume()\n new_shares=message.shares-old_volume\n record = {\"MessageType\": \"Replace\", \"Queue\": \"Ask\", \"Volume\": new_shares, \"OrderID\": message.newOrderRefNum,\n \"Price\": lob.ask_levels[0].price}\n self.records.append((timestamp, record))\n elif (lob.ask_levels[0].order_on_book(message.origOrderRefNum) and lob.ask_levels[0].price<message.price):#replace of a top order for an inferior order\n record = {\"MessageType\": \"Replace\",\n \"Queue\": \"Ask\",\n \"Volume\": message.shares*-1, \"OrderID\": message.newOrderRefNum,\"Price\": lob.ask_levels[0].price}\n self.records.append((timestamp, record))\n elif message.price<lob.ask_levels[0].price:\n record = {\"MessageType\": \"Replace\",\n \"Queue\": \"Ask\",\n \"Volume\": message.shares, \"OrderID\": message.newOrderRefNum, \"Price\": lob.ask_levels[0].price}\n self.records.append((timestamp, record))\n elif (message.price==lob.ask_levels[0].price and lob.ask_levels[0].order_on_book(message.origOrderRefNum)==False): #Improvement over old order\n record = {\"MessageType\": \"Replace\",\n \"Queue\": \"Ask\",\n \"Volume\": message.shares, \"OrderID\": message.newOrderRefNum,\n \"Price\": lob.ask_levels[0].price}\n self.records.append((timestamp, record))\n if lob.bid_order_on_book(message.origOrderRefNum):\n if (lob.bid_levels[0].order_on_book(message.origOrderRefNum) and message.price==lob.bid_levels[0].price):\n (queue, i, j)=lob.find_order(message.origOrderRefNum,1)\n old_volume=queue[i].volume()\n new_shares=message.shares-old_volume\n record = {\"MessageType\": \"Replace\", \"Queue\": \"Bid\", \"Volume\": new_shares, \"OrderID\": message.newOrderRefNum,\n \"Price\": lob.bid_levels[0].price}\n self.records.append((timestamp, record))\n elif (lob.bid_levels[0].order_on_book(message.origOrderRefNum) and lob.bid_levels[0].price>message.price): #replace of a top order for an inferior order\n record = {\"MessageType\": \"Replace\",\n \"Queue\": \"Bid\",\n \"Volume\": message.shares*-1, \"OrderID\": message.newOrderRefNum,\n \"Price\": lob.bid_levels[0].price}\n self.records.append((timestamp, record))\n elif message.price>lob.bid_levels[0].price: #Improvement of a top of the order_id\n record = {\"MessageType\": \"Replace\",\n \"Queue\": \"Bid\", \"Volume\": message.shares,\"OrderID\": message.newOrderRefNum,\n \"Price\": lob.bid_levels[0].price}\n self.records.append((timestamp, record))\n elif (message.price==lob.bid_levels[0].price and 
lob.bid_levels[0].order_on_book(message.origOrderRefNum)==False): #Improvement over old order\n record = {\"MessageType\": \"Replace\",\n \"Queue\": \"Bid\", \"Volume\": message.shares, \"OrderID\": message.newOrderRefNum, \"Price\": lob.bid_levels[0].price}\n self.records.append((timestamp, record))",
"def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")",
"def on_market(self, oid, body):\n\t\tif body['freq'] != self.freq: return\n\n\t\tticks = body['ticks']\n\t\tself._update_data(ticks)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._calculate_signals()\n\n\t\t\t# publish generated signals\n\t\t\tequity = self.total_bp\n\t\t\tbp = copy(self.avaliable_bp) # current snap_shot of buying power\n\t\t\tfor S, pos in self.pos.items():\n\t\t\t\tfor order, lvl in pos.generate_orders(equity):\n\t\t\t\t\tused_bp = self.on_order(order, lvl, bp)\n\t\t\t\t\tbp -= used_bp\n\t\t\t\t\n\t\t\t# save old strategy performance history\n\t\t\tself._pbar.update(1)\n\t\t\n\t\t# if ticks.timestamp >= self.start_dt:\n\t\t\t# self.basic_publish('next', sender=self.id)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._save_positions()",
"def _send_to_market(self, order, is_mine):\n \n \n ord_type = order[self.col_idx['ordtype']]\n if ord_type == \"new\":\n self.mkt.send(is_buy=order[self.col_idx['is_buy']],\n qty=order[self.col_idx['qty']],\n price=order[self.col_idx['price']],\n uid=order[self.col_idx['uid']],\n is_mine=is_mine,\n timestamp=order[self.col_idx['timestamp']])\n elif ord_type == \"cancel\":\n self.mkt.cancel(uid=order[self.col_idx['uid']])\n elif ord_type == \"modif\":\n self.mkt.modif(uid=order[self.col_idx['uid']], \n new_qty=order[self.col_idx['qty']])\n else:\n raise ValueError(f'Unexpected ordtype: {ord_type}')",
"def execute_order(self, event):\n if isinstance(event, OrderEvent):\n signal = FillEvent(\n event.symbol,\n date.today(),\n event.quantity,\n event.direction,\n None\n )\n self.event_queue.put(signal)",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"]})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def _simulate_market_order_execution() -> None:\n if jh.is_backtesting() or jh.is_unit_testing() or jh.is_paper_trading():\n store.orders.execute_pending_market_orders()",
"def on_next_command(self, event):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n return event.channel.send_message(\"There aren't any songs queued.\")\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[0].metadata,\n )\n event.channel.send_message(\n \"Next in queue is ``{}`` by ``{}`` with length ``{}`` minutes using ``{}``.\".format(\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )",
"def send(self):\n \n # Check that we have something to send\n if len(self.items) > 0:\n \n # If no items 'sent' or 'playing', send next item in queue\n sent_items = [item for item in self.playlist_store.find({'status':'sent'})]\n playing_items = [item for item in self.playlist_store.find({'status':'playing'})]\n \n # Look for any expired items in playing\n expired = False\n for item in playing_items:\n end_date = item['start_date'] + datetime.timedelta(seconds=item['track']['track']['length'])\n expired = expired or end_date < datetime.datetime.now()\n \n # Assume we send nothing\n send_item = False\n # Conditions under which we send...\n # 1. Nothing sent, and nothing playing\n send_item = send_item or (len(sent_items) == 0 and len(playing_items) == 0)\n # 2. Nothing sent, and something expired marked as playing\n send_item = send_item or (len(sent_items) == 0 and len(playing_items) > 0 and expired)\n \n if send_item:\n \n # Send next item in queue\n self.current_item = self.items.pop(0)\n print \" [x] Sending %r\" % (self.current_item['track']['track']['name'],)\n \n # Send using the broadcast exchange (Pub/Sub)\n self.amqp_primary_channel.basic_publish(exchange=self.amqp_broadcast_exchange,\n routing_key='',\n body=json.dumps({'_id': str(self.current_item['_id']),\n 'track': self.current_item['track'],\n 'from': self.current_item['from']}),\n properties=pika.BasicProperties(\n content_type=\"application/json\",\n delivery_mode=2))\n \n # Mark item as sent\n self.current_item['status'] = 'sent'\n self.playlist_store.update({'_id': self.current_item['_id']}, self.current_item)\n \n elif len(sent_items) == 0 and len(playing_items) > 0 and not expired:\n # TODO\n # If something playing and nothing sent, set up timer\n # timer = Timer(self.current_item['track']['track']['length'], self.next)\n # timer.start()\n pass",
"def on_order(self, order: OrderData):\n\n if order.vt_orderid not in (self.short_orders + self.long_orders):\n return\n\n self.pos_calculator.update_position(order)\n\n self.current_pos = self.pos_calculator.pos\n self.avg_price = self.pos_calculator.avg_price\n\n if order.status == Status.ALLTRADED:\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n self.trade_count += 1\n\n short_price = order.price + self.step_price\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if len(self.long_orders) < self.max_open_orders:\n long_price = order.price - self.step_price * self.max_open_orders\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n self.trade_count += 1\n long_price = order.price - self.step_price\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if len(self.short_orders) < self.max_open_orders:\n short_price = order.price + self.step_price * self.max_open_orders\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.put_event()",
"def run(self):\r\n db_conn = order_db_connector()\r\n db_conn.process_order(self.id)\r\n schedule.every(self.order_max_lifetime).seconds.do(self.order_finish).tag(self.id)\r\n #schedule.every(5).seconds.do(self.trading_main).tag(f'{self.id}_main',self.id)\r\n self.trading_main()\r\n logger.info(\"ENDED trading main\")\r\n \"\"\"\r\n Add order status \r\n \"\"\"\r\n\r\n #Clear scheduled task to avoid task stacking in scheduler\r",
"def bqm_move_queue(self):\n self.bqm.turn_once()",
"def new_order(self, signal, type):\n # self.client = bitmex.bitmex(test=True, api_key=self.strategy.api_key.key, api_secret=self.strategy.api_key.secret)\n if not self.strategy.live_trade:\n self.logger.info('Notice: Trading on testnet.')\n if self.scrape_only:\n return\n self.update_position()\n self.logger.info('New Order {} {}'.format(signal, type))\n self.logger.info(\"Current Position: {}\".format(self.p))\n self.logger.info(\"Canceling all orders\")\n self.client.Order.Order_cancelAll(symbol=self.strategy.symbol).result()\n self.trigers = []\n\n if type == 'entry' and signal == 'LONG' and self.p == 0:\n\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = self.strategy.contract_size\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing LONG entry Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n if self.strategy.stop_loss:\n triger = {\n \"side\": -1,\n \"price\": order['price'] - self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss trigger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit trigger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'SHORT' and self.p == 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = self.strategy.contract_size * -1\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing Short entry Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n if self.strategy.stop_loss:\n triger = {\n \"side\": 1,\n \"price\": order['price'] + self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss trigger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take profit trigger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'LONG' and self.p < 0:\n\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n p = self.p * -1\n oq = p + self.strategy.contract_size\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing LONG entry and Short Exit Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}-{}_{}\".format(self.strategy.id, 
signal, type, \"SHORT\",\n \"exit\"))\n if self.strategy.stop_loss:\n triger = {\n \"side\": -1,\n \"price\": order['price'] - self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss triger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit triger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'SHORT' and self.p > 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p + self.strategy.contract_size)\n ot = self.strategy.order_type\n\n try:\n self.logger.info(\"Placing Short entry and Long Exit Order of {}\".format(oq))\n order = self.execute_order(oq, ot,\n text=\"{} {}_{}-{}_{}\".format(self.strategy.id, signal, type, \"LONG\", \"exit\"))\n if self.strategy.stop_loss:\n triger = {\n \"side\": 1,\n \"price\": order['price'] + self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss triger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit triger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'exit' and signal == 'LONG' and self.p > 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p)\n try:\n self.logger.info(\"Placing Long Exit Order of {}\".format(oq))\n self.execute_order(oq, self.strategy.order_type, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'exit' and signal == 'SHORT' and self.p < 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p)\n try:\n self.logger.info(\"Placing Shot Exit Order of {}\".format(oq))\n self.execute_order(oq, self.strategy.order_type, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Aggregation\": order[\"Aggregation\"],\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n if self.asset_type == \"OPTION\":\n\n other[\"Pre_Symbol\"] = order[\"Pre_Symbol\"]\n\n other[\"Exp_Date\"] = order[\"Exp_Date\"]\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def queue_my_modif(self, uid, new_qty):\n \n message = self.OrdTuple(ordtype=\"modif\",\n uid=uid,\n is_buy=np.nan,\n qty=new_qty,\n price=np.nan, \n timestamp=self._arrival_time()) \n self.my_queue.append(message)",
"def reveille(self):\n now = datetime.datetime.now()\n # TODO: this logic can be optimized if our queue has a peek() method\n while self._queue.size() > 0:\n item = self._queue.get_tuple()\n if item[1] <= now:\n self._callback(item[0])\n else:\n # put the item back & terminate iteration\n self._queue.put(item[0], item[1])\n break",
"def queue_my_new(self, is_buy, qty, price): \n \n self.my_last_uid -= 1 \n message = self.OrdTuple(ordtype=\"new\",\n uid=self.my_last_uid,\n is_buy=is_buy,\n qty=qty,\n price=price, \n timestamp=self._arrival_time()) \n self.my_queue.append(message) \n return self.my_last_uid",
"def on_queue_next_command(self, event, index):\n self.pre_check(event)\n self.same_channel_check(event)\n if 1 < index <= len(self.get_player(event.guild.id).queue):\n index -= 1\n self.get_player(event.guild.id).queue.insert(\n 0,\n self.get_player(event.guild.id).queue.pop(index),\n )\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[0].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"Moved ``{}`` to the front of the queue.\".format(\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n else:\n api_loop(event.channel.send_message, \"Invalid index input.\")",
"def slot_trade(self, dummy_sender, data):\r\n (dummy_date, price, volume, typ, own) = data\r\n if own:\r\n # nothing special to do here (yet), there will also be\r\n # separate user_order messages to update my owns list\r\n # and a copy of this trade message in the public channel\r\n pass\r\n else:\r\n # we update the orderbook. We could also wait for the depth\r\n # message but we update the orderbook immediately.\r\n voldiff = -volume\r\n if typ == \"bid\": # tryde_type=bid means an ask order was filled\r\n self._repair_crossed_asks(price)\r\n if len(self.asks):\r\n if self.asks[0].price == price:\r\n self.asks[0].volume -= volume\r\n if self.asks[0].volume <= 0:\r\n voldiff -= self.asks[0].volume\r\n self.asks.pop(0)\r\n self.last_change_type = \"ask\" #the asks have changed\r\n self.last_change_price = price\r\n self.last_change_volume = voldiff\r\n self._update_total_ask(voldiff)\r\n self._valid_ask_cache = -1\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n\r\n if typ == \"ask\": # trade_type=ask means a bid order was filled\r\n self._repair_crossed_bids(price)\r\n if len(self.bids):\r\n if self.bids[0].price == price:\r\n self.bids[0].volume -= volume\r\n if self.bids[0].volume <= 0:\r\n voldiff -= self.bids[0].volume\r\n self.bids.pop(0)\r\n self.last_change_type = \"bid\" #the bids have changed\r\n self.last_change_price = price\r\n self.last_change_volume = voldiff\r\n self._update_total_bid(voldiff, price)\r\n self._valid_bid_cache = -1\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n\r\n self.signal_changed(self, None)",
"def slot_user_order(self, dummy_sender, data):\r\n (price, volume, typ, oid, status) = data\r\n found = False\r\n removed = False # was the order removed?\r\n opened = False # did the order change from 'post-pending' to 'open'\"?\r\n voldiff = 0 # did the order volume change (full or partial fill)\r\n if \"executing\" in status:\r\n # don't need this status at all\r\n return\r\n if \"post-pending\" in status:\r\n # don't need this status at all\r\n return\r\n if \"removed\" in status:\r\n for i in range(len(self.owns)):\r\n if self.owns[i].oid == oid:\r\n order = self.owns[i]\r\n\r\n # work around MtGox strangeness:\r\n # for some reason it will send a \"completed_passive\"\r\n # immediately followed by a \"completed_active\" when a\r\n # market order is filled and removed. Since \"completed_passive\"\r\n # is meant for limit orders only we will just completely\r\n # IGNORE all \"completed_passive\" if it affects a market order,\r\n # there WILL follow a \"completed_active\" immediately after.\r\n if order.price == 0:\r\n if \"passive\" in status:\r\n # ignore it, the correct one with\r\n # \"active\" will follow soon\r\n return\r\n\r\n self.debug(\r\n \"### removing order %s \" % oid,\r\n \"price:\", self.gox.quote2str(order.price),\r\n \"type:\", order.typ)\r\n\r\n # remove it from owns...\r\n self.owns.pop(i)\r\n\r\n # ...and update own volume cache in the bids or asks\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )\r\n removed = True\r\n break\r\n else:\r\n for order in self.owns:\r\n if order.oid == oid:\r\n found = True\r\n self.debug(\r\n \"### updating order %s \" % oid,\r\n \"volume:\", self.gox.base2str(volume),\r\n \"status:\", status)\r\n voldiff = volume - order.volume\r\n opened = (order.status != \"open\" and status == \"open\")\r\n order.volume = volume\r\n order.status = status\r\n break\r\n\r\n if not found:\r\n # This can happen if we added the order with a different\r\n # application or the gox server sent the user_order message\r\n # before the reply to \"order/add\" (this can happen because\r\n # actually there is no guarantee which one arrives first).\r\n # We will treat this like a reply to \"order/add\"\r\n self.add_own(Order(price, volume, typ, oid, status))\r\n\r\n # The add_own() method has handled everything that was needed\r\n # for new orders and also emitted all signals already, we\r\n # can immediately return here because the job is done.\r\n return\r\n\r\n # update level own volume cache\r\n self._update_level_own_volume(\r\n typ, price, self.get_own_volume_at(price, typ))\r\n\r\n # We try to help the strategy with tracking the orders as good\r\n # as we can by sending different signals for different events.\r\n if removed:\r\n reason = self.gox.msg[\"user_order\"][\"reason\"]\r\n self.signal_own_removed(self, (order, reason))\r\n if opened:\r\n self.signal_own_opened(self, (order))\r\n if voldiff:\r\n self.signal_own_volume(self, (order, voldiff))\r\n self.signal_changed(self, None)\r\n self.signal_owns_changed(self, None)",
"async def _trade(self, msg: dict, timestamp: float):\n for data in msg['data']:\n ts = self.timestamp_normalize(data['timestamp'])\n await self.callback(TRADES, feed=self.id,\n symbol=self.exchange_symbol_to_std_symbol(data['symbol']),\n side=BUY if data['side'] == 'Buy' else SELL,\n amount=Decimal(data['size']),\n price=Decimal(data['price']),\n order_id=data['trdMatchID'],\n timestamp=ts,\n receipt_timestamp=timestamp)",
"def transmittingState(self, tick):\n assert (self.mState == self.States.Transmitting)\n assert (len(self.mMessageQueue) > 0)\n ResultsSingleton.getInstance().recordMessageSent()\n\n \"\"\" TODO(Colin): Pop or popleft? \"\"\"\n self.mCurrentMessage = self.mMessageQueue.pop()\n #print(\"CURRENT MESSAGE\")\n #print(self.mCurrentMessage)\n self.mBus.startBroadcast(self.mCurrentMessage)\n self.mNextTickForTransmissionCompletion = tick + self.mCurrentMessage.getTicksToFullyTransmit(self.mTransmissionRate)\n self.mState = self.States.TransmittingWaiting",
"def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n return # active buy/sell order submitted/accepted - do nothing\n\n # check if order has been completed (could reject if not enough cash)\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(f'BUY EXECUTED, {order.executed.price:.2f}')\n elif order.issell():\n self.log(f'SELL EXECUTED, {order.executed.price:.2f}')\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log('Order Canceled/Margin/Rejected')\n\n self.bar_executed = len(self)\n\n self.order = None # reset orders",
"async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"ORDER_TRADE_UPDATE\": # Order update.\n self._update_order(msg[\"o\"])",
"async def new_limit_order(side):\n symbol = App.config[\"symbol\"]\n now_ts = now_timestamp()\n\n #\n # Find limit price (from signal, last kline and adjustment parameters)\n #\n last_kline = App.analyzer.get_last_kline(symbol)\n last_close_price = to_decimal(last_kline[4]) # Close price of kline has index 4 in the list\n if not last_close_price:\n log.error(f\"Cannot determine last close price in order to create a market buy order.\")\n return None\n\n price_adjustment = App.config[\"trader\"][\"limit_price_adjustment\"]\n if side == SIDE_BUY:\n price = last_close_price * Decimal(1.0 - price_adjustment) # Adjust price slightly lower\n elif side == SIDE_SELL:\n price = last_close_price * Decimal(1.0 + price_adjustment) # Adjust price slightly higher\n\n price_str = round_str(price, 2)\n price = Decimal(price_str) # We will use the adjusted price for computing quantity\n\n #\n # Find quantity\n #\n if side == SIDE_BUY:\n # Find how much quantity we can buy for all available USD using the computed price\n quantity = App.quote_quantity # USD\n percentage_used_for_trade = App.config[\"trader\"][\"percentage_used_for_trade\"]\n quantity = (quantity * percentage_used_for_trade) / Decimal(100.0) # Available for trade\n quantity = quantity / price # BTC to buy\n # Alternatively, we can pass quoteOrderQty in USDT (how much I want to spend)\n elif side == SIDE_SELL:\n # All available BTCs\n quantity = App.base_quantity # BTC\n\n quantity_str = round_down_str(quantity, 6)\n\n #\n # Execute order\n #\n order_spec = dict(\n symbol=symbol,\n side=side,\n type=ORDER_TYPE_LIMIT, # Alternatively, ORDER_TYPE_LIMIT_MAKER\n timeInForce=TIME_IN_FORCE_GTC,\n quantity=quantity_str,\n price=price_str,\n )\n\n if App.config[\"trader\"][\"no_trades_only_data_processing\"]:\n print(f\"NOT executed order spec: {order_spec}\")\n else:\n order = execute_order(order_spec)\n\n #\n # Store/log order object in our records (only after confirmation of success)\n #\n App.order = order\n App.order_time = now_ts\n\n return order",
"def received_order(self, order):\n\t\tif order.direction == ORDERDIR.IN:\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)\n\t\telse:\n\t\t\tself.startedOrderQueue.put(order)\n\t\tself.orderQueue.add_order(order)\n\t\tself.update_and_send_elevator_info()\n\t\tself.should_drive()",
"def on_order(self, order: OrderData):\n self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()",
"def queue_my_cancel(self, uid):\n \n message = self.OrdTuple(ordtype=\"cancel\",\n uid=uid,\n is_buy=np.nan,\n qty=np.nan,\n price=np.nan, \n timestamp=self._arrival_time()) \n self.my_queue.append(message)"
] | [
"0.6234445",
"0.61741984",
"0.6084046",
"0.59530133",
"0.59332037",
"0.5807359",
"0.57960933",
"0.5781202",
"0.57786",
"0.57753426",
"0.5773166",
"0.57452565",
"0.5744463",
"0.56984305",
"0.5681479",
"0.5674377",
"0.56380785",
"0.5613147",
"0.56096065",
"0.55684096",
"0.55490786",
"0.55250573",
"0.5503959",
"0.54965705",
"0.54915005",
"0.5483302",
"0.5447842",
"0.54300255",
"0.5404483",
"0.53840786"
] | 0.79819286 | 0 |
Queue a new user order to be sent to the market when its time is due | def queue_my_new(self, is_buy, qty, price):
self.my_last_uid -= 1
message = self.OrdTuple(ordtype="new",
uid=self.my_last_uid,
is_buy=is_buy,
qty=qty,
price=price,
timestamp=self._arrival_time())
self.my_queue.append(message)
return self.my_last_uid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tick(self):\n \n # next historical order to be sent\n mktorder = self.hist_orders[self.mkt_idx+1]\n # if I have queued orders\n if self.my_queue:\n # if my order reaches the market before the next historical order\n if self.my_queue[0].timestamp < mktorder[self.col_idx['timestamp']]:\n my_order = self.my_queue.popleft()\n self._send_to_market(my_order, is_mine=True)\n self.mkt_time = my_order[self.col_idx['timestamp']]\n return\n \n # otherwise sent next historical order\n self._send_historical_order(mktorder)",
"def queue_my_cancel(self, uid):\n \n message = self.OrdTuple(ordtype=\"cancel\",\n uid=uid,\n is_buy=np.nan,\n qty=np.nan,\n price=np.nan, \n timestamp=self._arrival_time()) \n self.my_queue.append(message)",
"def queue_my_modif(self, uid, new_qty):\n \n message = self.OrdTuple(ordtype=\"modif\",\n uid=uid,\n is_buy=np.nan,\n qty=new_qty,\n price=np.nan, \n timestamp=self._arrival_time()) \n self.my_queue.append(message)",
"def order(self, typ, price, volume):\r\n self.count_submitted += 1\r\n self.client.send_order_add(typ, price, volume)",
"def execute_order(self, event):\n if isinstance(event, OrderEvent):\n signal = FillEvent(\n event.symbol,\n date.today(),\n event.quantity,\n event.direction,\n None\n )\n self.event_queue.put(signal)",
"def _add_user_to_queue(self, user):\n now = datetime.now(tz=pytz.utc)\n HatQueue.create(user_id=user, start_time=now)\n return HatQueue.select().where(HatQueue.end_time.is_null(True), HatQueue.start_time < now).count()",
"def run(self):\r\n db_conn = order_db_connector()\r\n db_conn.process_order(self.id)\r\n schedule.every(self.order_max_lifetime).seconds.do(self.order_finish).tag(self.id)\r\n #schedule.every(5).seconds.do(self.trading_main).tag(f'{self.id}_main',self.id)\r\n self.trading_main()\r\n logger.info(\"ENDED trading main\")\r\n \"\"\"\r\n Add order status \r\n \"\"\"\r\n\r\n #Clear scheduled task to avoid task stacking in scheduler\r",
"def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n return # active buy/sell order submitted/accepted - do nothing\n\n # check if order has been completed (could reject if not enough cash)\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(f'BUY EXECUTED, {order.executed.price:.2f}')\n elif order.issell():\n self.log(f'SELL EXECUTED, {order.executed.price:.2f}')\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log('Order Canceled/Margin/Rejected')\n\n self.bar_executed = len(self)\n\n self.order = None # reset orders",
"def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"]})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def on_send_order(self, data, request):\n self.update_rate_limit(request)",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Aggregation\": order[\"Aggregation\"],\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n if self.asset_type == \"OPTION\":\n\n other[\"Pre_Symbol\"] = order[\"Pre_Symbol\"]\n\n other[\"Exp_Date\"] = order[\"Exp_Date\"]\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def _send_to_market(self, order, is_mine):\n \n \n ord_type = order[self.col_idx['ordtype']]\n if ord_type == \"new\":\n self.mkt.send(is_buy=order[self.col_idx['is_buy']],\n qty=order[self.col_idx['qty']],\n price=order[self.col_idx['price']],\n uid=order[self.col_idx['uid']],\n is_mine=is_mine,\n timestamp=order[self.col_idx['timestamp']])\n elif ord_type == \"cancel\":\n self.mkt.cancel(uid=order[self.col_idx['uid']])\n elif ord_type == \"modif\":\n self.mkt.modif(uid=order[self.col_idx['uid']], \n new_qty=order[self.col_idx['qty']])\n else:\n raise ValueError(f'Unexpected ordtype: {ord_type}')",
"def new_order(self, signal, type):\n # self.client = bitmex.bitmex(test=True, api_key=self.strategy.api_key.key, api_secret=self.strategy.api_key.secret)\n if not self.strategy.live_trade:\n self.logger.info('Notice: Trading on testnet.')\n if self.scrape_only:\n return\n self.update_position()\n self.logger.info('New Order {} {}'.format(signal, type))\n self.logger.info(\"Current Position: {}\".format(self.p))\n self.logger.info(\"Canceling all orders\")\n self.client.Order.Order_cancelAll(symbol=self.strategy.symbol).result()\n self.trigers = []\n\n if type == 'entry' and signal == 'LONG' and self.p == 0:\n\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = self.strategy.contract_size\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing LONG entry Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n if self.strategy.stop_loss:\n triger = {\n \"side\": -1,\n \"price\": order['price'] - self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss trigger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit trigger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'SHORT' and self.p == 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = self.strategy.contract_size * -1\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing Short entry Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n if self.strategy.stop_loss:\n triger = {\n \"side\": 1,\n \"price\": order['price'] + self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss trigger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take profit trigger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'LONG' and self.p < 0:\n\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n p = self.p * -1\n oq = p + self.strategy.contract_size\n ot = self.strategy.order_type\n try:\n self.logger.info(\"Placing LONG entry and Short Exit Order of {}\".format(oq))\n order = self.execute_order(oq, ot, text=\"{} {}_{}-{}_{}\".format(self.strategy.id, 
signal, type, \"SHORT\",\n \"exit\"))\n if self.strategy.stop_loss:\n triger = {\n \"side\": -1,\n \"price\": order['price'] - self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss triger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit triger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': -1,\n \"price\": order['price'] + self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'entry' and signal == 'SHORT' and self.p > 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p + self.strategy.contract_size)\n ot = self.strategy.order_type\n\n try:\n self.logger.info(\"Placing Short entry and Long Exit Order of {}\".format(oq))\n order = self.execute_order(oq, ot,\n text=\"{} {}_{}-{}_{}\".format(self.strategy.id, signal, type, \"LONG\", \"exit\"))\n if self.strategy.stop_loss:\n triger = {\n \"side\": 1,\n \"price\": order['price'] + self.strategy.stop_loss,\n \"type\": 'sl'\n }\n self.trigers.append(triger)\n self.logger.info('Stop loss triger placed at {}'.format(triger['price']))\n if self.strategy.take_profit:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.take_profit,\n \"type\": 'tp'\n }\n self.trigers.append(triger)\n self.logger.info('Take Profit triger placed at {}'.format(triger['price']))\n if self.strategy.trailing_stop:\n triger = {\n 'side': 1,\n \"price\": order['price'] - self.strategy.trailing_stop,\n 'type': 'ts'\n }\n self.trigers.append(triger)\n self.logger.info('Trailing Stop trigger placed at {}'.format(triger['price']))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'exit' and signal == 'LONG' and self.p > 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p)\n try:\n self.logger.info(\"Placing Long Exit Order of {}\".format(oq))\n self.execute_order(oq, self.strategy.order_type, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))\n\n if type == 'exit' and signal == 'SHORT' and self.p < 0:\n # self.client.Order.Order_cancelAll(symbol = self.strategy.symbol).result()\n oq = -(self.p)\n try:\n self.logger.info(\"Placing Shot Exit Order of {}\".format(oq))\n self.execute_order(oq, self.strategy.order_type, text=\"{} {}_{}\".format(self.strategy.id, signal, type))\n except Exception as ex:\n self.logger.error(\"{}: Couldn't place order {}, {} \".format(self.strategy.id, signal, type))\n self.logger.error(str(ex))\n self.logger.error(repr(ex))",
"def buy(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.buy(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || BUY %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))",
"def createOrder(self, item, units, quantity, delivery_time):\n self.order = Order(item, units, quantity, delivery_time)",
"def on_order(self, order: OrderData):\n pass",
"def on_order(self, order: OrderData):\n pass",
"def on_order(self, order: OrderData):\n pass",
"def send_exchange_request(self, user):\n self.current['user'] = user\n self.send_request(user, self.KM_REQUEST_KEY)",
"def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass",
"def purchase_item(self):\r\n self.purchased_callback()\r\n self.status = 'purchased'\r\n self.fulfilled_time = datetime.now(pytz.utc)\r\n self.save()",
"def trackOrderRequest(self):\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Order.objects.filter(date_of_order__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\t\n\t\treturn lst",
"def fulfill_order(request_data):\n # First, save this information in a receipt\n receipt = Receipt.objects.create(data=request_data)\n\n # Link the order with the receipt if we can parse it\n reference_number = request_data[\"req_reference_number\"]\n req_bill_to_email = request_data.get(\"req_bill_to_email\")\n order = Order.objects.get_by_reference_number(reference_number)\n receipt.order = order\n receipt.save()\n\n new_order_status = determine_order_status_change(order, request_data[\"decision\"])\n if new_order_status is None:\n # This is a duplicate message, ignore since it's already handled\n return\n\n order.status = new_order_status\n order.save()\n sync_hubspot_deal(order)\n\n if order.status == Order.FULFILLED:\n complete_order(order)\n if settings.ENABLE_ORDER_RECEIPTS:\n send_ecommerce_order_receipt(\n order=order, cyber_source_provided_email=req_bill_to_email\n )\n\n # Save to log everything to an audit table including enrollments created in complete_order\n order.save_and_log(None)",
"def place_order(self, order_event):\n self._check_day_data(order_event.order_time)\n if order_event.order_type == 'MARKET':\n self._fill_market_order(order_event)\n elif order_event.order_type == 'LIMIT':\n if self._check_limit_order(order_event, order_event.order_time):\n pass\n self.resting_orders.append(order_event)",
"def subscribe_user_orders(self, update_handler):\n pass",
"def place_order(env, inventory_stock):\n yield env.timeout(LEAD_TIME)\n #amount = inventory_stock.capacity - inventory_stock.level\n amount = EOQ\n print('Inventory refilled by {1} products at {0} '.format(env.now, amount))\n print('Inventory Level = {}'.format(inventory_stock.capacity))\n order_arrival_time.append(env.now)\n order_amount.append(amount)\n yield inventory_stock.put(amount)",
"def send_order(self, \n userid:int, percentage:float, order_type:str,\n base:str, trade:str, price:float, exchange=\"baseexchange\", \n is_live=False, episode=None, timestamp=time.time()):\n create_call = \"/order/send\"\n call_loc = f\"{self.address}{create_call}\"\n call_params = {\n \"userid\": userid,\n \"base\": base,\n \"trade\": trade,\n \"exchange\": exchange,\n \"pct\": percentage,\n \"action\": order_type,\n \"price\": price,\n \"spec\": {\n \"episodeid\": episode,\n \"live\": is_live\n },\n \"timestamp\": timestamp\n }\n call_params_json = json.dumps(call_params)\n self.session.post(call_loc, data=call_params_json)",
"async def payouts(ctx, time):\n await bot.say(\"Fetching rewards history for the past **\"+time+\"** days to #rewards_log\")\n await loop.create_task(send_discord(\"*Manual queing initiated*\", rewardchannel))\n await stream_rewards(time)\n await loop.create_task(send_discord(\"*Manual queing ended*\", rewardchannel))",
"def slot_user_order(self, dummy_sender, data):\r\n (price, volume, typ, oid, status) = data\r\n found = False\r\n removed = False # was the order removed?\r\n opened = False # did the order change from 'post-pending' to 'open'\"?\r\n voldiff = 0 # did the order volume change (full or partial fill)\r\n if \"executing\" in status:\r\n # don't need this status at all\r\n return\r\n if \"post-pending\" in status:\r\n # don't need this status at all\r\n return\r\n if \"removed\" in status:\r\n for i in range(len(self.owns)):\r\n if self.owns[i].oid == oid:\r\n order = self.owns[i]\r\n\r\n # work around MtGox strangeness:\r\n # for some reason it will send a \"completed_passive\"\r\n # immediately followed by a \"completed_active\" when a\r\n # market order is filled and removed. Since \"completed_passive\"\r\n # is meant for limit orders only we will just completely\r\n # IGNORE all \"completed_passive\" if it affects a market order,\r\n # there WILL follow a \"completed_active\" immediately after.\r\n if order.price == 0:\r\n if \"passive\" in status:\r\n # ignore it, the correct one with\r\n # \"active\" will follow soon\r\n return\r\n\r\n self.debug(\r\n \"### removing order %s \" % oid,\r\n \"price:\", self.gox.quote2str(order.price),\r\n \"type:\", order.typ)\r\n\r\n # remove it from owns...\r\n self.owns.pop(i)\r\n\r\n # ...and update own volume cache in the bids or asks\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )\r\n removed = True\r\n break\r\n else:\r\n for order in self.owns:\r\n if order.oid == oid:\r\n found = True\r\n self.debug(\r\n \"### updating order %s \" % oid,\r\n \"volume:\", self.gox.base2str(volume),\r\n \"status:\", status)\r\n voldiff = volume - order.volume\r\n opened = (order.status != \"open\" and status == \"open\")\r\n order.volume = volume\r\n order.status = status\r\n break\r\n\r\n if not found:\r\n # This can happen if we added the order with a different\r\n # application or the gox server sent the user_order message\r\n # before the reply to \"order/add\" (this can happen because\r\n # actually there is no guarantee which one arrives first).\r\n # We will treat this like a reply to \"order/add\"\r\n self.add_own(Order(price, volume, typ, oid, status))\r\n\r\n # The add_own() method has handled everything that was needed\r\n # for new orders and also emitted all signals already, we\r\n # can immediately return here because the job is done.\r\n return\r\n\r\n # update level own volume cache\r\n self._update_level_own_volume(\r\n typ, price, self.get_own_volume_at(price, typ))\r\n\r\n # We try to help the strategy with tracking the orders as good\r\n # as we can by sending different signals for different events.\r\n if removed:\r\n reason = self.gox.msg[\"user_order\"][\"reason\"]\r\n self.signal_own_removed(self, (order, reason))\r\n if opened:\r\n self.signal_own_opened(self, (order))\r\n if voldiff:\r\n self.signal_own_volume(self, (order, voldiff))\r\n self.signal_changed(self, None)\r\n self.signal_owns_changed(self, None)"
] | [
"0.68102014",
"0.64660317",
"0.64283824",
"0.6401807",
"0.6256527",
"0.6240372",
"0.6207156",
"0.6180311",
"0.61682606",
"0.6147138",
"0.6145767",
"0.6132009",
"0.6068847",
"0.5979409",
"0.5953211",
"0.59475",
"0.5896322",
"0.5896322",
"0.5896322",
"0.58944386",
"0.5879047",
"0.58754295",
"0.5871363",
"0.5860877",
"0.584416",
"0.58372355",
"0.581569",
"0.57758725",
"0.5770414",
"0.5759177"
] | 0.679702 | 1 |
Returns the current mkt status of an order identified by its uid. | def ord_status(self, uid):
# TODO: use ticker to select market orderbook
return self.mkt.get(uid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_status(self, order_uuid):\n portfolio = self.get_portfolio_object()\n # only take complete orders\n orders = [order for order in portfolio.orders if order.order_uuid == order_uuid]\n if orders:\n order = orders[-1]\n return order.status\n return None",
"async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]",
"async def get_order_status(self, symbol, order_id, client_order_id):\n params = {\n \"symbol\": symbol,\n \"orderId\": str(order_id),\n \"origClientOrderId\": client_order_id,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", \"/api/v3/order\", params=params, auth=True)\n return success, error",
"async def get_order_status(self, symbol, order_id, client_order_id):\n uri = \"/fapi/v1/order\"\n params = {\n \"symbol\": symbol,\n \"orderId\": str(order_id),\n \"origClientOrderId\": client_order_id,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", uri, params=params, auth=True)\n return success, error",
"def get_paytm_transaction_status(order_id):\n data = {'MID': config('PAYTM_MERCHANT_ID'),\n 'ORDERID': order_id}\n data['CHECKSUMHASH'] = urllib.parse.quote_plus(Checksum.generate_checksum(data, config('PAYTM_MERCHANT_KEY')))\n url = config('PAYTM_TRANSACTION_STATUS_URL')\n url += '?JsonData={\"MID\":\"' + data['MID'] + '\",\"ORDERID\":\"' + data['ORDERID'] + '\",\"CHECKSUMHASH\":\"' + \\\n data['CHECKSUMHASH'] + '\"}'\n response_data = requests.get(url).json()\n logger.debug(json.dumps(response_data))\n return response_data.get('STATUS')",
"async def get_order_status(self, order_no):\n uri = \"/v3/spot/order\"\n params = {\n \"order_id\": order_no\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error",
"def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result",
"def __order_status(self):\n log.debug(\"Displaying __order_status\")\n # Find the latest orders\n orders = self.session.query(db.Order) \\\n .filter(db.Order.user == self.user) \\\n .order_by(db.Order.creation_date.desc()) \\\n .limit(20) \\\n .all()\n # Ensure there is at least one order to display\n if len(orders) == 0:\n self.bot.send_message(self.chat.id, self.loc.get(\"error_no_orders\"))\n # Display the order status to the user\n for order in orders:\n self.bot.send_message(self.chat.id, order.text(w=self, session=self.session, user=True))\n # TODO: maybe add a page displayer instead of showing the latest 5 orders",
"def getOrderStatus(self):\n return self.__orderhistory[0]",
"def get_order(self, order_id):\n try:\n self.ask_request()\n response = self._request(\n 'order/status', {'order_id': int(order_id)})\n order_status = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_status:\n raise ExchangeRequestError(\n error='Unable to retrieve order status: {}'.format(\n order_status['message'])\n )\n return self._create_order(order_status)",
"def get_razorpay_transaction_status(transaction_id):\n global RazorPayClient\n return RazorPayClient.order.fetch(transaction_id)['status']",
"def fetch_order_status(order_id: str):\n try:\n return EXCHANGE.fetch_order_status(order_id)\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n fetch_order_status(order_id)",
"def get_status_of_id(sku_id):\n if not sku_id:\n return None\n\n status_query = list(sku_database.find({\"SKU_unit\": int(sku_id)}, {'_id': 0, 'Status': 1}))\n status = status_query[0][\"Status\"]\n return status",
"def get_status(self):\n\n # update status\n # TODO: this needs to consider \"partial\" status based on the testcodes that are defined\n # in the panel.\n # get the condition OK aliquot condition instance\n result_item_cls = models.get_model(self._meta.app_label, 'resultitem')\n aliquot_condition_ok = AliquotCondition.objects.get_ok()\n if not self.aliquot.aliquot_condition:\n # how can this be ??\n status = 'ERROR'\n elif result_item_cls.objects.filter(result__order=self) or self.panel.panel_type == 'STORAGE':\n # test aliquot condition and set the order status\n if self.aliquot.aliquot_condition == aliquot_condition_ok:\n status = 'COMPLETE'\n else:\n # has results or is stored but condition is not 10\n # was this meant to be a storage panel?\n status = 'ERROR'\n elif self.aliquot.aliquot_condition != aliquot_condition_ok:\n status = 'REDRAW'\n else:\n status = 'PENDING'\n # regardless of status, check that order was not deleted on DMIS\n dmis_tools = DmisTools()\n if dmis_tools.is_withdrawn_order(self):\n # other aspects of result visibility must consider this value\n status = 'WITHDRAWN'\n return status",
"def status(self):\n return self._call_txtrader_api('status', {})",
"def get_status(self):\n # TODO retrieve from db if not set\n return self.status",
"def get_status(self):\n return self.status",
"def get_status(self):\n return self.status",
"def get_status(self):\n return self.status",
"def request_order_mass_status_request(self, req):\n assert req.MsgType == Fix.Tags.MsgType.Values.ORDERMASSSTATUSREQUEST, \\\n \"Order request is not ORDERMASSSTATUSREQUEST\"\n\n return self.api_connector.send_request(\"Order\", RestApiConnector.HTTPMethod.GET, None)",
"def get_status(self):\n return self._status",
"def request_order_status_request(self, req):\n assert req.MsgType == Fix.Tags.MsgType.Values.ORDERSTATUSREQUEST, \\\n \"Order request is not ORDERSTATUSREQUEST\"\n\n params = { \"filter\": \"{\\\"orderID\\\": \\\"%s\\\"}\" % req.OrderID.value }\n return self.api_connector.send_request(\"Order\", RestApiConnector.HTTPMethod.GET, params)",
"def account_order(self, orderid):\n return self.get(f'orders/{orderid}', auth=True)",
"def status(self):\n return self.get(self._names[\"status\"])",
"def ConfirmedTradeStatus():\n return 'FO Confirmed'",
"def getstatus(self):\n return self.__status",
"def statuses(self):\n big = BigCommerceAPI()\n response = big.get('orderstatuses')\n return response.text",
"def _get_status(self):\n return self.__status",
"def get_status_by_id(cls, request, id):\n return request.dbsession.query(cls).get(id).status",
"def by_order(item):\n if \"Order\" in item:\n return item[\"Order\"]\n return 0"
] | [
"0.67004323",
"0.635158",
"0.62932646",
"0.6276117",
"0.6239673",
"0.59735906",
"0.5803745",
"0.56158584",
"0.55416226",
"0.5533915",
"0.55191296",
"0.54236466",
"0.54227835",
"0.52846724",
"0.522806",
"0.5186176",
"0.51838356",
"0.51838356",
"0.51838356",
"0.51725906",
"0.5127723",
"0.5121654",
"0.5121613",
"0.5117894",
"0.50946105",
"0.50938016",
"0.50295496",
"0.50142014",
"0.49662268",
"0.4952542"
] | 0.7893943 | 0 |
Compute the pixel error of the corresponding keypoints | def compute_pixel_err(pred_x, pred_y, gt_x, gt_y, imwidth, crop):
canonical_sz = 70
scale = canonical_sz / (imwidth - 2 * crop)
pred_x = pred_x * scale
pred_y = pred_y * scale
gt_x = gt_x * scale
gt_y = gt_y * scale
return np.sqrt((gt_x - pred_x)**2 + (gt_y - pred_y)**2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def root_square_error(input_img, output_img):\n rse = np.sqrt(np.sum(np.power(output_img - input_img, 2)))\n\n return rse",
"def calc_error_dist(self):\n pass",
"def __error(self, R, P, Q, K, beta):\n e = 0\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # loss function error sum( (y-y_hat)^2 )\n e = e + pow(R[i][j]-numpy.dot(P[i,:],Q[:,j]), 2)\n\n # add regularization\n for k in xrange(K):\n\n # error + ||P||^2 + ||Q||^2\n e = e + (beta/2) * ( pow(P[i][k], 2) + pow(Q[k][j], 2) )\n return e",
"def error(line, data): # error function\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err",
"def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)",
"def calculate_error(self, estimated_x, estimated_y):\n\n return np.sqrt((self.ball_x - estimated_x) ** 2 + (self.ball_y - estimated_y) ** 2)",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def calib_error(objpoints, imgpoints, rvecs, tvecs, mtx, dist):\r\n tot_error = 0\r\n for i in range(len(objpoints)):\r\n imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)\r\n error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)\r\n tot_error += error\r\n return tot_error / len(objpoints) # calculate the arithmetic mean of the errors for all the calibration images\r",
"def getAverageErr(self):\n TotalErr = np.zeros(self.pts3D.shape[1])\n\n for view in range(self.nViews):\n # Weights: 1 for points that appear in the image, zero otherwise\n idx_valid = self.getValidPtsInView(view)\n # Project 3D points onto the image plane\n proj_pts2D = utils.ProjectPts(self.pts3D[:, idx_valid], \\\n self.cam_poses[:, view], \\\n self.cam.KK)\n # Reprojection error for each point\n ErrView = np.sqrt( np.sum( ( self.pts2D[:, idx_valid, view] - \\\n proj_pts2D )**2, axis = 0 ))\n TotalErr[idx_valid] += ErrView\n \n # Count how many 2D views a pts3D appears\n num_views = self.getNumViews() \n\n self.avg_err = TotalErr / num_views.astype(float)\n # Average error per view\n return self.avg_err",
"def error(self) -> Sequence[float]:\n errors = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket_pos = self._fit(line.center.y)\n mlc_pos = line.center.x\n else:\n picket_pos = self._fit(line.center.x)\n mlc_pos = line.center.y\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket_pos += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n errors.append((mlc_pos - picket_pos) / self._image.dpmm)\n return errors",
"def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)",
"def calculate_error(k_means_matrix):\n return sum([min(dist) for dist in k_means_matrix])",
"def error_metric(phi_1, phi_2, spherical=False, xpts=None):\n if spherical:\n return sum(abs(phi_1-phi_2)*(xpts**2))/(2.0*sum(abs(phi_1)*(xpts**2)))\n else:\n return sum(abs(phi_1-phi_2))/(2.0*sum(phi_1))",
"def error(Y, X):\n return (Y - X) ** 2",
"def compute_error(self, X, Y):\n\n if self.method != 'knn':\n accuracy = self.classifier.score(X, Y)\n error = 1 - accuracy\n return error\n else:\n distances, indices = self.classifier.kneighbors(X)\n error = 0\n for index, ground_truth in zip(indices, Y):\n classes = [self.train_Y[neigbhor] for neigbhor in index]\n mode, _ = stats.mode(classes)\n if mode != ground_truth:\n error += 1\n\n return error / len(Y)",
"def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]",
"def calc_errors(test_data, loc_by_img):\n one_km_count = 0\n five_km_count = 0\n ten_km_count = 0\n hundred_km_count = 0\n thousand_km_count = 0\n other_count = 0\n for test_img in test_data:\n img_id = test_img['watchlink']\n img_result_loc = loc_by_img[img_id]\n img_actual_loc = Location(float(test_img['latitude']), float(test_img['longitude']))\n error = Location.dist(img_result_loc, img_actual_loc)\n if error < 1:\n one_km_count += 1\n elif error < 5:\n five_km_count += 1\n elif error < 10:\n ten_km_count += 1\n elif error < 100:\n hundred_km_count += 1\n elif error < 1000:\n thousand_km_count += 1\n else:\n other_count += 1\n return [one_km_count, five_km_count, ten_km_count, hundred_km_count, thousand_km_count, other_count]",
"def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] = 1.",
"def cps_err(self):\n return np.sqrt(self.totalcounts) / self.exptime",
"def _getErrorFunction(self):\n\n\t\treturn (self._setpoint - self._current)",
"def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)",
"def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum",
"def get_error_rate(self, points, labelled_centroids):\n classified_incorrect = 0\n for (label, point) in points:\n classified_label = self.classify_point(point, labelled_centroids)\n if classified_label != label:\n classified_incorrect +=1\n error_rate = classified_incorrect / float(len(points))\n return error_rate",
"def error(self, X, y):\n predicted = self.predict(X)\n y = self.transformy(y)\n return 1 - (y == predicted).sum() / predicted.size",
"def sketch_error_bound(self,A, k):\n return self.sketcher.error_bound(A,k)",
"def get_image_deviation(img1, img2):\n if img1 is None or img2 is None:\n logger.warning(\"Image if None, therefore deviation is 0.\")\n return 0\n\n dev_img = np.abs(img1.astype(np.int16) - img2.astype(np.int16)).astype(np.uint8)\n dev_per_pixel = np.mean(dev_img) / 255\n return dev_per_pixel",
"def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse",
"def _calculate_error(self, targets):\n\n def hist(d):\n f, v = histogram(array(d))\n i = len(f) if argmax(f) == len(f) - 1 else argmax(f)\n return v[i]\n\n devxs, devys = list(zip(*[r.dev_centroid for r in targets]))\n\n if len(targets) > 2 and self.use_histogram:\n dx = hist(devxs)\n dy = hist(devys)\n else:\n\n def avg(s):\n return sum(s) / len(s)\n\n dx = avg(devxs)\n dy = avg(devys)\n\n return -dx, dy",
"def get_reproj_errors(img_idx, points3d_with_views, R, t, K, keypoints, distCoeffs=np.array([])):\n points_3d, points_2d, pt3d_idxs = prep_for_reproj(img_idx, points3d_with_views, keypoints)\n rvec, _ = cv2.Rodrigues(R)\n projPoints, _ = cv2.projectPoints(points_3d, rvec, t, K, distCoeffs=distCoeffs)\n projPoints = np.squeeze(projPoints)\n avg_error, errors = calculate_reproj_errors(projPoints, points_2d)\n\n return points_3d, points_2d, avg_error, errors",
"def get_error(intercept, slope, points):\n error_value = 0\n for i in range(0, len(points)):\n error_value += (points[i].y - (slope * points[i].x + intercept)) ** 2\n return error_value / float(len(points))"
] | [
"0.61242414",
"0.6060558",
"0.6023828",
"0.59681743",
"0.5867764",
"0.5859218",
"0.58545613",
"0.57950455",
"0.57770866",
"0.57720906",
"0.5771524",
"0.57699007",
"0.5763964",
"0.5750326",
"0.573165",
"0.57013786",
"0.56941974",
"0.56742144",
"0.5662502",
"0.5633745",
"0.5598023",
"0.55728865",
"0.55689657",
"0.5567799",
"0.5555998",
"0.5538679",
"0.55188274",
"0.55179334",
"0.5515975",
"0.55126786"
] | 0.67416435 | 0 |
parameter sweep names/values in compact form | def labels(self):
param=self
l=len(param)
sweep_label=[]
for index,name in enumerate(param.names):
            sweep_label.append(
                ''.join([c for c in name if c.isupper()])
                .replace("IDT","")
                .replace("S","")
                .replace("M",""))
stringout=[]
unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}
for i in range(l):
tmp_lab=''
for lab,name in zip(sweep_label,self.names):
tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))
stringout.append(tmp_lab)
return stringout | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parameter_names(self) -> List[str]:",
"def flatten_parameters(self):",
"def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }",
"def _cls_repr(self):\n prefixes = []\n for k in self.names:\n # list only params with not default values\n if self[k].isDefault:\n continue\n prefixes.append(\"%s=%s\" % (k, self[k].value))\n return prefixes",
"def _get_param_names_transformed(self):\r\n n = self._get_param_names()\r\n\r\n # remove/concatenate the tied parameter names\r\n if len(self.tied_indices):\r\n for t in self.tied_indices:\r\n n[t[0]] = \"<tie>\".join([n[tt] for tt in t])\r\n remove = np.hstack([t[1:] for t in self.tied_indices])\r\n else:\r\n remove = np.empty(shape=(0,), dtype=np.int)\r\n\r\n # also remove the fixed params\r\n if len(self.fixed_indices):\r\n remove = np.hstack((remove, np.hstack(self.fixed_indices)))\r\n\r\n # add markers to show that some variables are constrained\r\n for i, t in zip(self.constrained_indices, self.constraints):\r\n for ii in i:\r\n n[ii] = n[ii] + t.__str__()\r\n\r\n n = [nn for i, nn in enumerate(n) if not i in remove]\r\n return n",
"def get_swp_values(self, var: str) -> List[Any]:\n return self._sweep_params[var]",
"def pretty(self):\n def arg_to_str(name, value):\n if value is True:\n return '+' + name\n elif value is False:\n return '~' + name\n elif isinstance(value, Var):\n if value.name == name:\n return '?' + value.name\n return name + \"=\" + value.name\n else:\n return name + \"=\" + repr(value)\n\n if len(self.args) == 0:\n return self.name\n return \"{}[{}]\".format(self.name,\n \", \".join(arg_to_str(name, value)\n for name, value in self.args))",
"def return_parameter_names():\n return list(titles), list(labels)",
"def get_layer_var_names(self):\n return(self.params)",
"def param_strs(self):\n name_len = max(len(p.name) for p in self)\n value_len = max(len(p.value_str) for p in self.params.values())\n units_len = max(len(p.units) for p in self.params.values())\n return [(p.name.ljust(name_len), p.value_str.ljust(value_len),\n p.units.ljust(units_len), p.__doc__)\n for p in self.params.values() if p]",
"def getparameters(filter,title = \"\"):\n vardic = filter.__dict__\n for i in list(vardic.keys()):\n if vardic[i] is not None:\n title += \" \"+i+\": \"\n title += str(vardic[i])+\",\"\n return title[:-1]",
"def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params",
"def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b",
"def serialize_parameter(name,values):\n l=[]\n for v in values:\n l.append(\"%s='%s'\"%(name,v))\n\n if len(l)>0:\n buf=\" OR \".join(l)\n return \"(%s)\"%buf\n else:\n return \"\"",
"def _create_parameter_names(self):\n self._parameter_names = self.parameter_schema[\"problem\"][\"names\"]",
"def _fc_in_parameters(self) -> List[str]:\n result = list() # type: List[str]\n\n for param in self.params:\n type_list = param.fc_type()\n for type_name, postfix in type_list:\n result.append('{} {}'.format(type_name, param.name + postfix))\n\n return result",
"def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters",
"def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)",
"def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []",
"def reconstruct_for_smac(self, selected_vals, kv):\n pass",
"def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]",
"def _create_parameter_names(self):\n self._parameter_names = [key for key in self.parameter_schema.keys() if key != 'num_simulations']",
"def __str__(self, nw=30):\r\n names = self._get_param_names()\r\n #if names is None:\r\n # names = self._get_print_names()\r\n #name_indices = self.grep_param_names(\"|\".join(names))\r\n N = len(names)\r\n\r\n if not N:\r\n return \"This object has no free parameters.\"\r\n header = ['Name', 'Value', 'Constraints', 'Ties']\r\n values = self._get_params() # map(str,self._get_params())\r\n #values = self._get_params()[name_indices] # map(str,self._get_params())\r\n # sort out the constraints\r\n constraints = [''] * len(names)\r\n #constraints = [''] * len(self._get_param_names())\r\n for i, t in zip(self.constrained_indices, self.constraints):\r\n for ii in i:\r\n constraints[ii] = t.__str__()\r\n for i in self.fixed_indices:\r\n for ii in i:\r\n constraints[ii] = 'Fixed'\r\n # sort out the ties\r\n ties = [''] * len(names)\r\n for i, tie in enumerate(self.tied_indices):\r\n for j in tie:\r\n ties[j] = '(' + str(i) + ')'\r\n\r\n if values.size == 1:\r\n values = ['%.4f' %float(values)]\r\n else:\r\n values = ['%.4f' % float(v) for v in values]\r\n max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])\r\n max_values = max([len(values[i]) for i in range(len(values))] + [len(header[1])])\r\n max_constraint = max([len(constraints[i]) for i in range(len(constraints))] + [len(header[2])])\r\n max_ties = max([len(ties[i]) for i in range(len(ties))] + [len(header[3])])\r\n cols = np.array([max_names, max_values, max_constraint, max_ties]) + 4\r\n # columns = cols.sum()\r\n\r\n header_string = [\"{h:^{col}}\".format(h=header[i], col=cols[i]) for i in range(len(cols))]\r\n header_string = map(lambda x: '|'.join(x), [header_string])\r\n separator = '-' * len(header_string[0])\r\n param_string = [\"{n:^{c0}}|{v:^{c1}}|{c:^{c2}}|{t:^{c3}}\".format(n=names[i], v=values[i], c=constraints[i], t=ties[i], c0=cols[0], c1=cols[1], c2=cols[2], c3=cols[3]) for i in range(len(values))]\r\n\r\n\r\n return ('\\n'.join([header_string[0], separator] + param_string)) + '\\n'",
"def summary_parameters(self):\n return {'filters': ', '.join(self.getOption('filters'))}",
"def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s",
"def parameter_names(self):\n return [x for x in self.transformations.values() if isinstance(x, str)]",
"def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))",
"def parameters(self):",
"def format_freeform_params(self):\n return self.format_param_pairs(self.get_freeform_reg_params())",
"def _formal_params(self, doclet):\n name, paren, params = self.arguments[0].partition('(')\n return ('(%s' % params) if params else '(%s)' % ', '.join(doclet['meta']['code']['paramnames'])"
] | [
"0.5911144",
"0.57292724",
"0.5718404",
"0.5714987",
"0.5709971",
"0.5680104",
"0.56399965",
"0.56001055",
"0.5525521",
"0.54950786",
"0.54378587",
"0.5428696",
"0.5421627",
"0.54177386",
"0.54092795",
"0.5373104",
"0.5367285",
"0.5366399",
"0.5350676",
"0.53308094",
"0.5325102",
"0.53249717",
"0.52877456",
"0.52864915",
"0.5278051",
"0.5269328",
"0.525649",
"0.524557",
"0.52427495",
"0.5202016"
] | 0.62087077 | 0 |
Combine two SweepParams. The parameters are combined with another SweepParam instance to generate a new SweepParam. If the instance has a length M and the parameter has a length N, the resulting SweepParam will have length MxN. | def combine(self,sweep2):
sweep1=self
if not isinstance(sweep2,SweepParam):
raise ValueError("the parameter needs to be a SweepParam")
init_names=sweep1.names
new_names=sweep2.names
init_values=[]
for x in sweep1.values:
if isinstance(x,np.ndarray):
init_values.append(x.tolist())
else:
init_values.append(x)
new_values=[]
for x in sweep2.values:
if isinstance(x,np.ndarray):
new_values.append(x.tolist())
else:
new_values.append(x)
if any([name in new_names for name in init_names]):
raise ValueError("Unexpected behaviour:at least one sweep parameter is repeated")
if len(init_values)>1:
init_values=[_ for _ in zip(*init_values)]
else:
init_values=init_values[0]
if len(new_values)>1:
new_values=[_ for _ in zip(*new_values)]
else:
new_values=new_values[0]
import itertools
tot_values=[_ for _ in itertools.product(init_values,new_values)]
new_length=len(tot_values)
def flatten(L):
for item in L:
try:
yield from flatten(item)
except TypeError:
yield item
tot_values=[_ for _ in flatten(tot_values)]
dict_new={x : [] for x in init_names+new_names}
for index in range(new_length):
for name in dict_new.keys():
dict_new[name].append(tot_values.pop(0))
return SweepParam(dict_new) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__(self, other: 'ModelParameters') -> 'ModelParameters':\n return ModelParameters([self[idx] + other[idx] for idx in range(len(self))])",
"def __radd__(self, other: 'ModelParameters') -> 'ModelParameters':\n return self.__add__(other)",
"def extend(self, other):\n overlap = [key for key in other.defaults if key in self.defaults]\n if overlap:\n raise ValueError(\n \"Duplicate hyperparameter(s): %s\" % \" \".join(overlap))\n new = dict(self.defaults)\n new.update(other.defaults)\n return HyperparameterDefaults(**new)",
"def merge_param(name: str, a: Optional[T], b: Optional[T]) -> T:\n if a is None and b is None:\n raise ValueError(f'Parameter \"{name}\" must be passed to the constructor or at call time.')\n if a is not None and b is not None:\n raise ValueError(f'Parameter \"{name}\" was passed to the constructor and at call time.'\n ' Should be passed just once.')\n if a is None:\n return b\n else:\n return a",
"def update_params(self, other):\n if isinstance(other, Params):\n found = False\n for key, param in other._src.items():\n if key in self._src:\n self._src[key] = param\n found = True\n\n if not found:\n raise RuntimeError(\n \"Tried to set parameters which do not exist in the target model.\"\n )\n else:\n raise RuntimeError(\"Attempt to stream non-parameter list to parameter list.\")",
"def combine(cls, first: 'Output', second: 'Output') -> 'Output':\n return cls(\n first.output,\n second.target,\n second.input,\n second.params or first.params,\n first.delay + second.delay,\n times=first.times if second.times < 0\n else second.times if first.times < 0\n else min(first.times, second.times),\n inst_out=first.inst_out,\n inst_in=second.inst_in,\n comma_sep=first.comma_sep and second.comma_sep,\n )",
"def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()",
"def add_(self, other: 'ModelParameters'):\n for idx in range(len(self)):\n self.parameters[idx] += other[idx]",
"def add_params(self, params, module, prefix=''):\n # get param-wise options\n bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)\n bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)\n norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)\n dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.)\n bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)\n\n # special rules for norm layers and depth-wise conv layers\n is_norm = isinstance(module,\n (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))\n is_dwconv = (\n isinstance(module, torch.nn.Conv2d)\n and module.in_channels == module.groups)\n\n for name, param in module.named_parameters(recurse=False):\n param_group = {'params': [param]}\n if not param.requires_grad:\n params.append(param_group)\n continue\n if bypass_duplicate and self._is_in(param_group, params):\n warnings.warn(f'{prefix} is duplicate. It is skipped since '\n f'bypass_duplicate={bypass_duplicate}')\n continue\n # bias_lr_mult affects all bias parameters except for norm.bias\n if name == 'bias' and not is_norm:\n param_group['lr'] = self.base_lr * bias_lr_mult\n # apply weight decay policies\n if self.base_wd is not None:\n # norm decay\n if is_norm:\n param_group[\n 'weight_decay'] = self.base_wd * norm_decay_mult\n # depth-wise conv\n elif is_dwconv:\n param_group[\n 'weight_decay'] = self.base_wd * dwconv_decay_mult\n # bias lr and decay\n elif name == 'bias':\n param_group[\n 'weight_decay'] = self.base_wd * bias_decay_mult\n params.append(param_group)\n\n for child_name, child_mod in module.named_children():\n child_prefix = f'{prefix}.{child_name}' if prefix else child_name\n self.add_params(params, child_mod, prefix=child_prefix)",
"def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])",
"def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])",
"def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]",
"def set_params(self, w, b):\n self.w = w\n self.b = b\n return",
"def merged_parameters(self, parameters):\n result = self.__params.copy()\n for k, v in parameters.iteritems():\n result[k] = v\n return result",
"def Params(cls) -> InstantiableParams:\n p = super().Params()\n p.Define('sub', None, 'The param of the sub-layer.')\n p.Define('x_times', 0, 'Num times to repeat sub.')\n p.Define(\n 'unpack_summaries', False,\n 'If true, unpack summaries to the individual values from each loop'\n ' iterations.')\n p.Define(\n 'checkpoint_policy', recurrent.AutodiffCheckpointType.SAVE_NOTHING,\n 'How to checkpoint residuals for BProp: save nothing, dot only or '\n 'dot with no batch dimensions.')\n wp = p.weight_split_dims_mapping\n wp.Define('sub', None, 'How the list of subs should be sharded.')\n return p",
"def addPppParams(model):\n \n ### GAPDP Parameters ####\n model.addParameter('GAPDP','KmSub2',0.385) # nadp\n model.addParameter('GAPDP','KmProd2',0.202) # nadph\n model.addParameter('GAPDP','kcatF',2.8)\n model.addParameter('GAPDP','kcatR',0)\n\n ### FMETTRS Parameters ###\n model.addParameter('FMETTRS','kcatF',0.45)\n\n ### MTHFC Parameters ###\n model.addParameter('MTHFC','kcatF',185)\n\n #### GHMT2 Paramters ####\n model.addParameter('GHMT2','kcatF',0.0)\n model.addParameter('GHMT2','kcatR',0.0)\n \n #### TKT1 Parameters ####\n model.addParameter('TKT1',rxnFormKey='kcatF',value=20.58)\n model.addParameter('TKT1',rxnFormKey='kcatR',value=0.8)\n \n model.addParameter('TKT1',rxnFormKey='KmSub1',value=0.743) #g3p\n model.addParameter('TKT1',rxnFormKey='KmSub2',value=3.7298) #s7p\n model.addParameter('TKT1',rxnFormKey='KmProd1',value=0.4717) #r5p\n model.addParameter('TKT1',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TKT2 Parameters ####\n model.addParameter('TKT2',rxnFormKey='kcatF',value=26.87)\n model.addParameter('TKT2',rxnFormKey='kcatR',value=1.4)\n \n model.addParameter('TKT2',rxnFormKey='KmSub1',value=0.25) #f6p\n model.addParameter('TKT2',rxnFormKey='KmSub2',value=0.743) #g3p\n model.addParameter('TKT2',rxnFormKey='KmProd1',value=0.0227) #e4p\n model.addParameter('TKT2',rxnFormKey='KmProd2',value=0.134) #xu5p\n \n #### TALA Parameters ####\n model.addParameter('TALA',rxnFormKey='kcatF',value=22.3)\n model.addParameter('TALA',rxnFormKey='kcatR',value=0.54)\n \n model.addParameter('TALA',rxnFormKey='KmSub1',value=0.0401) #e4p\n model.addParameter('TALA',rxnFormKey='KmSub2',value=0.6688) #f6p\n model.addParameter('TALA',rxnFormKey='KmProd1',value=1.9) #g3p\n model.addParameter('TALA',rxnFormKey='KmProd2',value=0.285) #s7p\n\n \n #### Speed up DGSN Pathway ####\n model.addParameter('DGSNK',rxnFormKey='kcatF',value=2.25)\n\n #### Speed up DADN pathway ####\n model.addParameter('PUNP2',rxnFormKey='kcatF',value=13.3)\n\n #### Speed up FBA rxn ####\n #model.addParameter('FBA',rxnFormKey='kcatF',value=64.5)\n\n model.addParameter('RNDR2',rxnFormKey='KmSub1',value=0.24)\n\n \n# #### RPI Parameters ####\n model.addParameter('RPI',rxnFormKey='kcatF',value=10.0)\n model.addParameter('RPI',rxnFormKey='kcatR',value=1.0)\n \n #model.addParameter('RPI',rxnFormKey='KmSub1',value=1.0)\n #model.addParameter('RPI',rxnFormKey='KmProd1',value=1.0)\n \n model.addParameter('FBA',rxnFormKey='KmSub1',value=0.12)\n model.addParameter('FBA',rxnFormKey='KmProd2',value=0.05)\n \n \n model.addParameter('GAPD',rxnFormKey='kcatF',value=442.0) \n model.addParameter('GAPD',rxnFormKey='kcatR',value=73.6) \n \n\n model.addParameter('FBA',rxnFormKey='kcatR',value=12.6)\n \n\n model.addParameter('TPI',rxnFormKey='kcatR',value=67)\n \n model.addParameter('TPI',rxnFormKey='KmSub1',value=0.077)\n model.addParameter('TPI',rxnFormKey='KmProd1',value=0.084) \n \n\n model.addParameter('FBA',rxnFormKey='kcatF',value=21.0)\n \n \n model.addParameter('PGK',rxnFormKey='kcatR',value=3.4)\n \n model.addParameter('PGM',rxnFormKey='KmSub1',value=3.6)\n model.addParameter('PGM',rxnFormKey='KmProd1',value=0.2)\n \n \n model.addParameter('PGK',rxnFormKey='KmSub1',value=0.01)\n model.addParameter('PGK',rxnFormKey='KmProd1',value=0.1)\n \n \n model.addParameter('GAPD',rxnFormKey='KmProd1',value=0.47)\n model.addParameter('GAPD',rxnFormKey='KmProd2',value=0.061)\n \n \n model.addParameter('DRPA',rxnFormKey='kcatR',value=34.0)\n \n model.addParameter('DRPA',rxnFormKey='KmProd1',value=0.267)\n 
model.addParameter('DRPA',rxnFormKey='KmProd2',value=0.2)\n\n \n model.addParameter('PPM2',rxnFormKey='kcatF',value=173)\n \n model.addParameter('PPM2',rxnFormKey='KmSub1',value=0.013)\n model.addParameter('PPM2',rxnFormKey='KmProd1',value=1.2)\n\n\n\n# print('Updated PPP Parameters')\n\n return",
"def __init__(self, v1, v2):\n self.v1 = v1\n self.v2 = v2\n self.p1 = 0\n self.p2 = 0\n self.n1 = len(v1)\n self.n2 = len(v2)\n self.first = True",
"def Pool2DOptionsAddStrideW(builder, strideW):\n return AddStrideW(builder, strideW)",
"def set_params(self, **params):\n self.check_params(params)\n self.sk_params.update(params)\n return self",
"def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def __add__(self, other):\n s = Shape([])\n for i,p in enumerate(self.pts):\n s.add_point(p + other.pts[i])\n return s",
"def enrich_params(self):\n\n self.params['nmaps'] = len(self.params['probes']) + np.sum(self.params['spins'] == 2)\n\n pass",
"def get_param_combinations(cls):\n for key, val in cls.param.items():\n if not isinstance(val, (list, Quantity)):\n cls.param[key] = [val]\n elif isinstance(val, Quantity) and val.size == 1:\n try:\n # check if val.value is iterable, e.g. a list or a NumPy array\n iter(val.value)\n except:\n cls.param[key] = [val.value] * val.unit\n combos = tuple(dict(zip(cls.param, combo)) for combo in it.product(*cls.param.values()))\n return tuple(c for c in filter(cls._param_validator, combos))",
"def ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)",
"def write_param(self):\n param_file = f\"{self.name}.snapparam\"\n coeff_file = f\"{self.name}.snapcoeff\"\n model = self.model\n describer = self.model.describer\n profile = describer.element_profile\n ne = len(self.elements)\n nbc = len(describer.subscripts)\n if describer.quadratic:\n nbc += int((1 + nbc) * nbc / 2)\n\n coeff_lines = []\n coeff_lines.append(f\"{ne} {nbc + 1}\")\n for element, coeff in zip(self.elements, np.split(model.model.coef_, ne)):\n coeff_lines.append(f\"{element} {profile[element]['r']} {profile[element]['w']}\")\n coeff_lines.extend([str(c) for c in coeff])\n with open(coeff_file, \"w\") as f:\n f.write(\"\\n\".join(coeff_lines))\n\n param_lines = []\n keys = [\"rcutfac\", \"twojmax\"]\n param_lines.extend([f\"{k} {getattr(describer, k)}\" for k in keys])\n param_lines.extend([\"rfac0 0.99363\", \"rmin0 0\"])\n param_lines.append(f\"quadraticflag {int(describer.quadratic)}\")\n param_lines.append(\"bzeroflag 0\")\n with open(param_file, \"w\") as f:\n f.write(\"\\n\".join(param_lines))\n\n pair_style = self.pair_style\n pair_coeff = self.pair_coeff.format(\n elements=\" \".join(self.elements), coeff_file=coeff_file, param_file=param_file\n )\n return [pair_style, pair_coeff]",
"def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)",
"def Pool2DOptionsAddPadding(builder, padding):\n return AddPadding(builder, padding)",
"def set_params(self, params):\n self._W = np.reshape(params[0:self._W.size], self._W.shape)\n end = self._W.size + self._b.size\n self._b = np.reshape(params[self._W.size:end], self._b.shape)",
"def copy_params(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)",
"def welding_state_add(self, ws1, ws2):\n\n if ws2 is None:\n return ws1\n\n if ws2.amperage is not None:\n ws1.amperage += ws2.amperage\n\n if ws2.voltage is not None:\n ws1.voltage += ws2.voltage\n\n if ws2.filler_speed is not None:\n ws1.filler_speed += ws2.filler_speed\n\n if ws2.default_arc_length is not None:\n ws1.default_arc_length += ws2.default_arc_length\n\n if ws2.mode is not None:\n ws1.mode = ws2.mode\n\n if ws2.speed is not None:\n ws1.speed += ws2.speed\n\n return ws1"
] | [
"0.5873824",
"0.56465286",
"0.5540291",
"0.5447841",
"0.5389237",
"0.5358258",
"0.5308901",
"0.52810967",
"0.5273042",
"0.5264155",
"0.5264155",
"0.51643217",
"0.5134226",
"0.5092758",
"0.50632066",
"0.50562054",
"0.5029355",
"0.4981182",
"0.49767885",
"0.49306467",
"0.49202284",
"0.49022377",
"0.4886",
"0.48840153",
"0.4878061",
"0.48375472",
"0.4825871",
"0.48246467",
"0.4811091",
"0.48044714"
] | 0.76385486 | 0 |
A pandas.DataFrame that represents all the parameters in PArray.device | def table(self):
param=self.x_param
device=self.device
base_params=device.get_params()
data_tot=DataFrame()
for i in range(len(param)):
print_index=1
for name in param.names:
device._set_params(param(i))
device.draw()
df=device.export_all()
if self.labels_bottom is not None:
index=self.labels_bottom[i]
else:
index=str(i)
print("Generating table, item {} of {}\r".format(print_index,len(param)),end="")
data_tot=data_tot.append(Series(df,name=index))
device._set_params(base_params)
return data_tot | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pandas(self):\n names,prior,posterior = [],[],[]\n for iname,name in enumerate(self.posterior_parameter.row_names):\n names.append(name)\n posterior.append(np.sqrt(float(\n self.posterior_parameter[iname, iname]. x)))\n iprior = self.parcov.row_names.index(name)\n prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))\n for pred_name, pred_var in self.posterior_prediction.items():\n names.append(pred_name)\n posterior.append(np.sqrt(pred_var))\n prior.append(self.prior_prediction[pred_name])\n return pd.DataFrame({\"posterior\": posterior, \"prior\": prior},\n index=names)",
"def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()",
"def as_DF(self):\n\n gs_df = pd.DataFrame(self.P, columns=self.xvec, index=self.yvec)\n gs_df.columns.name = 'x'\n gs_df.index.name = 'y'\n\n return gs_df",
"def __array__(self):\n return pa.column(\"dummy\", self.data).to_pandas().values",
"def create_df(Varr, Iarr, POA, T, mode):\n df = pd.DataFrame()\n df['voltage'] = Varr\n df['current'] = Iarr\n df['E'] = POA\n df['T'] = T\n df['mode'] = mode\n return df",
"def pd(self, *args, **kwargs):\n return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)",
"def dataframe(self):\n return self.generator.dataframe",
"def get_data(self)->pd.DataFrame:\n pass",
"def to_frame(self) -> pd.DataFrame:\n df = pd.DataFrame(data={\n 'Name': [p.name for p in self],\n 'Description': [p.desc for p in self],\n 'Value': [p.value for p in self],\n 'Hyper-Space': [p.hyper_space for p in self]\n }, columns=['Name', 'Description', 'Value', 'Hyper-Space'])\n return df",
"def dataframe(self):\n return self.get_target().dataframe()",
"def dataframe(self):\n df = pd.DataFrame({'x':self.x, 'y':self.y, 'd':self.d})\n\n if self.z is not None:\n for k, v in self.z.items():\n df[k] = v\n\n return df",
"def get_df(self):\n return pd.DataFrame(self.points)",
"def sa_pandas_init(self):\n\n lca = self.lca\n\n ind_activity = 0\n ind_product = 1\n ind_biosphere = 2\n\n cols = []\n rows = []\n inputs = []\n\n #All exchanges in inputs\n for input_ in self.inputs:\n\n if input_ == 'biosphere':\n continue\n\n for i in self.inputs_dict[input_]['tech_params']:\n act = lca.reverse_dict() [ind_activity] [i['col']]\n prod = lca.reverse_dict() [ind_product] [i['row']]\n cols += [ bw.get_activity(act) ['name'] ]\n rows += [ bw.get_activity(prod)['name'] ]\n inputs += [input_]\n for j in self.inputs_dict[input_]['bio_params']:\n act = lca.reverse_dict() [ind_activity] [j['col']]\n bio = lca.reverse_dict() [ind_biosphere] [j['row']]\n cols += [ bw.get_activity(act) ['name'] ]\n rows += [ bw.get_activity(prod)['name'] ]\n inputs += [input_]\n\n if self.parameters != None:\n # All parameters\n parameters_names_list = [name for name in self.parameters_array['name']]\n cols += parameters_names_list\n rows += parameters_names_list\n inputs += ['Parameters']*len(parameters_names_list)\n\n df = pd.DataFrame([inputs, rows, cols], index = ['Inputs', 'Products or flows', 'Activities'])\n df = df.transpose()\n\n self.sensitivity_indices_df = df",
"def data_frame_creator(self):\n\n return pd.DataFrame()",
"def data(self):\n return self.as_named_DataFrame()",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def build_dataframe(self):\n #Freq 0.0 2.5\n #ElementID NodeID Item\n #6901 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc 13.847674-0.461543j 13.855294-0.462052j\n # sd 0.625892-0.020861j 0.623742-0.020717j\n # se -12.178029+0.405894j -12.185331+0.406381j\n # sf 1.043753-0.034788j 1.046222-0.034953j\n # 6904 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc -1.660571-0.416504j -1.663256-0.416978j\n # sd -2.790551+0.024178j -2.789738+0.024356j\n # se 0.627616+0.450933j 0.629571+0.451455j\n # sf 1.757596+0.010251j 1.756053+0.010121j\n #6902 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n headers = self.headers\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = self._build_pandas_transient_element_node(\n column_values, column_names,\n headers, self.element_node, self.data)",
"def get_obj_df(self) -> pd.DataFrame:\n df = pd.DataFrame(self.obj, columns=[\"x\", \"y\", \"m\", \"dx\", \"dy\"])\n df['iter'] = self.current_iteration\n return df",
"def to_vdf(self):\n\t\treturn (vDataFrame(self.name, self.cursor))",
"def _create_devices(dev_list, index=None):\n if index is not None:\n return pd.DataFrame(columns=dev_list, index=index)\n else:\n return pd.DataFrame(columns=dev_list)",
"def dataframe(self):\n dictionary = OrderedDict(zip(self.keys, [[value] for value in self.values]))\n dataframe = pd.DataFrame(dictionary)\n return dataframe",
"def to_df(self):\r\n return pd.DataFrame([dict(self)])",
"def transform(self, X: np.ndarray) -> pd.core.frame.DataFrame:\r\n return pd.DataFrame(X, columns=self.attributes_names)",
"def query2df(query):\n df = pd.DataFrame(data = list(itertools.product([0, 1], repeat=len(query.variables))), columns=query.variables)\n df['p'] = query.values.flatten()\n return df",
"def to_dataframe(self, value_column=\"* values *\") -> pd.DataFrame:\n index, vals = self.vector.to_values()\n df = self.schema.decode_many(index, self.dims_list)\n df[value_column] = vals\n return df",
"def df_sdb(self):\n df = pd.DataFrame(index=self.sdb_net.keys())\n df[\"out\"] = [-q for q in self.sdb_out.values()] # assign negative values\n df[\"in\"] = self.sdb_in.values()\n df[\"net\"] = self.sdb_net.values()\n return df",
"def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe",
"def parameterArray(self, expand=False, factor=False, simplify=False):\n return (self.bTable(expand=expand, factor=factor, simplify=simplify),\n self.cTable(expand=expand, factor=factor, simplify=simplify))",
"def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n pd_df[name] = np.copy(self[name])\n return pd_df",
"def construct_data_frame(self) -> pd.DataFrame:\n data_frame = self.base_data_frame[\n [self.name_col, self.description_col]\n ].reset_index()\n data_frame.columns = [\"label_encoder\", \"name\", \"description\"]\n\n return data_frame.set_index(\"label_encoder\")"
] | [
"0.6849395",
"0.64293045",
"0.6400187",
"0.63682944",
"0.6342038",
"0.6328716",
"0.6227246",
"0.6214504",
"0.6160134",
"0.61536926",
"0.61372817",
"0.6065142",
"0.6061857",
"0.60402805",
"0.60186565",
"0.60112226",
"0.5987733",
"0.59752315",
"0.59647155",
"0.5960611",
"0.5909411",
"0.5892218",
"0.5882728",
"0.58604664",
"0.58566433",
"0.5850467",
"0.5838722",
"0.5816173",
"0.58152115",
"0.58125526"
] | 0.6891203 | 0 |
Automatically generate labels to attach to array cells. Resulting labels are stored in the "labels_top" and "labels_bottom" attributes. | def auto_labels(self,top=True,bottom=True,top_label='',bottom_label='',\
col_index=0,row_index=0):
param=self.x_param
top_label=[top_label+" "+ x for x in param.labels]
bottom_label=[bottom_label+"{:02d} x {:02d}".format(col_index,y) for y in range(row_index,row_index+len(param))]
if top==True :
self.labels_top=top_label
else:
self.labels_top=None
if bottom==True :
self.labels_bottom=bottom_label
else:
self.labels_bottom=None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def autolabel(rects,array,axis,dist):\n ctr = 0\n label_array = [EM.truncate(v*100,1) for v in array]\n for entry in range(len(label_array)):\n if(label_array[entry]>=0) and (label_array[entry]<=1):\n label_array[entry] = EM.truncate(array[entry]*100,2)\n\n\n for rect in rects:\n height = rect.get_height()\n if(axis=='1'):\n ax1.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n elif(axis=='2'):\n ax2.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n elif(axis=='3'):\n ax3.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n elif(axis=='4'):\n ax4.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n ctr = ctr + 1",
"def autolabel(rects):",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n # ax.text(rect.get_x() + rect.get_width() / 2., 1.22 * height,\n # '%d' % int(height),\n # ha='center', va='bottom')",
"def create_labels(self):\n for name in self.name_to_phone:\n temp_labels = Label(text=name)\n self.root.ids.main.add_widget(temp_labels)",
"def autolabel(rects):\n #for rect in rects:\n for i in range(len(rects)):\n rect = rects[i]\n height = rect.get_height()\n ax.annotate('{}'.format(('%.2f' % (height)) + '% of\\n' + ('%d' % range_data[i].shape[0]) + ' people' ),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def autolabel(rects):\n\t for rect in rects:\n\t\theight = rect.get_height()\n\t\tax.text(rect.get_x() + rect.get_width()/2., 1.01*height,\n\t\t '%d' % int(height),\n\t\t ha='center', va='bottom')",
"def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n # ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,\n # '%d' % int(height),\n # ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n height = np.round(height, 3)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n fontsize=20)",
"def generate_labels(pics):\r\n return []",
"def add_labels(self, labels):\n for i, axis in enumerate(self.bottom):\n self.grid[axis].set_xlabel(labels[i])\n\n for i, axis in enumerate(np.array(self.left)[-1::-1]):\n if axis == self.upperleft:\n continue\n\n self.grid[axis].set_ylabel(labels[i]) \n\n pl.draw()",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(np.around(height,2)),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)",
"def autolabel(rects): #source: [.........]\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d' % height.round(1),\n ha='center', va='bottom')",
"def drawLabels(self):\r\n if self.sensors == None or self.sensors == []:\r\n return\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[0]])\r\n self.c.create_text(30,20,text=self.sensors[self.sensor_ids[0]],fill=col,anchor=tk.NW)\r\n if len(self.sensor_ids) == 2:\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[1]])\r\n self.c.create_text(30,40,text=self.sensors[self.sensor_ids[1]],fill=col,anchor=tk.NW)",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d' % int(height),\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n pyplot.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, -75), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', rotation=90)",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def _setup_labels(self):\n self._labels = self.get_labels()\n self._labels = self.get_predefined_labels() + list(self._labels)\n self._labels = sorted(self._labels)\n\n self._labels_2_index = {label.lower(): i for i, label in enumerate([self._unknown_label] + self._labels)}\n self._index_2_labels = {i: label for label, i in self._labels_2_index.items()}\n\n self._labels_dim = len(self._labels_2_index)\n return None",
"def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3), # 3 points vertical offset\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., height + 5,\n '%d' % int(height),\n ha='center', va='bottom',\n rotation=\"vertical\", fontsize=6)",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate(\"%.2f\"%(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom', fontsize=7)",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax5.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 2), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width() / 2., 1.0 * height,\n '%d' % int(height),\n ha='center', va='bottom')",
"def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.text(rect.get_x() + rect.get_width()/2., 1.0*height, '%d' % int(height), ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{:.1f}'.format(height/1e9),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')",
"def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2., 1*height,\n '%d' % int(height),\n ha='center', va='bottom')"
] | [
"0.69725925",
"0.6728825",
"0.66664743",
"0.66598105",
"0.65748835",
"0.65733373",
"0.6559446",
"0.65413666",
"0.6500541",
"0.6498696",
"0.64960986",
"0.64840364",
"0.647242",
"0.6454908",
"0.6419941",
"0.641354",
"0.64103264",
"0.63884693",
"0.63884693",
"0.63884693",
"0.63884693",
"0.6386727",
"0.6384259",
"0.6383265",
"0.6382345",
"0.6380208",
"0.63760984",
"0.63743913",
"0.6371895",
"0.63714385"
] | 0.72019595 | 0 |
Check if the layer with index L_i is a layer lower than v's dependent index; if true, L_i is not a valid layer for course v. | def has_dependent(self, L_i):
return self.dependentIndex==None or self.dependentIndex >= L_i | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_cl_constraints(vector):\n\tglobal __cl_constraints\n\n\tfor con in __cl_constraints:\n\t\t# a vector is not allowed to hold for both\n\t\tif vector[con[0]] == 1 and vector[con[1]] == 1:\n\t\t\treturn False\n\treturn True",
"def component_piece_lower(index):\n i, t, k = index\n xmin = component_para[i][\"fundata\"][\"min\"][k]\n return xmin * component_status_k[i, t, k] <= component_output_k[i, t, k]",
"def check_lin_independence(vectors):\n if len(vectors) == 1:\n return True\n M = np.zeros([len(vectors), len(vectors[0])], dtype=np.complex_)\n for i in range(len(vectors)):\n M[i] = vectors[i].T\n pl, u = lu(M, permute_l=True)\n if any([np.count_nonzero(M[i]) == 0 for i in range(len(vectors))]):\n return False #M must be full rank for linear independence\n return True",
"def check(degree, knot_vector, num_ctrlpts):\n try:\n if knot_vector is None or len(knot_vector) == 0:\n raise ValueError(\"Input knot vector cannot be empty\")\n except TypeError as e:\n print(\"An error occurred: {}\".format(e.args[-1]))\n raise TypeError(\"Knot vector must be a list or tuple\")\n except Exception:\n raise\n\n # Check the formula; m = p + n + 1\n if len(knot_vector) != degree + num_ctrlpts + 1:\n return False\n\n # Check ascending order\n prev_knot = knot_vector[0]\n for knot in knot_vector:\n if prev_knot > knot:\n return False\n prev_knot = knot\n\n return True",
"def test_v_bounds(self):\n n = 50\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n G.i_ext_init = np.linspace(-1.0, 1.0, n)\n\n class BoundsChecker(object):\n def __init__(self, target):\n self.target = target\n self.small = None\n self.large = None\n self.order = 1\n\n def evolve(self, t, dt):\n small = np.min(self.target.v)\n large = np.max(self.target.v)\n if self.small is None or self.small > small:\n self.small = small\n if self.large is None or self.large < large:\n self.large = large\n \n M = BoundsChecker(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n self.assertLess(M.large, G.v_th)",
"def is_loop_vertex(self, v):\n if not self.has_loop_vertices():\n return False\n return not self.L.is_zero(v[0]) and v[0] + self.fe.zech_logarithm_norm(0) == 2 * v[1]",
"def __isFarFromLevel(self, l):\n\n s = np.mean(self.df['high'] - self.df['low'])\n return np.sum([abs(l-x) < s for x in self.levels]) == 0",
"def check_ml_constraints(vector):\n\tglobal __ml_constraints\n\n\tfor con in __ml_constraints:\n\t\t# a vector must be either contained or missing for both instances *con* tuple\n\t\tif not (vector[con[0]] == vector[con[1]]):\n\t\t\treturn False\n\treturn True",
"def breaks_connectivity(level, index, axis=0):\n new_level = remove_index(level, index, axis=axis)\n return not is_solvable(new_level)",
"def is_valid_index(x, y, l_matrix):\n return x < l_matrix and y < l_matrix and x > -1 and y > -1",
"def layer_in_degree(self, layer_id):\n ...",
"def test_layer_ok(self):\n self.assertTrue(self.vector)",
"def lt_prev(self, index, val=2, o=0):\n return len(self.get_prev_word(index - o, orignal=True)) <= val",
"def is_linearly_independent_2x2(u, v):\n uv = get_uv(u, v)\n if uv[0][0] * uv[1][1] - uv[1][0] * uv[0][1] != 0:\n return True\n else:\n return False",
"def get_constraints(self, prev_layer):\n constraints = []\n if self.activation is not None:\n constraints += self.activation.get_constraints(self, prev_layer)\n else:\n # for linear activations\n current_constraints = []\n for channel_indx in range(self.n_in_channels):\n upper_bound, _ = prev_layer.get_bounds(channel_indx)\n critical_prob = prev_layer.get_critical_neurons(channel_indx)\n if critical_prob is None:\n keep_upper_bound = 0\n else:\n keep_upper_bound = cp.multiply(1 - critical_prob, upper_bound)\n\n current_constraints += [\n self.layer_input[channel_indx]\n == prev_layer.get_computation_layer(channel_indx) - keep_upper_bound\n ]\n constraints += self.create_constraint(\n f\"{self.name}_linear\", current_constraints\n )\n if prev_layer.compute_critical_neurons:\n constraints += self.create_constraint(\n f\"neuron_importance_bounds_{prev_layer.name}\",\n [prev_layer.neuron_importance >= 0, prev_layer.neuron_importance <= 1],\n )\n return constraints",
"def is_lower_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i+1, self.rows):\n if self[i, j] != 0:\n return False\n return True",
"def check_layers(self, layer_param, params, permitted_layers, mandatory):\n exception = None\n\n requested_layers = params.get(layer_param)\n if requested_layers:\n requested_layers = requested_layers.split(',')\n for layer in requested_layers:\n # allow only permitted layers\n if layer and not layer.startswith('EXTERNAL_WMS:') and layer not in permitted_layers:\n exception = {\n 'code': \"LayerNotDefined\",\n 'message': (\n 'Layer \"%s\" does not exist or is not permitted'\n % layer\n )\n }\n break\n elif mandatory:\n # mandatory layers param is missing or blank\n exception = {\n 'code': \"MissingParameterValue\",\n 'message': (\n '%s is mandatory for %s operation'\n % (layer_param, params.get('REQUEST'))\n )\n }\n\n return exception",
"def find_layer(z, params):\r\n N = len(params['d_list'])\r\n for i in range(N):\r\n if z <= params['layer_bottom_list'][i]:\r\n return i-1\r\n return N-1",
"def linear_constraint(u, Lin_lhs, Lin_rhs, tol = 0.05):\n return Lin_lhs.dot(u) <= Lin_rhs",
"def new_check_knot_vector(degree=0, knot_vector=(), control_points_size=0, tol=0.001): \r\n if not knot_vector:\r\n raise ValueError(\"Input knot vector cannot be empty\")\r\n\r\n # Check the formula; m = p + n + 1\r\n if len(knot_vector) is not degree + control_points_size + 1:\r\n return False\r\n\r\n # Set up a return value\r\n ret_val = True\r\n\r\n # Check ascending order\r\n if ret_val:\r\n prev_knot = knot_vector[0]\r\n for knot in knot_vector:\r\n if prev_knot > knot:\r\n ret_val = False\r\n break\r\n prev_knot = knot\r\n\r\n return ret_val",
"def zzX_valid_p(f):\n levels = []\n\n def rec_valid(g, l):\n if poly_univariate_p(g):\n levels.append(l)\n return zzx_strip(g) == g\n else:\n return zzX_strip(g) == g and \\\n all([ rec_valid(h, l+1) for h in g ])\n\n return rec_valid(f, 1) and len(set(levels)) == 1",
"def is_graph_34valent(self, G):\n return all( len(G[v])<=4 for v in G.vertices() )",
"def getIsLinearPolymer(self):\n dataDict = self.__dict__\n lin = 0\n nonlin = 0\n for ccv in self.chemCompVars:\n linking = ccv.linking \n if linking in ('start', 'middle', 'end'):\n lin = lin + 1\n elif linking != 'none':\n nonlin = nonlin + 1\n \n if lin:\n if nonlin:\n raise ApiError(\"ChemComp %s,%s has illegal combination of ChemCompVar linkings\" % (self.molType, self.ccpCode))\n else:\n result = True\n else:\n result = False\n return result",
"def check_solvability(self, state):\n\n inversion = 0\n for i in range(len(state)):\n for j in range(i, len(state)):\n if state[i] > state[j] != 0:\n inversion += 1\n\n return inversion % 2 == 0",
"def _isInside(self, v, select, progress):\n # Compute on non-masked sources :\n xyz = self.xyz\n N = xyz.shape[0]\n inside = np.ones((xyz.shape[0],), dtype=bool)\n v = v.reshape(v.shape[0] * 3, 3)\n\n # Loop over sources :\n progress.show()\n for k in range(N):\n # Get the euclidian distance :\n eucl = cdist(v, xyz[[k], :])\n # Get the closest vertex :\n eucl_argmin = eucl.argmin()\n # Get distance to zero :\n xyz_t0 = np.sqrt((xyz[k, :] ** 2).sum())\n v_t0 = np.sqrt((v[eucl_argmin, :] ** 2).sum())\n inside[k] = xyz_t0 <= v_t0\n progress.setValue(100 * k / N)\n self.data.mask = False\n self.data.mask = inside if select != 'inside' else np.invert(inside)\n # Finally update data sources and text :\n self.update()\n self.text_update()\n progress.hide()",
"def test_loc_techs_not_cost_var_constraint(self):\n m = build_model({}, \"simple_conversion,two_hours,investment_costs\")\n m.run(build_only=True)\n assert not hasattr(m._backend_model, \"cost_var\")",
"def lower_row_invariant(self, target_row, target_col):\r\n conditions = 0\r\n curent = self._grid[target_row][target_col] == 0\r\n if curent:\r\n conditions +=1\r\n else:\r\n print 'Tile ZERO is not at current position'\r\n return False\r\n\r\n last_row_ind = self._height - 1\r\n if target_row != last_row_ind:\r\n lower_row = target_row + 1\r\n for ind in range(len(self._grid[lower_row])):\r\n if self.current_position(lower_row, ind) != (lower_row, ind):\r\n print 'Some tile in the lower row does not in correct place' \r\n return False\r\n conditions += 1\r\n # print len(self._grid[target_row])\r\n # print self._grid[target_row]\r\n # print self._grid[target_row][target_col+1:]\r\n right_part = self._grid[target_row][target_col+1:]\r\n \r\n for tile in range(1,len(right_part)+1):\r\n # print right_part.index(self._grid[target_col+1])\r\n # print tile\r\n # print self.current_position(target_row, target_col + tile)\r\n # print (target_row, target_col+tile)\r\n if self.current_position(target_row, target_col+tile) != (target_row, target_col+tile):\r\n print 'Right part tile does not in correct place'\r\n return False\r\n conditions +=1\r\n if conditions == 3:\r\n print 'All conditions are correct!'\r\n return True",
"def component_status_constraint(index):\n i, t = index\n return component_status[i, t] == pulp.lpSum(component_status_k[i, t, RANGE])",
"def insert_layer_check(function):\n\n def wrapper(self, l, nth):\n \"\"\"Method to insert a Layer object into the n-th place in the current Circuit object. The \n first parameter must be a Layer object while the second parameter must be equal to or \n bigger than 0 and equal to or less than the actual size of the layers in the Circuit. The \n size of the Layer object must be equal to the size of the already used Layers in the \n Circuit.\n \n Arguments:\n l {layer} -- Layer to be inserted\n nth {int} -- Index where the layer to be inserted\n \n Raises:\n ValueError, TypeError\n \n Examples:\n >>> import qvantum\n >>>\n >>> l1 = qvantum.Layer([qvantum.Hadamard(), qvantum.Gate()])\n >>> l2 = qvantum.Layer([qvantum.CNOT(1, 0)])\n >>> c = qvantum.Circuit([l1, l2])\n >>> c.get_layer_list()\n OrderedDict([(0, <qvantum.layer.Layer at 0x27b47de9898>), (1, <qvantum.layer.Layer at 0x27b47de9550>)])\n >>> l3 = qvantum.Layer([qvantum.Swap()])\n >>> c.insert_layer(l3, 1)\n >>> c.get_layer_list()\n OrderedDict([(0, <qvantum.layer.Layer at 0x27b47de9898>), (1, <qvantum.layer.Layer at 0x27b47e5dc50>), (2, <qvantum.layer.Layer at 0x27b47de9550>)])\n \"\"\"\n\n if isinstance(l, layer.Layer) and isinstance(nth, int):\n return function(self, l, nth)\n \n else:\n raise TypeError('Invalid input! Argument must be a pair of layer object and integer.')\n \n return wrapper",
"def _validateVertex(self, v):\n if v < 0 or v >= self._V:\n raise Exception(\"vertex {} is not between 0 and {}\".format(v, (self._V-1)))"
] | [
"0.55143213",
"0.5481485",
"0.5404945",
"0.53278077",
"0.5310819",
"0.53015536",
"0.5235135",
"0.521915",
"0.5146539",
"0.51066667",
"0.509961",
"0.5069351",
"0.50506",
"0.5031631",
"0.5015915",
"0.50061643",
"0.4986805",
"0.49610192",
"0.49486637",
"0.49297816",
"0.49165577",
"0.49152943",
"0.4896058",
"0.48750812",
"0.48711795",
"0.4868765",
"0.4849762",
"0.48368677",
"0.48280868",
"0.48179755"
] | 0.63930285 | 0 |
Tag that the course with id 'cid' satisfies v's prereq OR set with index Bi | def tag_prereq(self, Bi, cid):
if Bi >= len(self.prereq) or cid not in self.prereq[Bi]:
raise Exception(
"Course {cid} not exists in OR set with index {Bi}".format(cid=cid, Bi=Bi))
self.prereqBool[Bi] = cid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_matching_course_indexes(self, query):\r\n return self.course_index.find(query)",
"def T(v,securite):\n to_return = {} #renvoie le dictionnaire {indice du contact (0 -> direct / sinon -> plus ou moins direct) : set({disque})} \n Cv = set(C(v,securite))\n Tv = set(Cv)\n i=0\n xv,yv=l[v][0],l[v][1]\n while Cv != set() and i<5:\n to_return[str(i)]=Cv\n new_Cv = set()\n for j in Cv:\n xj,yj=l[j][0],l[j][1]\n #si j est devant v, on ne le copte pas\n if sqrt((xj-xt)**2+(yj-yt)**2)<sqrt((xv-xt)**2+(yv-yt)**2):\n continue\n new_Cv= new_Cv.__or__(C(j,securite).__sub__(Tv.__or__(set(j).__or__({v}))))\n Tv = Tv.__or__(new_Cv)\n Cv = new_Cv\n i+=1\n return to_return",
"def search_courses(courses, query):\n return courses.annotate(\n course_id=Concat('subject', Value(' '), 'course_number', Value(' '),\n 'section', output_field=CharField()),\n ).annotate(rank=Case(\n When(\n course_id__istartswith=query,\n then=1\n ),\n When(\n title__icontains=query,\n then=2\n ),\n When(\n instructor__icontains=query,\n then=3\n ),\n default=0,\n output_field=IntegerField()\n )).filter(rank__gt=0)",
"def get_candidates(self,query_index):\r\n # you will need to use self.hashed_A for this method\r\n bucket1 = self.hashed_A\r\n bucket = bucket1.filter(lambda z: (z[2] != query_index[2]) and (any(set(z[2]) & set(query_index[2]))))\r\n #print(bucket)\r\n return bucket\r\n raise NotImplementedError",
"def pl_resolve(ci, cj):\n clauses = []\n for di in disjuncts(ci):\n for dj in disjuncts(cj):\n if di == ~dj or ~di == dj:\n dnew = unique(removeall(di, disjuncts(ci)) +\n removeall(dj, disjuncts(cj)))\n clauses.append(associate('|', dnew))\n return clauses",
"def __init__(self, rf: bool=False, atts: Sctids=None, eq: bool=True, ecv: Sctids=None, query=None, _mt_instance=None):\n Set.__init__(self, Quads)\n _Instance.__init__(self, RF2_Quad)\n RF2_Substrate_Common.__init__(self)\n self._val = self\n self._type = Quads\n if _mt_instance:\n self._query = \"SELECT id AS rid, sourceId AS id, typeId, destinationId, gid FROM %s WHERE 0\" % \\\n (RelationshipDB.fname() + \"_ext\")\n self.rf = False\n self._len = 0\n else:\n self._len = None # number of elements\n if query:\n self._query = query\n else:\n self._query = \"SELECT id AS rid, sourceId\" + (\" AS id,\" if not rf else \",\")\n self._query += \" typeId, destinationId\" + (\" AS id,\" if rf else \",\")\n self._query += \" gid FROM %s\" % RelationshipDB.fname() + '_ext'\n self._query += \" WHERE \"\n if atts is not None:\n self._query += ((\"typeId IN (%s)\" % atts.as_sql()) if eq else\n (\"typeId NOT IN (%s)\" % atts.as_sql())) + \" AND \"\n if ecv is not None:\n self._query += ((\"sourceId IN (%s)\" % ecv.as_sql()) if rf else\n (\"destinationId IN (%s)\" % ecv.as_sql())) + \" AND \"\n self._query += \"active=1 AND locked=0\"\n self.rf = rf",
"def get_at_least_one_per_instance_cstr_for_video(id_tracklet, tbound_track,\n groupby, gt, tmp_cstr, extra_info):\n key_tracklet = 'tracklet_{}'.format(id_tracklet)\n\n # Check if it interacts with GT.\n has_constraint = False\n for idx_gt, i_gt in enumerate(gt):\n if is_inside(tbound_track, i_gt['tbound']):\n id_action = i_gt['label'] - 1\n if key_tracklet not in tmp_cstr:\n tmp_cstr[key_tracklet] = set()\n\n # this is a candidate action for this tracklet\n tmp_cstr[key_tracklet].add(id_action)\n has_constraint = True\n\n if id_action not in tmp_cstr:\n tmp_cstr[id_action] = {}\n\n if idx_gt not in tmp_cstr[id_action]:\n tmp_cstr[id_action][idx_gt] = []\n\n # this tracklet is a candidate to match this gt (at least one tracklet will match the whole GT)\n tmp_cstr[id_action][idx_gt].append(id_tracklet)\n\n return has_constraint, key_tracklet",
"def contained_in_order(cls, order, course_id):\r\n return course_id in [item.paidcourseregistration.course_id\r\n for item in order.orderitem_set.all().select_subclasses(\"paidcourseregistration\")]",
"def create_search_key(course, user):\n # For regex query we need main part of the course_id either course_id complex (with section part) or not\n main_course = course.split(':')[0]\n course_escape = main_course.replace('+', '\\\\+') # '+' char should be escaped with '\\'\n return {'$regex': '{course_id}.+{user_id}'.format(course_id=course_escape, user_id=user)}",
"def search_courses(self,terms):\n\n return self.course_search.search_for(terms)",
"def test_combine_multiple_or(self):\n inv_search = 'author:\"ellis, j*\" and (title:report or keyword:\"cross section\")'\n spi_search = 'find a j ellis and (t report or k \"cross section\")'\n self._compare_searches(inv_search, spi_search)",
"def add_searcher_constraints(md, g, my_vars: dict, start: list, vertices_t: dict, deadline: int):\n # get variables\n X = get_var(my_vars, 'x')\n Y = get_var(my_vars, 'y')\n\n S, m = ext.get_set_searchers(start)\n Tau_ext = ext.get_set_time_u_0(deadline)\n\n # legality of the paths, for all s = {1,...m}\n for s in S:\n # 0, 1, 2... T\n for t in Tau_ext:\n v_t = vertices_t.get((s, t))\n # each searcher can only be at one place at each time (including the start vertex), Eq. (1, 7)\n if t == 0:\n md.addConstr(X[s, v_t[0], 0] == 1)\n\n for u in v_t:\n my_next_v = cm.get_next_vertices(g, s, u, t, vertices_t, Tau_ext)\n my_previous_v = cm.get_previous_vertices(g, s, u, t, vertices_t)\n if my_next_v is not None:\n # (Eq. 9) searcher can only move to: i in delta_prime(v) AND V^tau(t+1)\n # sum == 1 if searcher is at u, sum == zero if searcher is not at u (depends on X[s, u, t])\n md.addConstr(quicksum(Y[s, u, i, t] for i in my_next_v) == X[s, u, t])\n\n if my_previous_v is not None:\n # (Eq. 8) searcher can only move to v from j in delta_prime(v) AND V^tau(t-1)\n md.addConstr(quicksum(Y[s, i, u, t - 1] for i in my_previous_v) == X[s, u, t])",
"def index_valid_star_entries(star_catalog,target,tol,log,valid_cat=False):\n\n idx1 = np.where(star_catalog['cal_ref_mag_ip'] > 0.0)[0]\n idx2 = np.where(star_catalog['cal_ref_mag_ip'] <= 22.0)[0]\n idx3 = np.where(star_catalog['cal_ref_mag_rp'] > 0.0)[0]\n idx4 = np.where(star_catalog['cal_ref_mag_rp'] <= 22.0)[0]\n idx5 = np.where(star_catalog['cal_ref_mag_gp'] > 0.0)[0]\n idx6 = np.where(star_catalog['cal_ref_mag_gp'] <= 22.0)[0]\n\n det_idx = set(idx1).intersection(set(idx2))\n det_idx = det_idx.intersection(set(idx3))\n det_idx = det_idx.intersection(set(idx4))\n det_idx = det_idx.intersection(set(idx5))\n det_idx = det_idx.intersection(set(idx6))\n\n log.info('Identified '+str(len(det_idx))+\\\n ' detected stars with valid measurements in gri')\n\n if valid_cat == False:\n return list(det_idx), None, None\n\n idx4 = np.where(star_catalog['imag'] > 0.0)[0]\n idx5 = np.where(star_catalog['rmag'] > 0.0)[0]\n idx6 = np.where(star_catalog['gmag'] > 0.0)[0]\n\n cat_idx = det_idx.intersection(set(idx4))\n cat_idx = cat_idx.intersection(set(idx5))\n cat_idx = list(cat_idx.intersection(set(idx6)))\n det_idx = list(det_idx)\n\n log.info('Identified '+str(len(cat_idx))+\\\n ' detected stars with valid catalogue entries in gri')\n\n close_idx = find_stars_close_to_target(star_catalog, target, tol, log)\n\n close_cat_idx = list(set(cat_idx).intersection(set(close_idx)))\n\n log.info('Identified '+str(len(close_cat_idx))+\\\n ' stars close to the target with valid catalogue entries in gri')\n\n return det_idx, cat_idx, close_cat_idx",
"def _replace_or_append_index(altered_index):\n for index, existing in enumerate(course_indexes):\n if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):\n course_indexes[index] = altered_index\n return\n course_indexes.append(altered_index)",
"def count_candidates(C, transaction):\n for candidate in C:\n if all(transaction[elem] == 1 for elem in candidate):\n candidate.count+=1",
"def generate_matching_courses(self,goal):\n\n searchstring = self.preprocess(goal.goal)\n wordlist = nltk.word_tokenize(searchstring)\n relevant_words = []\n mystopwords = stopwords.words(\"english\") + stopwords.words(\"german\")\n for word in wordlist:\n if word not in mystopwords:\n relevant_words.append(word)\n # TODO: Activate in production\n # TODO: For testing find workaround to make local courses available for local test systems\n #user_origin = goal.user.origin\n # TODO: The following two lines have to be exchanged to filter courses according to origin\n #courses = models.Course.objects.filter(origin=user_origin)\n courses = models.Course.objects.all()\n\n matches = {}\n for course in courses:\n if course == None:\n print(\"Course is None\")\n if course.TF_IDF_scores == {}:\n continue\n score = 0.0\n for word in relevant_words:\n if word in course.TF_IDF_scores:\n if word in course.TF_IDF_scores:\n score += course.TF_IDF_scores[word]\n if score > 0.0:\n if score in matches.keys():\n matches[score].append(course)\n else:\n matches[score] = []\n matches[score].append(course)\n scores = list(matches.keys())\n scores.sort()\n\n bestcourses = []\n\n i = 0\n for score in scores:\n for course in matches[score]:\n bestcourses.append(course)\n i += 1\n if i >= COURSES_TO_DISPLAY:\n break\n\n if len(bestcourses) == 0:\n a = models.Activity.objects.get_or_create(\n title=\"Keine passenden Lehrveranstaltungen\",\n description=\"Aktuell gibt es zu Ihrem Interesse keine passenden Lehrveranstaltungen. \" \\\n \"Siddata wird regelmäßig nach neuen passenden Kursen suchen und diese ggf. hier anzeigen. \",\n type=\"todo\",\n goal=goal,\n image=\"idea.png\",\n status=\"new\"\n )[0]\n a.save()\n i = 0\n for course in bestcourses:\n\n a = models.Activity.objects.get_or_create(\n title=course.title,\n description=course.description,\n type=\"course\",\n goal=goal,\n image=\"%s.png\" %random.choice([\"world\",\"idea\",\"library\",\"cat\",\"brainbulb\",\"braincloud\",\"friends\"]),\n course=course,\n status=\"new\"\n )[0]\n a.save()\n i += 1\n if i == COURSE_MAX:\n break",
"def cmu_to_verba(mapping, cid):\n\n BASE = \"http://cmu.verbacompare.com/\"\n TERM = 6668 # The \"term\" (semester ID?), currently S15\n cid = parse_cid(cid)\n \n # Get the verbacompare ID\n try:\n sections = mapping['depts'][cid[0]]['courses'][\"\".join(cid)]\n except KeyError:\n return False\n \n return sections",
"def _add_indexes_from_active_records(\n self,\n course_indexes,\n branch=None,\n search_targets=None,\n org_target=None,\n course_keys=None\n ):\n def _replace_or_append_index(altered_index):\n \"\"\"\n If the index is already in indexes, replace it. Otherwise, append it.\n \"\"\"\n for index, existing in enumerate(course_indexes):\n if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):\n course_indexes[index] = altered_index\n return\n course_indexes.append(altered_index)\n\n for _, record in self._active_records:\n if branch and branch not in record.index.get('versions', {}):\n continue\n\n if search_targets:\n if any(\n 'search_targets' not in record.index or\n field not in record.index['search_targets'] or\n record.index['search_targets'][field] != value\n for field, value in search_targets.items()\n ):\n continue\n # if we've specified a filter by org,\n # make sure we've honored that filter when\n # integrating in-transit records\n if org_target:\n if record.index['org'] != org_target:\n continue\n\n if course_keys:\n index_exists_in_active_records = False\n for course_key in course_keys:\n if all(record.index[key_attr] == getattr(course_key, key_attr)\n for key_attr in ['org', 'course', 'run']):\n index_exists_in_active_records = True\n break\n if not index_exists_in_active_records:\n continue\n\n if not hasattr(course_indexes, 'append'): # Just in time conversion to list from cursor\n course_indexes = list(course_indexes)\n\n _replace_or_append_index(record.index)\n\n return course_indexes",
"def test_filter_Q_or (self) :\n\n o = self.from_model.all()[:2]\n o_n = self.from_indexed.filter(\n Q(pk=o[0].pk) | Q(pk=o[1].pk)\n )\n\n self.assertEqual(\n set([i.pk for i in o]),\n set([i.pk for i in o_n])\n )",
"def cerca(self,fitxa, cmpcerc):\n\t\tidcerca = basedades.SQL(self.bd)\n\t\tquery = []\n\t\tqry = \"\"\n\t\tprint \"Cercare....\",fitxa, cmpcerc\n\t\t\n\t\tfor s in cmpcerc.keys():\n\t\t\t\n\t\t\tif s==\"id\":\n\t\t\t\tquery.append( \" %s=%s \" % (s.upper(),cmpcerc[s]))\n\t\t\telse:\n\t\t\t\tquery.append( \" %s LIKE '%s' \" % (s.upper(),cmpcerc[s]))\n\t\t\n\t\tqry = \" and \".join(query)\n\t\t\n\t\tsent = \"SELECT ID FROM %s WHERE %s\" % (self.taules[fitxa], qry)\n\t\t\n\t\tprint \"Cercam...\", sent\n\t\tidcerca.executa(sent)\n\t\t\n\t\tprint idcerca.num_reg\n\t\t\n\t\tif idcerca.num_reg !=0:\n\t\t\tidcerca.primer_reg()\n\t\t\tret = idcerca.ret_reg()['ID']\n\t\t\tpos = self.fitxaSQL[fitxa].troba_reg(ret)\n\t\t\tif pos != -1: # trobat\n\t\t\t\tself.anam_fitxa(fitxa,pos)\n\t\t\t\treturn pos\n\t\t\treturn None\n\t\telse:\n\t\t\treturn None",
"def pair_has_contradiction(graph, u, v):\n relations = get_all_relations(graph, u, v)\n return relation_set_has_contradictions(relations)",
"def typesense_index_condition(con, client=None):\n if not client:\n client = typesense_client()\n\n condition_document = {\n 'id': str(con.pk),\n 'created': con.created.timestamp(),\n 'referral_id': con.referral.pk if con.referral else None,\n 'proposed_condition': con.proposed_condition if con.proposed_condition else '',\n 'approved_condition': con.condition if con.condition else '',\n }\n client.collections['conditions'].documents.upsert(condition_document)",
"def _filter_cid(self, cids):\n return [cid for cid in cids if cid is not None]",
"def boolean_search(query_word,excluded_word, inverted_index, price_range, prices):\n # YOUR CODE HERE\n M = [] #merged list\n try:\n A = [doc_count[0] for doc_count in inverted_index[query_word.lower()]] #query\n B = [doc_count[0] for doc_count in inverted_index[excluded_word.lower()]] #excluded\n except:\n return M\n \n A_pnt = 0\n B_pnt = 0\n A_end = len(A)\n B_end = len(B)\n while A_pnt < A_end and B_pnt < B_end:\n if A[A_pnt] == B[B_pnt]:\n A_pnt += 1\n B_pnt += 1\n else:\n if A[A_pnt] < B[B_pnt]:\n if prices[int(A[A_pnt])] < price_range:\n M.append(int(A[A_pnt]))\n A_pnt += 1\n else:\n B_pnt += 1\n \n while A_pnt < A_end:\n if prices[int(A[A_pnt])] < price_range:\n M.append(int(A[A_pnt]))\n A_pnt += 1\n return M",
"def get_cid(self, cid_index, mission):\n\t\tfor cid, mission2 in cid_index.items():\n\t\t\tif mission2 is mission:\n\t\t\t\treturn cid",
"def test_adding_criteria_to_segments(self):\n pass",
"def get_course_id_terms(self):\n return # osid.search.terms.IdTerm",
"def inconst(p,c):\n for q in c:\n if dist(p,q) <= 3:\n return True\n return False",
"def id_used(self, cid):\n q = \"\"\"\n Select ?x where {\n <%s/n%s> ?x ?y .\n } limit 1\"\"\" % (self._namespace, cid)\n self.setQuery(q)\n try:\n rval = self.query()\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n return True\n try:\n g = rval.convert()\n if len(g['results']['bindings'])<1:\n return False\n except:\n return True\n return True",
"def filter_by_comid(record, filterset=[]):\n return record['properties']['comid'] in filterset"
] | [
"0.51670814",
"0.5086646",
"0.49947807",
"0.48174822",
"0.47873774",
"0.47358462",
"0.46620637",
"0.46118984",
"0.45816174",
"0.4563327",
"0.45620036",
"0.45105565",
"0.44940156",
"0.4493605",
"0.44824594",
"0.44758672",
"0.44754773",
"0.44749555",
"0.44587547",
"0.44321156",
"0.4430347",
"0.44136092",
"0.44095063",
"0.44038984",
"0.43964267",
"0.43876868",
"0.43714824",
"0.43704128",
"0.43693268",
"0.4355841"
] | 0.6560112 | 0 |
List all code readings, or create a new reading. | def reading_list(request):
if request.method == 'GET':
readings = Reading.objects.all()
serializer = ReadingSerializer(readings, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = ReadingSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_reads(cls) -> list:\n return [cls.FWREAD, cls.RVREAD];",
"def read_all(self):\r\n pass",
"def get_readings(self):\n\n return Readings(\n arduino=self.inputs.get_arduino_reading(),\n tapo=self.inputs.get_tapo_plug_reading(),\n garden=self.inputs.get_garden_co2_reading(),\n )",
"def add_reads(self, new_reads): \n if self.sampling:\n self.convert_to_list()\n self.reads.extend(new_reads)",
"def read_all(self, *args, **kwargs):\n pass",
"def read(self):\n self._read(True)\n return self._readings",
"def add_read(self, new_read): \n if self.sampling:\n self.convert_to_list()\n self.reads.append(new_read)\n self.total+=1",
"def get_readers():\n return all_readers",
"def get_books_by_read_value(self, read):\n try:\n cur = self._db.cursor()\n cur.execute('SELECT rowid, * FROM books WHERE read = ?', (read,))\n return self._cursor_to_booklist(cur)\n\n except sqlite3.Error as e:\n raise BookError(f'Error getting books with read = {read}') from e",
"def reading(reading_id):\n\n reading = Reading.query.get(reading_id)\n\n if reading:\n return jsonify({'status': 'success',\n 'reading_id': reading.reading_id,\n 'reading_name': reading.reading_name,\n 'reading_card:': reading.reading_card})\n else:\n return jsonify({'status': 'error',\n 'message': 'No reading found with that ID'})",
"def store_reading(reading):\n\n global readings\n\n if len(readings) > dump_size:\n dumped_nr = dump()\n readings = readings[dumped_nr[1] - 1:]\n\n readings.append(reading)",
"def reading_detail(request, pk):\n try:\n reading = Reading.objects.get(pk=pk)\n except Reading.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = ReadingSerializer(reading)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = ReadingSerializer(reading, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n reading.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def mark_as_read(entry):\n if has_been_read(entry):\n return\n title = entry.title\n date = date_parser.parse(entry.updated)\n READLIST.append(str(date.timestamp()) + '|' + title)\n save_datfile()",
"def main():\n\n open_read_write()",
"def read():\n print(command(\"R\"))",
"def read():\n # TODO",
"def initialize_file_readers():\n savefile_path = os.path.join(os.getcwd()+ \"/../data/\", SAVE_FILE)\n file_reader_list = []\n for file in os.listdir(savefile_path):\n file_reader = open(os.path.join(savefile_path,file), \"r\")\n file_reader_list.append({\"file_reader\": file_reader, \"last_read\": { \"word\": \"\", \"doc_score_list\": []}})\n return file_reader_list",
"def get_reader_funcs():\n return READERS",
"def generate_reader(n):\n counter = 1\n for i in range(n):\n name = generate_reader_name()\n if not name in readers:\n readers[name] = f'Reader/{counter}'\n counter += 1",
"def read(self):\n if self.status == 'read':\n return\n self.status = 'read'\n self.emit('read')\n self.emit('modified')",
"def get_all_books():\n for n, book in enumerate(BOOKS, 1):\n state = 'YES' if book['read'] else 'NO'\n print(\n f\"{[n]} - {book['name'].capitalize()}, by {book['author'].capitalize()} - Read: {state}\"\n )",
"def register_read(self):\n self._reads_since_check += 1",
"def change_to_read(name):\n for book in BOOKS:\n if book['name'] == name:\n book['read'] = True\n else:\n print('No book with this name!')",
"def get_all(self):\n return ReadingSet(self._set)",
"def read():\n global counter\n\n try:\n with open(BOOKS_FILE_NAME) as f:\n book_json = json.load(f)\n book_json_manipulation(book_json)\n except FileNotFoundError:\n # First time program has run. Assume no books.\n pass\n\n try:\n with open(COUNTER_FILE_NAME) as f:\n try:\n counter = int(f.read())\n except:\n counter = 0\n except:\n counter = len(book_list)",
"def _read_all(self):\n return self._connector.read_all()",
"def get_readings(readings_file):\n with open(readings_file, 'r') as f:\n readings = json.load(f)\n return readings",
"def mark_all_read(self):\n response = self._connection.session.post(self.url + \"/mark_all_as_read\")\n return self._raise_or_return_json(response)",
"def read(self, *args, **kwargs):\n pass",
"def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)"
] | [
"0.63985664",
"0.5793882",
"0.5761688",
"0.5644271",
"0.5628969",
"0.54702663",
"0.54576665",
"0.53569704",
"0.5144443",
"0.51227206",
"0.5087292",
"0.5066277",
"0.50593466",
"0.50306433",
"0.5030192",
"0.5014886",
"0.50122917",
"0.50112325",
"0.49763814",
"0.49056098",
"0.49052346",
"0.48937342",
"0.48744652",
"0.48661953",
"0.48660985",
"0.48657295",
"0.48532924",
"0.48418656",
"0.48384786",
"0.47345698"
] | 0.6159648 | 1 |
Test exceptions at pipeline level | def test_pipeline_error(time):
# test fit
df = _test_df()
def _func(df):
return df["num1"] == df["num3"]
pipeline = PdPipeline([ColByFrameFunc("Equality", _func), ColDrop("B")])
with pytest.raises(PipelineApplicationError):
pipeline.fit(df, verbose=True, time=time)
# test apply
df = _test_df()
with pytest.raises(PipelineApplicationError):
pipeline.apply(df, verbose=True, time=time)
# test transform
df = _test_df()
with pytest.raises(PipelineApplicationError):
pipeline.transform(df, verbose=True, time=time)
# test fit_transform
df = _test_df()
with pytest.raises(PipelineApplicationError):
pipeline.fit_transform(df, verbose=True, time=time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invoke_processor_errors():\n\n def processor(app, documents):\n raise ValueError(\"something bad happened\")\n yield\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n stream = testapp.invoke(\"test\")\n\n with pytest.raises(ValueError, match=r\"^something bad happened$\"):\n next(stream)\n\n with pytest.raises(StopIteration):\n next(stream)",
"def test_execute_pipeline_two(self):\n task_list = [Test()]\n with self.assertRaises(AttributeError):\n execute_pipeline(task_list)",
"def test_cond_with_uncaught_error(env):\n def explode(env, delay):\n yield env.timeout(delay)\n raise ValueError(f'Onoes, failed after {delay}!')\n\n def process(env):\n yield env.timeout(1) | env.process(explode(env, 2))\n\n env.process(process(env))\n try:\n env.run()\n assert False, 'There should have been an exception.'\n except ValueError:\n pass\n assert env.now == 2",
"def test_switch_fail(self):\n\n # Assert that a RelaxNoPipeError occurs when the pipe type is invalid.\n self.assertRaises(RelaxNoPipeError, pipes.switch, 'x')",
"def unexpectedException(self):",
"def unexpected_error(self, exception):",
"def test_invoke_pipe_not_found():\n\n testapp = holocron.Application()\n\n with pytest.raises(ValueError) as excinfo:\n next(testapp.invoke(\"test\"))\n\n assert str(excinfo.value) == \"no such pipe: 'test'\"",
"def test_execute_pipeline_three(self):\n task_list = [Task()]\n with self.assertRaises(NotImplementedError):\n execute_pipeline(task_list)",
"def test_fail_pipeline_stage():\n fail_stage = FailStage()\n df = _test_df()\n with pytest.raises(FailedPreconditionError):\n fail_stage.apply(df, verbose=True)",
"def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):\n\n DATA = {'name': 'Al Gore', 'birthplace': 'Washington, D.C.'}\n on_record_error = stage_attributes['on_record_error']\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n dev_raw_data_source.stop_after_first_batch = True\n\n field_replacer = pipeline_builder.add_stage('Field Replacer')\n field_replacer.set_attributes(replacement_rules=[{'setToNull': False, 'fields': '/age'}],\n field_does_not_exist='TO_ERROR',\n **stage_attributes)\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_replacer >> wiretap.destination\n\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n if on_record_error == 'DISCARD':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'STOP_PIPELINE':\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_status('RUN_ERROR')\n\n assert False, 'An exception should have been thrown'\n except RunError:\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'TO_ERROR':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n record = wiretap.error_records[0]\n assert record.field == DATA and not wiretap.output_records",
"def test_are_chained_exceptions_printed(self):\n\n io = BufferedSystemIO()\n\n try:\n try:\n raise IndexError('Invalid index 5')\n except IndexError as index_exc:\n raise Exception('There was an error with index') from index_exc\n\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('(Caused by) IndexError:', io.get_value())\n self.assertIn('Exception:', io.get_value())\n self.assertIn('There was an error with index', io.get_value())",
"def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)",
"def testHandleExceptionException(self):\n class TestError(Exception):\n \"\"\"Unique test exception\"\"\"\n\n class BadStage(generic_stages.BuilderStage):\n \"\"\"Stage that throws an exception when PerformStage is called.\"\"\"\n\n handled_exceptions = []\n\n def PerformStage(self):\n raise TestError('first fail')\n\n def _HandleStageException(self, exc_info):\n self.handled_exceptions.append(str(exc_info[1]))\n raise TestError('nested')\n\n stage = self._ConstructStageWithExpectations(BadStage)\n results_lib.Results.Clear()\n self.assertRaises(failures_lib.StepFailure, self._RunCapture, stage)\n\n # Verify the results tracked the original exception.\n results = results_lib.Results.Get()[0]\n self.assertTrue(isinstance(results.result, TestError))\n self.assertEqual(str(results.result), 'first fail')\n\n self.assertEqual(stage.handled_exceptions, ['first fail'])\n\n # Verify the stage is still marked as failed in cidb.\n self.mock_cidb.StartBuildStage.assert_called_once_with(\n DEFAULT_BUILD_STAGE_ID)\n self.mock_cidb.FinishBuildStage.assert_called_once_with(\n DEFAULT_BUILD_STAGE_ID,\n constants.BUILDER_STATUS_FAILED)",
"def test_halt_exceptionality(get_pipe_manager, raise_error):\n pm = get_pipe_manager(name=\"halt-error\")\n if raise_error is None:\n # Default is exceptional.\n with pytest.raises(PipelineHalt):\n pm.halt()\n elif raise_error:\n with pytest.raises(PipelineHalt):\n pm.halt(raise_error=True)\n else:\n pm.halt(raise_error=False)",
"def test_exception(self):\n\n sink = TObserver(immediate_continue=0)\n self.obs.observe(init_observer_info(sink))\n ack1 = self.left.on_next_list([select_completed])\n\n self.right.on_error(self.exception)\n\n self.assertIsInstance(self.measure_state(self.obs), ControlledZipStates.Stopped)\n self.assertEqual(self.exception, sink.exception)",
"def test_build_pipeline_two(self):\n args = \"Test_APP ONE TWO ABC\".split(\" \")\n with self.assertRaises(ValueError):\n build_pipeline(args, False)",
"def test_resources_exception(self):\n with self.assertRaises(ProcessorConfigError) as context:\n self.pl.resource.remove(\"onto_specs_path\")\n self.pl.resource.remove(\"onto_specs_dict\")\n self.pl.add(\n self._stave_processor,\n config={\"port\": self._port, \"server_thread_daemon\": True},\n )\n self.pl.run(self._dataset_dir)",
"def handle_error_cleaning_pipeline(raw_data, endpoint, endpoint_params):\n\n try:\n result = clean_pipeline(raw_data, endpoint, endpoint_params)\n except Exception as e:\n print(f'Error! data doesn\\'t exist while cleansing proccess running!')\n print(e)\n sys.exit(1)\n\n return result",
"def test_build_pipeline_eight(self):\n args = \"Test_APP ONE SIX A B\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)",
"def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')",
"def test_halts_if_halt_on_next(self, get_pipe_manager, raise_error):\n pm = get_pipe_manager(name=\"TestPM\")\n pm.halt_on_next = True\n if raise_error:\n with pytest.raises(PipelineHalt):\n pm.timestamp(\"testing\")\n else:\n pm.timestamp(\"testing\", raise_error=False)\n assert pm.halted",
"def test_projecttype_exception(self):\n self.pl.add(\n self._stave_processor,\n config={\n \"port\": self._port,\n \"project_type\": \"multi_pack\",\n \"server_thread_daemon\": True,\n },\n )\n with self.assertRaises(ProcessorConfigError) as context:\n self.pl.run(self._dataset_dir)",
"def test_test(self):\n\n # The following should do nothing as the pipes exist.\n pipes.check_pipe()\n pipes.check_pipe('orig')\n pipes.check_pipe('empty')\n\n # Assert that a RelaxNoPipeError occurs when the pipe doesn't exist.\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'x')\n\n # Reset relax.\n reset()\n\n # Now none of the following pipes exist, hence errors should be thrown.\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe)\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'orig')\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'empty')",
"def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)",
"def test_failure(self):\n\n @sync_performer\n def fail(dispatcher, intent):\n raise intent\n\n dispatcher = lambda _: fail\n self.assertThat(\n sync_perform(\n dispatcher, Effect(ValueError(\"oh dear\")).on(error=lambda e: e)\n ),\n MatchesException(ValueError(\"oh dear\")),\n )",
"def test_log_error(log_error, capsys, test_df):\n\n err_msg = \"This is a test Exception\"\n\n @log_step(log_error=log_error)\n def do_nothing(df, *args, **kwargs):\n raise RuntimeError(err_msg)\n\n err_reraised = False\n try:\n test_df.pipe(do_nothing)\n except RuntimeError:\n err_reraised = True\n\n captured = capsys.readouterr()\n\n assert err_reraised\n assert \"FAILED\" in captured.out\n assert (f\"FAILED with error: {err_msg}\" in captured.out) == log_error",
"async def test_fetch_filtered_dataset_call_exception(self):\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = ConnectionException()\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)",
"def test_parser_exception(self):\n # file contains 1 invalid sample values, 17 PH records total\n self.create_sample_data_set_dir('node59p1_bad.dat', TELEM_DIR, \"node59p1.dat\")\n\n self.assert_initialize()\n\n self.event_subscribers.clear_events()\n result = self.get_samples(DataParticleType.CONTROL, 1)\n result = self.get_samples(DataParticleType.SAMPLE, 16, 30)\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n # Verify an event was raised and we are in our retry state\n self.assert_event_received(ResourceAgentErrorEvent, 10)\n self.assert_state_change(ResourceAgentState.STREAMING, 10)",
"def test_exception_in_all_worker_process(self):\n pool = ProcessPool(5)\n pool.start(ExceptionGeneratingWorker_5)\n with self.assertRaises(RuntimeError):\n for _ in range(10000):\n pool.ventilate(\"Datanum\")\n time.sleep(.1)",
"def test_is_information_written_through_stderr_methods(self):\n\n io = BufferedSystemIO()\n io._stdout = lambda *args, **kwargs: None\n\n try:\n raise IndexError('Invalid index 5')\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('IndexError', io.get_value())\n self.assertIn('Invalid index 5', io.get_value())\n self.assertIn('Retry with \"-rl debug\" switch before failed task to see stacktrace', io.get_value())"
] | [
"0.696629",
"0.6821481",
"0.66784084",
"0.6626683",
"0.6564704",
"0.6535647",
"0.6510281",
"0.64824855",
"0.6480367",
"0.6412341",
"0.6411471",
"0.6390847",
"0.63784766",
"0.6375952",
"0.63289636",
"0.6295979",
"0.6271767",
"0.62533396",
"0.62444544",
"0.62271756",
"0.6218167",
"0.6196122",
"0.6189809",
"0.6156758",
"0.6149558",
"0.6140627",
"0.6130299",
"0.61086774",
"0.6106467",
"0.6094209"
] | 0.7224575 | 0 |
Prints a separator to standard output. Optional length parameter defines how many characters the separator will consist of. Comes with an extra newline for increased readability. | def printSeparator(length=40):
logging.info(formatText("-" * length + "\n", True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_section_delimiter(length=DEFAULT_LENGTH) -> None:\n print('='*length)",
"def _print_separator():\n print(\n \"───── ──────────────── ──────────────────────────────────────────────────────────────────────────────── ──────── ───────── ───── ──────── ──── ──── ──── ──── ──── ──── ──── ──── ──── ────\"\n )",
"def printSeparator(count: int):\n if count == 0 or count == 3 or count == 6:\n print(\"|\", end='')\n return\n\n if count == 1 or count == 4 or count == 7:\n print(\"|\", end='')\n return\n\n if count == 2 or count == 5:\n print('')\n print(\"-+-+-\")\n return\n\n if count == 8:\n print('')\n return",
"def divider(size=100): # Draws a dividing line to go between sections\r\n # (default 100 characters long)\r\n for i in range(size):\r\n print('-', end='') # Prints out a single dash, no newline afterwards\r\n # (the end= sets the last character to blank\r\n print('') # Print out a newline (using the default ending of a print\r\n # statement being a newline\r\n return",
"def _get_separator(num, sep_title, sep_character, sep_length):\n left_divider_length = right_divider_length = sep_length\n if isinstance(sep_length, tuple):\n left_divider_length, right_divider_length = sep_length\n left_divider = sep_character * left_divider_length\n right_divider = sep_character * right_divider_length\n title = sep_title.format(n=num + 1)\n\n return \"{left_divider}[ {title} ]{right_divider}\\n\".format(\n left_divider=left_divider, right_divider=right_divider, title=title\n )",
"def divider(title, length=None):\n length = shutil.get_terminal_size(fallback=(80, 24))[0] if length is None else length\n rest = length - len(title) - 2\n left = rest // 2 if rest % 2 else (rest + 1) // 2\n return \"\\n{} {} {}\".format(\"=\" * left, title, \"=\" * (rest - left))",
"def separator(self, num=1):\n for i in range(num):\n print('-') * 79",
"def print_horizontal_line(length=None, symbol=\"=\"):\n\n if length == None:\n try:\n _, length = get_terminal_size()\n except:\n length = 50\n\n print(symbol * length)",
"def display_divider(self):\n column_sizes = self.get_column_sizes()\n # Create a divider with enough room for columns as well as lines and\n # spaces between columns. To do this accounting we add the column\n # sizes, then 3 additional marks for the 2 spaces and one line around\n # each word, and finally a single additional mark for the leading line.\n divider_length = sum(column_sizes) + (len(self.column_names) * 3) + 1\n print '+{}+'.format('-' * (divider_length - 2))",
"def showSeparator():\n\treturn (1, 0)",
"def _printSeparator(self, filePointer, color=Const.HEADER_COLOR1):\n filePointer.write ('<font face=\"verdana\" color=\" ' + color + '\"><br>\\n=======================================================</font><br>\\n')",
"def print_dotted_line(width=72):\n print('-' * width)",
"def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")",
"def space():\n print(' ', end='')",
"def divider():\n return \"-------------------\"",
"def print(*args, sep=\" \"):\n pass",
"def _spacer(self, msg):\n msg = str(msg)\n msg_len = len(msg)\n if msg_len == 1:\n print(\" \", end=\"\")\n elif msg_len == 2:\n print(\" \", end=\"\")",
"def divider():\n return \"-------------------------\"",
"def output_sep_mark():\n print(sep_mark)",
"def separator(self):\n pass",
"def display_end(self, nsep):\n\n if self.opt['Verbose'] and self.opt['StatusHeader']:\n print(\"-\" * nsep)",
"def print_border_line(self):\n return (self.max_length + 1) * '-' + '\\n'",
"def output_plain_sep_mark():\n print(plain_sep_mark)",
"def delimiter(num_of_lines, line_type='-'):\n\n string = '\\n'\n string += line_type * num_of_lines\n\n return string",
"def DrawLine(p_length: int, p_character: str):\n print(p_character * p_length)\n return",
"def _pipe_segment_with_colons(align, colwidth):\n w = colwidth\n if align in [\"right\", \"decimal\"]:\n return (\"-\" * (w - 1)) + \":\"\n elif align == \"center\":\n return \":\" + (\"-\" * (w - 2)) + \":\"\n elif align == \"left\":\n return \":\" + (\"-\" * (w - 1))\n else:\n return \"-\" * w",
"def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")",
"def hsep(self, width = 1, fg=None, bg=None, double=False):\n\n self.savepos()\n\n self.out.write(self._colorize((\"═\" if double else \"─\") * width, fg, bg))\n\n self.restorepos()",
"def one_line_print(length, player, pos):\n if pos == 0:\n print(player, 'in Home', end=' ')\n else:\n print('Home', end=' ')\n for i in range(1, length):\n if i == pos:\n print(player, end=' ')\n else:\n print('.', end=' ')\n if pos == length:\n print(player, 'in Finish')\n else:\n print('Finish')",
"def get_divider(title='Error', linewidth=79, fill_character='#'):\n lines = [\n '',\n fill_character * linewidth,\n f' {title} '.center(linewidth, fill_character),\n ]\n return '\\n'.join(lines)"
] | [
"0.75847524",
"0.7112995",
"0.69336325",
"0.6724393",
"0.6669018",
"0.63386697",
"0.6214024",
"0.6116022",
"0.601394",
"0.5993874",
"0.5971108",
"0.5948834",
"0.59433085",
"0.59402794",
"0.58452433",
"0.58447677",
"0.5812377",
"0.5800336",
"0.5783507",
"0.5782023",
"0.57378185",
"0.57095486",
"0.5693812",
"0.56697506",
"0.5654456",
"0.5605112",
"0.5560394",
"0.5546566",
"0.5534696",
"0.54647243"
] | 0.85090715 | 0 |
Set the color of string s to the CLOSEST MATCH to color c. c is expected to be a list-like with 3 integers as its items. | def colorText(s, c):
if not FORMATTING_AVAILABLE:
return s
HEAD = "\033["
TAIL = "m"
color = "39;49"
lastDifference = 800
for i in COLORS:
diff = abs(i[0] - c[0]) + abs(i[1] - c[1]) + abs(i[2] - c[2]) #calculates difference to stock color
if diff < lastDifference:
lastDifference = diff #chooses closest match
color = i[3]
return HEAD+color+TAIL+s+COLOR_RESET #color code + string + reset code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _color(self,c):\n return self.colorlist[c%len(self.colorlist)]",
"def mircColor(self, s, fg=None, bg=None):\n if fg is None and bg is None:\n return s\n elif bg is None:\n fg = self.mircColors[str(fg)]\n return '\\x03%s%s\\x03' % (fg.zfill(2), s)\n elif fg is None:\n bg = self.mircColors[str(bg)]\n # According to the mirc color doc, a fg color MUST be specified if a\n # background color is specified. So, we'll specify 00 (white) if the\n # user doesn't specify one.\n return '\\x0300,%s%s\\x03' % (bg.zfill(2), s)\n else:\n fg = self.mircColors[str(fg)]\n bg = self.mircColors[str(bg)]\n # No need to zfill fg because the comma delimits.\n return '\\x03%s,%s%s\\x03' % (fg, bg.zfill(2), s)",
"def _process_colors(self, s: str) -> str:\r\n return self._color_regexp.sub(lambda m: self._ansi_equivalent(m.group()), s)",
"def colorize(text, color):\n return COLOR_DICT[color] + str(text) + COLOR_DICT['end']",
"def colorize_string(string: str, r: int, g: int, b: int, *, reset: bool = True) -> str:\n # Todo: optimize sequential characters with same colors.\n output = f\"\\u001b[38;2;{r};{g};{b}m{string}\"\n if reset:\n output += \"\\033[0m\"\n return output",
"def get_sat_color(colors):\n sat = (0, 0, 0)\n for q, color in colors:\n h, s, v = color\n if s > sat[1]:\n sat = color\n return sat",
"def in_green(s: str) -> str:\n return f\"\\033[92m{str(s)}\\033[0m\"",
"def format_color(string, color):\n cs = \"\\x1b[38;2;{};{};{}m{}\\x1b[0m\"\n\n # my colors\n if color == \"red1\":\n r, g, b = 215, 0, 0\n elif color == \"green1\":\n r, g, b = 0, 255, 0\n elif color == \"blue1\":\n r, g, b = 50, 50, 255\n\n # list from https://www.rapidtables.com/web/color/RGB_Color.html\n elif color == \"Black\":\n r, g, b = 0, 0, 0\n elif color == \"White\":\n r, g, b = 255, 255, 255\n elif color == \"Red\":\n r, g, b = 255, 0, 0\n elif color == \"Lime\":\n r, g, b = 0, 255, 0\n elif color == \"Blue\":\n r, g, b = 0, 0, 255\n elif color == \"Yellow\":\n r, g, b = 255, 255, 0\n elif color == \"Cyan\":\n r, g, b = 0, 255, 255\n elif color == \"Magenta\":\n r, g, b = 255, 0, 255\n elif color == \"Silver\":\n r, g, b = 192, 192, 192\n elif color == \"Gray\":\n r, g, b = 128, 128, 128\n elif color == \"Maroon\":\n r, g, b = 128, 0, 0\n elif color == \"Olive\":\n r, g, b = 128, 128, 0\n elif color == \"Green\":\n r, g, b = 0, 128, 0\n elif color == \"Purple\":\n r, g, b = 128, 0, 128\n elif color == \"Teal\":\n r, g, b = 0, 128, 128\n elif color == \"Navy\":\n r, g, b = 0, 0, 128\n elif color == \"maroon\":\n r, g, b = 128, 0, 0\n elif color == \"dark red\":\n r, g, b = 139, 0, 0\n elif color == \"brown\":\n r, g, b = 165, 42, 42\n elif color == \"firebrick\":\n r, g, b = 178, 34, 34\n elif color == \"crimson\":\n r, g, b = 220, 20, 60\n elif color == \"red\":\n r, g, b = 255, 0, 0\n elif color == \"tomato\":\n r, g, b = 255, 99, 71\n elif color == \"coral\":\n r, g, b = 255, 127, 80\n elif color == \"indian red\":\n r, g, b = 205, 92, 92\n elif color == \"light coral\":\n r, g, b = 240, 128, 128\n elif color == \"dark salmon\":\n r, g, b = 233, 150, 122\n elif color == \"salmon\":\n r, g, b = 250, 128, 114\n elif color == \"light salmon\":\n r, g, b = 255, 160, 122\n elif color == \"orange red\":\n r, g, b = 255, 69, 0\n elif color == \"dark orange\":\n r, g, b = 255, 140, 0\n elif color == \"orange\":\n r, g, b = 255, 165, 0\n elif color == \"gold\":\n r, g, b = 255, 215, 0\n elif color == \"dark golden rod\":\n r, g, b = 184, 134, 11\n elif color == \"golden rod\":\n r, g, b = 218, 165, 32\n elif color == \"pale golden rod\":\n r, g, b = 238, 232, 170\n elif color == \"dark khaki\":\n r, g, b = 189, 183, 107\n elif color == \"khaki\":\n r, g, b = 240, 230, 140\n elif color == \"olive\":\n r, g, b = 128, 128, 0\n elif color == \"yellow\":\n r, g, b = 255, 255, 0\n elif color == \"yellow green\":\n r, g, b = 154, 205, 50\n elif color == \"dark olive green\":\n r, g, b = 85, 107, 47\n elif color == \"olive drab\":\n r, g, b = 107, 142, 35\n elif color == \"lawn green\":\n r, g, b = 124, 252, 0\n elif color == \"chart reuse\":\n r, g, b = 127, 255, 0\n elif color == \"green yellow\":\n r, g, b = 173, 255, 47\n elif color == \"dark green\":\n r, g, b = 0, 100, 0\n elif color == \"green\":\n r, g, b = 0, 128, 0\n elif color == \"forest green\":\n r, g, b = 34, 139, 34\n elif color == \"lime\":\n r, g, b = 0, 255, 0\n elif color == \"lime green\":\n r, g, b = 50, 205, 50\n elif color == \"light green\":\n r, g, b = 144, 238, 144\n elif color == \"pale green\":\n r, g, b = 152, 251, 152\n elif color == \"dark sea green\":\n r, g, b = 143, 188, 143\n elif color == \"medium spring green\":\n r, g, b = 0, 250, 154\n elif color == \"spring green\":\n r, g, b = 0, 255, 127\n elif color == \"sea green\":\n r, g, b = 46, 139, 87\n elif color == \"medium aqua marine\":\n r, g, b = 102, 205, 170\n elif color == \"medium sea green\":\n r, g, b = 60, 179, 113\n elif color == \"light sea 
green\":\n r, g, b = 32, 178, 170\n elif color == \"dark slate gray\":\n r, g, b = 47, 79, 79\n elif color == \"teal\":\n r, g, b = 0, 128, 128\n elif color == \"dark cyan\":\n r, g, b = 0, 139, 139\n elif color == \"aqua\":\n r, g, b = 0, 255, 255\n elif color == \"cyan\":\n r, g, b = 0, 255, 255\n elif color == \"light cyan\":\n r, g, b = 224, 255, 255\n elif color == \"dark turquoise\":\n r, g, b = 0, 206, 209\n elif color == \"turquoise\":\n r, g, b = 64, 224, 208\n elif color == \"medium turquoise\":\n r, g, b = 72, 209, 204\n elif color == \"pale turquoise\":\n r, g, b = 175, 238, 238\n elif color == \"aqua marine\":\n r, g, b = 127, 255, 212\n elif color == \"powder blue\":\n r, g, b = 176, 224, 230\n elif color == \"cadet blue\":\n r, g, b = 95, 158, 160\n elif color == \"steel blue\":\n r, g, b = 70, 130, 180\n elif color == \"corn flower blue\":\n r, g, b = 100, 149, 237\n elif color == \"deep sky blue\":\n r, g, b = 0, 191, 255\n elif color == \"dodger blue\":\n r, g, b = 30, 144, 255\n elif color == \"light blue\":\n r, g, b = 173, 216, 230\n elif color == \"sky blue\":\n r, g, b = 135, 206, 235\n elif color == \"light sky blue\":\n r, g, b = 135, 206, 250\n elif color == \"midnight blue\":\n r, g, b = 25, 25, 112\n elif color == \"navy\":\n r, g, b = 0, 0, 128\n elif color == \"dark blue\":\n r, g, b = 0, 0, 139\n elif color == \"medium blue\":\n r, g, b = 0, 0, 205\n elif color == \"blue\":\n r, g, b = 0, 0, 255\n elif color == \"royal blue\":\n r, g, b = 65, 105, 225\n elif color == \"blue violet\":\n r, g, b = 138, 43, 226\n elif color == \"indigo\":\n r, g, b = 75, 0, 130\n elif color == \"dark slate blue\":\n r, g, b = 72, 61, 139\n elif color == \"slate blue\":\n r, g, b = 106, 90, 205\n elif color == \"medium slate blue\":\n r, g, b = 123, 104, 238\n elif color == \"medium purple\":\n r, g, b = 147, 112, 219\n elif color == \"dark magenta\":\n r, g, b = 139, 0, 139\n elif color == \"dark violet\":\n r, g, b = 148, 0, 211\n elif color == \"dark orchid\":\n r, g, b = 153, 50, 204\n elif color == \"medium orchid\":\n r, g, b = 186, 85, 211\n elif color == \"purple\":\n r, g, b = 128, 0, 128\n elif color == \"thistle\":\n r, g, b = 216, 191, 216\n elif color == \"plum\":\n r, g, b = 221, 160, 221\n elif color == \"violet\":\n r, g, b = 238, 130, 238\n elif color == \"magenta\":\n r, g, b = 255, 0, 255\n elif color == \"orchid\":\n r, g, b = 218, 112, 214\n elif color == \"medium violet red\":\n r, g, b = 199, 21, 133\n elif color == \"pale violet red\":\n r, g, b = 219, 112, 147\n elif color == \"deep pink\":\n r, g, b = 255, 20, 147\n elif color == \"hot pink\":\n r, g, b = 255, 105, 180\n elif color == \"light pink\":\n r, g, b = 255, 182, 193\n elif color == \"pink\":\n r, g, b = 255, 192, 203\n elif color == \"antique white\":\n r, g, b = 250, 235, 215\n elif color == \"beige\":\n r, g, b = 245, 245, 220\n elif color == \"bisque\":\n r, g, b = 255, 228, 196\n elif color == \"blanched almond\":\n r, g, b = 255, 235, 205\n elif color == \"wheat\":\n r, g, b = 245, 222, 179\n elif color == \"corn silk\":\n r, g, b = 255, 248, 220\n elif color == \"lemon chiffon\":\n r, g, b = 255, 250, 205\n elif color == \"light golden rod yellow\":\n r, g, b = 250, 250, 210\n elif color == \"light yellow\":\n r, g, b = 255, 255, 224\n elif color == \"saddle brown\":\n r, g, b = 139, 69, 19\n elif color == \"sienna\":\n r, g, b = 160, 82, 45\n elif color == \"chocolate\":\n r, g, b = 210, 105, 30\n elif color == \"peru\":\n r, g, b = 205, 133, 63\n elif color == \"sandy brown\":\n r, g, b 
= 244, 164, 96\n elif color == \"burly wood\":\n r, g, b = 222, 184, 135\n elif color == \"tan\":\n r, g, b = 210, 180, 140\n elif color == \"rosy brown\":\n r, g, b = 188, 143, 143\n elif color == \"moccasin\":\n r, g, b = 255, 228, 181\n elif color == \"navajo white\":\n r, g, b = 255, 222, 173\n elif color == \"peach puff\":\n r, g, b = 255, 218, 185\n elif color == \"misty rose\":\n r, g, b = 255, 228, 225\n elif color == \"lavender blush\":\n r, g, b = 255, 240, 245\n elif color == \"linen\":\n r, g, b = 250, 240, 230\n elif color == \"old lace\":\n r, g, b = 253, 245, 230\n elif color == \"papaya whip\":\n r, g, b = 255, 239, 213\n elif color == \"sea shell\":\n r, g, b = 255, 245, 238\n elif color == \"mint cream\":\n r, g, b = 245, 255, 250\n elif color == \"slate gray\":\n r, g, b = 112, 128, 144\n elif color == \"light slate gray\":\n r, g, b = 119, 136, 153\n elif color == \"light steel blue\":\n r, g, b = 176, 196, 222\n elif color == \"lavender\":\n r, g, b = 230, 230, 250\n elif color == \"floral white\":\n r, g, b = 255, 250, 240\n elif color == \"alice blue\":\n r, g, b = 240, 248, 255\n elif color == \"ghost white\":\n r, g, b = 248, 248, 255\n elif color == \"honeydew\":\n r, g, b = 240, 255, 240\n elif color == \"ivory\":\n r, g, b = 255, 255, 240\n elif color == \"azure\":\n r, g, b = 240, 255, 255\n elif color == \"snow\":\n r, g, b = 255, 250, 250\n elif color == \"black\":\n r, g, b = 0, 0, 0\n elif color == \"dim gray\":\n r, g, b = 105, 105, 105\n elif color == \"gray\":\n r, g, b = 128, 128, 128\n elif color == \"dark gray\":\n r, g, b = 169, 169, 169\n elif color == \"silver\":\n r, g, b = 192, 192, 192\n elif color == \"light gray\":\n r, g, b = 211, 211, 211\n elif color == \"gainsboro\":\n r, g, b = 220, 220, 220\n elif color == \"white smoke\":\n r, g, b = 245, 245, 245\n elif color == \"white\":\n r, g, b = 255, 255, 255\n else:\n r, g, b = 255, 255, 255\n\n return cs.format(r, g, b, string)",
"def colorize(self, string):\n D = \"(%s)\" % colorize(\"@R{D}\")\n L = \"(%s)\" % colorize(\"@G{L}\")\n DL = \"(%s,%s)\" % (colorize(\"@R{D}\"), colorize(\"@G{L}\"))\n colorized = string.replace(\"(D)\", D)\n colorized = colorized.replace(\"(L)\", L)\n colorized = colorized.replace(\"(D,L)\", DL)\n return colorized",
"def string_to_color(s):\n if type(s) is not str:\n raise TypeError(\"s must be a string\")\n\n if not(s.startswith(\"#\") or s.startswith(\"0x\")):\n raise ValueError(\"value is not Color-compatible\")\n\n if s.startswith(\"#\"):\n s = s[1:]\n else:\n s = s[2:]\n\n r, g, b, a = 255, 255, 255, 255\n if len(s) in (3, 4):\n # A triple/quadruple in the form #ead == #eeaadd\n r = int(s[0], 16) << 4 | int(s[0], 16)\n g = int(s[1], 16) << 4 | int(s[1], 16)\n b = int(s[2], 16) << 4 | int(s[2], 16)\n if len(s) == 4:\n a = int(s[3], 16) << 4 | int(s[3], 16)\n elif len(s) in (6, 8):\n r = int(s[0], 16) << 4 | int(s[1], 16)\n g = int(s[2], 16) << 4 | int(s[3], 16)\n b = int(s[4], 16) << 4 | int(s[5], 16)\n if len(s) == 8:\n a = int(s[6], 16) << 4 | int(s[7], 16)\n else:\n raise ValueError(\"value is not Color-compatible\")\n return Color(r, g, b, a)",
"def colored (string_, color, attrs):\n return string_",
"def cmykstring2rgbstring(s):\n c, m, y, k = [float(j) for j in s.split()]\n r, g, b = cmyk2rgb(c, m, y, k)\n return f\"{r:.6f} {g:.6f} {b:.6f}\"",
"def highlight_max(s):\n is_max = s == s.max()\n return ['color: red' if v else '' for v in is_max]",
"def anyTextToColor(self, mystr, r=None):\n\n if len(mystr) < 3:\n # pad up with zeros\n while len(mystr) % 3 != 0:\n mystr += \"0\"\n\n i = 0\n sum1 = 0\n sum2 = 0\n sum3 = 0\n for c in mystr:\n if i % 3 == 0:\n sum1 += int( str(ord(c)) + str(i)[::-1])\n if i % 3 == 1:\n sum2 += int(str(ord(c)) + str(i)[::-1])\n if i % 3 == 2:\n sum3 += int(str(ord(c)) + str(i)[::-1])\n i += 1\n\n x1 = sum1 % 255\n x2 = sum2 % 255\n x3 = sum3 % 255\n\n if r is not None:\n x1 = r\n\n # if we wants to force a shade of green\n # x2 = 255\n\n outstr = \"%x%x%x\" % (x1, x2, x3)\n\n while len(outstr) < 6:\n outstr += \"a\"\n\n return outstr",
"def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3",
"def colorize(text, color):\n\n if not supports_color():\n return text\n\n return color + text + Colors.ENDC",
"def cleanup_passed_color_value(s):\n reo = re.compile('[0-9a-f]')\n cannotBeCleaned = ''\n if s[0] == '#' and len(s) in [4,7] and reo.match(s[1:]):\n return s\n if s in colorNamesAndCodes:\n col = colorNamesAndCodes[s]\n if reo.match(col[1:]):\n return col\n else:\n return cannotBeCleaned\n if len(s) in [3,6] and reo.match(s):\n return '#' + s\n if len(s) == 2 and reo.match(s):\n return '#' +s +s +s\n return cannotBeCleaned",
"def color_conversion(string):\n if (string == 'J'):\n return 0.14\n if (string == 'I'):\n return 0.28\n if (string == 'H'):\n return 0.42\n if (string == 'G'):\n return 0.56\n if (string == 'F'):\n return 0.70\n if (string == 'E'):\n return 0.84\n if (string == 'D'):\n return 1",
"def setColorString(clr):\n dislin.color(clr)",
"def colorize(s, fg=None, bg=None, bold=False, underline=False, reverse=False):\n \n style_fragments = []\n if fg in _COLOR_TABLE:\n # Foreground colors go from 30-39\n style_fragments.append(_COLOR_TABLE.index(fg) + 30)\n if bg in _COLOR_TABLE:\n # Background colors go from 40-49\n style_fragments.append(_COLOR_TABLE.index(bg) + 40)\n if bold:\n style_fragments.append(1)\n if underline:\n style_fragments.append(4)\n if reverse:\n style_fragments.append(7)\n style_start = '\\x1b[' + ';'.join(map(str,style_fragments)) + 'm'\n style_end = '\\x1b[0m'\n return style_start + s + style_end",
"def verify_color(cci):\n\n if cci < -6.0:\n return OrangeColor.GREEN\n elif -6.0 <= cci < -1.0:\n return OrangeColor.YELLOWISH_GREEN\n elif -1.0 <= cci < 2.7:\n return OrangeColor.YELLOW\n elif 2.7 <= cci < 6.0:\n return OrangeColor.LIGHT_ORANGE\n else: # cci >= 6\n return OrangeColor.ORANGE",
"def color(c):\n\n if isinstance(c, tuple) and len(c) == 4:\n return c\n\n if c is None:\n return c\n\n if isinstance(c, basestring):\n if c[0] == '#':\n c = c[1:]\n\n if len(c) == 6:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = int(c[6]+c[7], 16)\n elif len(c) == 3:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = 255\n elif len(c) == 4:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = int(c[3], 16) * 0x11\n else:\n raise Exception(\"Color string must be 3, 4, 6, or 8 hex digits long.\")\n\n return (r, g, b, a)\n\n raise Exception(\"Not a color: %r\" % (c,))",
"def highlight_series(s):\n return ['background-color: #eee' for v in s]",
"def match_color(self, hexcolor, n_closest=1):\n n_closest = min(int(n_closest), len(self.lookup_table))\n ref_color = rgb.RgbColor(hexcolor)\n\n # Returns the responses in a list of dictionaries with\n # all of the RgbColor objects cast into strings.\n matched_colorlist = map(lambda nt: dict(nt._asdict()), \n sorted(self.lookup_table, key=lambda clr: ref_color.distfrom(clr.hex))[:n_closest])\n for color in matched_colorlist:\n color['hex'] = str(color['hex'])\n\n return matched_colorlist",
"def colour(string: str) -> str:\n string = f\"\\033[32m{string}\\033[0m\"\n return string",
"def highlight(string: str) -> str:\n return text_color(string, \"cyan\")",
"def convColor(colorString):\n if len(colorString) != 6:\n return None\n r, g, b = colorString[:2], colorString[2:4], colorString[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)",
"def _greedy_color(self, source):\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = True\n for c in xrange(self.graph.v()): # check colors\n if not self._color_list[c]:\n self.color[source] = c\n break\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = False\n return c",
"def guess_colour(dbo, s):\n s = str(s).lower()\n guess = db.query_int(dbo, \"SELECT ID FROM basecolour WHERE LOWER(BaseColour) LIKE '%\" + db.escape(s) + \"%'\")\n if guess != 0: return guess\n return configuration.default_colour(dbo)",
"def color(self, sids=None, sat=1):\n if sids == None: # init/overwrite self.colors\n nids = self.nids\n # uint8, single unit nids are 1-based:\n self.colors = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n # overwrite unclustered/multiunit points with GREYRGB\n self.colors[nids < 1] = GREYRGB * sat\n else: # assume self.colors exists\n sidis = self.sids.searchsorted(sids)\n nids = self.nids[sidis]\n self.colors[sidis] = CLUSTERCLRSRGB[nids % len(CLUSTERCLRSRGB) - 1] * sat\n self.colors[sidis[nids < 1]] = GREYRGB * sat"
] | [
"0.6247449",
"0.5833682",
"0.5591282",
"0.55455554",
"0.5502045",
"0.5501849",
"0.54304963",
"0.5416966",
"0.5401237",
"0.5394844",
"0.5387507",
"0.53824794",
"0.5380216",
"0.53772527",
"0.5364747",
"0.53561676",
"0.5346845",
"0.53449506",
"0.53247803",
"0.5321496",
"0.5265814",
"0.5244493",
"0.52301073",
"0.5227589",
"0.52231944",
"0.5211915",
"0.51905113",
"0.5172975",
"0.5171192",
"0.5170383"
] | 0.6658775 | 0 |
Format the string s to be displayed as bold, underlined, negative or any combination of these. This function will automatically append a reset sequence after the text. Manually resetting the formatting is not necessary. | def formatText(s, bold=False, underlined=False, negative=False):
if not FORMATTING_AVAILABLE:
return s
head = ""
if bold: head += "\033[1m"
if underlined: head += "\033[4m"
if negative: head += "\033[7m"
return head + s + "\033[0m" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bold(string):\n return BOLD + string + RESETFORMAT",
"def bold(self, s):\n return '\\x02%s\\x02' % s",
"def bold(string):\n return BOLD + string + END",
"def bold(self, string):\n return self.wrap(bold(string))",
"def bold(msg):\n return '\\033[1m%s\\033[0m' % msg",
"def embolden(string):\n return \"<b>\"+string+\"</b>\"",
"def b(string):\n return \"\\033[94m{0}\\033[0m\".format(string)",
"def bold(string: str) -> str:\n return f\"**{string}**\"",
"def colorize(s, fg=None, bg=None, bold=False, underline=False, reverse=False):\n \n style_fragments = []\n if fg in _COLOR_TABLE:\n # Foreground colors go from 30-39\n style_fragments.append(_COLOR_TABLE.index(fg) + 30)\n if bg in _COLOR_TABLE:\n # Background colors go from 40-49\n style_fragments.append(_COLOR_TABLE.index(bg) + 40)\n if bold:\n style_fragments.append(1)\n if underline:\n style_fragments.append(4)\n if reverse:\n style_fragments.append(7)\n style_start = '\\x1b[' + ';'.join(map(str,style_fragments)) + 'm'\n style_end = '\\x1b[0m'\n return style_start + s + style_end",
"def bold(text, should_bold=True):\n return f\"\\033[1m{text}\\033[0m\" if should_bold else text",
"def colorize_string(string: str, r: int, g: int, b: int, *, reset: bool = True) -> str:\n # Todo: optimize sequential characters with same colors.\n output = f\"\\u001b[38;2;{r};{g};{b}m{string}\"\n if reset:\n output += \"\\033[0m\"\n return output",
"def bold(text):\n return '\\x1b[1;30m'+text+'\\x1b[0m' if text else '\\x1b[1;30m'+'\\x1b[0m'",
"def with_color(text, color, bold=False):\n color_fmt = '$fg_bold[{:s}]' if bold else '$fg[{:s}]'\n return '%{{{:s}%}}{:s}%{{$reset_color%}}'.format(\n color_fmt.format(color), text)",
"def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]",
"def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \">\")\r\n res = string.replace(res, \"<\", \"<\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res",
"def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format",
"def colorize(string, color, bold=False, highlight=False):\n attr = []\n num = color2num[color]\n if highlight:\n num += 10\n attr.append(str(num))\n if bold:\n attr.append('1')\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), string)",
"def update_line(s, bold=False, underline=False, blinking=False, color=None, bgcolor=None):\n s = get_line(s, bold=bold, underline=underline, blinking=blinking,\n color=color, bgcolor=bgcolor, update_line=True)\n print(s, end='')",
"def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))",
"def colorText(s, c):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n HEAD = \"\\033[\"\n TAIL = \"m\"\n\n color = \"39;49\"\n lastDifference = 800\n\n for i in COLORS:\n diff = abs(i[0] - c[0]) + abs(i[1] - c[1]) + abs(i[2] - c[2]) #calculates difference to stock color\n if diff < lastDifference:\n lastDifference = diff #chooses closest match\n color = i[3]\n\n return HEAD+color+TAIL+s+COLOR_RESET #color code + string + reset code",
"def html_manual_format(string):\n return html_div(string, \"manualfmt\")",
"def stylizer(self, str=unicode(\"\"), style_name=unicode(\"\")):\n sret = str\n\n try:\n\n if self.style_fontstyle[style_name] == \"italic\":\n sret = unicode(\"<i>%s</i>\" % sret)\n\n finally:\n\n try:\n\n if self.style_fontweight[style_name] == \"bold\":\n sret = unicode(\"<b>%s</b>\" % sret)\n\n finally:\n\n try:\n\n if self.style_textunderline[style_name] == \"underlined\":\n sret = unicode('<span style=\"text-decoration: underline;\">%s</span>' % sret)\n\n finally:\n return sret",
"def stripBold(self, s):\n return s.replace('\\x02', '')",
"def strc(text, color='black', style='normal'):\n\n ansii = ANSIIcode(color, style)\n back_to_normal = ANSIIcode('normal', 'normal') # '\\033[0m'\n\n return ansii + text + back_to_normal",
"def print_bold(msg, end='\\n'):\n print(\"\\033[1m\" + msg + \"\\033[0m\", end=end)",
"def print_bold(msg, end='\\n'):\n print(\"\\033[1m\" + msg + \"\\033[0m\", end=end)",
"def bold(self, color=None):\n ## Get balise\n if color != None:\n exec(\"balise = self.%s\"%(color))\n else:\n balise = \"\"\n ## Return bold color\n return self.BOLD + balise",
"def md_bold(raw_text):\n return '**%s**' % md_escape(raw_text, characters='*')",
"def underline(self, s):\n return '\\x1F%s\\x1F' % s",
"def colored_text(text, color, bold=False):\n if DISABLE_COLORS:\n return text\n attrs = []\n if bold:\n attrs.append('bold')\n return colored(text, color=color, attrs=attrs)"
] | [
"0.74200016",
"0.73931444",
"0.65095335",
"0.64862657",
"0.62759024",
"0.62598115",
"0.6205859",
"0.6177458",
"0.61253005",
"0.59057105",
"0.5868853",
"0.58566475",
"0.5785357",
"0.5782194",
"0.5779407",
"0.576035",
"0.5745407",
"0.57070243",
"0.56860864",
"0.5685168",
"0.5643359",
"0.5630512",
"0.5602746",
"0.5580047",
"0.55334204",
"0.55334204",
"0.5523156",
"0.55078435",
"0.54871845",
"0.5456447"
] | 0.7816094 | 0 |
Deletes a User Object from the userList. Returns list | def deleteUser(self, userList, index):
if(self.adminAccess):
ret = userList.pop(index)
print("User has been deleted")
return userList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n\n User.user_list.remove(self)",
"def delete_user():",
"def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}",
"def delete(self, dnzo_user, task_list):\n from tasks_data.task_lists import delete_task_list\n if dnzo_user.lists_count <= 1:\n self.bad_request(\"User only has one list; cannot delete the last list.\")\n return\n \n delete_task_list(dnzo_user, task_list)\n self.json_response(task_list=task_list.to_dict())",
"def delete_user():\n #TODO user delete\n pass",
"def remove(self, user_id):\n pass",
"def delete_user(self, user):\n self.delete(user)",
"def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])",
"def delete_users(user_id):\n my_users = storage.get(\"User\", user_id)\n if my_users:\n storage.delete(my_users)\n storage.save()\n storage.close()\n return jsonify({}), 200\n else:\n abort(404)",
"def delete(self, user_id):\r\n return delete_user(request, user_id)",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def delete_users(project):\n for user_id in project.user_id.all():\n project.user_id.remove(user_id.pk)\n project.save()",
"def remove_user(cloud_list, user_id, adminu, adminpw):\n url_success = ['Success', 'success']\n for cloud in cloud_list:\n try:\n resp = urllib2.urlopen('%s/services/users/%s?operation=delete&user=%s&password=%s' %\n (cloud, user_id, adminu, adminpw))\n contents = resp.read()\n except urllib2.HTTPError, error:\n contents = error.read()\n except urllib2.URLError:\n contents = 'failed'\n output(contents, cloud, user_id, url_success, '')",
"def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")",
"def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty",
"def delete(self, username, private_list_name):\n user = query_user_by_name(username)\n if user is None:\n return 'User does not exit', 404\n if invalid_user(username):\n return 'Unauthorized User', 401\n lst = query_private_list_by_id(username, private_list_name)\n if lst is None:\n return 'Private List does not exist', 404\n db.session.delete(lst)\n db.session.commit()\n return \"PrivateList has been deleted\", 200",
"def delete_user(id):\n pass",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()",
"def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()",
"def delete(self, user_id):\n return delete_user(user_id)",
"def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)",
"async def delete(self, request, uid):\n return await super(User, self).delete_item(request.app.pool, 'user',\n uid)",
"def del_user(request):\r\n mdict = request.matchdict\r\n\r\n # Submit a username.\r\n del_username = mdict.get('username', None)\r\n\r\n if del_username is None:\r\n LOG.error('No username to remove.')\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No username to remove.',\r\n })\r\n\r\n u = UserMgr.get(username=del_username)\r\n\r\n if not u:\r\n LOG.error('Username not found.')\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': 'User not found.',\r\n })\r\n\r\n try:\r\n # First delete all the tag references for this user's bookmarks.\r\n res = DBSession.query(Bmark.bid).filter(Bmark.username == u.username)\r\n bids = [b[0] for b in res]\r\n\r\n qry = bmarks_tags.delete(bmarks_tags.c.bmark_id.in_(bids))\r\n qry.execute()\r\n\r\n # Delete all of the bmarks for this year.\r\n Bmark.query.filter(Bmark.username == u.username).delete()\r\n DBSession.delete(u)\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Removed user: ' + del_username\r\n })\r\n except Exception, exc:\r\n # There might be cascade issues or something that causes us to fail in\r\n # removing.\r\n LOG.error(exc)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'Bad Request: ' + str(exc)\r\n })",
"def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n temp.delete()\n models.storage.save()\n return jsonify({})",
"def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200",
"def del_user(id):\n user = User.query.get(id)\n\n db.session.delete(user)\n db.session.commit()\n\n return userSchema.jsonify(user)"
] | [
"0.7945344",
"0.7945344",
"0.7945344",
"0.7874968",
"0.73239255",
"0.72655916",
"0.71796215",
"0.70598745",
"0.6910753",
"0.6903936",
"0.68606716",
"0.6858181",
"0.6844352",
"0.6788512",
"0.67843497",
"0.6771759",
"0.67586803",
"0.6744388",
"0.671837",
"0.6703052",
"0.6680883",
"0.6670432",
"0.66501",
"0.6649797",
"0.6644437",
"0.6642136",
"0.6640144",
"0.6634369",
"0.6604477",
"0.66033435"
] | 0.82633924 | 0 |
Imports mods List from disk. Looks for mods.txt file and creates a list of Class objects. Returns a list of Mods class objects if mods.txt exists, else returns None | def importMods():
try:
with open("mods.txt", "r") as fp:
ModsList = []
for line in fp:
                # split the string on commas
lineArray = line.split(",")
#Split each var up
name = lineArray[0].split(":")[1]
email = lineArray[1].split(":")[1]
nickname = lineArray[2].split(":")[1]
passwd = lineArray[3].split(":")[1]
                # clean up end of passwd string
passwd = passwd.strip(" }\n")
ModsList.append(Mods(name, email, nickname, passwd))
except IOError:
ModsList = None
return ModsList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_modules(self, data: dict):\n\n v = Validator([(data[\"modules\"], list)])\n for module in data[\"modules\"]:\n mod_data = load_configuration(module)\n mod_path = Path(mod_data[\"path\"])\n self.__cache[mod_path.stem] = (\n load_class(\n mod_data[\"path\"],\n mod_path.stem,\n mod_data[\"class_name\"],\n ),\n data[\"args\"][module]\n if \"args\" in data and module in data[\"args\"]\n else {},\n )\n for mod_folder in [\n a.stem.lower() for a in mod_path.parents\n ]: # check if parent folder is android\n if mod_folder == \"android\": # to know if android or not\n self.__cache[mod_path.stem][0].is_android = True",
"def _ImportSysMods(self, modsFile):\n lines = self._ConfigFileParse(\"System\", modsFile)\n print(\"Importing:\")\n for line in lines:\n for module in line:\n print(\"\\t{}\".format(module))\n self._ImportSystemModule(module)\n\n return lines",
"def get_module_list_from_pkg_rcrsv(self, package, mod_list):\n\n for name, obj in inspect.getmembers(package, inspect.ismodule):\n # name_only = Utils.file_Utils.getNameOnly(os.path.basename(obj.__file__))\n mod_list.append(obj)\n if name == '__init__.py':\n self.get_module_list_from_pkg_rcrsv(obj, mod_list)\n return mod_list",
"def load_from_file(cls):\n filename = cls.__name__ + \".json\"\n new_list = []\n if not os.path.isfile(filename):\n return new_list\n with open(filename) as fp:\n json_string = fp.read()\n cls_list = cls.from_json_string(json_string)\n for items in cls_list:\n new_inst = cls.create(**items)\n new_list.append(new_inst)\n return new_list",
"def load_from_file(cls):\n\n l = []\n if o.exists(cls.__name__ + \".json\"):\n with open(cls.__name__ + \".json\") as f:\n for line in f:\n s = cls.from_json_string(line)\n for d in s:\n l.append(cls.create(**d))\n\n return l",
"def load_from_file(cls):\n empty_list = []\n try:\n f = open(cls.__name__ + '.json')\n f.close()\n except FileNotFoundError:\n return empty_list\n\n with open(cls.__name__ + \".json\", 'r') as f:\n new_list = cls.from_json_string(f.read())\n for i in new_list:\n empty_list.append(cls.create(**i))\n return empty_list",
"def load_from_file(cls):\n list_obj = []\n if os.path.exists(cls.__name__ + \".json\"):\n with open(cls.__name__ + \".json\", \"r\") as _file:\n str_json = _file.read()\n _file.close()\n _dict = Base.from_json_string(str_json)\n for obj in _dict:\n list_obj.append(cls.create(**obj))\n return(list_obj)",
"def load_from_file(cls):\n lis = []\n if not os.path.isfile(cls.__name__ + \".json\"):\n return lis\n with open(cls.__name__ + \".json\", encoding=\"utf-8\") as myFile:\n json_str = myFile.read()\n my_dict = cls.from_json_string(json_str)\n for inst in my_dict:\n lis.append(cls.create(**inst))\n return lis",
"def avail(self,pattern=str):\t\n import re\n\n availmods = []\n avail_out = self._modulecmd(\"\"\"%s python avail %s\"\"\" % (self.modulecmd, pattern)).decode('utf-8')\n if avail_out.strip() == '':\n return availmods\n alines = [str(x) for x in avail_out.strip().splitlines()]\n repo = None\n top_insert = 0 # keep track of the head based on each time repo changes\n for aline in alines:\n if aline.strip() == '':\n repo = None\n continue\n try:\n repo = re.match(r'^-+\\s*([^-]+)\\s*-+\\s*$', aline).group(1)\n top_insert = len(availmods)\n continue\n except AttributeError:\n pass \n if repo:\n for tmpmod in aline.split():\n fullpath = os.path.join(repo, tmpmod)\n if tmpmod.lower().endswith(\"(default)\"):\n tmpmod = re.sub(r'(?i)\\(default\\)$', '', tmpmod)\n availmods.insert(\n top_insert, (\n tmpmod,\n fullpath\n )\n )\n else:\n availmods.append((tmpmod, fullpath))\n return availmods",
"def load_from_file(cls):\n try:\n with open(cls.__name__ + '.json', 'r') as f:\n jstr = f.read()\n list_d = Base.from_json_string(jstr)\n list_o = []\n for item in list_d:\n list_o.append(cls.create(**item))\n return list_o\n except FileNotFoundError:\n return []",
"def load_from_file(cls):\n if path.exists(cls.__name__ + \".json\") is False:\n return []\n with open(cls.__name__ + \".json\", \"r\", encoding='utf-8') as file:\n listofinstances = []\n objectlist = cls.from_json_string(file.read())\n for dict in objectlist:\n objectdict = {}\n for key, value in dict.items():\n objectdict[key] = value\n listofinstances.append(cls.create(**objectdict))\n return listofinstances",
"def get_modules(name_only=False):\n\n mods = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM modules ')\n\n for mod in cur.execute(sql):\n mods.append(mod[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM modules '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_MODULE\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n mods.append(item)\n\n return mods",
"def GetClassesFromFile(self,file_path):\n classes = []\n try:\n fl = open(file_path,\"r\")\n for line in fl.readlines():\n if \"class\" in line and \":\" in line:\n line = line.strip(\"class \")\n line2 = \"\"\n for i in line:\n if i!=\":\": line2+=i\n\n classes.append(line2)\n if classes:\n return classes\n else:\n return False\n fl.close()\n except:\n return False",
"def getModules(self):\n\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n\n self.locations = None\n self.modules = None\n\n self.good = True\n self.seen = set()\n\n self.getMain()\n self.getRefs()\n self.getStandard()\n\n version = self.version\n good = self.good\n app = self.app\n\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n\n mModules = []\n if mLocations:\n mModules.append(version or \"\")\n\n locations = self.locationsArg\n modules = self.modulesArg\n\n givenLocations = (\n []\n if locations is None\n else [expandDir(app, x.strip()) for x in itemize(locations, \"\\n\")]\n if type(locations) is str\n else [str(x) for x in locations]\n )\n givenModules = (\n []\n if modules is None\n else [normpath(x.strip()) for x in itemize(modules, \"\\n\")]\n if type(modules) is str\n else [normpath(str(x)) for x in modules]\n )\n\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules",
"def used_mods(ffile):\n import re\n import codecs\n\n # Go through line by line,\n # remove comments and strings because the latter can include ';'.\n # Then split at at ';', if given.\n # The stripped line should start with 'use '.\n # After use should be the \"module_name\", ', intrinsic :: module_name', or\n # ', non_intrinsic :: module_name'. We allow also to use \":: module_name\"\n # After module name should only be ', only: ...' or ', a ==> b'\n olist = list()\n of = codecs.open(ffile, 'r', encoding='ascii', errors='ignore')\n for line in of:\n ll = line.rstrip().lower() # everything lower case\n ll = re.sub('!.*$', '', ll) # remove F90 comment\n ll = re.sub('^c.*$', '', ll) # remove F77 comments\n ll = re.sub('\".*?\"', '', ll) # remove \"string\"\n ll = re.sub(\"'.*?'\", '', ll) # remove 'string'\n # check if several commands are on one line\n if ';' in ll:\n lll = ll.split(';')\n else:\n lll = [ll]\n for il in lll:\n iil = il.strip()\n # line should start with 'use '\n if iil.startswith('use '):\n iil = iil[4:].strip() # remove 'use '\n # skip intrinsic modules\n if 'intrinsic' in iil:\n if 'non_intrinsic' in iil:\n iil = re.sub(', *non_intrinsic', '', iil)\n iil = iil.strip()\n else:\n continue # skip to next in lll\n if iil.startswith('::'):\n iil = iil[2:].strip() # remove ':: '\n # remove after ',' if rename-list or only-list\n iil = re.sub(',.*$', '', iil)\n olist.append(iil.strip())\n of.close()\n\n return olist",
"def load_from_file(cls):\n lists = []\n filename = cls.__name__ + \".json\"\n try:\n with open(filename, \"r\") as f:\n instances = cls.from_json_string(f.read())\n for k, v in enumerate(instances):\n lists.append(cls.create(**instances[k]))\n\n except:\n pass\n return lists",
"def process_class_list(self, module, classes):",
"def _load_shot_definitions(self, shot_mod: types.ModuleType) -> None:\n self.shots = []\n for module_item_str in dir(shot_mod):\n module_item = getattr(shot_mod, module_item_str)\n if module_item.__class__ != type:\n continue\n if not issubclass(module_item, Shot):\n continue\n if not hasattr(module_item, \"name\"):\n continue\n logger.info(f\"loading shot config {module_item}\")\n self.shots.append(module_item())",
"def import_modules(modules_path, skip_list=None):\n # skip list not defined: create an empty one\n if skip_list is None:\n skip_list = []\n # make sure __init__ is not loaded\n skip_list.append('__init__')\n\n # lists python files in the current directory\n dir_entries = glob.glob(os.path.join(modules_path, '*.py'))\n\n # import each module and add its object to the final list\n\n # convert the list from filenames to module names and remove those in\n # skip list\n loaded_list = []\n for module_file in dir_entries:\n # remove the leading dir and file extension from the module name\n module_name = os.path.basename(module_file)[:-3]\n\n # module in skip list: do not load\n if module_name in skip_list:\n continue\n\n # we might hit a SyntaxError here\n spec = util.spec_from_file_location(module_name, module_file)\n module = util.module_from_spec(spec)\n spec.loader.exec_module(module)\n loaded_list.append(module)\n\n return loaded_list",
"def load_from_file(cls):\n filename = cls.__name__ + \".json\"\n instances = []\n try:\n text = []\n with open(filename, 'r') as f:\n text = cls.from_json_string(f.read())\n for obj in text:\n instances.append(cls.create(**obj))\n except FileNotFoundError:\n instances = []\n\n return instances",
"def load_from_file(cls):\n new_list = []\n try:\n with open(\"%s.json\" % cls.__name__, mode='r') as f:\n file = cls.from_json_string(f.read())\n for i in file:\n new_list.append(cls.create(**i))\n except Exception:\n pass\n return new_list",
"def load_classes(path):\n with open(path, 'r') as f:\n names = f.read().split(\"\\n\")\n # Filter removes empty strings (such as last line)\n return list(filter(None, names))",
"def import_all(self) -> None:\n with open(normpath('levels/level/lvl1.txt'), 'r') as f:\n while f:\n string = f.readline()\n if string == '':\n break\n string = string.strip().split(' ')\n if len(string) == 4:\n self.objects.append(pygame.Rect(int(string[0]), int(\n string[1]), int(string[2]), int(string[3])))\n for i in range(len(self.objects)):\n self.color.append(colors[random.randint(0, len(colors)-1)])",
"def process_module_list(self, modules):",
"def gen_availablemods(dir_path):\n mod_dir = os.listdir(os.path.join(dir_path, 'modules'))\n mods = [i.replace('.py', '') for i in mod_dir if i.startswith('mod_') and i.endswith('.py')]\n\n live_or_dead_mods = [i for i in mods if not i.startswith('mod_live_') and not i.startswith('mod_dead_')]\n only_live_mods = sorted([i for i in mods if i.startswith('mod_live_')])\n\n available_mods = only_live_mods + live_or_dead_mods\n\n return available_mods",
"def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n #\n # the list of handlers (excluding base. Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' + m)\n except:\n pass\n #print(dir(mod))",
"def load_file(path: str) -> List[object]:\n with open(path, \"r\") as input_file:\n manifests = yaml.load_all(input_file, Loader=yaml.SafeLoader)\n objs: List[object] = list()\n for manifest in manifests:\n obj_type: Optional[object] = get_type(manifest)\n if obj_type is None:\n LOG.warning(\"Unable to determine object type for manifest:\",\n manifest)\n else:\n objs.append(new_object(obj_type, manifest))\n return objs",
"def load_from_file(cls):\n\n try:\n list_of_ins = []\n with open(cls.__name__ + '.json') as my_file:\n dicts = Base.from_json_string(my_file.read())\n for key in dicts:\n list_of_ins += [cls.create(**key)]\n return (list_of_ins)\n except:\n return ([])",
"def load_modules(self):\n module_dir = os.path.dirname(__file__)\n names = [os.path.splitext(i) for i in os.listdir(module_dir)\n if os.path.isfile(os.path.join(module_dir, i))]\n # FIXME: sort 'plain' to start of list for devel.\n names.sort(key=lambda x: (not x[0].startswith('plain'), x[0]))\n modules = []\n for name in [i[0] for i in names if i[1].lower() == '.py']:\n try:\n modules.append(import_module('leo.plugins.editpane.' + name))\n DBG(f\"Loaded module: {name}\")\n except ImportError as e:\n DBG(\n f\"{e.__class__.__name__}: \"\n f\"Module not loaded (unmet dependencies?): {name}\")\n for module in modules:\n for key in dir(module):\n value = getattr(module, key)\n if hasattr(value, 'lep_type') and value not in self.widget_classes:\n if module not in self.modules:\n self.modules.append(module)\n self.widget_classes.append(value)\n self.widget_for[value.lep_type].append(value)",
"def load_from_file(cls):\n filename = cls.__name__ + \".json\"\n listOfInst = []\n try:\n with open(filename, \"r\") as f:\n listOfInst = cls.from_json_string(f.read())\n for num, val in enumerate(listOfInst):\n listOfInst[num] = cls.create(**listOfInst[num])\n except:\n pass\n return listOfInst"
] | [
"0.59589136",
"0.5919964",
"0.57883304",
"0.5713228",
"0.5680105",
"0.56653875",
"0.56545734",
"0.5643083",
"0.559432",
"0.5591948",
"0.55810237",
"0.55806607",
"0.54854405",
"0.5479954",
"0.5475797",
"0.5451154",
"0.54316515",
"0.5427745",
"0.5424476",
"0.5419658",
"0.5419105",
"0.5409858",
"0.53961915",
"0.53850555",
"0.53574055",
"0.53500056",
"0.5343914",
"0.5333561",
"0.53319067",
"0.5293788"
] | 0.7338112 | 0 |
Return the instructions for this Game. | def get_instructions(self) -> str:
    instructions = "Players take turns to occupy available positions " \
                   "on the " \
                   "board. Once half or more of a ley-line has been " \
                   "occupied by " \
                   "one player, that ley-line is entirely captured by " \
                   "said player. The winner is the person who captures " \
                   "half " \
                   "or more of the ley-lines first."
return instructions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_instructions(self):\n return \"A non-negative whole number is chosen as the starting \\n\" \\\n \"valueby some neutral entity. In our case, a player will \\n\" \\\n \"choose it (i.e. through the use of input. The player whose \\n\" \\\n \"turn it is chooses some square of a positive whole number (\\n\" \\\n \"such as 1, 4, 9, 16, . . . ) to subtract from the \\n\" \\\n \"value, provided the chosen square is not larger. After \\n\" \\\n \"subtracting, we have a new value and the next player \\n\" \\\n \"chooses a square to ubtract from it. Play continues\\n\" \\\n \" to alternate between the two players until no moves are\\n\" \\\n \" possible. Whoever is about to play at that point loses!\"",
"def getInstructions(self):\n\t\treturn \"\"",
"def instructions(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instructions\")",
"def instructions(self) -> List[str]:\n return list(self._map.keys())",
"def get_instructions(self) -> str:\n instruction = \"Duck need to fill this blank _____, which I have no idea what it is #$%^&*\"\n return instruction",
"def get_instructions(self):\n tmp_ins = []\n idx = 0\n for i in self.method.get_instructions():\n if idx >= self.start and idx < self.end:\n tmp_ins.append(i)\n\n idx += i.get_length()\n return tmp_ins",
"def instructions(self):\n return \"\\n\".join(\n [i for i in self.schema.instructions().split(\"\\n\") if i != \"Oppskrift\"]\n )",
"def plot_instructions(self):\n return self.__plot_instructions",
"def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')",
"def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")",
"def get_instructions(prog):\n insts = {}\n for i in range(prog.InstructionCount()):\n insts[i] = prog.setParam(i)\n return insts",
"def instructions():\n\t\n\tprint \\\n\t\"\"\"\n\tToday we will play the perennial favorite game of...\n\tRock! Paper!! Scissors!!!.\n\tThe objective of the game is to outthink your opponent (in this case me) and defeat.\n\tThe rules are very simple\n\t1. Paper covers the Rock\n\t2. Rock breaks the Scissors\n\t3. Scissors cut the Paper\n\t\n\tChoose your move from the following:\n\t1. Paper (p)\n\t2. Rock (r)\n\t3. Scissors (s)\n\t\n\tAre you ready? Alright then, let's play...\n\t\"\"\"",
"def draw_instructions_page(self):\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,\n SCREEN_WIDTH,\n SCREEN_HEIGHT + 1000, arcade.color.BLACK)\n\n # title, instructions\n arcade.draw_rectangle_filled(670, 570, 1035, 100, arcade.color.RED)\n arcade.draw_rectangle_filled(450, 350, 470, 250, arcade.color.WHITE)\n arcade.draw_rectangle_filled(450, 350, 447, 230, arcade.color.BLACK)\n arcade.draw_text(\"STREET RACER XTREME\", 168, 525, arcade.color.BLACK, 85)\n arcade.draw_text(\"CLICK TO START GAME!\", 760, 330, arcade.color.WHITE, 35)\n arcade.draw_text(\"Coins are 10 points each\", 320, 400, arcade.color.WHITE, 20)\n arcade.draw_text(\"Press space to use nitrous\", 320, 340, arcade.color.WHITE, 20)\n arcade.draw_text(\"Move with the arrow keys\", 320, 280, arcade.color.WHITE, 20)\n arcade.draw_text(\"!! DONT CRASH INTO ANYBODY !!\", 350, 100, arcade.color.RED, 40)",
"def get_current_instruction(self) -> Dict:\n\n instructions = self.environment.code.instruction_list\n return instructions[self.mstate.pc]",
"def instructions(self):\n yield self.inst\n for inst in self.arguments[:]:\n yield inst\n for basic_block in self.basic_blocks[:]:\n if basic_block.function is not None:\n yield basic_block.inst\n for inst in basic_block.insts[:]:\n yield inst\n yield self.end_inst",
"def instructions():\n\n instructions_text = 'Here is how to use this calculator:<br>'\n instructions_text += 'http://localhost:8080/ => These instructions<br>'\n instructions_text += 'To add: http://localhost:8080/add/23/42 => 65<br>'\n instructions_text += 'To subtract: http://localhost:8080/subtract/23/42 => -19<br>'\n instructions_text += 'To multiply: http://localhost:8080/multiply/3/5 => 15<br>'\n instructions_text += 'To divide: http://localhost:8080/divide/22/11 => 2'\n\n return instructions_text",
"def make_instructions(self):\n #de, aux, vers = self.rods\n de, aux, vers = 0, 1, 2\n n = self.num_rings\n\n self.recur(n, de, aux, vers)\n\n ### Add dummy tuple at end so I can look one move ahead on states\n self.instructions.append((0, 0, 0))",
"def get_alt_commands(self):\n return self.altcmds",
"def get_commands():\n return \"Commands:\\n 1 [Find shortest path between given cities]\\n 2 [Find shortest path between random cities]\\n 3 [See list of cities]\\n 4 [Close application]\\n\"",
"def instruction_names(self):\n return list(self.instruction_pool.keys())",
"def instruct(self):\n\t\tretval = \" `% Instructions\\r\\n\"\n\t\tretval += \"`0-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\\r\\n\"\n\t\tretval += \" `9WELCOME TO THE ADVENTURE OF A LIFETIME!\\r\\n\\r\\n\"\n\t\tretval += \" `0** `9Full Multi-Node Support.\\r\\n\"\n\t\tretval += \" `0** `9This game is FINISHABLE! (If the sysop chooses)\\r\\n\"\n\t\tretval += \" `0** `9Real Time Online Messages And Battles.\\r\\n\"\n\t\tretval += \" `0** `9Marrage And other 'Real Life' Options.\\r\\n\"\n\t\tretval += \" `0** `9RIP & In-Game Downloading Of Icons File Support. (Both are Auto Detect)\\r\\n\"\n\t\tretval += \" `0** `9Auto Reincarnation If A Player Is Dead For Two Days.\\r\\n\\r\\n\\r\\n\"\n\t\tretval += \" `2This is multi player battle game, created for BBS's, it is the\\r\\n\"\n\t\tretval += \" `2type of game where you kill other players, get stronger and stronger\\r\\n\"\n\t\tretval += \" `2and your number one goal is to stay #1 in the player rankings! Of\\r\\n\"\n\t\tretval += \" `2course, killing the Dreaded Red Dragon will make you a hero, and your\\r\\n\"\n\t\tretval += \" `2name will be immortalized in the Hall Of Honor.\\r\\n\\r\\n\"\n\t\tretval += \" `2Each day, you are given a certain amount of fights per day, once you\\r\\n\"\n\t\tretval += \" `2use them, you can no longer do battle that day, you must call back\\r\\n\"\n\t\tretval += \" `2the NEXT day to be 'refilled'.\\r\\n\\r\\n\"\n\t\tretval += \" `2Stay at the Inn, and you will be safe from `0MOST`2 attackers...If they\\r\\n\"\n\t\tretval += \" `2want to kill you bad enough, they may find a way...However costly.\\r\\n\\r\\n\"\n\t\tretval += \" `2Be sure to buy better armour and weapons when possible, it really makes\\r\\n\"\n\t\tretval += \" `2a LARGE difference. \\r\\n\\r\\n\"\n\t\tretval += \" `2Be sure to take advantage of the advanced mail writing functions\\r\\n\"\n\t\tretval += \" `2avaible, they are very fast and easy to use, and you will have LOADS\\r\\n\"\n\t\tretval += \" `2more fun when you get to `0KNOW`2 who you are killing!\\r\\n\\r\\n\"\n\t\tretval += \" `2Particapate in conversation at The Bar, interacting with real people\\r\\n\"\n\t\tretval += \" `2is what makes BBS games so enjoyable, and this game is loaded with ways\\r\\n\"\n\t\tretval += \" `2to do that... From insulting people in the Daily Happenings, to \\r\\n\"\n\t\tretval += \" `2slaughtering them in cold blood, then sending them mail gloating over\\r\\n\"\n\t\tretval += \" `2the victory, this game will let you have some fun!\\r\\n\"\n\t\tretval += \" `2The game is pretty self explanatory, so I will let you, the player, \\r\\n\"\n\t\tretval += \" `2explore on your own. Just hit '`0?`2' when you're not sure, and you will\\r\\n\"\n\t\tretval += \" `2get a menu. For starters, try visiting the Inn.\\r\\n\"\n\t\tretval += \" `2If you are male, try your hand at Flirting with Violet...If you\\r\\n\"\n\t\tretval += \" `2are female, you can try your luck with The Bard.\\r\\n\\r\\n\"\n\t\tretval += \" `2If someone else attacks you and loses, you will get the experience\\r\\n\"\n\t\tretval += \" `2just as if you killed them yourself. (You will be mailed on the\\r\\n\"\n\t\tretval += \" `2details of the battle)\\r\\n\\r\\n\"\n\t\tretval += \" `9NOTE: This game contains some mature subject matter.\\r\\n\\r\\n\"\n\t\tretval += \" `0GOOD LUCK AND HAPPY GAMING!`9\\r\\n\"\n\t\treturn retval",
"def _extract_instructions(self, xmltree):\r\n return get_instructions(xmltree)",
"def _extract_instructions(self, xmltree):\r\n return get_instructions(xmltree)",
"def _extract_instructions(self, xmltree):\r\n return get_instructions(xmltree)",
"def instruction_iter(self):\n for ins in self.instructions:\n yield ins",
"def instructions():\n running = True\n while running:\n for evnt in event.get():\n if evnt.type == KEYDOWN:\n if evnt.key == K_RETURN:\n return \"menu\"\n if evnt.type == QUIT:\n return \"exit\"\n # Drawing all of the instructions screen text and background\n screen.blit(backgroundPics[0],(0,0))\n screen.blit(instructHelp,(235,40))\n screen.blit(moveRightHelp,(80,130))\n screen.blit(moveLeftHelp,(80,170))\n screen.blit(jumpHelp,(80,210))\n screen.blit(crouchHelp,(80,250))\n screen.blit(pauseHelp,(80,290))\n screen.blit(musicPauseHelp,(80,330))\n screen.blit(backTextHelp,(650,450))\n screen.blit(titleSelect,(610,445))\n screen.blit(brickSprites[0][3], (375,400))\n display.flip()\n fpsCounter.tick(60)\n return \"menu\"",
"def instructions(self):\n for inst in self.global_insts[:]:\n yield inst\n for function in self.functions[:]:\n for inst in function.instructions():\n yield inst",
"def printInstructions(self):\n print(\"\"\"•\tAim of the Game is to be the first to lose all of your chips\n•\tPlayers are put in order of the lowest to \nhighest based on their first roll\n(This is done automatically when you enter your name)\n• You start out with 5 chips.\n• When it is your turn you roll the die.\n\\t•\tIf the space with the same number as the die is empty (value of 0),\n\\t\\tput a chip there.\n\\t•\tbut if there already is a chip there (value of 1), you must take it.\n\\t•\tIf you roll a 6, you always put one of your chips on the space number 6 – \n\\t\\tregardless of how many chips are there already. \n\\t\\tChips on space number 6 are out of the game,\n\\t\\tand you never pick these up again.\n\"\"\")",
"def instruction():\n print('- - - - - - - - - - - - - - - - - - - - -')\n print(\"this is instruction for tic tac toe game\".upper())\n print('- - - - - - - - - - - - - - - - - - - - -')\n print('This is game for two players')\n print('Each player can choose a number between 1 and 9')\n print('Numbers represent the fields on the board')\n print('You can choose only numbers that are not taken by any player')\n list_of_symbols = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n print_board(list_of_symbols)\n print('You win the game if you have 3 symbols in column, row or diagonally')\n print('- - - - - - - - - - - - - - - - - - - - -')\n\n begin_game()",
"def instructions():\n txt_naming = ConfigReader.texture_naming_dict()\n\n text = \"<b>Texture naming rules:</b><br>(put an underscore _ at the end of file name; \" \\\n \"you can enumerate textures using two digits after texture type without any other character\" \\\n \"<br> e.g. _normal01 or in general _normalXX)\" \\\n \"<br>\"\n\n for key, value in txt_naming.iteritems():\n text += \"<br>- {0}: {1}\".format(key, ', '.join(a for a in value['text']))\n\n text += \"<br>\"\n text += \"<br><b>File formats:</b>\"\n text += \"<br>Meshes:\"\n text += ConfigReader.generate_file_filter()\n text += \"<br>Textures:\"\n text += ConfigReader.generate_texture_filter()\n\n return text"
] | [
"0.7537964",
"0.7404567",
"0.6759493",
"0.66712695",
"0.66122705",
"0.63154495",
"0.5985097",
"0.5833046",
"0.58077806",
"0.57885665",
"0.5745979",
"0.5728191",
"0.56949335",
"0.56795263",
"0.5595347",
"0.5581683",
"0.55724883",
"0.547264",
"0.5459932",
"0.5457961",
"0.5450936",
"0.54382694",
"0.54382694",
"0.54382694",
"0.543408",
"0.54141754",
"0.5397812",
"0.5392521",
"0.53699905",
"0.53365695"
] | 0.7712404 | 0 |
Return whether player is the winner of the game. | def is_winner(self, player) -> bool:
return (self.current_state.get_current_player_name() != player
and self.is_over(self.current_state)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_winner(self, player):\n return (self.current_state.get_current_player_name() != player\n and self.is_over(self.current_state))",
"def is_winner(self):\n return self.winner",
"def winner(self):\n return self._fetch_element('winner') == 'true'",
"def is_winner(self):\n return self._winner != self.NEUTRAL_PLAYER",
"def is_winner(self, player: str) -> bool:\n total_result = self.current_state.hori_result + self.current_state.left_result + self.current_state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item == '2':\n p2_taken += 1\n if player == \"p1\":\n return float(p1_taken) >= total_line/2\n return float(p2_taken) >= total_line/2",
"def has_a_winner(self):\n return self.state in {State.X_WON, State.O_WON}",
"def winner(self):\n if self.__current_player == 1:\n if self.__fields[0].winner():\n print(self.__players[0]._Player__name + \"is winner!\")\n Game.play = False\n elif self.__current_player == 2:\n if self.__fields[1].winner():\n print(self.__players[1]._Player__name + \"is winner!\")\n Game.play = False",
"def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False",
"def has_winner(self):\n\n if self.num_black_pieces == 0 or len(self.get_all_valid_moves(Player.black)) == 0:\n return Player.white\n elif self.num_white_pieces == 0 or len(self.get_all_valid_moves(Player.white)) == 0:\n return Player.black\n elif self.repetition_happened() or self.passive_game():\n return \"Tie\"\n else:\n return None",
"def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0",
"def check_winner(self):\n if DotsAndBoxesState.score1 > 4: # Because the total score is fixed at nine, if player's score is greater than four,\n # then the player is the winner.\n return \"A\"\n else:\n return \"B\"",
"def is_winner(self, player, cell):\n \n if len(self.moves) > 2:\n # Need at least 3 moves for a player to win. Normally\n # would be 5, but turn enforcement is not implemented here.\n \n column = cell % 3\n row = cell - (cell % 3)\n diagonal = cell % 2 == 0\n \n victory = False\n \n # For these checks, we slice the cells in question out of the\n # board, compare them all to the player, and finally check that\n # each is True.\n if diagonal:\n victory = victory or \\\n all([c == player for c in self.cells[0:9:4]]) or \\\n all([c == player for c in self.cells[2:8:2]])\n \n victory = victory or \\\n all([c == player for c in self.cells[column:9:3]]) or \\\n all([c == player for c in self.cells[row:row+3]])\n \n return victory\n return False",
"def winFor(self,player):\n if(self.cachedWin == False):\n won = False;\n if(player==WHITE):\n for x in range(0,WIDTH):\n if(self.gameState[x,0]==WHITE):\n won = True\n \n elif(player==BLACK):\n for x in range(0,WIDTH):\n if(self.gameState[x,HEIGHT-1]==BLACK):\n won = True\n \n if(len(self.successors()) == 0):#IF there are no available moves for both players\n bCount = self.count(BLACK) #check who has the most pawns\n wCount = self.count(BLACK)\n if(bCount>wCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n if(wCount>bCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n \n if(won):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n else:\n return False\n else:\n return player == self.cachedWinner",
"def check_winner(self):\n\t\tif self.check_diagonals() or self.check_rows() or self.check_columns():\n\t\t\treturn True\n\t\telif self.board_is_full():\n\t\t\tprint(\"There was a draw, everyone lost\")\n\t\t\treturn None\n\t\treturn False",
"def hasWin(self) :\n comparison = self.compareNumberUser()\n if (comparison == 'equal') :\n return True\n else :\n return False",
"def winner(self):\n\n\t\tfor player in [1,2]:\n\t\t\twon = np.full((self.boardSize), player)\n\n\t\t\t# Check diagonals\n\t\t\tif(np.array_equal(np.diag(self.board), won)): return player\n\t\t\tif(np.array_equal(np.diag(np.fliplr(self.board)), won)): return player\n\n\t\t\t# Check lines and columns\n\t\t\tfor i in range(self.boardSize):\n\t\t\t\tif(np.array_equal(self.board[i], won)): return player\n\t\t\t\tif(np.array_equal(self.board[:,i], won)): return player\n\n\t\t# Draw\n\t\tif(not(0 in self.board)): return 3\n\n\t\t# No win or draw\n\t\treturn 0",
"def has_won(board, player):\r\n return False",
"def is_winner(self):\n for i in range(3):\n if (self.board[i][0] == self.board[i][1] == self.board[i][2]) \\\n and (self.board[i][0] != 0):\n return True, self.board[i][0]\n\n if self.board[0][i] == self.board[1][i] == self.board[2][i] \\\n and (self.board[0][i] != 0):\n return True, self.board[0][i]\n\n if self.board[0][0] == self.board[1][1] == self.board[2][2] \\\n and (self.board[0][0] != 0):\n return True, self.board[0][0]\n\n if self.board[2][0] == self.board[1][1] == self.board[0][2] \\\n and (self.board[2][0] != 0):\n return True, self.board[2][0]\n\n if self.available_combinations() == []:\n return False, 'end'\n\n return False, None",
"def has_won(board, player):\n return False",
"def check_winner(self):\n pass",
"def checkWinner(self, surface):\r\n winner = True\r\n \r\n # Checks for winner\r\n for point in self.points:\r\n if point.getTeam() == self.getTurn():\r\n winner = False\r\n \r\n # Displays winner message if there is a winner\r\n if winner:\r\n self.surface.fill(BLACK)\r\n winText = graphicalObjects.Text(self.getCurrentString() + ' wins!', WIN_CENTER, 20)\r\n winText.draw(self.surface)\r\n pygame.display.flip()\r\n self.won = True",
"def has_winner(self):\r\n\r\n\t\t\"Check for horizonal win\"\r\n\r\n\t\tfor x in range(0, 3):\r\n\r\n\t\t\tif self.game_board[x][0] == self.game_board[x][1] and self.game_board[x][1] == self.game_board[x][2]:\r\n\r\n\t\t\t\treturn self.game_board[x][0]\r\n\r\n\t\t\"Check for vertical win\"\r\n\r\n\t\tfor y in range(0, 3):\r\n\r\n\t\t\tif self.game_board[0][y] == self.game_board[1][y] and self.game_board[1][y] == self.game_board[2][y]:\r\n\r\n\t\t\t\treturn self.game_board[0][y]\r\n\r\n\t\t\"Check for diagonal from left to right\"\r\n\t\r\n\t\tif self.game_board[0][0] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][2]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.game_board[0][2] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][0]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.count == 8:\r\n\r\n\t\t\treturn \"Tie\"\r\n\r\n\t\telse:\r\n\r\n\t\t\treturn \"0\"\r\n\r\n\r\n\t\tpass",
"def winning_game_player(players):\n\n # in order for there to be a winner, the game must\n # be over\n if not game_over(players):\n return None\n\n # if the game is over, it could be that there is no\n # winner\n active_players = players_with_decks(players)\n if not active_players:\n return False\n\n # if the game is over than find the winner\n return players_with_decks(players)[0]",
"def check_winner(board):\n winner = get_winner(board)\n if winner:\n print(f\"Game Over, You Win\") if winner == \"X\" else print(\"Game Over, You Loose\") # noqa\n return winner",
"def determineWinner(self) -> bool:\n\n # Saving the board's rows, columns and diagonals in variables\n rows: List[List[str]] = self.board.getRows()\n columns: List[List[str]] = self.board.getColumns()\n diagonals: List[List[str]] = self.board.getDiagonals()\n\n # saving the board's rows, columns and diagonals in one list\n lines: List[List[str]] = [row for row in rows]\n for column in columns:\n lines.append(column)\n for diagonal in diagonals:\n lines.append(diagonal)\n\n # checking if either the AI or the human has three in a row, column or diagonal\n for symbol in [self.getPlayerSymbol(), self.getAiSymbol()]:\n for line in lines:\n if line.count(symbol) == 3:\n # human player wins\n if symbol == self.getPlayerSymbol():\n winner: Player = self.player\n\n # AI wins\n else:\n winner: Ai = self.ai\n print(f\"{winner.getName()} wins!\")\n return True\n return False",
"def is_game_won(board, player):\n\n\tis_won = False\n\n\tif (\n\t\tboard[0] == board[1] == board[2] == player or\n\t\tboard[3] == board[4] == board[5] == player or\n\t\tboard[6] == board[7] == board[8] == player or\n\t\tboard[0] == board[3] == board[6] == player or\n\t\tboard[1] == board[4] == board[7] == player or\n\t\tboard[2] == board[5] == board[8] == player or\n\t\tboard[0] == board[4] == board[8] == player or\n\t\tboard[2] == board[4] == board[6] == player\n\t):\n\t\tis_won = True\n\n\treturn is_won",
"def is_winner(self, player, cell):\n \n column = cell % 3\n row = cell - (cell % 3)\n diagonal = cell % 2 == 0\n \n victory = False\n \n cells, boards = zip(*self.cells)\n \n if diagonal:\n victory = victory or \\\n all([c == player for c in cells[0:9:4]]) or \\\n all([c == player for c in cells[2:8:2]])\n \n victory = victory or \\\n all([c == player for c in cells[column:9:3]]) or \\\n all([c == player for c in cells[row:row+3]])\n \n return victory\n return False",
"def is_game_won(self) -> int:\n\n b = self.board\n for c1, c2, c3, c4 in _WINDOWS:\n if b[c1] and (b[c1] == b[c2] == b[c3] == b[c4]):\n print(\"win\", c1, c2, c3, c4)\n return b[c1]",
"def _get_vertical_winner(self) -> bool:\n\n # Loop through all columns\n for col in range(self._board_size):\n # Get the entry of the first cell in the column\n winner = self._board[0][col]\n\n # Loop through all the rows\n for row in range(self._board_size):\n cell = self._board[row][col]\n\n # If we haven't already found a blank cell this row\n # and if the current cell is not our current value,\n # then set it to be a self.NEUTRAL_PLAYER (not a winner)\n if (winner != self.NEUTRAL_PLAYER and cell != winner) or cell == self.NEUTRAL_PLAYER:\n winner = self.NEUTRAL_PLAYER\n break\n\n # Set the winning player if anybody won\n if winner != self.NEUTRAL_PLAYER:\n self._winner = winner\n self._win_edges = ((col, 0), (col, self._board_size - 1))\n return True\n\n # If we make it this far, then we did not find a winner\n return False",
"def is_win(self, roster):\n player = roster.get_current()\n guess = player.get_move().get_guess()\n if guess == self._code:\n return True\n else:\n return False"
] | [
"0.8397136",
"0.8132984",
"0.8101946",
"0.8044826",
"0.7906446",
"0.7795879",
"0.7764098",
"0.773107",
"0.76601756",
"0.76403075",
"0.76173764",
"0.7454074",
"0.74401104",
"0.743083",
"0.73032343",
"0.72994506",
"0.72927463",
"0.7289262",
"0.7287912",
"0.7260246",
"0.72597",
"0.725625",
"0.7232909",
"0.7219158",
"0.7216302",
"0.7205354",
"0.71764463",
"0.716348",
"0.71190226",
"0.7105001"
] | 0.8399962 | 0 |
Return the move that string represents. If string is not a move, return an invalid move. | def str_to_move(self, string):
if not string.strip().isalpha():
return -1
return string.strip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def str_to_move(self, string):\n if not string.strip().isdigit():\n return -1\n\n return int(string.strip())",
"def get_move() -> str:\n msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '\n move = input(msg)\n while not wf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg) \n return move",
"def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)",
"def str_to_move(self, str1: str) -> Any:\n if not str1.strip().isalpha():\n return -1\n return str1.strip()",
"def _parse_move_statement(dlstr):\n\n try:\n tokens = dlstr.lower().split()\n if tokens[0] != \"move\":\n raise ValueError(\"Expected 'move' statement\")\n\n mtype, nmove, pfreq, rmin = \\\n tokens[1], int(tokens[2]), int(tokens[3]), float(tokens[4])\n except IndexError:\n raise ValueError(\"Badly formed 'move' statement?\")\n\n return mtype, nmove, pfreq, rmin",
"def get_move(character: dict, move_command: str) -> dict:\n\n movelist = get_character_movelist(character)\n\n move = list(filter(lambda x: (move_simplifier(x['Command'].replace(\"\\\\\",\"\"))\n == move_simplifier(move_command)), movelist))\n if not move:\n move = list(filter(lambda x: (is_command_in_alias(move_command, x)), movelist))\n\n if move:\n move[0]['Command'] = move[0]['Command'].replace(\"\\\\\",\"\")\n return move[0]\n else:\n return None",
"def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")",
"def get_next_move(self):\n return int(input('Enter your move: '))",
"def parse_move(move):\n if not (len(move) == 2):\n return None, None\n try:\n row = ord(move[0].upper()) - 65\n col = int(move[1])\n except:\n return None, None\n return row, col",
"def get_move_from_user(self):\n user_input = input(\"Move: \")\n if user_input == 'undo':\n return user_input\n try:\n move_list = user_input.split(\" \")\n move_list[1] = int(move_list[1])\n except:\n move_list = ['XX', 0, 'XX']\n return move_list",
"def get_next_move(self):\n if self.move == 'X':\n return 'O'\n return 'X'",
"def get_move(self):\n if self._difficulty == 0:\n return self._get_easy_move()\n else:\n # Different stategies/difficulties can be attached here\n return",
"def interactive_strategy(game: Game) -> str:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)",
"def get_move(self, find_move_name):\n frame_data = self._get_frame_data()\n sprites = self._get_sprites()\n\n # Need to check both names separately\n for move in frame_data.keys():\n if '\"' in find_move_name:\n temp_move_name = find_move_name.replace('\"', '')\n if temp_move_name == move:\n frame_data_name = move\n break\n else:\n continue\n elif find_move_name.lower() == move.lower():\n frame_data_name = move\n break\n\n else:\n for move in frame_data.keys():\n if find_move_name.lower() in move.lower():\n frame_data_name = move\n break\n else:\n raise MoveNotFound\n\n sprite_name = None\n\n # temporary fix for the 214/236B/22x/5AD meme\n if '214b' in frame_data_name.lower() and not '214bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '214A/B' in move:\n sprite_name = move\n break\n elif '236b' in frame_data_name.lower() and not '236bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '236A/B' in move:\n sprite_name = move\n break\n\n elif '22' in frame_data_name.lower():\n for move in sprites.keys():\n if '22A/B' in move and '22c' not in frame_data_name.lower():\n sprite_name = move\n break\n elif '22A/B/C' in move and '22c' in frame_data_name.lower():\n sprite_name = move\n break\n\n elif 'reversal' in frame_data_name.lower():\n for move in sprites.keys():\n if '5AD' in move:\n sprite_name = move\n break\n\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() == split_name.lower():\n sprite_name = move\n break\n elif move.lower() == frame_data_name.lower():\n sprite_name = move\n break\n else:\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() in split_name.lower():\n sprite_name = move\n break\n elif move.lower() in frame_data_name.lower() and '22' not in find_move_name:\n print('ok')\n sprite_name = move\n break\n elif find_move_name.lower() in move.lower():\n sprite_name = move\n break\n else:\n sprite_name = None\n\n if sprite_name is None:\n sprite = ''\n else:\n sprite = self._get_high_quality_sprite(sprites[sprite_name])\n\n return {\n frame_data_name: {\n 'fd': frame_data[frame_data_name],\n 'sprite': sprite\n }\n }",
"def from_string(dlstr):\n\n moves_volume = {\"vector\": VolumeVectorMove,\n \"ortho\": VolumeOrthoMove,\n \"cubic\": VolumeCubicMove}\n\n moves_mc = {\"atom\": AtomMove,\n \"molecule\": MoleculeMove,\n \"rotatemol\": RotateMoleculeMove,\n \"gcinsertatom\": InsertAtomMove,\n \"gcinsertmol\": InsertMoleculeMove}\n\n lines = dlstr.splitlines()\n tokens = lines[0].lower().split()\n if tokens[0] != \"move\" or len(tokens) < 4:\n raise ValueError(\"Expected: 'move key ...': got {!r}\".format(lines[0]))\n\n key = tokens[1]\n\n # We need to allow for possible DL key abbreviations\n if key.startswith(\"atom\"):\n key = \"atom\"\n if key.startswith(\"molecu\"):\n key = \"molecule\"\n if key.startswith(\"rotatemol\"):\n key = \"rotatemol\"\n\n inst = None\n if key == \"volume\":\n subkey = tokens[2]\n if subkey in moves_volume:\n inst = moves_volume[subkey].from_string(dlstr)\n else:\n if key in moves_mc:\n inst = moves_mc[key].from_string(dlstr)\n\n if inst is None:\n raise ValueError(\"Move unrecognised: {!r}\".format(dlstr))\n\n return inst",
"def _get_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_move when the game has not started!\")\n if isinstance(self.current_turn, Player):\n return self._get_player_move()\n elif isinstance(self.current_turn, Enemy):\n return self._get_enemy_move()\n else:\n raise TypeError(\"You're trying to move something that isn't a character or an adversary.\")",
"def move(self):\r\n their_move = self.last_moves[\"their_move\"]\r\n return (their_move == \"\" and random.choice(moves) or their_move)",
"def get_move(moves):\n pass",
"def parse_move_to_square(self, uci_move: str):\n chars = utils.split_string_to_chars(uci_move)\n square_from = ''.join(chars[0] + chars[1])\n square_to = ''.join(chars[2] + chars[3])\n return square_from, square_to",
"def move(self):\r\n move = None\r\n if self.last_move is None:\r\n move = rockyman.move(self)\r\n else:\r\n index = the_moves.index(self.last_move) + 1\r\n if index >= len(the_moves):\r\n index = 0\r\n move = the_moves[index]\r\n self.last_move = move\r\n return move",
"def fix_move(self, invalid_move: QMove):\n\n # TODO: reduce time_per_game second by second\n ERROR_MSG = f\"INVALID_MOVE {invalid_move.to_string()}\"\n\n if self.is_ai and self.proc is not None:\n self.proc.stdin.write(str.encode(ERROR_MSG + '\\n'))\n self.proc.stdin.flush()\n new_move = QMove(os.read(self.proc.stdout.fileno(), 100))\n else:\n new_move = QMove(\n input(\"Move was invalid, enter a valid move:\\n\\t>> \"))\n\n return new_move",
"def get_move(self, game_state: BotGameState) -> BotMove:\n return",
"def player_move():\n\tmove = None\n\twhile move not in moves:\n\t\tmove = raw_input(\"What is your move %s? --> \" % name)\n\treturn move",
"def get_move(state):\n entry = game_states[get_values(state)]\n options = list()\n\n for move in entry:\n move_result = entry[move]\n if move_result == 'Y':\n return move\n elif move_result == 'N':\n continue\n options.extend([move]*move_result)\n return choice(options)",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction",
"def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction"
] | [
"0.7085393",
"0.6925762",
"0.677684",
"0.646239",
"0.6434038",
"0.64127743",
"0.6365554",
"0.63283306",
"0.63080925",
"0.62785333",
"0.6193179",
"0.6175988",
"0.6075098",
"0.6073601",
"0.6036474",
"0.59770846",
"0.59690195",
"0.5934326",
"0.59339803",
"0.59236306",
"0.5923513",
"0.5891884",
"0.58598125",
"0.5845519",
"0.58430475",
"0.58430475",
"0.58417284",
"0.58417284",
"0.58417284",
"0.58298236"
] | 0.7044586 | 1 |
Return an estimate in interval [LOSE, WIN] of best outcome the current player can guarantee from state self. | def rough_outcome(self) -> float:
    if is_win(self):
        return 1
    elif is_lose(self):
        return -1
    return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? what the fuck does this mean\n # look two states forward\n pass",
"def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move",
"def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move",
"def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move",
"def reward(self, winner):\n if winner == self.side:\n return self.win\n elif winner == VALUES.NOT_FINISHED:\n return self.not_finished\n elif winner == VALUES.DRAW:\n return self.draw\n else:\n return self.lose",
"def calc_winner(self):\n pass",
"def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))",
"def rough_outcome(self) -> float:\n\n if self.p1_turn:\n name = '2'\n else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = (self.get_possible_moves() == []) or \\\n (count >= 0.5 * len(self.claim))\n\n result = []\n if over:\n return -1\n else:\n for move in self.get_possible_moves():\n new_state = self.make_move(move)\n if new_state.rough_outcome() == -1:\n result.append(1)\n else:\n result.append(0)\n if 1 in result:\n return 1\n return -1",
"def __status(self):\r\n if self.__currentCell == self.storageCell:\r\n return Status.WIN\r\n\r\n if self.__totalReward < self.__rewardThreshold: # force end of game after to much loss\r\n return Status.LOSE\r\n\r\n return Status.PLAYING",
"def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj",
"def winner(self):\n state = self._state['visible']\n if state['reserve'][0] < 1:\n return 1\n elif state['reserve'][1] < 1:\n return 0\n return -1",
"def winner(self):\n\n if self.home_score > self.away_score:\n return HOME\n elif self.home_score < self.away_score:\n return VISITOR\n else:\n return TIE",
"def evaluateWinner(self):\n\t\tif self.pots[-1] == 0:\n\t\t\tself.pots.pop()\n\t\tlivePlayers = self.getLivePlayers()\t\n\t\tfor i in range(len(self.pots)):\n\t\t\tplayers = self.getPlayersInPot(i, livePlayers)\n\t\t\tevaluations = []\n\t\t\tfor x in players:\n\t\t\t\tcombined = x.hand + self.communityCards\n\t\t\t\tevaluations.append((x, self.evaluator.getRankOfSeven(\tcombined[0], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[1], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[2], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[3], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[4], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[5], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[6] )))\n\t\t\twinners = self.getWinners(evaluations, i)\n\t\t\tself.handOutMoney(winners, i)\n\t\t\tself.potwinQ.append(winners[0].name)",
"def _determine_outcome(\n self,\n accept: AcceptanceCriterion,\n best: State,\n curr: State,\n cand: State,\n ) -> Outcome:\n outcome = Outcome.REJECT\n\n if accept(self._rnd_state, best, curr, cand): # accept candidate\n outcome = Outcome.ACCEPT\n\n if cand.objective() < curr.objective():\n outcome = Outcome.BETTER\n\n if cand.objective() < best.objective(): # candidate is new best\n logger.info(f\"New best with objective {cand.objective():.2f}.\")\n outcome = Outcome.BEST\n\n return outcome",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def predict_winner(self):\n\t\tif len(self.players) > 1:\n\t\t\t# TODO: convert to using of max() function\n\t\t\twinner = self.players[0]\n\t\t\tfor player in self.players:\n\t\t\t\tif player.wr > winner.wr:\n\t\t\t\t\twinner = player\n\t\t\treturn winner\n\t\telse:\n\t\t\treturn None",
"def Pwin(state):\n # Assumes opponent also plays with optimal strategy\n p, me, you, pending = state\n if me + pending >= goal:\n return 1\n elif you >= goal:\n return 0\n else:\n return max(Q_pig(state, action, Pwin) for action in pig_actions(state))",
"def utility(self, state, player):\n if state.isWin() or state.isLose():\n return state.getScore()\n\n # In case of cycle.\n if player == PACMAN:\n return INFINITY\n else:\n return -INFINITY",
"def get_winner(self) -> int:\n return self._win_state",
"def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]",
"def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0",
"def showBestGainWon(self) :\n bestGainWon = 0\n for level in self.level_history :\n bestGainWon = level.profit if bestGainWon < level.profit else bestGainWon\n Scenario.messageGetBestGainWon(bestGainWon)",
"def Pwin(state):\n # Assumes opponent also plays with optimal strategy.\n (p, me, you, pending) = state\n if me + pending >= goal:\n return 1\n elif you >= goal:\n return 0\n else:\n return max(Q_pig(state, action, Pwin)\n for action in pig_actions(state))",
"def evaluate(self, mode=0):\r\n winner = self.determine_winner()\r\n if winner:\r\n return winner * self.WIN_SCORE\r\n\r\n if mode == 1:\r\n return self.centre_priority_evaluate()\r\n elif mode == 2:\r\n return 0.5 * (self.centre_priority_evaluate() + self.piece_evaluate())\r\n else:\r\n return self.piece_evaluate()",
"def evaluate_state(state):\n\n my_score = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n other_score = get_action_score(state.action[0], state.action[1], state.player, state.occupied)\n \n return max(my_score, other_score)",
"def showWorstGainWon(self) :\n worstGainWon = self.level_history[0].profit\n for level in self.level_history :\n worstGainWon = level.profit if ((worstGainWon > level.profit) and (level.result == 1)) else worstGainWon\n Scenario.messageGetWorstGainWon(worstGainWon)",
"def get_winner(state):\n state_val = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n if state_val == 100:\n return state.action_player\n elif len(state.available_moves) == 0:\n return 0\n else:\n return -1",
"def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result",
"def evaluate_board(self, board):\n \n win_score = 100\n win_or_loss_score = 50\n lose_score = 0\n \n if board.win_for(self.opponent()):\n return lose_score\n if board.win_for(self.side):\n return win_score\n if not board.win_for(self.side) or not board.win_for(self.opponent()):\n return win_or_loss_score",
"def minimax_decision(gameState):\n value = -sys.maxsize\n best_value = -sys.maxsize\n best_move = None\n legal_moves = gameState.get_legal_moves()\n for move in legal_moves:\n game = gameState.forecast_move(move)\n value = max(value, min_value(game))\n if value > best_value:\n best_value = value\n best_move = move\n return best_move"
] | [
"0.73625016",
"0.7059583",
"0.7059583",
"0.7059583",
"0.7020939",
"0.6829376",
"0.6702647",
"0.6694379",
"0.66560304",
"0.6624065",
"0.6616302",
"0.6588654",
"0.6566183",
"0.6555869",
"0.6547012",
"0.6531186",
"0.6518109",
"0.6501664",
"0.6488798",
"0.6458663",
"0.6427033",
"0.64219874",
"0.6408247",
"0.6403664",
"0.6400599",
"0.63933843",
"0.6387703",
"0.6387276",
"0.6378572",
"0.6365341"
] | 0.7338502 | 1 |
Return a dash definition of an HTML table for a Pandas dataframe | def make_dash_table(df):
    table = []
    for index, row in df.iterrows():
        html_row = []
        for i in range(len(row)):
            html_row.append(html.Td([row[i]]))
        table.append(html.Tr(html_row))
    return table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_dash_table(selection, df):\n\n df_subset = df.loc[df[\"NAME\"].isin(selection)]\n table = []\n\n for index, row in df_subset.iterrows():\n rows = []\n rows.append(html.Td([row[\"NAME\"]]))\n rows.append(html.Td([html.Img(src=row[\"IMG_URL\"])]))\n rows.append(html.Td([row[\"FORM\"]]))\n rows.append(\n html.Td([html.A(href=row[\"PAGE\"], children=\"Datasheet\", target=\"_blank\")])\n )\n table.append(html.Tr(rows))\n\n return table",
"def Table(dataframe, link_column_name=None, col1=None, col2=None, drop=[]):\n if link_column_name:\n if col2:\n links1 = dataframe[link_column_name] \\\n .map(lambda x: x.replace(' ', '').split(';')[0]).values\n links2 = dataframe[link_column_name] \\\n .map(lambda x: x.replace(' ', '').split(';')[1]).values\n else:\n links1 = dataframe[link_column_name] \\\n .map(lambda x: x.replace(' ', '')).values\n rows = []\n for i in range(len(dataframe)):\n row = []\n for col in dataframe.columns:\n if (col in [link_column_name] + drop) is False:\n value = dataframe.iloc[i][col]\n if col in [col1, col2]:\n if col == col2:\n cell = html.Td(dcc.Link(href=links2[i], children=value))\n else:\n cell = html.Td(dcc.Link(href=links1[i], children=value))\n else:\n cell = html.Td(children=value)\n row.append(cell)\n rows.append(html.Tr(row,\n style={\n 'color': '#7FDBFF',\n 'fontSize': '18px',\n }))\n return html.Table(\n # Header\n [html.Tr([html.Th(col,\n style={\n 'background-color': '#111111',\n 'color': '#7FDBFF',\n 'fontSize': '20px',\n }) \\\n for col in dataframe.columns if (col in [link_column_name] + drop) is False])] + \\\n rows,\n style={'width':'100%'}\n )",
"def disp(df):\n display(HTML(df.to_html(index=False)))",
"def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html",
"def Table(data):\n data.columns = [\"Title\", \"Author\", \"Date\"]\n return dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in data.columns],\n data=data.to_dict(\"rows\"),\n style_as_list_view=True,\n sorting=True,\n style_header={\n 'fontWeight': 'bold'\n },\n style_cell={\n },\n style_cell_conditional=[\n {'if': {'column_id': 'Title'},\n 'textAlign': 'left'},\n {'if': {'column_id': 'Author'},\n 'textAlign': 'left'},\n {'if': {'column_id': 'Date'},\n 'textAlign': 'center'},\n ],\n style_data={'whiteSpace': 'normal'},\n )",
"def html_data_table(self):\n return \"XXX\"",
"def df2html(df, name=None, dom=\"Brt\", show_index=False, pageLength=15):\n\n if name is None:\n name = uuid.uuid1().time_low\n # looks like datatable does not like ID made of numbers, even in string\n # so we convert to ABCDEFGH values\n name = \"\".join([chr(65 + int(x)) for x in str(name)])\n\n datatable = DataTable(df, name, index=show_index)\n datatable.datatable.datatable_options = {\n \"pageLength\": pageLength,\n \"scrollCollapse\": \"false\",\n \"dom\": dom,\n \"buttons\": [\"copy\", \"csv\"],\n }\n\n # identify links (columns ending in _links)\n df.columns = [str(x) for x in df.columns]\n for column in df.columns:\n if column.endswith(\"_links\"):\n prefix = column.replace(\"_links\", \"\")\n if prefix in df.columns:\n datatable.datatable.set_links_to_column(column, prefix)\n\n js = datatable.create_javascript_function()\n html = datatable.create_datatable(float_format=\"%.6g\")\n return js + html",
"def generate_table(dataframe, max_rows=10):\n return html.Table([\n html.Thead(\n html.Tr([html.Th(col) for col in dataframe.columns])\n ),\n html.Tbody([\n html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(min(len(dataframe), max_rows))\n ])\n ])",
"def _construct_html_table(self, df: Table) -> str:\n string = attach_tag_tr('\\n'.join(map(attach_tag_th, df.columns)))\n stringified_df = _stringify_table(df)\n\n for (i, row_elements) in stringified_df.iterrows():\n # Commented out code is a bit sleaker, but restrictive\n #string += '\\n' + attach_tag_tr('\\n'.join(map(attach_tag_td,\n # row_elements)))\n table_content: List = []\n for col, val in row_elements.iteritems():\n if col == 'cost':\n table_content.append(attach_tag_td_rjust(val))\n else:\n table_content.append(attach_tag_td(val))\n\n string += '\\n' + attach_tag_tr('\\n'.join(table_content))\n\n return attach_tag_table(\n attach_tag_caption(f'All Costs of {self.trip_title}')\n + '\\n'\n + attach_tag_tbody(string))",
"def render(cls, df: DataFrame, *args, **kwargs):\n from labext.widgets.data_table import DataTable\n dt = DataTable(df, *args, **kwargs)\n display(dt.widget, *dt.get_auxiliary_components())",
"def df_to_html(df, img_formatter=images_formatter):\n pd.set_option(\"display.max_colwidth\", -1)\n pd.set_option(\"display.max_columns\", -1)\n cond_formatter = lambda imgs: images_formatter(imgs, col=1)\n html_table = df.to_html(\n formatters={\n \"cond_imgs\": cond_formatter,\n \"out_imgs\": img_formatter,\n \"feat_imgs\": img_formatter,\n \"cond_feat\": img_formatter,\n \"ini_imgs\": img_formatter,\n },\n escape=False,\n border=0,\n )\n html = \"\"\"\n <html>\n <style>\n td{{\n border: 1px solid #444444;\n padding: 5px;\n }}\n table {{ \n border-spacing: 0px;\n border-collapse: separate;\n }}\n tr:nth-child(even) {{\n background: #f2f2f2; \n }}\n \n </style>\n\n <body>\n {}\n\n <br><br> <br><br> <br><br> <br><br> <br><br> \n </body>\n </html>\n \"\"\".format(\n html_table\n )\n return html",
"def generate_table(df, max_rows: int=20):\n return html.Table([\n html.Thead(\n html.Tr([html.Th(col) for col in df.columns])\n ),\n html.Tbody([\n html.Tr([\n html.Td(df.iloc[i][col]) for col in df.columns\n ]) for i in range(min(len(df), max_rows))\n ])\n ])",
"def _repr_html_(self):\n return html_table(self)",
"def as_html(self, max_rows=0):\n if not max_rows or max_rows > self.num_rows:\n max_rows = self.num_rows\n omitted = max(0, self.num_rows - max_rows)\n labels = self.column_labels\n lines = [\n (0, '<table border=\"1\" class=\"dataframe\">'),\n (1, '<thead>'),\n (2, '<tr>'),\n (3, ' '.join('<th>' + label + '</th>' for label in labels)),\n (2, '</tr>'),\n (1, '</thead>'),\n (1, '<tbody>'),\n ]\n fmts = [self._formats.get(k, self.format_column(k, v[:max_rows])) for\n k, v in self._columns.items()]\n for row in itertools.islice(self.rows, max_rows):\n lines += [\n (2, '<tr>'),\n (3, ' '.join('<td>' + fmt(v) + '</td>' for v, fmt in zip(row, fmts))),\n (2, '</tr>'),\n (1, '</tbody>'),\n ]\n lines.append((0, '</table>'))\n if omitted:\n lines.append((0, '<p>... ({} rows omitted)</p'.format(omitted)))\n return '\\n'.join(4 * indent * ' ' + text for indent, text in lines)",
"def df_to_html(df, percentage_columns=None): # pragma: no cover\n big_dataframe_setup()\n try:\n res = \"<br><h2> {} </h2>\".format(df.name)\n except AttributeError:\n res = \"\"\n df.style.set_properties(**{\"text-align\": \"center\"})\n res += df.to_html(\n formatters=_formatters_dict(\n input_df=df, percentage_columns=percentage_columns\n )\n )\n res += \"<br>\"\n return res",
"def create_html_layout(self):\n page = \"\"\"<!DOCTYPE html>\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n </head>\n </html>\n <head>\n \t<meta charset=\"UTF-8\">\n </head>\n <body>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm\">\n <h4>eda report: Exploratory data analysis</h4>\n </div>\n <div class=\"col-sm\">\n <h3>Inspecting dataframe of size: {size}\n </div>\n </div>\n </div>\n \t<table class=\"table table-hover\" style=\".table\">\n <thead>\n <tr style=\"font-size: 15px;\">\n <th width=\"5%\" align=\"left\" scope=\"col\">Variable Name</th>\n <th width=\"12%\" align=\"left\" scope=\"col\">Data Type</th>\n <th width=\"15%\" align=\"left\" scope=\"col\">Histogram</th>\n <th width=\"11%\" align=\"left\" scope=\"col\">Stats</th>\n <th width=\"7%\" align=\"left\" scope=\"col\">Missing NA</th>\n <th width=\"5%\" align=\"left\" scope=\"col\">Outliers</th>\n </tr>\n </thead>\n <tbody>\"\"\".format(size=self.df.size)\n\n end_page = \"\"\" \n </tbody>\n </table>\n </body>\n \"\"\"\n rows_html = []\n for i, column in enumerate(self.df.columns):\n Summary = ColumnSummary(data=self.df[column])\n datatype = Summary.data_type()\n missing = Summary.missing_values()\n stats = Summary.statistic_summary()\n outliers = Summary.outliers()\n Summary.create_histogram(i)\n html = f\"\"\"\n <tr>\n <td style=\"font-size: 15px;\" width=\"10%\" align=\"left\"> {column}</td>\n <td style=\"font-size: 15px;\"width=\"10%\" align=\"left\"> {datatype}</td>\n <td><img class=\"img-fluid\" src=\"hist_images/histogram{i}.png?{random.randint(0,\n 2e9)}\" style=\"width:800px\"> </td>\n <td style=\"font-size: 15px;\">mean: {stats.mean}<br>\n mode: {stats.mode}<br><br>\n min: {stats.min}<br>\n max: {stats.max}<br><br>\n lower-bound: {stats.lower}<br>\n upper-bound: {stats.upper}<b</td>\n <td style=\"font-size: 15px;\">{missing}</td>\n <td style=\"font-size: 15px;\">{outliers}</td>\n </tr>\n \"\"\"\n rows_html.append(html)\n\n merged_html = page + \"\".join(rows_html) + end_page\n return merged_html",
"def enable():\n pd.options.display.html.table_schema = True",
"def display_side_by_side(*args):\r\n\r\n html_string = ''\r\n for df in args:\r\n html_string += df.to_html(index=False, header=True)\r\n display_html(html_string.replace('table',\r\n 'table style=\"display:inline\"'), raw=True)",
"def as_html(table): \n if isinstance(table,Table):\n html = \"<table width=\\\"\" + str(table.total_width()) + \"\\\"\" + table.html_attributes + \" ><colgroup>\\n\"\n if table.col_width_dict:\n for i in range(table.no_of_columns()):\n html += \"<col width=\\\"\" + str(table.col_width_percent(i)) + \"%\\\"/>\\n\"\n html += \"</colgroup><tbody>\\n\" \n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<th width=\\\"\"+str(table.col_width_percent(c))+\"%\\\">\" + table.cell(0,c) +\"</th>\"\n row += \"</tr>\\n\"\n html += row\n for r in range(1,table.no_of_rows()):\n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<td>\" + table.cell(r,c) + \"</td>\"\n row += \"</tr>\\n\"\n html += row\n return mark_safe(html)\n else:\n return table",
"def data_table(\n filepath=\"sparkify_data.csv\",\n title=\"Engineered Features Dataframe\",\n ):\n df = read_data_csv(filepath)\n fig = go.Figure(\n data=[\n go.Table(\n header=dict(\n values=list(df.columns), align=\"left\"\n ),\n cells=dict(\n values=[df[col] for col in df.columns],\n align=\"left\",\n ),\n )\n ]\n )\n\n fig.update_layout(title=go.layout.Title(text=title, x=0.5))\n\n return fig",
"def data_table_page( table_type ) :\r\n logger.debug( f\"table_type={table_type}\" )\r\n model = session_info.get_user_model(session)\r\n\r\n # select table type's corresponding data\r\n if table_type == \"x\" :\r\n df = model._dfX\r\n elif table_type== \"y\" :\r\n df = model._dfY\r\n elif table_type == \"merged\" :\r\n df = model.dfMerged\r\n elif table_type == \"param\" :\r\n param = request.args[\"param\"]\r\n logger.debug(f\"param={param}\")\r\n df = model.dfMerged[[ model.id_col , f\"{param}_x\", f\"{param}_y\"]]\r\n else :\r\n logger.debug()\r\n raise ValueError( f\"Unrecognized table_type={table_type}\" )\r\n \r\n return f\"<pre>{df.to_string()}</pre>\" # TODO replace with template\r",
"def table_to_html(df, ev, html_id=\"\", add_class=\"\"):\n formatters = ev.getColumnFormatters(df)\n\n # apply sortlevel\n df = ev.sortDataFrame(df)\n\n tableclasses = 'ipet-table rb-table-data {}\" width=\"100%'.format(add_class)\n\n htmlstr = df.to_html(border=0,\n na_rep=NONE_DISPLAY, formatters=formatters, justify=\"right\",\n table_id=html_id, classes=tableclasses)\n\n return html.fromstring(htmlstr)",
"def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore",
"def glue_table(name: str, df: pd.DataFrame, build_path=\"_build\"):\n\n if not os.path.exists(build_path):\n os.mkdir(build_path)\n df.to_excel(os.path.join(build_path, f\"{name}.xlsx\"))\n\n glue(name, df)",
"def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list",
"def _html_repr(self):\n html = '<table id=%s>' % (self._id,)\n\n for row in range(self.rows):\n html += '<tr>'\n for col in range(self.columns):\n if row == 0 and self.header_row or col == 0 and self.header_column:\n tag = 'th'\n else:\n tag = 'td'\n html += '<%(tag)s id=%(id)s></%(tag)s>' % {\n 'tag': tag,\n 'id': self._get_cell_id(row, col),\n }\n html += '</tr>'\n html += '</table>'\n return html",
"def render(self, project=None, total_records=None):\n # TODO check for index column in df other than the default numbering\n table = json.dumps(self.to_backgrid_dict())\n if total_records is None:\n total_records = self.shape[0]\n uuids = [str(uuid.uuid4()) for i in range(3)]\n juuids, jproject = json.dumps(uuids), json.dumps(project)\n html = f'<div id=\"{uuids[0]}\"></div>'\n html += f'<div id=\"{uuids[1]}\" style=\"width:100%;\"></div>'\n html += f'<div id=\"{uuids[2]}\"></div>'\n html += f'<script>render_table({{\\\n total_records: {total_records}, project: {jproject},\\\n uuids: {juuids}, table: {table}\\\n }})</script>'\n return html",
"def df_to_table(slide, df, left, top, width, height, colnames=None):\n rows, cols = df.shape\n #print('rows=',rows,'cols=',cols)\n res = slide.shapes.add_table(rows + 1, cols, left, top, width, height)\n\n if colnames is None:\n colnames = list(df.columns)\n\n # Insert the column names\n for col_index, col_name in enumerate(colnames):\n # Column names can be tuples\n if not isinstance(col_name, str):\n col_name = \" \".join(col_name)\n res.table.cell(0, col_index).text = col_name\n paragraph = res.table.cell(0, col_index).text_frame.paragraphs[0]\n paragraph.font.size = Pt(15)\n paragraph.alignment = PP_ALIGN.CENTER\n res.table.cell(0, col_index).fill.solid()\n res.table.cell(0, col_index).fill.fore_color.rgb = RGBColor(255,100,0)\n #print(col_name)\n\n m = df.to_numpy()\n #print('m numpy array:',m)\n\n\n for row in range(rows):\n for col in range(cols):\n val = m[row, col]\n text = str(val)\n res.table.cell(row + 1, col).text = text\n paragraph = res.table.cell(row+1, col).text_frame.paragraphs[0]\n paragraph.font.size = Pt(12)\n paragraph.font.color.rgb = RGBColor(0, 0, 0) # use black color for now \n res.table.cell(row+1, col).fill.background()",
"def as_html(self): # pragma: no cover\n\n return render_to_string(\n self._meta.template,\n { \"table\": self } )",
"def markdown_table(self):\n table_data = [\n [i + 1, filt.__class__.__name__, f'{filt.fc:.0f}', f'{filt.q:.2f}', f'{filt.gain:.1f}']\n for i, filt in enumerate(self.filters)\n ]\n return tabulate(\n table_data,\n headers=['#', 'Type', 'Fc (Hz)', 'Q', 'Gain (dB)'],\n tablefmt='github'\n )"
] | [
"0.7303146",
"0.7150178",
"0.71465176",
"0.7117627",
"0.68682384",
"0.6780832",
"0.6762202",
"0.67041314",
"0.6699682",
"0.6674215",
"0.66341263",
"0.6611604",
"0.6597323",
"0.64610684",
"0.64565516",
"0.64484704",
"0.64449763",
"0.64157796",
"0.6381783",
"0.63720167",
"0.6367736",
"0.6349433",
"0.6295628",
"0.62728727",
"0.62485015",
"0.6242766",
"0.62353605",
"0.6215395",
"0.619636",
"0.61916304"
] | 0.76284516 | 0 |
Initialises the attributes and properties of the AsyncTabata. | async def init_device(self):
    await Device.init_device(self)
    # PROTECTED REGION ID(AsyncTabata.init_device) ENABLED START #
    self.logger = logging.getLogger(__name__)
    self._lock = threading.Lock()
    self._dev_factory = DevFactory()
    self._prepare = 10
    self._work = 20
    self._rest = 10
    self._cycles = 8
    self._tabatas = 1
    self._running_state = RunningState.PREPARE
    self.subscribed = False
    self.set_state(DevState.OFF)
    # The below commented commands are not really needed
    # since in GreenMode.Asyncio mode the monitor
    # lock is disabled by default.
    # util = tango.Util.instance()
    # util.set_serial_model(tango.SerialModel.NO_SYNC)
    # PROTECTED REGION END # // AsyncTabata.init_device | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _init(self, **kwargs):",
"async def init(self) -> None:",
"async def init(self) -> None:",
"def read_tabatas(self):\n # PROTECTED REGION ID(AsyncTabata.tabatas_read) ENABLED START #\n return self._tabatas\n # PROTECTED REGION END # // AsyncTabata.tabatas_read",
"async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)",
"async def initialize(self):",
"def init_attrs(self):\n raise NotImplementedError",
"def __init__(self):\n self.uris = AsyncSet('uris')\n self.sockets = AsyncSet('sockets')",
"def init_tab(self):",
"def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data",
"def __init__(self, asynchronous=False):\n self.app: VisModel = None\n self.asynchronous = asynchronous\n self.config = {}",
"async def async_setup(self):\n\n async def async_update(event_time):\n \"\"\"Update device.\"\"\"\n queue = [entry for entry in self._queue[0:PARALLEL_CALLS]]\n for _ in queue:\n self._queue.append(self._queue.pop(0))\n\n for data_class in queue:\n if data_class[\"next_scan\"] > time():\n continue\n self._data_classes[data_class[\"name\"]][\"next_scan\"] = (\n time() + data_class[\"interval\"]\n )\n try:\n self.data[\n data_class[\"name\"]\n ] = await self.hass.async_add_executor_job(\n partial(data_class[\"class\"], **data_class[\"kwargs\"],),\n self._auth,\n )\n async_dispatcher_send(\n self.hass, f\"netatmo-update-{data_class['name']}\"\n )\n except (pyatmo.NoDevice, pyatmo.ApiError) as err:\n _LOGGER.debug(err)\n\n async_track_time_interval(\n self.hass, async_update, timedelta(seconds=SCAN_INTERVAL)\n )\n\n async def handle_event(event):\n \"\"\"Handle webhook events.\"\"\"\n if event.data[\"data\"][\"push_type\"] == \"webhook_activation\":\n _LOGGER.info(\"%s webhook successfully registered\", MANUFACTURER)\n self._webhook = True\n\n self.hass.bus.async_listen(\"netatmo_event\", handle_event)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def do_init(self):\n\n pass",
"async def async_setup(self):\n pass",
"async def async_setup(self) -> None:\n if not self.aiolivisi.livisi_connection_data:\n livisi_connection_data = {\n \"ip_address\": self.config_entry.data[CONF_HOST],\n \"password\": self.config_entry.data[CONF_PASSWORD],\n }\n\n await self.aiolivisi.async_set_token(\n livisi_connection_data=livisi_connection_data\n )\n controller_data = await self.aiolivisi.async_get_controller()\n if controller_data[\"controllerType\"] == \"Avatar\":\n self.port = AVATAR_PORT\n self.is_avatar = True\n else:\n self.port = CLASSIC_PORT\n self.is_avatar = False\n self.serial_number = controller_data[\"serialNumber\"]\n self.controller_type = controller_data[\"controllerType\"]",
"def initialise(self):\n self.set_up()",
"def main(args=None, **kwargs):\n # PROTECTED REGION ID(AsyncTabata.main) ENABLED START #\n debugpy.listen(5678)\n kwargs.setdefault(\"green_mode\", GreenMode.Asyncio)\n return run((AsyncTabata,), args=args, **kwargs)\n # AsyncTabata.run_server()\n # PROTECTED REGION END # // AsyncTabata.main",
"def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass",
"def init(self, userdata, conn):\r\n pass"
] | [
"0.6283458",
"0.62262726",
"0.62262726",
"0.6223375",
"0.6218252",
"0.6105574",
"0.60600656",
"0.5998838",
"0.5786528",
"0.5765079",
"0.576342",
"0.57501346",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.5742235",
"0.57032156",
"0.5692514",
"0.5688433",
"0.5631292",
"0.5614722",
"0.5579264",
"0.5546673"
] | 0.65999895 | 0 |
Set the rest attribute. | def write_rest(self, value):
    # PROTECTED REGION ID(AsyncTabata.rest_write) ENABLED START #
    if value < 1:
        raise Exception("only positive value!")
    if self.get_state() == DevState.ON:
        raise Exception("cannot change values when device is running!")
    with self._lock:
        self._rest = value
    # PROTECTED REGION END # // AsyncTabata.rest_write | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, rest=AuthenticatingRestProxy(RestRequests())):\n self._rest = rest",
"def rest(self):\n\t\tpass",
"def apply_restoffset(armature, hipbone, restoffset):\r\n # apply rest offset to restpose\r\n bpy.context.scene.objects.active = armature\r\n bpy.ops.object.mode_set(mode='EDIT')\r\n bpy.ops.armature.select_all(action='SELECT')\r\n bpy.ops.transform.translate(value=restoffset, constraint_axis=(False, False, False),\r\n constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED',\r\n proportional_edit_falloff='SMOOTH', proportional_size=1)\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n\r\n # apply restoffset to animation of hip\r\n restoffset_local = (restoffset[0], restoffset[2], -restoffset[1])\r\n for axis in range(3):\r\n fcurve = armature.animation_data.action.fcurves.find(\"pose.bones[\\\"\" + hipbone.name + \"\\\"].location\", axis)\r\n for pi in range(len(fcurve.keyframe_points)):\r\n fcurve.keyframe_points[pi].co.y -= restoffset_local[axis] / armature.scale.x\r\n return 1",
"def set_resource_data(self, resource, meta):",
"def _set_attributes(self):",
"def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)",
"def restore(self):\n if self.obj:\n for attrib in self.attribs:\n setattr(self.obj, attrib, getattr(self, attrib))",
"def rsetattr(obj, attr, val):\n pre, _, post = attr.rpartition('.')\n return setattr(rgetattr(obj, pre) if pre else obj, post, val)",
"def set_attribute(self, name, value):\n\n pass",
"def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def set(self, attr, val):\r\n self.__dict__[attr] = val",
"def __setattr__(self, attr, value):\n super().__setattr__(attr, value)",
"def SetAttributes(self, attr):\r\n \r\n if self._ownsAttr:\r\n del self._attr\r\n \r\n self._attr = attr\r\n self._ownsAttr = False",
"def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")",
"def set_node_attribute(\n node: MatterNode,\n endpoint: int,\n cluster_id: int,\n attribute_id: int,\n value: Any,\n) -> None:\n attribute_path = f\"{endpoint}/{cluster_id}/{attribute_id}\"\n node.endpoints[endpoint].set_attribute_value(attribute_path, value)",
"def _setAttributes(self, reactor, done):\n self.reactor = reactor\n self._done = done",
"def set_attr(self, name: str, values: Union[list, tuple, object]):",
"def proxy_method(self, rest_path, sign, kwargs):",
"def _update_from_rest_data(self) -> None:",
"def jersey(self, jersey):\n\n self._jersey = jersey",
"def set_iri(self, iri):\n try:\n self.uri = self.iri_to_uri(iri)\n except (ValueError, UnicodeError), why:\n self.http_error = httperr.UrlError(why[0])\n return\n if not re.match(\"^\\s*%s\\s*$\" % URI, self.uri, re.VERBOSE):\n self.add_note('uri', rs.URI_BAD_SYNTAX)\n if '#' in self.uri:\n # chop off the fragment\n self.uri = self.uri[:self.uri.index('#')]\n if len(self.uri) > MAX_URI:\n self.add_note('uri',\n rs.URI_TOO_LONG,\n uri_len=f_num(len(self.uri))\n )",
"def __setattr__(self, name, val):\n if name == '__root__' or name == '__pepth__':\n list.__setattr__(self, name, val)\n elif self.__pepth__ != 0:\n return plist.__getattr__(self, '__setattr__')(name, val)\n else:\n lval = _ensure_len(len(self), val)\n for i, x in enumerate(self):\n x.__setattr__(name, lval[i])\n return self",
"def set_attr(self):\n\n # Create a new array\n self.fileh.create_array('/', 'array', self.a1)\n for i in range(self.nobjects):\n # Set an attribute\n setattr(self.fileh.root.array.attrs, \"attr\" + str(i), str(self.a1))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (set_attr):\", undo, \"s, \", redo, \"s\")",
"def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))",
"def set_uri(self, uri):\n self.__uri = uri",
"def set_real_star(self, star):\n self.real_star = star",
"def change_attr(self) -> None:\n\n self.attr = randint(0, 10)",
"def __init__(self, rest_class, client, endpoint_prefix=''):\n\n self._rest_class = self._mapToRestClass(rest_class)\n self._rest_client = client\n\n # endpoint is always lowercase\n self._endpoint = '%s%s/' % (endpoint_prefix, self._mapToRestClass(rest_class).lower())",
"def set_attr(self, name, value):\n setattr(self, name, value)",
"def setAttributeValue(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass"
] | [
"0.5630176",
"0.5530816",
"0.5378598",
"0.533996",
"0.53326255",
"0.5315345",
"0.5179512",
"0.50836456",
"0.5040169",
"0.4995078",
"0.49613616",
"0.49500024",
"0.4906609",
"0.49053308",
"0.4878314",
"0.48509192",
"0.48429102",
"0.48362812",
"0.48314637",
"0.4826485",
"0.4816591",
"0.48157164",
"0.48100838",
"0.48093495",
"0.47981855",
"0.47775662",
"0.47751755",
"0.47564298",
"0.47535336",
"0.47438264"
] | 0.614358 | 0 |
Set the tabatas attribute. | def write_tabatas(self, value):
    # PROTECTED REGION ID(AsyncTabata.tabatas_write) ENABLED START #
    if value < 1:
        raise Exception("only positive value!")
    if self.get_state() == DevState.ON:
        raise Exception("cannot change values when device is running!")
    with self._lock:
        self._tabatas = value
    # PROTECTED REGION END # // AsyncTabata.tabatas_write | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetTrSet(self,value):\n self.ds = value",
"def set_table_attributes(self, attributes):\n self._dirty = True\n if attributes is not None:\n for k, v in attributes.iteritems():\n _key_guard(k, 'Attribute name')\n _str_guard(v, 'Attribute value')\n self._attributes = attributes",
"def set(self, table):\n if table is None:\n return\n for name in table.dtype.names:\n self._set_column(name, table[name])",
"def setTable(self, tabNew):\n\t\tself.tabN = tabNew\n\t\tself.id.SetLabel(str(self.tabN - 100))\n\t\tself.time.SetLabel(\"%.3f\" % (self.cSound.TableGet(self.tabN, 1)))\n\t\tself.semit.SetLabel(\"%.3f\" % (self.cSound.TableGet(self.tabN, 2)))\n\t\tself.feed.tabN = self.tabN\n\t\tself.feed.SetValue(self.cSound.TableGet(self.tabN, 4))\n\t\tself.lf.tabN = self.tabN\n\t\tself.lf.SetValue(self.cSound.TableGet(self.tabN, 5))\n\t\tself.hf.tabN = self.tabN\n\t\tself.hf.SetValue(self.cSound.TableGet(self.tabN, 6))\n\t\tself.pan.tabN = self.tabN\n\t\tself.pan.SetValue(self.cSound.TableGet(self.tabN, 7))\n\t\tself.reso.tabN = self.tabN\t\t\n\t\tself.reso.SetValue(self.cSound.TableGet(self.tabN, 8))\n\t\tself.dist.tabN = self.tabN\n\t\tself.dist.SetValue(self.cSound.TableGet(self.tabN, 9))\n\t\t\n\t\tself.volin.tabN = self.tabN\n\t\tself.volin.SetValue(self.cSound.TableGet(self.tabN, 10))\t\n\t\tself.volinr.tabN = self.tabN\n\t\tself.volinr.SetValue(self.cSound.TableGet(self.tabN, 11))\t\n\t\t\n\t\tself.vol.tabN = self.tabN\t\t\n\t\tself.vol.SetValue(self.cSound.TableGet(self.tabN, 12))\t\n\t\tself.volr.tabN = self.tabN\t\t\n\t\tself.volr.SetValue(self.cSound.TableGet(self.tabN, 13))\n\t\t#\n\t\t#envfollowkey = self.envfollowOpt.keys()[self.envfollowOpt.values().index(self.cSound.TableGet(self.tabN, 14))]\n\t\tenvfollowkey = self.envfollowOpt.keys()[self.envfollowOpt.values().index(self.cSound.TableGet(tabNew, 14))]\n\t\tself.envfollow.SetValue(envfollowkey)\n\t\t#self.mainSizer.Layout()\n\t\tself.Layout() #Either works",
"def set(self, properties):\n self._column.attrs = properties",
"def set_attr(self):\n\n # Create a new array\n self.fileh.create_array('/', 'array', self.a1)\n for i in range(self.nobjects):\n # Set an attribute\n setattr(self.fileh.root.array.attrs, \"attr\" + str(i), str(self.a1))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (set_attr):\", undo, \"s, \", redo, \"s\")",
"def set_tablename(self, name):\n self.ds_table = name",
"def setTCData(*args):\n args[0].Data.TCData.tc_data = args[1]",
"def setFila(self, fila):\n \n self.row = fila",
"def table(self, table):\n self._table = table",
"def table(self, table):\n self._table = table",
"def set_attr(zone, attr, line):\n zone.set_attr(attr, line[attr])",
"def set_attribute(self, name, value):\n\n pass",
"def set_data(self, indexer_table):\n\n self.indexer_table = indexer_table",
"def sit(self, table):\n self.table = table",
"def setTabWidth(self,tabwidth):\n self.tabwidth = tabwidth",
"def _set_attributes(self):",
"def setDataset(self,dataset):\n self.__dataSet = dataset",
"def assign_set_data(name,data):\n df = DataFrame(name)\n df.setColumn(name,data)\n ampl.setData(df,name)",
"def asthma(self, asthma):\n\n self.logger.debug(\"In 'asthma' setter.\")\n\n self._asthma = asthma",
"def set_data(self, df):\n self.df = df",
"def setTable(self, tabledef):\n if isinstance(tabledef, str):\n self._table = Table.Get ( tabledef )\n elif isinstance(tabledef, Table):\n self._table = tabledef\n else:\n raise ValueError (\"table - must be table name or Table instance.\" )",
"def setTable(self):\n if not self.outvar or self.data==None:\n return\n\n self.table.setColumnCount(len(self.data.domain.attributes) + (self.data.domain.classVar != None) + len(self.predictors))\n self.table.setRowCount(len(self.data))\n \n print self.table.rowCount(), len(self.data.domain.attributes), (self.data.domain.classVar != None), len(self.predictors)\n\n # HEADER: set the header (attribute names)\n## for col in range(len(self.data.domain.attributes)):\n## self.header.setLabel(col, self.data.domain.attributes[col].name)\n labels = [attr.name for attr in self.data.domain.variables] + [c.name for c in self.predictors.values()]\n self.table.setHorizontalHeaderLabels(labels)\n## col = len(self.data.domain.attributes)\n## if self.data.domain.classVar != None:\n## self.header.setLabel(col, self.data.domain.classVar.name)\n## col += 1\n## for (i,c) in enumerate(self.predictors.values()):\n## self.header.setLabel(col+i, c.name)\n\n # ATTRIBUTE VALUES: set the contents of the table (values of attributes), data first\n for i in range(len(self.data)):\n for j in range(len(self.data.domain.attributes)):\n## self.table.setText(i, j, str(self.data[i][j]))\n self.table.setItem(i, j, QTableWidgetItem(str(self.data[i][j])))\n col = len(self.data.domain.attributes)\n\n # TRUE CLASS: set the contents of the table (values of attributes), data first\n self.classifications = [[]] * len(self.data)\n if self.data.domain.classVar:\n for (i, d) in enumerate(self.data):\n c = d.getclass()\n item = colorItem(str(c))\n self.table.setItem(i, col, item)\n self.classifications[i] = [c]\n col += 1\n\n## for i in range(col):\n## self.table.adjustColumn(i)\n\n # include predictions, handle show/hide columns\n self.updateTableOutcomes()\n self.updateAttributes()\n self.updateTrueClass()\n self.table.show()",
"def set_attribute(self, name, value):\n attrs = self._column.attrs\n attrs[name] = value\n self._column.attrs = attrs",
"def __init__(self, parent=None):\n super(DataTab, self).__init__(parent)\n self.name = 'Data'",
"def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table",
"def set_mac_at(self, row, mac):\n\t\tself.__hh_table.item(row, 1).setText(mac)",
"def set_action_data(self, action, index):\n self._set_action_enabled(action, index)\n self._set_action_checkable(action, index)\n for args in self.setdataargs:\n self._set_action_attribute(action, index, args)",
"def setTd(self, Td):\r\n\t\tself.Td = Td\r\n\t\tself.label = \"standard_PID_Controller/Kp=%f, Ti=%f, Td=%f\" % (self.Kp, self.Ti, self.Td)",
"def settabular(self, *args, **kwargs):\n return _coordsys.coordsys_settabular(self, *args, **kwargs)"
] | [
"0.5949225",
"0.58843",
"0.58466774",
"0.57466394",
"0.5741959",
"0.5723373",
"0.5706781",
"0.56966734",
"0.564866",
"0.564174",
"0.564174",
"0.56349164",
"0.56261563",
"0.5615866",
"0.5553884",
"0.5537831",
"0.55232215",
"0.54708886",
"0.54633045",
"0.5443665",
"0.54405147",
"0.54359776",
"0.54132473",
"0.5383877",
"0.5357305",
"0.5352912",
"0.53395635",
"0.5325918",
"0.53121847",
"0.5311727"
] | 0.6633607 | 0 |
Return the running_state attribute. | async def read_running_state(self):
    # PROTECTED REGION ID(AsyncTabata.running_state_read) ENABLED START #
    return self._running_state
    # PROTECTED REGION END # // AsyncTabata.running_state_read | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def running_state(self) -> int | None:\n return self.cluster.get(\"running_state\")",
"def state(self) -> RunState:\n return self._async_scheduler.state",
"def running(self): # type: () -> bool\n return self.state['Running']",
"def get_state(self):\n return self.state",
"def get_state(self):\n return self.state",
"def get_state(self):\n return self._env.get_state()",
"def state(self):\n return self.get_state()",
"def state(self):\n return self.__state",
"def state(self):\n return self.__state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def state(self):\r\n return self._state",
"def state(self):\r\n return self._state",
"def state(self):\r\n return self._state",
"def GetState(self):\r\n \r\n return self.state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state"
] | [
"0.82891697",
"0.762371",
"0.7526244",
"0.7523861",
"0.7523861",
"0.7504188",
"0.75040936",
"0.75001734",
"0.75001734",
"0.74893653",
"0.74893653",
"0.74893653",
"0.74893653",
"0.74893653",
"0.74570817",
"0.74570817",
"0.74570817",
"0.7450113",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477",
"0.7433477"
] | 0.8102592 | 1 |
Main function of the AsyncTabata module. | def main(args=None, **kwargs):
    # PROTECTED REGION ID(AsyncTabata.main) ENABLED START #
    debugpy.listen(5678)
    kwargs.setdefault("green_mode", GreenMode.Asyncio)
    return run((AsyncTabata,), args=args, **kwargs)
    # AsyncTabata.run_server()
    # PROTECTED REGION END # // AsyncTabata.main | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n tng.api.runner()",
"def main():\n # Get credentials.\n logging.info('Obtaining Mega login credentials.')\n credentials = {}\n if os.path.exists(CREDENTIALS_FILE):\n credentials = json.load(open(CREDENTIALS_FILE))\n else:\n credentials['user'] = raw_input('User: ')\n credentials['password'] = getpass.getpass()\n \n # Create the required Mega API objects.\n executor = AsyncExecutor()\n api = MegaApi(APP_KEY, None, None, 'Python CRUD example')\n listener = AppListener(executor.continue_event)\n api.addListener(listener)\n\n # Run the operations.\n start_time = time.time()\n worker(api, listener, executor, credentials)\n logging.info('Total time taken: {} s'.format(time.time() - start_time))",
"def _main_helper(self):\n asyncio.create_task(self._main())",
"def main():\n\n\tgdl = TwitterDataLoader()\t\n\tgdl.load_twitter_data_to_db(truncate_table=False, skip_loaded_files=True)",
"def main():\n\n loop = asyncio.get_event_loop()\n bot = MyBot()\n\n # bot.pool = loop.run_until_complete(DB.create_pool(\n bot.pool = loop.run_until_complete(Table().create_pool(\n user=POSTGRES_USER,\n password=POSTGRES_PASS,\n database=POSTGRES_DB,\n host=POSTGRES_HOST\n ))\n # bot.db_manager = DB()\n bot.run()",
"def main():\n return",
"def main():\n args = get_args()\n\n levels = [logging.INFO, logging.DEBUG]\n level = levels[min(len(levels) - 1, args.vlevel)]\n logging.basicConfig(level=level)\n\n config.dry_run = args.dry_run\n config.adb_batch_size = args.adb_batch_size\n config.command_batch_size = args.command_batch_size\n \n asyncio.run(run(args))",
"def main():\n indicator = AyatanaIndicator()\n indicator.run()",
"def main():\n # Verify the database exists and has the correct layout\n db_seeder()\n app = DiminuendoApp()\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()",
"def main():\n pass",
"def main(self) -> None:\n pass",
"def main():\n\n cassandra_session = ConnectionPool(\n KEY_SPACE,\n [CASSANDRA_POOL],\n pool_size=1,\n )\n\n context = {\"cassandra_session\": cassandra_session,}\n\n application_tornado = tornado.web.Application([\n (\n r\"/api/1/simple-handler/(.*)\", \n SimpleHandler, \n context\n ),\n ])\n application_tornado.listen(8080)\n tornado.ioloop.IOLoop.current().start()",
"def main(self):\r\n pass",
"def main():\n\n handler = CSVDownloader('download.csv')\n handler.start()",
"def main():\n DataClasses = [FamilyStats, SeqHdrStats, UniProtStats]\n CmdLineOps, Args = parse_command_line_options()\n ThreadManager(DataClasses)\n print_results(DataClasses, CmdLineOps.output_file)\n return",
"def main() -> None:\n try:\n config = Config.load_config()\n asyncio.run(App(config=config, no_history=False).run())\n except ClientError:\n raise\n except Exception as e:\n raise Bug(str(e))",
"async def main():\n async with aiohttp.ClientSession() as session:\n data = Luftdaten(SENSOR_ID, loop, session)\n await data.get_data()\n\n if not await data.validate_sensor():\n print(\"Station is not available:\", data.sensor_id)\n return\n\n if data.values and data.meta:\n # Print the sensor values\n print(\"Sensor values:\", data.values)\n\n # Print the coordinates fo the sensor\n print(\"Location:\", data.meta['latitude'], data.meta['longitude'])",
"def _main():\n parser = _create_parser()\n args = parser.parse_args()\n\n if args.interval is None:\n args.interval = 10\n\n if args.what_if is None:\n args.what_if = False\n\n loop = asyncio.get_event_loop()\n\n params = {\n \"connection_string\": args.connection_string,\n \"name\": args.name,\n \"interval\": args.interval,\n \"what_if\": args.what_if\n }\n\n loop.run_until_complete(_run(params))",
"def main():\n\n # Initial message\n taq_data_tools_responses_physical_short_long.taq_initial_message()\n\n # Tickers and days to analyze\n year = '2008'\n tickers = ['AAPL', 'GOOG']\n taus_p = [x for x in range(10, 101, 10)]\n tau = 1000\n\n # Basic folders\n taq_data_tools_responses_physical_short_long.taq_start_folders(year)\n\n # Run analysis\n taq_data_plot_generator(tickers, year, tau, taus_p)\n\n print('Ay vamos!!!')\n\n return None",
"async def main():\n\n # provide greetings for the program\n # print out program heading (using multi-line statement)\n programHeading = \"PROGRAM BEGINS BELOW (Python Version {})\".format(\\\n sys.version[0:sys.\\\n version.index(\" \")])\n print(programHeading)\n print('=' * len(programHeading))\n programHeading = \"PYTHON ASYNC GENERATOR EVENT-DRIVEN PROGRAMMING\"\n print(programHeading)\n print('=' * len(programHeading))\n # end of greetings\n \n # use an async for-loop to get the generated random int\n async for number in getrandom_number():\n # print out the returned number\n print(\"Random Number: {}\".format(number))",
"def main():",
"def main():",
"def main():",
"def main():",
"def main():",
"def main():",
"def main():",
"def main():",
"def main():",
"def main():"
] | [
"0.62526685",
"0.5965609",
"0.5924747",
"0.59186465",
"0.5900263",
"0.5821848",
"0.58118916",
"0.58106273",
"0.5798",
"0.5766722",
"0.57555044",
"0.5747025",
"0.57421297",
"0.5733062",
"0.5727855",
"0.5694739",
"0.56915116",
"0.56727445",
"0.56583554",
"0.56498504",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631",
"0.5634631"
] | 0.83177817 | 0 |
Iterator for subsample annealing, yielding (action, arg) pairs. This generates a subsample annealing schedule starting from an empty assignment state (no rows are assigned). It then interleaves 'add_row' and 'remove_row' actions so as to gradually increase the number of assigned rows. The increase rate is linear. | def make_annealing_schedule(num_rows, epochs, sample_tree_rate):
assert epochs >= 1.0
assert sample_tree_rate >= 1.0
# Randomly shuffle rows.
row_ids = list(range(num_rows))
np.random.shuffle(row_ids)
row_to_add = itertools.cycle(row_ids)
row_to_remove = itertools.cycle(row_ids)
# Use a linear annealing schedule.
epochs = float(epochs)
add_rate = epochs
remove_rate = epochs - 1.0
state = 2.0 * epochs
# Sample the tree sample_tree_rate times per batch.
num_assigned = 0
next_batch = 0
while num_assigned < num_rows:
if state >= 0.0:
yield 'add_row', next(row_to_add)
state -= remove_rate
num_assigned += 1
next_batch -= sample_tree_rate
else:
yield 'remove_row', next(row_to_remove)
state += add_rate
num_assigned -= 1
if num_assigned > 0 and next_batch <= 0:
yield 'sample_tree', None
next_batch = num_assigned | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_agent(bandit, iterations):\n\n for i in range(iterations):\n a = random.choice(bandit.actions)\n r = bandit.sample(a)\n yield a, r",
"def initial_explore_agent(bandit, iterations, initial_rounds = 10):\n pay_offs = dict()\n best_action = -1\n\n for i in range(iterations):\n # for the initial rounds pick a random action\n if i < initial_rounds:\n a = random.choice(bandit.actions)\n r = bandit.sample(a)\n\n #update rewards\n if a in pay_offs:\n pay_offs[a].append(r)\n else:\n pay_offs[a] = [r]\n # otherwise pick the best one thus far\n else:\n if (best_action == -1):\n # check for the lever with the best average payoff\n mean_dict = {}\n for key,val in pay_offs.items():\n mean_dict[key] = np.mean(val) \n best_action = max(mean_dict, key=mean_dict.get)\n a = best_action\n\n r = bandit.sample(a)\n \n yield a, r",
"def uniformSample (self) :\n S = self.mdp.S\n A = self.mdp.A\n\n for s, a in product(range(S), range(A)):\n s_, self.R[s, a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)",
"def _generator(self):\n # Initial setup\n ac = self._env.action_space.sample() # not used, just so we have the datatype\n self.new = True # marks if we're on first timestep of an episode\n self.ob = self._convert_state(self._env.reset()) \n T = self._timesteps\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n #obs = np.array([None for _ in range(T)])\n obs = nd.empty((T,) + self._env.observation_space.shape)\n rews = np.zeros(T, 'float32')\n vpreds = np.zeros(T, 'float32')\n news = np.zeros(T, 'int32')\n acs = np.array([ac for _ in range(T)])\n prevacs = acs.copy()\n\n t = 0\n while True:\n ob = self.ob # Use `self.` since `_evaluate` may have reset the env\n new = self.new\n prevac = ac\n ac, vpred = self._act(ob)\n # NOTE(openAI) Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct terminal value\n if t > 0 and t % T == 0:\n seg = {\"ob\": obs, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": np.array(copy.deepcopy(ep_rets)),\n \"ep_lens\": np.array(copy.deepcopy(ep_lens))}\n self._add_vtarg_and_adv(seg, self._gamma, self._lambda)\n yield seg\n # NOTE: Do a deepcopy if the values formerly in these arrays are used later.\n ep_rets = []\n ep_lens = []\n i = t % T\n\n obs[i] = ob[0]\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = self._convert_state(self._env.reset())\n self.new = new\n self.ob = ob\n t += 1",
"def optimal_agent(bandit, iterations):\n\n for i in range(iterations):\n a = bandit.pay_offs.index(max(bandit.pay_offs))\n r = bandit.sample(a)\n yield a, r",
"def batch_anneal(self, times=10):\n for i in range(1, times + 1):\n print(f\"Iteration {i}/{times} -------------------------------\")\n self.T = self.T_save\n self.iteration = 1\n self.cur_solution, self.cur_fitness = self.initial_solution()\n self.anneal()",
"def sample_generator(a, lookback=1, delay=1, start=None, end=None, target_col=3, flatten=True):\n assert len(a) > lookback, \"Length of array must be larger than batch size\"\n assert lookback > 0 and delay > 0, \"Batch and step must be positive\"\n if start:\n a = a[start:]\n if end:\n a = a[: end + lookback + delay - 1]\n a = np.array(a)\n i = lookback\n while i < len(a):\n X = a[i - lookback: i]\n if flatten:\n X = X.flatten()\n y = a[i, target_col]\n i += 1\n yield X, y",
"def train__iter__(self):\n\n # create worker-specific random number generator\n rng = create_rng_for_worker(self.model.current_epoch)\n\n while True:\n\n # select one file at random (with probability proportional to its annotated duration)\n file, *_ = rng.choices(\n self._train,\n weights=[f[\"duration\"] for f in self._train],\n k=1,\n )\n\n # select one annotated region at random (with probability proportional to its duration)\n segment, *_ = rng.choices(\n file[\"annotated\"],\n weights=[s.duration for s in file[\"annotated\"]],\n k=1,\n )\n\n # select one chunk at random (with uniform distribution)\n start_time = rng.uniform(segment.start, segment.end - self.duration)\n chunk = Segment(start_time, start_time + self.duration)\n\n X, one_hot_y, _ = self.prepare_chunk(file, chunk, duration=self.duration)\n\n y = self.prepare_y(one_hot_y)\n\n yield {\"X\": X, \"y\": y}",
"def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size > 0:\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError(\"the length of the indice should be equal to size\")\n indice = indice[list(torch.randperm(int(size),\n generator=g))].tolist()\n extra = int(\n math.ceil(\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\n indice += indice[:extra]\n indices += indice\n\n if not len(indices) == self.total_size:\n raise ValueError(\"the length of the indices should be equal to total_size\")\n\n indices = [\n indices[j] for i in list(\n torch.randperm(\n len(indices) // self.samples_per_gpu, generator=g))\n for j in range(i * self.samples_per_gpu, (i + 1) * self.samples_per_gpu)\n ]\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samplers in subsample\")\n\n return iter(indices)",
"def test_sampling2 () :\n delta = 2 * np.pi / 3\n r = Reward(partial(stepFunction, \n xRange=(-delta/2, delta/2), \n yRange=(-delta/2, delta/2)), \n (-1, 0))\n states = []\n xs = np.arange(-np.pi, np.pi, delta)\n ys = np.arange(-np.pi, np.pi, delta)\n for x, y in product(xs, ys) : \n states.append(\n toExternalStateRep([x + delta / 2, y + delta / 2, 0, 0]).astype(float)\n )\n agent = findOptimalAgent(r)\n vals = estimateValueFromAgent(states, agent, r)\n for s, v in zip(states, vals) : \n print(toInternalStateRep(s)[:2], v)",
"def schedule_sampling(eta, itr):\n zeros = np.zeros(\n (FLAGS.batch_size, FLAGS.total_length - FLAGS.input_length - 1,\n FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,\n FLAGS.patch_size**2 * FLAGS.img_channel))\n if not FLAGS.scheduled_sampling:\n return 0.0, zeros\n\n if itr < FLAGS.sampling_stop_iter:\n eta -= FLAGS.sampling_changing_rate\n else:\n eta = 0.0\n random_flip = np.random.random_sample(\n (FLAGS.batch_size, FLAGS.total_length - FLAGS.input_length - 1))\n true_token = (random_flip < eta)\n ones = np.ones(\n (FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,\n FLAGS.patch_size**2 * FLAGS.img_channel))\n zeros = np.zeros(\n (FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,\n FLAGS.patch_size**2 * FLAGS.img_channel))\n real_input_flag = []\n for i in range(FLAGS.batch_size):\n for j in range(FLAGS.total_length - FLAGS.input_length - 1):\n if true_token[i, j]:\n real_input_flag.append(ones)\n else:\n real_input_flag.append(zeros)\n real_input_flag = np.array(real_input_flag)\n real_input_flag = np.reshape(\n real_input_flag,\n (FLAGS.batch_size, FLAGS.total_length - FLAGS.input_length - 1,\n FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,\n FLAGS.patch_size**2 * FLAGS.img_channel))\n return eta, real_input_flag",
"def gen_rebatch(self, *args, **kwargs):\n _action = self._action_list[0]\n self._rest_batch = None\n while True:\n if self._rest_batch is None:\n cur_len = 0\n batches = []\n else:\n cur_len = len(self._rest_batch)\n batches = [self._rest_batch]\n self._rest_batch = None\n while cur_len < _action['batch_size']:\n try:\n new_batch = _action['pipeline'].next_batch(*args, **kwargs)\n except StopIteration:\n break\n else:\n batches.append(new_batch)\n cur_len += len(new_batch)\n if len(batches) == 0:\n break\n else:\n if _action['merge_fn'] is None:\n batch, self._rest_batch = batches[0].merge(batches, batch_size=_action['batch_size'])\n else:\n batch, self._rest_batch = _action['merge_fn'](batches, batch_size=_action['batch_size'])\n yield batch",
"def sample(self, idx):\n idx = (self._curr_pos + idx) % self._curr_size\n action = self.action[idx]\n reward = self.reward[idx]\n isOver = self.isOver[idx]\n comb_mask = self.comb_mask[idx]\n if idx + 2 <= self._curr_size:\n state = self.state[idx:idx+2]\n fine_mask = self.fine_mask[idx:idx+2]\n else:\n end = idx + 2 - self._curr_size\n state = self._slice(self.state, idx, end)\n fine_mask = self._slice(self.fine_mask, idx, end)\n return state, action, reward, isOver, comb_mask, fine_mask",
"def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n if not len(indices) == self.total_size:\n raise ValueError('the length of the indices should be equal to total_size')\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples in subsample\")\n\n return iter(indices)",
"def simulated_annealing(x_init, inference_samples, partition_samples, edge_mat_samples, n_vertices,\n acquisition_func, reference=None):\n sa_runner = GraphSimulatedAnnealing(x_init, inference_samples, partition_samples, edge_mat_samples, n_vertices,\n acquisition_func, reference)\n steps = 500\n sa_runner.set_schedule({'tmax': 1.0, 'tmin': 0.8 ** steps, 'steps': steps, 'updates': sa_runner.updates})\n opt_state, opt_eval = sa_runner.anneal()\n\n # Annealer.anneal() MINinimzes an objective but acqusition functions should be MAXimized.\n return opt_state, -opt_eval",
"def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]",
"def iteration_count_based(sched, warmup_iterations, iterations, runner, params):\n next_scheduled = 0\n total_iterations = warmup_iterations + iterations\n if total_iterations == 0:\n raise exceptions.RallyAssertionError(\"Operation must run at least for one iteration.\")\n for it in range(0, total_iterations):\n sample_type = metrics.SampleType.Warmup if it < warmup_iterations else metrics.SampleType.Normal\n percent_completed = (it + 1) / total_iterations\n yield (next_scheduled, sample_type, percent_completed, runner, params.params())\n next_scheduled = sched.next(next_scheduled)",
"def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling",
"def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch",
"def random_iterator(self, batch_size):\n all_indices = np.nonzero(np.logical_not(self._dones))[0]\n np.random.shuffle(all_indices)\n\n states = np.asarray(self._states)\n actions = np.asarray(self._actions)\n next_states = np.asarray(self._next_states)\n rewards = np.asarray(self._rewards)\n dones = np.asarray(self._dones)\n\n i = 0\n while i < len(all_indices):\n indices = all_indices[i:i+batch_size]\n\n yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices]\n\n i += batch_size",
"def test_run_experiment_locally_in_batches(self) -> None:\n parallelism = 2\n rounds = 3\n\n experiment = Experiment(\n name=\"torchx_booth_parallel_demo\",\n search_space=SearchSpace(parameters=self._parameters),\n optimization_config=OptimizationConfig(objective=self._objective),\n runner=self._runner,\n is_test=True,\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n\n scheduler = Scheduler(\n experiment=experiment,\n generation_strategy=(\n choose_generation_strategy(\n search_space=experiment.search_space,\n max_parallelism_cap=parallelism,\n )\n ),\n options=SchedulerOptions(\n run_trials_in_batches=True, total_trials=(parallelism * rounds)\n ),\n )\n\n try:\n scheduler.run_all_trials()\n\n # TorchXMetric always returns trial index; hence the best experiment\n # for min objective will be the params for trial 0.\n scheduler.report_results()\n except FailureRateExceededError:\n pass # TODO(ehotaj): Figure out why this test fails in OSS.\n # Nothing to assert, just make sure experiment runs.",
"def sample_her_transitions(self, episode_batch, batch_size_in_transitions):\n T = episode_batch[0].shape[0]\n rollout_batch_size = episode_batch['actions'].shape[0] #300\n batch_size = batch_size_in_transitions\n # select which rollouts and which timesteps to be used\n\n t_samples = np.random.randint(T, size=batch_size) # random indices \n transitions = [elem[t_samples] for elem in episode_batch] # resort the array to t_samples incides\n # her idx\n her_indexes = np.where(np.random.uniform(size=batch_size) < self.future_p) # portion of future_p : future, else : final\n future_offset = np.random.uniform(size=batch_size) * (T - t_samples)\n future_offset = future_offset.astype(int)\n future_t = (t_samples + future_offset)[her_indexes] # adding 1 her could be problematic\n # replace des_goal with achieved goal / her_application\n future_ag = episode_batch[3][future_t]\n transitions[4][her_indexes] = future_ag \n # to get the params to re-compute reward\n # compute batch reward from achieved goals and desired goals \n transitions[6] = np.expand_dims(self.reward_func(transitions[3], transitions[4], None), 1)\n # update mean/std of normalizers \n\n return transitions",
"def sample(self):\n return self._action_out(\n [self.action_space.sample() for _ in range(self.batch_size)]\n )",
"def add_sample(self, state, action, reward, terminal):\n self.states[self.top] = state\n self.terminals[self.top] = terminal\n self.actions[self.top] = action\n self.rewards[self.top] = reward\n\n if self.size == self.max_steps:\n self.bottom = (self.bottom + 1) % self.max_steps\n else:\n self.size += 1\n self.top = (self.top + 1) % self.max_steps",
"def sample_action(self, obs, explore_prob):\n raise NotImplementedError",
"def do_iteration(self, mutation_fn, aggression):\n raise NotImplementedError()",
"def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]",
"def test_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index",
"def sample(self, batch_size):\n # get the sum of priorities\n priority_sum = self.sum_tree.get_sum_priority()\n # sample priorities \n priorities_to_sample = np.random.uniform(0, priority_sum, batch_size)\n # get the indexes of replays\n sample_idxes = [self.sum_tree.get(x) for x in priorities_to_sample]\n # fetch the transitions and prepare the batch for training\n random_sample = [self.queue[x] for x in sample_idxes]\n # zip\n zipped = [ torch.from_numpy( np.asarray(arr).astype(np.float32) ).float() for arr in zip(*random_sample) ]\n sample = Transition( zipped[0], zipped[1].unsqueeze_(-1).long(), zipped[2].unsqueeze_(-1), zipped[3], zipped[4].unsqueeze_(-1).byte() )\n return sample, sample_idxes",
"def train(self):\n if len(self.experience) < self.minibatch_size:\n return\n\n # sample a minibatch_size of random episode with a number of transitions >= unrollings_num\n random_episodes_indecies = np.random.choice(len(self.experience), self.minibatch_size)\n random_episodes = []\n for index in random_episodes_indecies:\n episode = self.experience[index]\n\n # 0:random_transitions_space is the range from which a random transition\n # can be picked up while having unrollings_num - 1 transitions after it\n random_transitions_space = len(episode) - self.unrollings_num\n random_start = np.random.choice(random_transitions_space, 1)\n\n random_episodes.append(episode[random_start:random_start + self.unrollings_num])\n\n state_shape = tuple([self.minibatch_size, self.unrollings_num] + self.state_shape)\n\n # prepare the training data\n states = np.empty(state_shape, dtype=np.float32)\n next_states = np.empty(state_shape, dtype=np.float32)\n rewards = np.empty((self.minibatch_size, self.unrollings_num, ), dtype=np.float32)\n transition_action_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n next_legal_actions_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n\n for i, episode in enumerate(random_episodes):\n for j, transition in enumerate(episode):\n state, action, reward, nextstate, next_legal_actions = transition\n\n states[i,j], rewards[i,j], next_states[i,j] = state, reward, nextstate\n transition_action_filters[i,j][action] = 1.0\n next_legal_actions_filters[i,j][next_legal_actions] = 1.0\n\n self.prediction_nn.clearLSTMS(self.session)\n self.target_nn.clearLSTMS(self.session)\n\n loss,_ = self.session.run([self.loss, self.finalize], {\n self.states: states,\n self.next_states: next_states,\n self.rewards: np.reshape(rewards, (self.minibatch_size * self.unrollings_num, )),\n self.transition_action_filters: np.reshape(transition_action_filters, (self.minibatch_size * self.unrollings_num, self.actions_count)),\n self.next_legal_actions_filters: np.reshape(next_legal_actions_filters, (self.minibatch_size * self.unrollings_num, self.actions_count))\n })\n\n if self.iteration != 0 and self.iteration % self.freeze_period == 0:\n self.target_nn.assign_to(self.prediction_nn, self.session)\n\n self.iteration += 1\n\n return loss, self.iteration"
] | [
"0.5455643",
"0.5289078",
"0.5249597",
"0.5138517",
"0.5060755",
"0.5059422",
"0.5056093",
"0.50501776",
"0.5032908",
"0.49953273",
"0.4981477",
"0.49236384",
"0.49223185",
"0.49203786",
"0.49098432",
"0.49025986",
"0.48921928",
"0.4862368",
"0.4837989",
"0.4807904",
"0.48005605",
"0.4797982",
"0.4795616",
"0.47887465",
"0.47731155",
"0.4745447",
"0.4709558",
"0.4709306",
"0.47048464",
"0.47044033"
] | 0.6056993 | 0 |
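The schedule above only yields (action, arg) pairs; the caller is expected to dispatch on the action name. A minimal driver sketch follows, assuming make_annealing_schedule from the record above is in scope and that the trainer exposes add_row, remove_row and sample_tree methods (method names inferred from the surrounding records, not confirmed by this dump).

def drive_annealing(trainer, num_rows, epochs=2.0, sample_tree_rate=3.0):
    # Consume the (action, arg) pairs from the annealing schedule and
    # dispatch each one to the matching trainer method.
    for action, arg in make_annealing_schedule(num_rows, epochs, sample_tree_rate):
        if action == 'add_row':
            trainer.add_row(arg)
        elif action == 'remove_row':
            trainer.remove_row(arg)
        else:  # 'sample_tree'; arg is None for this action
            trainer.sample_tree()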
Remove a given row from the current subsample. | def remove_row(self, row_id): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteRow(self, row):\n if (row >= self._height or row <= -self.height):\n raise IndexError('Invalid index, row %d does not exist' % row)\n self._height -= 1\n return self._value.pop(row)",
"def delete_sample(self, rows):\n if not isinstance(rows, (list, tuple)):\n rows = [rows]\n for v in self.list:\n v.value = np.delete(v.value, rows, axis=0)",
"def remove_row(self, row_id):\n TODO('https://github.com/posterior/treecat/issues/27')",
"def removeRow(self, index: int) -> None:\n ...",
"def del_row(self, row_index):\n ...",
"def removeRow(self, row, parent=QModelIndex()):\n return self.removeRows(row, 1, parent)",
"def del_row(self, index):\n self.data.remove(self.data[index])",
"def removeRow(self):\n row = self.getCurrentRow()\n self.jobRow.removeRow(row)\n return self.layers.pop(row)",
"def remove_row(self, index):\n _ = self.matrix.pop(index) ## Delete row\n self.matrix.append(self.empty_row.copy())",
"def delete_row(A, delrow):\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n keeprows = delete(arange(0, m), delrow)\r\n keepcols = arange(0, n)\r\n return A[keeprows][:, keepcols]",
"def delItem(self,row,column):\n data = self.data\n if row in data and column in data[row]:\n del data[row][column]\n self.hasChanged = True",
"def delete_row(self, pos):\n del self._grid[pos]",
"def delete(self, predicate=lambda row: True):\n self.rows = [row for row in self.rows if not predicate(row)]",
"def remove_row(self):\n if len(self.columns[\"rows\"].children) > 0:\n self.selects.pop()\n self.button_groups.pop()\n self.buttons[\"edit\"].pop()\n self.columns[\"rows\"].children.pop()",
"def removeRows(self, position, rows=1, index=QModelIndex()):\n self.beginRemoveRows(QModelIndex(), position, position + rows - 1)\n\n del self.Grains[position:position+rows]\n\n self.endRemoveRows()\n self.dataChanged.emit(index, index) \n return True",
"def remove_rows(data, nrows, accounting_column=None):\n logger.debug('start: removing {} rows in transition model'.format(nrows))\n nrows = abs(nrows) # in case a negative number came in\n unit_check = data[accounting_column].sum() if accounting_column else len(data)\n if nrows == 0:\n return data, _empty_index()\n elif nrows > unit_check:\n raise ValueError('Number of rows to remove exceeds number of records in table.')\n\n remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)\n remove_index = remove_rows.index\n\n logger.debug('finish: removed {} rows in transition model'.format(nrows))\n return data.loc[data.index.difference(remove_index)], remove_index",
"def __delitem__(self, index: int) -> None:\n del self._rows[index]",
"def removeRows(self, row, count, parent=QModelIndex()):\n self.dict_lock.acquire()\n self.list_lock.acquire()\n\n self.beginRemoveRows(parent, row, row + count -1)\n\n for offset in range(count):\n level = self.view_list[row + offset]\n del self.levels_dict[level.code]\n\n del self.view_list[row:row+count]\n if(not self.sorting & Sorting.Reversed):\n del self.view_keys[row:row+count]\n else:\n del self.view_keys[len(self.view_keys) - (row + count): len(self.view_keys) - row]\n\n self.endRemoveRows()\n\n self.list_lock.release()\n self.dict_lock.release()\n\n return True",
"def remove(self, idx):\n indices = range(len(self))\n indices.remove(idx)\n return self.take(indices, axis=0).take(indices, axis=1)",
"def removeRows(self, row, count, parent=QtCore.QModelIndex()):\n self.beginRemoveRows(parent, row, row+count-1)\n # Remove saved photo and statistics\n for assay in self.__assays[row:row+count]:\n if assay.img_path is not None and os.path.exists(assay.img_path):\n os.remove(assay.img_path)\n del(self.__assays[row:row+count])\n self.endRemoveRows()\n return True",
"def delete_row(self, identifier, rowid, datastore):\n # Get dataset. Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Delete the row at the given index position\n df = vizual.delete_rows(dataset.to_dataframe(), rowids=[rowid])\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations.filter(rows=list(df.index))\n )\n return VizualApiResult(ds)",
"def delete(self, predicate: WhereClause = lambda row: True) -> None:\n self.rows = [row for row in self.rows if not predicate(row)]",
"def delete_row(self, row_id):\n if self.__modify_data_file(self.__data_file_for_row_id(row_id), {row_id: None}, 'delete'): \n print('Row ' + str(row_id) + ' has been deleted.')\n else:\n raise Exception('There was a problem deleting row at ' + str(row_id) +'.')",
"def remove(self):\n self.end_child()\n index = self.get_row_index()\n if index is None:\n # Already gone\n return\n self.model.removeRow(index)",
"def remove_child(self, row):\n value = self.children[row]\n self.children.remove(value)\n\n return True",
"def delete(self, condition: conditions.Condition = None):\n if not condition:\n del self.rows[:]\n\n for i, row in enumerate(self.rows):\n if condition.evaluate(self, row):\n del self.rows[i]",
"def __delitem__(self, idx):\n row, col = idx\n\n array_row = self._find_row_before(row)\n\n if (array_row.next_row == None or array_row.next_row.row_number > row):\n return\n\n target_row = array_row.next_row\n array_entry = self._find_column_before(target_row, col)\n\n if (array_entry.next_entry == None or array_entry.next_entry.column_number > col):\n return\n\n array_entry.next_entry = array_entry.next_entry.next_entry\n\n # If this row still has entries in it we are finished\n if target_row.row_sentinel.next_entry != None:\n return\n\n array_row.next_row = array_row.next_row.next_row",
"def remove_rows(self, rows, regroup=False):\n self.table.remove_rows(np.atleast_1d(rows))\n if regroup:\n for col in ['setup', 'calib', 'calibbit', 'comb_id', 'bkg_id']:\n if col in self.keys():\n del self.table[col]\n self.set_configurations()\n self.set_calibration_groups()\n self.set_combination_groups()",
"def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True",
"def EliminateRows(self, rows):\n return _hypre.HypreParMatrix_EliminateRows(self, rows)"
] | [
"0.76017517",
"0.75120515",
"0.74128586",
"0.7357161",
"0.7258305",
"0.7201727",
"0.72012454",
"0.69447654",
"0.693629",
"0.6813859",
"0.6812815",
"0.6581558",
"0.6556254",
"0.6554288",
"0.635864",
"0.6341568",
"0.63035554",
"0.62991357",
"0.62884134",
"0.62074757",
"0.6165149",
"0.61378634",
"0.6106396",
"0.60859466",
"0.60855347",
"0.6041755",
"0.601924",
"0.59160656",
"0.5890085",
"0.5881816"
] | 0.7742161 | 0 |
Compute edge log probabilities on the complete graph. | def compute_edge_logits(self): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_edge_logits(self):\n V, E, K, M = self._VEKM\n vert_logits = logprob_dc(self._vert_ss, self._vert_prior, axis=1)\n if len(self._added_rows) == V:\n assignments = self._assignments\n else:\n assignments = self._assignments[sorted(self._added_rows), :]\n assignments = np.array(assignments, order='F')\n parallel = self._config['learning_parallel']\n result = treecat_compute_edge_logits(M, self._tree.complete_grid,\n self._gammaln_table, assignments,\n vert_logits, parallel)\n result += self._tree_prior\n return result",
"def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/27')",
"def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/26')",
"def log_prob(self):",
"def logprob(self):\n assert len(self._added_rows) == self._num_rows\n V, E, K, M = self._VEKM\n vertex_logits = logprob_dc(self._vert_ss, self._vert_prior, axis=1)\n logprob = vertex_logits.sum()\n for e, v1, v2 in self._tree.tree_grid.T:\n logprob += (logprob_dc(self._edge_ss[e, :, :], self._edge_prior) -\n vertex_logits[v1] - vertex_logits[v2])\n for v in range(V):\n beg, end = self._table.ragged_index[v:v + 2]\n logprob += logprob_dc(self._feat_ss[beg:end, :], self._feat_prior)\n logprob -= logprob_dc(self._meas_ss[v, :], self._meas_prior[v])\n return logprob",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def logprob(self):\n assert len(self._added_rows) == self._num_rows\n TODO('https://github.com/posterior/treecat/issues/27')",
"def logprob(self):\n assert len(self._added_rows) == self._num_rows\n TODO('https://github.com/posterior/treecat/issues/26')",
"def logprob(self):\n assert len(self._added_rows) == self._num_rows",
"def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs",
"def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))",
"def mamajek08_logRpHK_edge():\n return -4.355226174791392",
"def _update_logprobs(self):\n #self._logp_src = self._log_lim(self.p_source)\n self._logp_I0 = self._log_lim(self.p_source)\n self._logp_R0 = self._log_lim(self.p_source/(self.p_rec_div))\n self._logp_S_fin = self._log_lim(self.p_S_fin)\n self._logp_inf_fin = self._log_lim(self.p_infect_fin)",
"def log_prob(self):\n res = -self.L_h/2*np.log(2*np.pi*self.la)\n res = res + self.L_h*(self.L_h-1)/2*self.a\n\n\n res = res - 1/(2*self.la)*np.square(np.linalg.norm(self.e*self.pie))\n\n res = res - 1/(2*self.la)*np.sum(self.e2*self.pie_var)\n\n res = res - self.L_h/2*np.log(2*np.pi*self.sigma2)\n res = res - 1/(2*self.sigma2)*(np.square(np.linalg.norm(self.w))+np.trace(self.R))\n\n print(\"Log-probability difference = {}\".format(res - self.LP), file=self.logfile)\n self.LP = res\n return res",
"def attachment_likelihood(node):\n return log(len(node.edges)+1)+1.0",
"def log_probability(self, samples):\n pass",
"def edge_prob(graph, edge, print_prob=False):\n deg1 = graph.deg(edge[0])\n deg2 = graph.deg(edge[1])\n degsum = graph.deg_sum()\n prob = deg1*deg2/(graph.deg_sum()-deg1-deg2)\n if print_prob:\n print(\"Edge probability: \"+str(deg1)+\"*\"+str(deg2)+\"/(\"+\n str(degsum)+\"-\"+str(deg1)+\"-\"+str(deg2)+\") = \"+str(prob))\n return prob",
"def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])",
"def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)",
"def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]",
"def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]",
"def logprob(predictions, labels):\n # prevent negative probability\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]",
"def compute_expected_log_prob(self):\n for (w, t), val in np.ndenumerate(self.e_log_prob):\n self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])\n return self.e_log_prob",
"def log_probs(self):\n return np.array([m['log_probs'] for m in self.model_outs], dtype=np.float32)",
"def _graph_fn_get_distribution_log_probs(self, key, parameters, actions):\n # For bounded continuous action spaces, need to unscale (0.0 to 1.0 for beta distribution).\n if self.bounded_action_space[key] is True:\n actions = (actions - self.action_space.low) / (self.action_space.high - self.action_space.low)\n return self.distributions[key].log_prob(parameters, actions)",
"def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions)))",
"def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))",
"def log_prob(self, sents):\n log_prob = 0\n for sent in sents:\n log_prob += self.sent_log_prob(sent)\n return log_prob",
"def logrels(rets):\n return np.log(rets + 1)",
"def log_prob(self, x):\n z, log_det = self.backward_p(x)\n return self.prior.log_prob(z) + log_det"
] | [
"0.7335508",
"0.71378374",
"0.7127644",
"0.7103969",
"0.70094013",
"0.67944485",
"0.6733487",
"0.6730886",
"0.63937724",
"0.6335503",
"0.6307681",
"0.62550616",
"0.62338877",
"0.6196325",
"0.6193061",
"0.6187027",
"0.6134781",
"0.61287415",
"0.6126551",
"0.6121182",
"0.6121182",
"0.6102105",
"0.6094567",
"0.6079068",
"0.6076246",
"0.60678357",
"0.6037565",
"0.600249",
"0.60002404",
"0.5977617"
] | 0.78942937 | 0 |
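One plausible way to turn these complete-graph edge scores into a latent tree is a greedy maximum spanning tree (Kruskal's algorithm with a union-find), sketched below. This is illustrative only: an actual sampler would more likely draw a random spanning tree with probability proportional to exp(edge_logits) rather than taking the argmax, and the (e, v1, v2) column layout of complete_grid is an assumption based on the tree_grid usage visible in the records above.

import numpy as np

def max_spanning_tree(num_vertices, complete_grid, edge_logits):
    # Greedily pick the V - 1 highest-scoring edges that do not close a cycle.
    parent = list(range(num_vertices))

    def find(v):
        # Union-find lookup with path halving.
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v

    edges = []
    for k in np.argsort(-edge_logits):
        v1, v2 = int(complete_grid[1, k]), int(complete_grid[2, k])
        r1, r2 = find(v1), find(v2)
        if r1 != r2:
            parent[r1] = r2
            edges.append((min(v1, v2), max(v1, v2)))
            if len(edges) == num_vertices - 1:
                break
    return sorted(edges)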
Set edges of the latent structure and update statistics. | def set_edges(self, edges):
self._tree.set_edges(edges)
self._program = make_propagation_program(self._tree.tree_grid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_edges(self, edges):\n assert len(edges) == self._num_edges\n self._tree_grid = make_tree(edges)",
"def edges(self, edges):\n\n self._edges = edges",
"def edges(self, e):\n self._edges = e",
"def refresh_metadata(self):\n #self.node_index = None\n #self.edge_index = None\n #self._calc_edge_centers = False\n #self._calc_cell_centers = False\n #self._calc_vcenters = False\n self._node_to_edges = None\n self._node_to_cells = None",
"def set_edges(self, edges):\n if (not isinstance(edges, None.__class__) and (edges.size != 0)):\n if ((np.shape(edges)[0] != self.E) or (np.shape(edges)[1] != 2)):\n raise ValueError('Incompatible size of the edge matrix')\n if edges.max() + 1 > self.V:\n raise ValueError('Incorrect edge specification')\n self.edges = edges\n else:\n self.edges = []",
"def _update_statistics(self):\n\n self._finalize_data()\n if 'z' in self.data.dtype.names:\n self.tree.settings['min_depth'] = np.nanmin(self.node_data['z']).compute()\n self.tree.settings['max_depth'] = np.nanmax(self.node_data['z']).compute()\n if 'tvu' in self.data.dtype.names:\n self.tree.settings['min_tvu'] = np.nanmin(self.node_data['tvu']).compute()\n self.tree.settings['max_tvu'] = np.nanmax(self.node_data['tvu']).compute()",
"def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)",
"def set_incident_edges(self, incident_edges):\n self.incident_edges = set(incident_edges) # overwrite the existing set of incident edges with the input set",
"def set_incident_edges(self, incident_edges):\n self.incident_edges = set(incident_edges) # overwrite the existing set of incident edges with the input set",
"def reset_edges(self):\n super().reset_edges()\n\n # If we're in default state, notheing to rest\n if self._modified_weighted_adj_matrices is None:\n return\n\n # Degrees are reset, so we need to reset the original weight scaling\n if self.scale_weights and not self.scaling_skipped:\n self._scale_weights_to_degree()\n self._generate_weighted_adj_matrices()\n else:\n # No weight scaling so just load prev values from cache\n self.weighted_adj_matrices = {**self.weighted_adj_matrices, **self._modified_weighted_adj_matrices}\n self._modified_weighted_adj_matrices = None",
"def setup_edges(self, *, _unused=None):\n # Edge from input data\n encoder_edge = ForwardEdge('input',\n self.hyperparameter_config)\n\n # Encoder setup\n self.children[0].setup_edges(encoder_edge)\n\n # Decoder setup\n decoder_edge = ForwardEdge(self.children[0].last_descendant())\n self.children[1].setup_edges(decoder_edge)\n\n # Gene setup is now complete\n self.setup_complete = True\n\n pass",
"def update(self):\r\n self.g = self.create_graph()",
"def _set_raw_structure(self, key):\n self.set_neighs(key)\n self.ifdistance = False",
"def __initilization(self,node_set):\n \n print \"*********************************\"\n \n for x in node_set:\n x.node_vol=np.transpose(np.matrix([cmath.exp(0), cmath.exp(complex(0,math.pi*2/3)), cmath.exp(complex(0,-math.pi*2/3))]))\n \n print \"Forward/Backward Algorithm Initialization Done!\"",
"def populate_graph(self):",
"def set_structure(self):\r\n self._evaluator.set_structure()",
"def set_right_edges(self):\n for v in self:\n for e in v.edges_list:\n e.linked[0]=v\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]\n for e in self.list_of_edges:\n e.linked[0]=self[self.search_index_by_coordinates(e.linked[0].coordinates)]\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]",
"def __init__(self, edgelist):\n self.edge = edgelist\n if edgelist:\n self.update_node2edge()",
"def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")",
"def set(self, **attrs):\n self.graph._setattrs(handle=self.handle, **attrs)",
"def update_temporal_edges(self):\n for parse in self:\n try:\n self.model.graph.bump_temporal_edge(parse[-2], parse_set[-1])\n except IndexError:\n pass",
"def edges(self, edges):\n if edges:\n edges = ensure_list(edges)\n for (nd_out, nd_in) in edges:\n if nd_out not in self.nodes or nd_in not in self.nodes:\n raise Exception(\n f\"edge {(nd_out, nd_in)} can't be added to the graph\"\n )\n self._edges = edges",
"def reset_edges(self):\n\n # Ensure original edges are stored in cache, otherwise nothing to do.\n if self._modified_edges is None or self._weighted_modified_edges is None:\n return\n\n # Restore the former value from cache\n self.adj_matrices = {**self.adj_matrices, **self._modified_edges}\n self.degree_weighted_matrices = {**self.degree_weighted_matrices, **self._weighted_modified_edges}\n self.in_degree = {**self.in_degree, **self._orig_in_degree}\n self.out_degree = {**self.out_degree, **self._orig_out_degree}\n\n # Reset the edge and degree cache\n self._modified_edges = None\n self._weighted_modified_edges = None\n self._orig_in_degree = dict()\n self._orig_out_degree = dict()",
"def set_min_edges(self, edges):\n self.min_edges = edges",
"def _setattrs(self, handle=\"\",\n edge=\"\", node=\"\", subg=\"\", proto=\"\",\n **attrs):\n head, tail = '', ''\n if edge:\n head, tail = edge\n\n node, head, tail, subg = map(encode_page, [node, head, tail, subg])\n\n self.changed = 1\n\n if proto in [\"node\", \"edge\"]:\n # Gets handle when called from Subraphs.set()\n if subg:\n handle = gv.findsubg(self.handle, subg)\n # Called by self.set() and GraphvizSubgraph.set(), handle known\n item = getattr(gv, \"proto%s\" % proto)(handle)\n # print \"item = gv.proto\" + proto + \"(g)\"\n elif head and tail:\n item = gv.findedge(gv.findnode(handle, head),\n gv.findnode(handle, tail))\n # print \"item = gv.findedge(gv.findnode(g, '\" + head + \"'),\" + \\\n # \"gv.findnode(g, '\" + tail + \"'))\"\n elif node:\n item = gv.findnode(handle, node)\n # print \"item = gv.findnode(g, '\" + node + \"')\"\n elif subg:\n item = gv.findsubg(handle, subg)\n # print \"item = gv.findsubg(g, '\" + subg + \"')\"\n elif handle:\n item = handle\n else:\n raise ValueError(\"No graph element or element type specified\")\n\n for key, elem in attrs.iteritems():\n if isinstance(elem, set):\n for e in elem:\n key, e = map(encode_page, [key, e])\n gv.setv(item, key, e)\n else:\n key, elem = map(encode_page, [key, elem])\n gv.setv(item, key, elem)\n # print \"gv.setv(item, '\" + key + \"', '\" + elem + \"')\"",
"def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self",
"def initialize_sets(self):\n for block in self.blocks:\n # Insert phi nodes from SSA stage into the assignments of the block\n for phi in block.phis:\n block.gen.setdefault(phi, []).insert(0, phi)\n\n # Update the kill set with the variables that are assigned to in\n # the block\n block.kill = set(block.gen)\n block.output = set(block.gen)\n #for entry in block.bound:\n # block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen",
"def setup_ant(self):\n self.visited_nodes[1:] = []\n self.actual_node = self.start_pos",
"def edge(self, edge: EdgeConfig):\n\n self._edge = edge",
"def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()"
] | [
"0.65527594",
"0.6314186",
"0.6303436",
"0.6144085",
"0.609869",
"0.59172857",
"0.58231676",
"0.580853",
"0.580853",
"0.58068633",
"0.5789417",
"0.5767009",
"0.5761213",
"0.57139456",
"0.5707714",
"0.5707328",
"0.5633258",
"0.5617545",
"0.56172055",
"0.55140746",
"0.54579234",
"0.5450689",
"0.54412305",
"0.54344857",
"0.5427144",
"0.5423254",
"0.53963447",
"0.5384265",
"0.53784084",
"0.53556144"
] | 0.6691929 | 0 |
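For orientation, set_edges expects an explicit edge list over the V features; a toy helper that installs a simple chain tree is sketched below. It is purely illustrative and assumes only that the trainer argument provides the set_edges method shown in the record above.

def install_chain_tree(trainer, num_features):
    # Install a chain over the features: 0 - 1 - ... - (V - 1).
    # Any spanning tree over the V vertices would be equally valid here.
    edges = [(v, v + 1) for v in range(num_features - 1)]
    trainer.set_edges(edges)
    return edges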
Train a TreeCat model using subsample-annealed MCMC. Let N be the number of data rows and V be the number of features. | def train_model(table, tree_prior, config):
assert isinstance(table, Table)
M = config['model_num_clusters']
D = config['model_latent_dim']
assert M >= 1
assert D >= 0
if D == 0:
Trainer = TreeCatTrainer
elif M == 1:
Trainer = TreeGaussTrainer
else:
Trainer = TreeMogTrainer
return Trainer(table, tree_prior, config).train() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self, X, T, *args, **kwargs):\n\n assert len(self.neurons) > 0, \"Add neurons to ELM before training it\"\n X, T = self._checkdata(X, T)\n args = [a.upper() for a in args] # make all arguments upper case\n\n # kind of \"enumerators\", try to use only inside that script\n MODELSELECTION = None # V / CV / MCCV / LOO / None\n NEURONRANKING = None # HQ / OP / None\n CLASSIFICATION = None # c / mc / None\n ADAPTIVE = False # batch / None\n Xv = None\n Tv = None\n k = None\n batch = None\n\n # check exclusive parameters\n assert len(set(args).intersection(set([\"V\", \"CV\", \"MCCV\", \"LOO\"]))) <= 1, \"Use only one of V / CV / MCCV / LOO\"\n assert len(set(args).intersection(set([\"HQ\", \"OP\"]))) <= 1, \"Use only one of HQ / OP\"\n assert len(set(args).intersection(set([\"C\", \"MC\"]))) <= 1, \"Use only one of classification / multiclass (c / mc)\"\n\n # parse parameters\n for a in args:\n if a == \"V\": # validation set\n assert \"Xv\" in kwargs.keys(), \"Provide validation dataset (Xv)\"\n assert \"Tv\" in kwargs.keys(), \"Provide validation targets (Tv)\"\n Xv = kwargs['Xv']\n Tv = kwargs['Tv']\n Xv, Tv = self._checkdata(Xv, Tv)\n MODELSELECTION = \"V\"\n if a == \"CV\":\n assert \"k\" in kwargs.keys(), \"Provide Cross-Validation number of splits (k)\"\n k = kwargs['k']\n MODELSELECTION = \"CV\"\n if a == \"LOO\":\n MODELSELECTION = \"LOO\"\n if a == \"HQ\":\n NEURONRANKING = \"HQ\"\n if a == \"OP\":\n NEURONRANKING = \"OP\"\n if a in (\"C\", \"CL\", \"CLASSIFICATION\"):\n CLASSIFICATION = \"c\"\n if a in (\"MC\", \"MULTICLASS\"):\n CLASSIFICATION = \"mc\"\n if a in (\"A\", \"AD\", \"ADAPTIVE\"):\n assert \"batch\" in kwargs.keys(), \"Provide batch size for adaptive ELM model (batch)\"\n batch = kwargs['batch']\n ADAPTIVE = True\n\n if MODELSELECTION == \"V\":\n self._train_v(X, T, Xv, Tv)\n else:\n self.Beta = self._solve(self.project(X), T)",
"def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()",
"def train_etd(X, num_hidden=10, random_seed=0, sparse=False, kernel=False):\n #svd = TruncatedSVD(n_components=num_hidden, n_iter=7, random_state=random_seed);\n svd = PCA(n_components=num_hidden);\n svd.fit(np.reshape(X, (X.shape[0], -1)));\n embed_func = lambda y : svd.transform(np.reshape(y, (y.shape[0], -1)))\n return embed_func",
"def ETC_train(X, y, n_trees):\r\n start_time = time.time()\r\n print('\\n'+ '# '*10+'[Training] ExtraTreeClassifier Model (ETC):'+ ' #'*10)\r\n clf = ExtraTreesClassifier(n_estimators=n_trees).fit(X, y)\r\n print('> '*2+'Training time: %.4f seconds.'%(time.time()-start_time))\r\n return clf",
"def trainTMVAMethods(self):\n # Open the file\n DataFile = ROOT.TFile(self.FileName)\n \n if DataFile.IsZombie() == True:\n print(\"Error opening data file {}. Is it a ROOT file?\".format(self.FileName))\n return False\n \n if DataFile.IsOpen() == False:\n print(\"Error opening data file\")\n return False\n\n # Get the data tree\n DataTree = DataFile.Get(\"Quality\")\n if DataTree == 0:\n print(\"Error reading data tree from root file\")\n return False\n\n\n # Limit the number of events:\n if DataTree.GetEntries() > self.MaxEvents:\n print(\"Reducing source tree size from \" + str(DataTree.GetEntries()) + \" to \" + str(self.MaxEvents) + \" (i.e. the maximum set)\")\n NewTree = DataTree.CloneTree(0);\n NewTree.SetDirectory(0);\n\n for i in range(0, self.MaxEvents):\n DataTree.GetEntry(i)\n NewTree.Fill()\n\n DataTree = NewTree;\n\n\n # Initialize TMVA\n ROOT.TMVA.Tools.Instance()\n\n FullPrefix = self.OutputPrefix\n ResultsFile = ROOT.TFile(FullPrefix + \".root\", \"RECREATE\")\n\n Factory = ROOT.TMVA.Factory(\"TMVAClassification\", ResultsFile, \"!V:!Silent:Color:DrawProgressBar:Transformations=I;D;P;G,D:AnalysisType=Classification\")\n\n DataLoader = ROOT.TMVA.DataLoader(self.OutputPrefix)\n\n IgnoredBranches = [ 'SimulationID', 'SequenceLength']\n Branches = DataTree.GetListOfBranches()\n\n for Name in IgnoredBranches:\n DataLoader.AddSpectator(Name, \"F\")\n\n for B in list(Branches):\n if not B.GetName() in IgnoredBranches:\n if not B.GetName().startswith(\"Evaluation\"):\n DataLoader.AddVariable(B.GetName(), \"F\")\n\n SignalCut = ROOT.TCut(\"EvaluationIsCompletelyAbsorbed >= 0.5\")\n BackgroundCut = ROOT.TCut(\"EvaluationIsCompletelyAbsorbed < 0.5\")\n DataLoader.SetInputTrees(DataTree, SignalCut, BackgroundCut)\n\n DataLoader.PrepareTrainingAndTestTree(SignalCut, BackgroundCut, \"nTrain_Signal=0:nTrain_Background=0:SplitMode=Random:NormMode=NumEvents:!V\")\n\n MethodList = []\n\n # Neural Networks\n if 'MLP' in self.Algorithms:\n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kMLP, \"MLP\", \"H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+10,N-5:TestRate=5:TrainingMethod=BFGS:!UseRegulator\")\n #method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kMLP, \"MLP\", \"H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+10,N-5:TestRate=5:!UseRegulator\")\n #method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kMLP, \"MLP\", \"H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+10,N-5:TestRate=6:TrainingMethod=BFGS:Sampling=0.3:SamplingEpoch=0.8:ConvergenceImprove=1e-6:ConvergenceTests=15:!UseRegulator\")\n MethodList.append(\"MLP\")\n\n\n # PDEFoamBoost\n if 'PDEFoamBoost' in self.Algorithms:\n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kPDEFoam, \"PDEFoamBoost\", \"!H:!V:Boost_Num=100:Boost_Transform=linear:SigBgSeparate=F:MaxDepth=4:UseYesNoCell=T:DTLogic=MisClassificationError:FillFoamWithOrigWeights=F:TailCut=0:nActiveCells=2000:nBin=50:Nmin=200:Kernel=None:Compress=T\")\n #method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kPDEFoam, \"PDEFoamBoost\", \"!H:!V:Boost_Num=30:Boost_Transform=linear:SigBgSeparate=F:MaxDepth=4:UseYesNoCell=T:DTLogic=MisClassificationError:FillFoamWithOrigWeights=F:TailCut=0:nActiveCells=500:nBin=20:Nmin=400:Kernel=None:Compress=T\")\n MethodList.append(\"PDEFoamBoost\")\n\n\n # PDERSPCA\n if 'PDERSPCA' in self.Algorithms:\n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kPDERS, \"PDERSPCA\", 
\"!H:!V:VolumeRangeMode=Adaptive:KernelEstimator=Gauss:GaussSigma=0.3:NEventsMin=400:NEventsMax=600:VarTransform=PCA\")\n MethodList.append(\"PDERSPCA\")\n\n\n # Random Forest Boosted Decision Trees\n if 'BDT' in self.Algorithms:\n #method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, \"BDT\", \"!H:!V:NTrees=1000:MinNodeSize=1%:MaxDepth=3:BoostType=AdaBoost:AdaBoostBeta=0.4:SeparationType=CrossEntropy:nCuts=100:PruneMethod=NoPruning\")\n #method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, \"BDT\", \"!H:!V:NTrees=850:nEventsMin=150:MaxDepth=3:BoostType=AdaBoost:AdaBoostBeta=0.5:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning\")\n #method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, \"BDT\", \"!H:!V:NTrees=1000:nEventsMin=1000:MaxDepth=4:BoostType=AdaBoost:AdaBoostBeta=0.5:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning\")\n \n options = \"!H:!V:NTrees={}:MinNodeSize={}%:MaxDepth={}:BoostType=AdaBoost:AdaBoostBeta={}:SeparationType=CrossEntropy:nCuts=100:PruneMethod=NoPruning\".format(self.BDT_NTrees, self.BDT_MinNodeSize, self.BDT_MaxDepth, self.BDT_AdaBoostBeta)\n \n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, \"BDT\", ROOT.TString(options))\n MethodList.append(\"BDT\")\n\n\n # State Vector Machine\n if 'SVM' in self.Algorithms:\n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kSVM, \"SVM\", \"Gamma=0.25:Tol=0.001:VarTransform=Norm\");\n MethodList.append(\"SVM\")\n\n\n # DNN\n if 'DNN_CPU' in self.Algorithms:\n Layout = \"Layout=TANH|N,TANH|N/2,LINEAR\"\n\n Training0 = \"LearningRate=1e-1,Momentum=0.9,Repetitions=1,ConvergenceSteps=30,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.5+0.5+0.5,Multithreading=True\"\n Training1 = \"LearningRate=1e-2,Momentum=0.9,Repetitions=1,ConvergenceSteps=30,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True\"\n Training2 = \"LearningRate=1e-3,Momentum=0.0,Repetitions=1,ConvergenceSteps=30,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True\"\n TrainingStrategy = \"TrainingStrategy=\" + Training0 + \"|\" + Training1 + \"|\" + Training2\n\n Options = \"!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=N:WeightInitialization=XAVIERUNIFORM:\" + Layout + \":\" + TrainingStrategy\n\n Options += \":Architecture=CPU\"\n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kDNN, \"DNN_CPU\", Options)\n MethodList.append(\"DNN_CPU\")\n\n\n # DNN\n if 'DNN_GPU' in self.Algorithms:\n Layout = \"Layout=TANH|N,TANH|N/2,LINEAR\"\n\n Training0 = \"LearningRate=1e-1,Momentum=0.9,Repetitions=1,ConvergenceSteps=100,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.5+0.5+0.5,Multithreading=True\"\n Training1 = \"LearningRate=1e-2,Momentum=0.9,Repetitions=1,ConvergenceSteps=100,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True\"\n Training2 = \"LearningRate=1e-3,Momentum=0.0,Repetitions=1,ConvergenceSteps=100,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True\"\n TrainingStrategy = \"TrainingStrategy=\" + Training0 + \"|\" + Training1 + \"|\" + Training2\n\n Options = \"!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=N:WeightInitialization=XAVIERUNIFORM:\" + Layout + \":\" + TrainingStrategy\n\n Options += \":Architecture=GPU\"\n method = Factory.BookMethod(DataLoader, 
ROOT.TMVA.Types.kDNN, \"DNN_GPU\", Options)\n MethodList.append(\"DNN_GPU\")\n\n\n # DL\n if 'DL_CPU' in self.Algorithms:\n Setup = \"!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=N:WeightInitialization=XAVIERUNIFORM\"\n Layout = \"Layout=TANH|N,TANH|N/2,LINEAR\"\n TrainingStrategy = \"TrainingStrategy=Optimizer=ADAM,LearningRate=0.001,TestRepetitions=1,MaxEpochs=100000,ConvergenceSteps=500,BatchSize=100,DropConfig=0.0\"\n Architecture = \"Architecture=CPU\" \n \n Options = Setup + \":\" + Layout + \":\" + TrainingStrategy + \":\" + Architecture\n\n method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kDL, \"DL_CPU\", Options)\n MethodList.append(\"DL_CPU\")\n\n\n # Finally test, train & evaluate all methods\n print(\"Started training\")\n Factory.TrainAllMethods()\n Factory.TestAllMethods()\n Factory.EvaluateAllMethods()\n\n print(\"\\nTake a look at the results in root with:\\nTMVA::TMVAGui(\\\"Results.root\\\");\\nEspecially plot 4a\")\n\n print(Factory.GetMethod(ROOT.TString(self.OutputPrefix), ROOT.TString(\"BDT\")).GetROCIntegral())\n\n self.ResultsTMVA.clear()\n for method in MethodList:\n print(\"{}: {}\".format(method, Factory.GetMethod(ROOT.TString(self.OutputPrefix), ROOT.TString(\"BDT\")).GetROCIntegral()))\n self.ResultsTMVA[method] = Factory.GetMethod(ROOT.TString(self.OutputPrefix), ROOT.TString(\"BDT\")).GetROCIntegral()\n \n return True",
"def sample_estimator(model, num_classes, train_loader):\n import sklearn.covariance\n\n model.eval()\n with torch.no_grad():\n group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)\n correct, total = 0, 0\n num_output = 1\n num_sample_per_class = np.empty(num_classes)\n num_sample_per_class.fill(0)\n list_features = []\n for i in range(num_output):\n temp_list = []\n for j in range(num_classes):\n temp_list.append(0)\n list_features.append(temp_list)\n\n for data, target in train_loader:\n total += data.size(0)\n data = data.cuda()\n data = Variable(data)\n output = model(data)['logits']\n # output, out_features = out['logits'], out['logits']\n\n out_features = output.view(output.size(0), output.size(1), -1)\n out_features = torch.mean(out_features, dim=2)\n\n # compute the accuracy\n pred = output.data.max(1)[1]\n equal_flag = pred.eq(target.cuda()).cpu()\n correct += equal_flag.sum()\n\n # construct the sample matrix\n for i in range(data.size(0)):\n label = target[i]\n out_count = 0\n if num_sample_per_class[label] == 0:\n list_features[out_count][label] = out_features[i].view(1, -1)\n else:\n list_features[out_count][label] \\\n = torch.cat((list_features[out_count][label], out_features[i].view(1, -1)), 0)\n num_sample_per_class[label] += 1\n\n sample_class_mean = []\n out_count = 0\n num_feature = num_classes\n temp_list = torch.Tensor(num_classes, num_feature).cuda()\n for j in range(num_classes):\n temp_list[j] = torch.mean(list_features[out_count][j], dim=0)\n sample_class_mean.append(temp_list)\n\n precision = []\n for k in range(num_output):\n X = 0\n for i in range(num_classes):\n if i == 0:\n X = list_features[k][i] - sample_class_mean[k][i]\n else:\n X = torch.cat((X, list_features[k][i] - sample_class_mean[k][i]), dim=0)\n\n # find inverse\n group_lasso.fit(X.cpu().numpy())\n temp_precision = group_lasso.precision_\n temp_precision = torch.from_numpy(temp_precision).float().cuda()\n precision.append(temp_precision)\n\n print('\\n Training Accuracy:({:.2f}%)\\n'.format(100. * correct / total))\n\n return sample_class_mean, precision",
"def __init__(self, table, tree_prior, config):\n logger.info('TreeCatTrainer of %d x %d data', table.num_rows,\n table.num_cols)\n assert isinstance(table, Table)\n N = table.num_rows # Number of rows.\n V = table.num_cols # Number of features, i.e. vertices.\n TreeTrainer.__init__(self, N, V, tree_prior, config)\n assert self._num_rows == N\n assert len(self._added_rows) == 0\n self._table = table\n self._assignments = np.zeros([N, V], dtype=np.int8)\n\n # These are useful dimensions to import into locals().\n E = V - 1 # Number of edges in the tree.\n K = V * (V - 1) // 2 # Number of edges in the complete graph.\n M = self._config['model_num_clusters'] # Clusters per latent.\n assert M <= 128, 'Invalid model_num_clusters > 128: {}'.format(M)\n self._VEKM = (V, E, K, M)\n\n # Use Jeffreys priors.\n self._vert_prior = 0.5\n self._edge_prior = 0.5 / M\n self._feat_prior = 0.5 / M\n self._meas_prior = self._feat_prior * np.array(\n [(table.ragged_index[v + 1] - table.ragged_index[v])\n for v in range(V)],\n dtype=np.float32).reshape((V, 1))\n self._gammaln_table = gammaln(\n np.arange(1 + N, dtype=np.float32) + self._edge_prior)\n assert self._gammaln_table.dtype == np.float32\n\n # Sufficient statistics are maintained always.\n self._vert_ss = np.zeros([V, M], np.int32)\n self._edge_ss = np.zeros([E, M, M], np.int32)\n self._feat_ss = np.zeros([table.ragged_index[-1], M], np.int32)\n self._meas_ss = np.zeros([V, M], np.int32)\n\n # Temporaries.\n self._vert_probs = np.empty(self._vert_ss.shape, np.float32)\n self._edge_probs = np.empty(self._edge_ss.shape, np.float32)\n self._feat_probs = np.empty(self._feat_ss.shape, np.float32)\n self._meas_probs = np.empty(self._meas_ss.shape, np.float32)\n\n # Maintain edge_probs.\n np.add(self._edge_ss, self._edge_prior, out=self._edge_probs)",
"def fit(self, dataset, verbose=False):\n self.inputs = dataset.shape[1]-1\n self.bits = np.ceil(\n np.log2(\n np.abs(\n np.amax(dataset, axis=0) -\n np.amin(dataset, axis=0)))).astype(np.int32)\n self.is_neg = (np.amin(dataset, axis=0) < 0).astype(np.int8)\n\n self.trees = []\n\n for i in range(self.n_trees):\n if verbose:\n print(\"... creating tree {}\".format(i))\n\n # as subsample is an expensive operation, we will only perform it if it\n # reduces the dataset substantially\n\n if self.sample_size and self.sample_size < 0.3 * dataset.shape[0]:\n if verbose:\n print(\"... generated subsample of size {}\".format(self.sample_size))\n sample = self.subsample(dataset)\n else:\n sample = dataset\n\n self.trees.append(fit_parallel(\n self.max_depth, self.min_size, sample, True))",
"def multi_voters_example():\n # MinCq parameters, fixed to a given value as this is a simple example.\n mu = 0.001\n\n # We load iris dataset, We convert the labels to be -1 or 1, and we split it in two parts: train and test.\n dataset = load_iris()\n dataset.target[dataset.target == 0] = -1\n dataset.target[dataset.target == 2] = -1\n X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, random_state=42)\n\n # We create a set of voters of different kind.\n voters = voter.KernelVotersGenerator(rbf_kernel, gamma=0.01).generate(X_train)\n voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=0.1).generate(X_train))\n voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=1).generate(X_train))\n voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=10).generate(X_train))\n voters = np.append(voters, voter.KernelVotersGenerator(rbf_kernel, gamma=100).generate(X_train))\n voters = np.append(voters, voter.KernelVotersGenerator(polynomial_kernel, degree=2).generate(X_train))\n voters = np.append(voters, voter.KernelVotersGenerator(polynomial_kernel, degree=3).generate(X_train))\n voters = np.append(voters, voter.KernelVotersGenerator(linear_kernel).generate(X_train))\n\n # We train MinCq using these voters, on the training set.\n learner = MinCqLearner(mu, voters_type='manual')\n learner.fit(X_train, y_train, voters)\n\n # We predict the train and test labels and print the risk.\n predictions_train = learner.predict(X_train)\n predictions_test = learner.predict(X_test)\n\n print(\"\\nMultiVotersMinCq\")\n print(\"-----------\")\n print(\"Training set risk: {:.4f}\".format(zero_one_loss(y_train, predictions_train)))\n print(\"Testing set risk: {:.4f}\\n\".format(zero_one_loss(y_test, predictions_test)))",
"def train(self,trainset):\n \n self.n_classes = len(trainset.metadata['targets'])\n if self.n_classes > 2:\n raise ValueError('Invalid. Should have 2 classes.')\n \n features = np.zeros((len(trainset),trainset.metadata['input_size']))\n labels = np.zeros((len(trainset)),dtype='int')\n for i,xy in enumerate(trainset):\n x,y = xy\n features[i] = x\n labels[i] = y\n\n if self.criterion == 'information_gain':\n def criterion_fcn(labels0, labels1):\n return libmilk.supervised.tree.information_gain(labels0, labels1, include_entropy=self.include_entropy)\n elif self.criterion == 'z1_loss':\n def criterion_fcn(labels0, labels1):\n return libmilk.supervised.tree.z1_loss(labels0, labels1)\n else:\n raise ValueError('Invalid parameter: '+self.criterion+'. Should be either \\'information_gain\\' or \\'z1_loss\\'')\n\n learner = libmilk.supervised.tree_learner(criterion=criterion_fcn,min_split=self.min_split,return_label=True)\n #self.subsample = subsample\n #self.R = R\n model = learner.train(features, labels)\n \n self.tree = model",
"def randomSplitLOOBaselineCV(ncvs, svm_mat, labels, subjarray, motorOutput=False, permutation=False, decoder='similarity', nproc=5, featsel=False):\n \n\n ntasks = len(np.unique(labels))\n nsamples = svm_mat.shape[0]\n nsubjs = nsamples/ntasks\n\n subjects = np.unique(subjarray)\n indices = np.arange(nsamples)\n \n #numsubjs_perfold = 8\n numsubjs_perfold = 24\n if nsubjs%numsubjs_perfold!=0: \n raise Exception(\"Error: Folds don't match number of subjects\")\n \n nfolds = nsubjs/numsubjs_perfold\n subj_array_folds = subjarray.copy()\n \n inputs = [] \n \n nfolds = int(nfolds)\n for fold in range(nfolds):\n# #test_subjs = np.random.choice(subj_array_folds,numsubjs_perfold,replace=False)\n# test_subjs = [subjects[fold]]\n# train_subjs_all = np.delete(subjects,test_subjs)\n ## run 8 fold CV\n test_subjs_ind = np.random.choice(np.arange(len(subj_array_folds)),numsubjs_perfold,replace=False)\n test_subjs = subj_array_folds[test_subjs_ind]\n # Training subjects are all subjects - those in test set\n train_subjs_all = np.delete(subjects,test_subjs)\n # For each fold, train on test subjects independently\n\n for test_subj in test_subjs:\n # Randomly sample half of train set subjects for each cv (CV bootstrapping)\n train_subjs = np.random.choice(train_subjs_all,\n int(len(train_subjs_all)),\n replace=True)\n# train_subjs = train_subjs_all\n\n train_ind = []\n for subj in train_subjs:\n train_ind.extend(np.where(subjarray==subj)[0])\n\n # Only obtain single test subject\n test_ind = np.where(subjarray==test_subj)[0]\n \n train_ind = np.asarray(train_ind)\n test_ind = np.asarray(test_ind)\n\n trainset = svm_mat[train_ind,:]\n testset = svm_mat[test_ind,:]\n\n # Normalize trainset and testset\n mean = np.mean(svm_mat[train_ind,:],axis=0)\n mean.shape = (1,len(mean))\n std = np.std(svm_mat[train_ind,:],axis=0)\n std.shape = (1,len(std))\n\n trainset = np.divide((trainset - mean),std)\n testset = np.divide((testset - mean),std)\n \n trainlabels = labels[train_ind].copy()\n testlabels = labels[test_ind].copy()\n\n\n if motorOutput:\n ## Feature selection and downsampling\n unique_labels = np.unique(labels)\n feat1_labs = np.where(trainlabels==unique_labels[0])[0]\n feat2_labs = np.where(trainlabels==unique_labels[1])[0]\n # Perform t-test\n t, p = stats.ttest_rel(trainset[feat1_labs,:],trainset[feat2_labs,:],axis=0)\n h0, qs = mc.fdrcorrection0(p)\n # h0 = p<0.1\n # Construct feature masks\n feat1_mask = np.multiply(t>0,h0).astype(bool)\n feat2_mask = np.multiply(t<0,h0).astype(bool)\n # feat1_mask = t>0\n # feat2_mask = t<0\n # Downsample training set into original vertices into 2 ROI signals\n trainset_downsampled = np.zeros((trainset.shape[0],2))\n trainset_downsampled[:,0] = np.nanmean(trainset[:,feat1_mask],axis=1)\n trainset_downsampled[:,1] = np.nanmean(trainset[:,feat2_mask],axis=1)\n #trainset_downsampled = trainset[:,h0]\n # Downsample test set into original vertices\n testset_downsampled = np.zeros((testset.shape[0],2))\n testset_downsampled[:,0] = np.nanmean(testset[:,feat1_mask],axis=1)\n testset_downsampled[:,1] = np.nanmean(testset[:,feat2_mask],axis=1)\n #testset_downsampled = testset[:,h0]\n\n ## permutation\n if permutation:\n np.random.shuffle(trainlabels)\n \n if np.sum(feat1_mask)==0 or np.sum(feat2_mask==0):\n inputs.append((trainset,testset,trainlabels,testlabels,decoder))\n else:\n inputs.append((trainset_downsampled,testset_downsampled,trainlabels,testlabels,decoder))\n \n elif featsel:\n #### Revision addition - select for vertices based on FDR-corrected p<0.05 for 
noncircular accuracy decoding of motor output\n unique_labels = np.unique(labels)\n feat1_labs = np.where(trainlabels==unique_labels[0])[0]\n feat2_labs = np.where(trainlabels==unique_labels[1])[0]\n # Perform t-test\n t, p = stats.ttest_rel(trainset[feat1_labs,:],trainset[feat2_labs,:],axis=0)\n h0, qs = mc.fdrcorrection0(p)\n # Construct feature masks\n #feat_mask = h0\n feat_mask = p<0.05\n # use fdr-corrected vertices for feature selection \n trainset = trainset[:,feat_mask]\n testset = testset[:,feat_mask]\n\n # if permutation\n if permutation:\n np.random.shuffle(trainlabels)\n\n inputs.append((trainset,testset,trainlabels,testlabels,decoder))\n else:\n ## permutation\n if permutation:\n np.random.shuffle(trainlabels)\n# trainlabels = labels[train_ind]\n# testlabels = labels[test_ind]\n# f, p = f_classif(trainset,trainlabels)\n# thresh = 0.1\n# feat_mask = p < thresh\n# inputs.append((trainset[:,feat_mask],testset[:,feat_mask],labels[train_ind],labels[test_ind])) \n \n inputs.append((trainset,testset,trainlabels,testlabels,decoder))\n\n \n \n subj_array_folds = np.delete(subj_array_folds,test_subjs)\n \n #print('trainset.shape:', trainset.shape)\n #print('testset.shape:', testset.shape)\n #print('trainlabels:', inputs[0][2])\n #print('testlabels:', inputs[0][3])\n #print('inputs[0]:', len(inputs[0]))\n pool = mp.Pool(processes=nproc)\n scores = pool.starmap_async(_decoding,inputs).get()\n pool.close()\n pool.join()\n \n# subj_acc = np.zeros((len(subjects),))\n# scount = 0\n# i = 0\n# for subj in subjects:\n# subjmean = []\n# for cv in range(ncvs):\n# subjmean.append(scores[i])\n# i += 1\n \n# subj_acc[scount] = np.mean(subjmean)\n \n# scount += 1\n\n# return subj_acc\n acc = []\n r_match = []\n r_mismatch = []\n confusion_mat = []\n for score in scores:\n acc.extend(score[0])\n r_match.append(score[1])\n r_mismatch.append(score[2])\n confusion_mat.append(score[3])\n \n confusion_mat = np.asarray(confusion_mat)\n confusion_mat = np.mean(confusion_mat,axis=0)\n\n return acc, r_match, r_mismatch, confusion_mat",
"def wrapper_train(tree_depth, demos, validation_demos, pred_data=[None,None], verbose=True):\n return train(program_gen_step_size = 1000, \n num_programs = NUM_PROGRAMS, \n num_dts = 5, \n max_num_particles = 25, \n input_demos = demos, \n further_demos = validation_demos, \n tree_depth = tree_depth, \n return_prior=True,\n pred_data=pred_data,\n verbose=verbose)",
"def train_var_joint(Dataset, model, criterion, epoch, optimizer, writer, device, args):\n\n # Create instances to accumulate losses etc.\n class_losses = AverageMeter()\n recon_losses = AverageMeter()\n kld_losses = AverageMeter()\n losses = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n # train\n for i, (inp, target) in enumerate(Dataset.train_loader):\n inp = inp.to(device)\n class_target = target.to(device)\n recon_target = inp\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # compute model forward\n class_samples, recon_samples, mu, std = model(inp)\n\n # calculate loss\n class_loss, recon_loss, kld_loss = criterion(class_samples, class_target, recon_samples, recon_target,\n mu, std, device)\n\n # add the individual loss components together and weight the KL term.\n loss = class_loss + recon_loss + args.var_beta * kld_loss\n\n # take mean to compute accuracy. Note if variational samples are 1 this only gets rid of a dummy dimension.\n output = torch.mean(class_samples, dim=0)\n\n # record precision/accuracy and losses\n prec1 = accuracy(output, class_target)[0]\n top1.update(prec1.item(), inp.size(0))\n\n losses.update((class_loss + recon_loss + kld_loss).item(), inp.size(0))\n class_losses.update(class_loss.item(), inp.size(0))\n recon_losses.update(recon_loss.item(), inp.size(0))\n kld_losses.update(kld_loss.item(), inp.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print progress\n if i % args.print_freq == 0:\n print('Training: [{0}][{1}/{2}]\\t' \n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Class Loss {cl_loss.val:.4f} ({cl_loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Recon Loss {recon_loss.val:.4f} ({recon_loss.avg:.4f})\\t'\n 'KL {KLD_loss.val:.4f} ({KLD_loss.avg:.4f})'.format(\n epoch+1, i, len(Dataset.train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses,\n cl_loss=class_losses, top1=top1, recon_loss=recon_losses, KLD_loss=kld_losses))\n\n # TensorBoard summary logging\n writer.add_scalar('training/train_precision@1', top1.avg, epoch)\n writer.add_scalar('training/train_average_loss', losses.avg, epoch)\n writer.add_scalar('training/train_KLD', kld_losses.avg, epoch)\n writer.add_scalar('training/train_class_loss', class_losses.avg, epoch)\n writer.add_scalar('training/train_recon_loss', recon_losses.avg, epoch)\n\n print(' * Train: Loss {loss.avg:.5f} Prec@1 {top1.avg:.3f}'.format(loss=losses, top1=top1))",
"def Create_truven(object):\n\tdef __init__(self, is_train = True, **config):\n\t\tprint(2)\n\t\tpass\n\n\t\t'''\n\t\tself.is_train = is_train\n\t\tfilename = config['train_file'] if is_train else config['test_file']\n\t\tbatch_size = config['batch_size']\n\t\tself.admis_dim = config['input_dim']\n\t\tself.max_length = config['max_length']\t\t\n\t\twith open(filename, 'r') as fin:\n\t\t\tlines = fin.readlines()\n\t\t\tf1 = lambda x:[int(i) for i in x.rstrip().split(';')[-1].split(' ')]\n\t\t\tself.label = list(map(f1, lines))\n\t\t\tf2 = lambda x:[[int(j) for j in i.split(' ')] for i in x.rstrip().split(config['separate_symbol_between_visit'])[:-1]]\n\t\t\tself.data_lst = list(map(self.line_to_visit_level, lines))\n\t\t\tadd = lambda x,y:x+y\n\t\t\tfrom functools import reduce\n\t\t\tf3 = lambda x:list(set(reduce(add,x)))\n\t\t\tself.data_decoder = list(map(f3, self.data_lst))\n\t\t\tdel lines\n\t\tself.batch_size = batch_size\n\t\tself.total_num = len(self.label)\n\t\tself.batch_num = int(np.ceil(self.total_num / self.batch_size))\n\t\tself.batch_id = 0 \n\t\tself.random_shuffle = np.arange(self.total_num) ### no shuffle at first epoch \n\t\t'''\n\t'''\n\tdef next(self):\n\t\tbgn = self.batch_id * self.batch_size\n\t\tendn = bgn + self.batch_size\n\t\tself.batch_id += 1\n\t\tif self.batch_id > self.batch_num - 1:\n\t\t\tself.batch_id = 0\n\t\treturn self.label[bgn:endn], self.data_lst[bgn:endn], self.data_decoder[bgn:endn]\n\t\t#data, label = self.data_lst[bgn:endn], self.label[bgn:endn]\n\t\t#return data, label\n\t'''",
"def train_ensemble(table, tree_prior, config):\n tasks = []\n for sub_seed in range(config['model_ensemble_size']):\n sub_config = config.copy()\n sub_config['seed'] += sub_seed\n tasks.append((table, tree_prior, sub_config))\n return parallel_map(_train_model, tasks)",
"def smote(X, y, N=100, K=5):\n\n seed = 1\n np.random.seed(seed)\n\n # Step 1. Define minority and majority class examples, and minority class features\n ms = int(sum(y))\n min_cls = X[0:ms, :]\n\n # Step 2. If N is less than 100, then only a random percent will be smoted.\n if N < 100:\n np.random.shuffle(min_cls)\n ms = int((N / 100) * ms)\n N = 100\n\n syn_ex = int(N / 100) * ms\n\n # Step 3. Compute the k-NN for each minority class\n clf = neighbors.KNeighborsClassifier()\n clf.fit(X, y)\n\n neighborhoods = []\n for i in range(ms):\n xi = X[i, :].reshape(1, -1)\n neighbours = clf.kneighbors(xi, n_neighbors=K, return_distance=False)[0]\n # Skip itself in the neighborhood\n neighbours = neighbours[1:]\n\n # Find all the minority examples\n neighborhood = []\n for index in neighbours:\n if index <= ms - 1:\n neighborhood.append(index)\n\n neighborhoods.append(neighborhood)\n\n # Step 4. Determine the amount of SMOTE examples to develop per neighbourhood.\n\n num_ex = int(syn_ex / len(neighborhoods))\n\n # Step 5. Generate SMOTE examples\n syn_data = []\n for i in range(ms):\n xi = X[i, :].reshape(1, -1)\n for j in range(num_ex):\n # if the neighbourhood is not empty\n if neighborhoods[i]:\n index = np.random.choice(neighborhoods[i])\n xzi = X[index, :].reshape(1, -1)\n si = xi + (xzi - xi) * np.random.uniform(0, 1)\n syn_data.append(si)\n\n # Build the data matrix\n data = []\n for values in syn_data:\n data.append(values[0])\n\n print(\"{} amount of minority class samples generated\".format(len(data)))\n\n # Step 6. Re-build the data set with synthetic data added\n\n # Concatenate the positive labels with the newly made data\n labels = np.ones([len(data), 1])\n data = np.concatenate([labels, data], axis=1)\n\n # Concatenate with old data\n org_data = np.concatenate([y.reshape(-1, 1), X], axis=1)\n data = np.concatenate([data, org_data])\n\n # Test the new generated data\n test = []\n for values in syn_data:\n a = clf.predict(values)\n test.append(a)\n\n print(\"Using the old classifier, {} out of {} would be classified as minority.\".format(np.sum(test), len(syn_data)))\n\n return data, neighborhoods",
"def retrain_sub_model(self):\r\n \r\n self.sub_model = self.load_weights_to_sub_model()\r\n X = np.array(self.conv4_characters_list)\r\n X = np.reshape(X, (X.shape[0]*X.shape[1], X.shape[2]))\r\n y = np.repeat(np.arange(1283), 9)\r\n \r\n opt = optimizers.Adam(lr=0.001)\r\n self.sub_model.compile(optimizer=opt,loss='sparse_categorical_crossentropy',metrics=['accuracy'])\r\n print(\"***Start to creat new decision model***\")\r\n self.sub_model.fit(X, y, epochs=20)\r\n print(\"***Finish***\")",
"def train_multinomial(corpus, vector = 'tfidf', split_size = 0.2, **kwargs):\n if not (\n isinstance(corpus, str)\n or isinstance(corpus, list)\n or isinstance(corpus, tuple)\n ):\n raise ValueError(\n 'corpus must be a string location or list of strings or tuple of strings'\n )\n if not isinstance(vector, str):\n raise ValueError('vector must be a string')\n if not isinstance(split_size, float):\n raise ValueError('split_size must be a float')\n if not (split_size > 0 and split_size < 1):\n raise ValueError('split_size must bigger than 0, less than 1')\n multinomial, labels, vectorize = None, None, None\n if isinstance(corpus, str):\n trainset = datasets.load_files(\n container_path = corpus, encoding = 'UTF-8'\n )\n trainset.data, trainset.target = separate_dataset(trainset)\n data, target = trainset.data, trainset.target\n labels = trainset.target_names\n if isinstance(corpus, list) or isinstance(corpus, tuple):\n if not len(corpus[0]) == 2:\n raise ValueError(\n 'element of corpus must be list or tuple of (string, label)'\n )\n if not isinstance(corpus[0][0], str):\n raise ValueError('left hand side of element must be a string')\n corpus = np.array(corpus)\n data, target = corpus[:, 0].tolist(), corpus[:, 1].tolist()\n labels = np.unique(target).tolist()\n target = LabelEncoder().fit_transform(target)\n\n from sklearn.utils import shuffle\n\n data, target = shuffle(data, target)\n\n for i in range(len(data)):\n data[i] = _classification_textcleaning_stemmer(data[i])\n if 'tfidf' in vector.lower():\n vectorize = TfidfVectorizer(**kwargs).fit(data)\n elif 'bow' in vector.lower():\n vectorize = CountVectorizer(**kwargs).fit(data)\n elif 'skip-gram' in vector.lower():\n vectorize = SkipGramVectorizer(**kwargs).fit(data)\n else:\n raise Exception(\n \"vectorizing techniques not supported, only support ['tf-idf', 'bow', 'skip-gram']\"\n )\n vectors = vectorize.transform(data)\n multinomial = MultinomialNB()\n if split_size:\n train_X, test_X, train_Y, test_Y = train_test_split(\n vectors, target, test_size = split_size\n )\n multinomial.partial_fit(train_X, train_Y, classes = np.unique(target))\n predicted = multinomial.predict(test_X)\n print(\n metrics.classification_report(\n test_Y, predicted, target_names = labels\n )\n )\n else:\n multinomial.partial_fit(vectors, target, classes = np.unique(target))\n predicted = multinomial.predict(vectors)\n print(\n metrics.classification_report(\n target, predicted, target_names = labels\n )\n )\n return USER_BAYES(\n multinomial,\n labels,\n vectorize,\n cleaning = _classification_textcleaning_stemmer,\n )",
"def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))",
"def fit(self, X_train, y_train):\n for i in range(self.N):\n h = RandomDecisionTree(candidate_splits=self.candidate_splits, depth=self.max_depth)\n h = h.fit(*self.bootstrap(X_train, y_train))\n self.learners.append(h)",
"def actflowRandomSplitLOOBaselineCV(ncvs, svm_mat, actflow_svm_mat, labels, subjarray, featsel=True, permutation=False,nproc=5):\n \n ntasks = len(np.unique(labels))\n nsamples = svm_mat.shape[0]\n nsubjs = nsamples/ntasks\n\n subjects = np.unique(subjarray)\n indices = np.arange(nsamples)\n \n #numsubjs_perfold = 8\n numsubjs_perfold = 24\n if nsubjs%numsubjs_perfold!=0: \n raise Exception(\"Error: Folds don't match number of subjects\")\n \n nfolds = nsubjs/numsubjs_perfold\n subj_array_folds = subjarray.copy()\n \n inputs = [] \n \n nfolds = int(nfolds)\n for fold in range(nfolds):\n #test_subjs = np.random.choice(subj_array_folds,numsubjs_perfold,replace=False)\n #test_subjs = [subjects[fold]]\n # subj array folds are all subjects that have not yet been tested on\n test_subjs_ind = np.random.choice(np.arange(len(subj_array_folds)),numsubjs_perfold,replace=False)\n test_subjs = subj_array_folds[test_subjs_ind]\n # Training subjects are all subjects - those in test set\n train_subjs_all = np.delete(subjects,test_subjs)\n # For each fold, train on test subjects independently\n for test_subj in test_subjs:\n # Randomly sample half of train set subjects for each cv (CV bootstrapping)\n train_subjs = np.random.choice(train_subjs_all,\n int(len(train_subjs_all)),\n replace=True)\n# train_subjs = train_subjs_all\n\n train_ind = []\n for subj in train_subjs:\n train_ind.extend(np.where(subjarray==subj)[0])\n\n # Only obtain single test subject\n test_ind = np.where(subjarray==test_subj)[0]\n #test_ind = []\n #for subj in test_subjs:\n # test_ind.extend(np.where(subjarray==subj)[0])\n \n train_ind = np.asarray(train_ind)\n test_ind = np.asarray(test_ind)\n\n\n ## Predicted-to-actual\n trainset = actflow_svm_mat[train_ind,:]\n testset = svm_mat[test_ind,:]\n\n ## Actual-to-predicted\n# trainset = svm_mat[train_ind,:]\n# testset = actflow_svm_mat[test_ind,:]\n# orig_training = svm_mat[train_ind,:]\n\n # Normalize trainset and testset\n trainmean = np.mean(actflow_svm_mat[train_ind,:],axis=0)\n trainmean.shape = (1,len(trainmean))\n trainstd = np.std(actflow_svm_mat[train_ind,:],axis=0)\n trainstd.shape = (1,len(trainstd))\n \n # Normalize trainset and testset\n testmean = np.mean(svm_mat[train_ind,:],axis=0)\n testmean.shape = (1,len(testmean))\n teststd = np.std(svm_mat[train_ind,:],axis=0)\n teststd.shape = (1,len(teststd))\n\n trainset = np.divide((trainset - trainmean),trainstd)\n testset = np.divide((testset - testmean),teststd)\n\n ######## FEATURE SELECTION & REDUCTION\n ## Feature selection and downsampling\n trainlabels = labels[train_ind]\n testlabels = labels[test_ind]\n ## permutation\n if permutation:\n np.random.shuffle(trainlabels)\n ##\n unique_labels = np.unique(labels)\n #feat1_labs = np.where(trainlabels==0)[0]\n #feat2_labs = np.where(trainlabels==1)[0]\n ## Perform t-test\n #t, p = stats.ttest_rel(orig_training[feat1_labs,:],orig_training[feat2_labs,:],axis=0)\n ##t, p = stats.ttest_rel(trainset[feat1_labs,:],trainset[feat2_labs,:],axis=0)\n #h0, qs = mc.fdrcorrection0(p)\n\n\n ### BEGIN REGULAR FEATURE SELECTION ###\n if featsel:\n F, p = f_classif(trainset,trainlabels)\n thresh = 0.1\n feat_mask = np.where(p < thresh)[0]\n feat_mask = np.intersect1d(feat_mask,np.where(np.isnan(trainset[0,:])==False)[0]) # make sure no bad values are included\n if len(feat_mask)<2:\n # make sure there are enough features...\n inputs.append((trainset,testset,trainlabels,testlabels)) \n else:\n inputs.append((trainset[:,feat_mask],testset[:,feat_mask],trainlabels,testlabels)) \n ### END 
REGULAR FEATURE SELECTION ###\n\n# ### BEGIN DIGIT REPRESENTATION FEATURE SELECTION ###\n# # Construct feature masks\n# feat1_mask = np.multiply(t<0,qs<0.05)\n# feat2_mask = np.multiply(t>0,qs<0.05)\n# #feat1_mask = t>0\n# #feat2_mask = t<0\n# \n# # Downsample training set into original vertices into 2 ROI signals\n# trainset_downsampled = np.zeros((trainset.shape[0],2))\n# trainset_downsampled[:,0] = np.nanmean(trainset[:,feat1_mask],axis=1)\n# trainset_downsampled[:,1] = np.nanmean(trainset[:,feat2_mask],axis=1)\n# # Downsample test set into original vertices\n# testset_downsampled = np.zeros((testset.shape[0],2))\n# testset_downsampled[:,0] = np.nanmean(testset[:,feat1_mask],axis=1)\n# testset_downsampled[:,1] = np.nanmean(testset[:,feat2_mask],axis=1)\n# if np.nansum(feat1_mask)==0 or np.nansum(feat2_mask)==0:\n# print 'not running feature selection'\n# inputs.append((trainset,testset,labels[train_ind],labels[test_ind]))\n# else:\n# inputs.append((trainset_downsampled,testset_downsampled,labels[train_ind],labels[test_ind]))\n# ### END DIGIT REPRESENTATION FEATURE SELECTION ###\n else:\n inputs.append((trainset,testset,trainlabels,testlabels)) \n \n # Subj array folds are all subjects that have not yet been tested independently\n subj_array_folds = np.delete(subj_array_folds,test_subjs_ind)\n #subj_array_folds = np.delete(subj_array_folds,test_subjs)\n \n pool = mp.Pool(processes=nproc)\n scores = pool.starmap_async(_decoding,inputs).get()\n pool.close()\n pool.join()\n\n acc = []\n r_match = []\n r_mismatch = []\n confusion_mat = []\n for score in scores:\n acc.extend(score[0])\n r_match.append(score[1])\n r_mismatch.append(score[2])\n confusion_mat.append(score[3])\n \n confusion_mat = np.asarray(confusion_mat)\n confusion_mat = np.mean(confusion_mat,axis=0)\n\n return acc, r_match, r_mismatch, confusion_mat",
"def train(self, X, T):\r\n assert len(self.neurons) > 0, \"Add neurons before training ELM\"\r\n X, T = self._checkdata(X, T)\r\n H = self.project(X)\r\n self.Beta = np.linalg.pinv(H).dot(T)",
"def run_snv_phylogenetics(snv_count_data, allele_cn, clusters, results_prefix):\n snv_log_likelihoods = scgenome.snvphylo.compute_snv_log_likelihoods(\n snv_count_data, allele_cn, clusters)\n\n ml_tree, tree_annotations = scgenome.snvphylo.compute_dollo_ml_tree(\n snv_log_likelihoods)\n\n return ml_tree, tree_annotations",
"def train_model(self,X,Y):\n X_bar = subtract_mean_from_data(X,Y)\n cov_XX = compute_covariance_matrix(X_bar[0],X_bar[0])\n cov_XX += np.identity(len(np.array(X).T)) * self.reg_cov\n self.cov_XX = cov_XX\n self.muj = []\n j=0\n k=0\n val = Y[0]\n for i in range(self.NUM_CLASSES):\n print(i)\n while(j<len(Y)-1 and val==Y[j]):\n j = j+1\n X0 = X[k:j-1]\n mu0 = np.mean(X0,axis=0)\n self.muj.append(mu0)\n k = j\n val = Y[j]",
"def train():\n counts = {size: dict() for size in NGRAM_SIZES}\n for word in tqdm.tqdm(word_iterator(\"resources/datasets\")):\n if word == \"\":\n continue\n for size in NGRAM_SIZES:\n for token in ngrams(word, 2 * size):\n left, right = token[:size], token[size:]\n counts[size].setdefault(left, dict())\n counts[size][left].setdefault(right, 0)\n counts[size][left][right] += 1\n model = {size: dict() for size in NGRAM_SIZES}\n for size in NGRAM_SIZES:\n for left in counts[size]:\n total = sum(counts[size][left].values())\n model[size][left] = dict()\n for right in counts[size][left]:\n model[size][left][right] = math.log(\n counts[size][left][right] / total)\n with open(MODEL_FILENAME, \"wb\") as file:\n pickle.dump(model, file)",
"def train_lvq(numcbv, run_len_mult = 40, fpath = \"datasets\\\\lvq\"):\n # Number of iterations recommended by Kohonen is 40 times the number of codebook vectors\n runlen = run_len_mult * numcbv\n \n #run length for 'sammon'. Doesn't affect learning. May not be necessary.\n #runlen2 = 100\n \n #codebook size 40 will create files \"lvq/c40e.cod\", \"lvq/c40o.sam\" etc.\n cb = \"lvq\\\\c\" + str(numcbv)\n train = fpath + \"_train.txt\"\n test = fpath + \"_test.txt\"\n\n # Little lambdas just to help with readability below.\n cmd = lambda X: \"binaries_windows\\\\\"+X+\".exe\"\n din = lambda X: \" -din \" + str(X)\n cout = lambda X: \" -cout \" + str(X) \n cin = lambda X: \" -cin \" + str(X)\n rlen = lambda X: \" -rlen \" + str(X)\n noc = lambda X: \" -noc \" + str(X)\n cfout = lambda X: \" -cfout \" + str(X) \n \n # Initialize LVQ with even codebooks per class\n check_call(cmd(\"eveninit\") + din(train) + cout(cb + \"e.cod\") + noc(numcbv) )\n \n # Balance codebooks. Optional.\n check_call(cmd(\"balance\") + din(train) + cin(cb + \"e.cod\") + cout(cb + \"b.cod\") )\n \n #Codebook Training\n check_call(cmd(\"olvq1\") + din(train) + cin(cb + \"b.cod\") + cout(cb + \"o.cod\") + rlen(runlen) )\n \n # Compute accuracy for training and testing set.\n check_call(cmd(\"accuracy\") + din(train) + cin(cb + \"o.cod\") + cfout(cb + \"_train.cfo\") )\n check_call(cmd(\"accuracy\") + din(test) + cin(cb + \"o.cod\") + cfout(cb + \"_test.cfo\") )\n \n #Optional. Slow.\n #call(cmd(\"sammon\") + cin(cb + \"o.cod\") + cout(cb + \"o.sam\") + rlen(runlen2) )",
"def __init__(self, data, tree_prior, config):\n assert isinstance(data, np.ndarray)\n data = np.asarray(data, np.float32)\n assert len(data.shape) == 2\n N, V = data.shape\n D = config['model_latent_dim']\n E = V - 1 # Number of edges in the tree.\n TreeTrainer.__init__(self, N, V, tree_prior, config)\n self._data = data\n self._latent = np.zeros([N, V, D], np.float32)\n\n # This is symmetric positive definite.\n self._vert_ss = np.zeros([V, D, D], np.float32)\n # This is arbitrary (not necessarily symmetric).\n self._edge_ss = np.zeros([E, D, D], np.float32)\n # This represents (count, mean, covariance).\n self._feat_ss = np.zeros([V, D, 1 + 1 + D], np.float32)",
"def train(self, trnM, trnL):\n print 'Training ...'\n self.clf.fit(trnM, trnL)",
"def trainSN(options, epoch, device):\n \"\"\"-------------------------------CONFIG----------------------------------\"\"\"\n parser = argparse.ArgumentParser(description=\"PyTorch Regression GAN\")\n parser = general_parser(parser)\n opt = specific_parser(\n parser=parser, run_folder=options.log_dir, mode='train', tot_epochs=30, pretrained_GAN=options.checkpoint_dir,\n GAN_epoch=epoch, acc_log_freq=options.acc_log_freq, loss_log_freq=options.loss_log_freq,\n batch_size_SN=options.batch_size_SN, images_log_freq=options.images_log_freq,\n data_dir_train=options.data_dir_train2, data_dir_test=options.data_dir_test2,\n experiment_name='SN'+str(epoch), sar_c=options.sar_c, optical_c=options.optical_c,\n save_model_freq=1000, res_block_N=options.res_block_N)\n\n opt = config_routine(opt)\n\n \"\"\"-----------------------------DATA LOADER--------------------------------\"\"\"\n train_dataset = EUSARDataset(os.path.join(options.data_dir_train2), True, False, options.sar_c, options.optical_c)\n train_dataset = get_subset(train_dataset, options.prc_test)\n train_dataset = DataLoader(train_dataset, batch_size=options.batch_size_SN, shuffle=True,\n num_workers=options.workers, pin_memory=True, drop_last=False)\n\n test_dataset = EUSARDataset(os.path.join(options.data_dir_test2), True, False, options.sar_c, options.optical_c)\n test_dataset = get_subset(test_dataset, options.prc_test, True)\n test_dataset = DataLoader(test_dataset, batch_size=options.batch_size_SN, shuffle=False,\n num_workers=options.workers, pin_memory=True, drop_last=False)\n\n \"\"\"--------------------------------TRAIN-----------------------------------\"\"\"\n # Init model\n model = SN(opt, device)\n\n # set up tensorboard logging\n writer = SummaryWriter(log_dir=os.path.join(opt.tb_dir))\n # Model Training\n model.train(train_dataset, test_dataset, writer)",
"def train_var(Dataset, model, criterion, epoch, optimizer, writer, device, args):\n\n # Create instances to accumulate losses etc.\n cl_losses = AverageMeter()\n kld_losses = AverageMeter()\n losses = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n # train\n for i, (inp, target) in enumerate(Dataset.train_loader):\n inp = inp.to(device)\n target = target.to(device)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # compute model forward\n output_samples, mu, std = model(inp)\n\n # calculate loss\n cl_loss, kld_loss = criterion(output_samples, target, mu, std, device)\n\n # add the individual loss components together and weight the KL term.\n loss = cl_loss + args.var_beta * kld_loss\n\n # take mean to compute accuracy. Note if variational samples are 1 this only gets rid of a dummy dimension.\n output = torch.mean(output_samples, dim=0)\n\n # record precision/accuracy and losses\n prec1 = accuracy(output, target)[0]\n top1.update(prec1.item(), inp.size(0))\n\n losses.update((cl_loss + kld_loss).item(), inp.size(0))\n cl_losses.update(cl_loss.item(), inp.size(0))\n kld_losses.update(kld_loss.item(), inp.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print progress\n if i % args.print_freq == 0:\n print('Training: [{0}][{1}/{2}]\\t' \n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Class Loss {cl_loss.val:.4f} ({cl_loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'KL {KLD_loss.val:.4f} ({KLD_loss.avg:.4f})'.format(\n epoch+1, i, len(Dataset.train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, cl_loss=cl_losses, top1=top1, KLD_loss=kld_losses))\n\n # TensorBoard summary logging\n writer.add_scalar('training/train_precision@1', top1.avg, epoch)\n writer.add_scalar('training/train_class_loss', cl_losses.avg, epoch)\n writer.add_scalar('training/train_average_loss', losses.avg, epoch)\n writer.add_scalar('training/train_KLD', kld_losses.avg, epoch)\n\n print(' * Train: Loss {loss.avg:.5f} Prec@1 {top1.avg:.3f}'.format(loss=losses, top1=top1))"
] | [
"0.5764085",
"0.5691527",
"0.5678574",
"0.56131625",
"0.56115055",
"0.55656296",
"0.55542564",
"0.54764485",
"0.5473188",
"0.54617053",
"0.545335",
"0.5446527",
"0.5414085",
"0.5401629",
"0.5398226",
"0.5394197",
"0.53737515",
"0.5329471",
"0.5315939",
"0.5309096",
"0.5297996",
"0.52975774",
"0.52961946",
"0.529001",
"0.5269909",
"0.52610075",
"0.5256068",
"0.52377146",
"0.5232319",
"0.5224541"
] | 0.57309794 | 1 |
Train a TreeCat ensemble model using subsample-annealed MCMC. The ensemble size is controlled by config['model_ensemble_size']. Let N be the number of data rows and V be the number of features. | def train_ensemble(table, tree_prior, config):
tasks = []
for sub_seed in range(config['model_ensemble_size']):
sub_config = config.copy()
sub_config['seed'] += sub_seed
tasks.append((table, tree_prior, sub_config))
return parallel_map(_train_model, tasks) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_model(table, tree_prior, config):\n assert isinstance(table, Table)\n M = config['model_num_clusters']\n D = config['model_latent_dim']\n assert M >= 1\n assert D >= 0\n if D == 0:\n Trainer = TreeCatTrainer\n elif M == 1:\n Trainer = TreeGaussTrainer\n else:\n Trainer = TreeMogTrainer\n return Trainer(table, tree_prior, config).train()",
"def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()",
"def ETC_train(X, y, n_trees):\r\n start_time = time.time()\r\n print('\\n'+ '# '*10+'[Training] ExtraTreeClassifier Model (ETC):'+ ' #'*10)\r\n clf = ExtraTreesClassifier(n_estimators=n_trees).fit(X, y)\r\n print('> '*2+'Training time: %.4f seconds.'%(time.time()-start_time))\r\n return clf",
"def train(self, X, T, *args, **kwargs):\n\n assert len(self.neurons) > 0, \"Add neurons to ELM before training it\"\n X, T = self._checkdata(X, T)\n args = [a.upper() for a in args] # make all arguments upper case\n\n # kind of \"enumerators\", try to use only inside that script\n MODELSELECTION = None # V / CV / MCCV / LOO / None\n NEURONRANKING = None # HQ / OP / None\n CLASSIFICATION = None # c / mc / None\n ADAPTIVE = False # batch / None\n Xv = None\n Tv = None\n k = None\n batch = None\n\n # check exclusive parameters\n assert len(set(args).intersection(set([\"V\", \"CV\", \"MCCV\", \"LOO\"]))) <= 1, \"Use only one of V / CV / MCCV / LOO\"\n assert len(set(args).intersection(set([\"HQ\", \"OP\"]))) <= 1, \"Use only one of HQ / OP\"\n assert len(set(args).intersection(set([\"C\", \"MC\"]))) <= 1, \"Use only one of classification / multiclass (c / mc)\"\n\n # parse parameters\n for a in args:\n if a == \"V\": # validation set\n assert \"Xv\" in kwargs.keys(), \"Provide validation dataset (Xv)\"\n assert \"Tv\" in kwargs.keys(), \"Provide validation targets (Tv)\"\n Xv = kwargs['Xv']\n Tv = kwargs['Tv']\n Xv, Tv = self._checkdata(Xv, Tv)\n MODELSELECTION = \"V\"\n if a == \"CV\":\n assert \"k\" in kwargs.keys(), \"Provide Cross-Validation number of splits (k)\"\n k = kwargs['k']\n MODELSELECTION = \"CV\"\n if a == \"LOO\":\n MODELSELECTION = \"LOO\"\n if a == \"HQ\":\n NEURONRANKING = \"HQ\"\n if a == \"OP\":\n NEURONRANKING = \"OP\"\n if a in (\"C\", \"CL\", \"CLASSIFICATION\"):\n CLASSIFICATION = \"c\"\n if a in (\"MC\", \"MULTICLASS\"):\n CLASSIFICATION = \"mc\"\n if a in (\"A\", \"AD\", \"ADAPTIVE\"):\n assert \"batch\" in kwargs.keys(), \"Provide batch size for adaptive ELM model (batch)\"\n batch = kwargs['batch']\n ADAPTIVE = True\n\n if MODELSELECTION == \"V\":\n self._train_v(X, T, Xv, Tv)\n else:\n self.Beta = self._solve(self.project(X), T)",
"def individual_train(data_location, test, noise_variance, hidden_sizes, omega, activation_function, \\\n learned_noise_var, input_dim, noise_param_init, learning_rate, no_epochs, standard_normal_prior, \\\n minibatch_size, results_dir=None, split=None, early_stopping=False):\n ensemble = MLP_Ensemble(no_members, noise_variance, hidden_sizes, omega, activation=activation_function, learned_noise_var=learned_noise_var, input_dim=input_dim, noise_param_init=noise_param_init, standard_normal_prior=standard_normal_prior, random_prior=random_prior, learning_rate=learning_rate)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # get dataset\n with open(data_location, 'rb') as f:\n train_set, train_set_normalised, val_set_normalised, test_set, train_mean, train_sd = pickle.load(f)\n\n train_mean = torch.cuda.FloatTensor(train_mean)\n train_sd = torch.cuda.FloatTensor(train_sd)\n\n x_train_normalised = torch.cuda.FloatTensor(train_set_normalised[:,:-1])\n y_train_normalised = torch.cuda.FloatTensor(train_set_normalised[:,-1])\n\n x_val_normalised = torch.cuda.FloatTensor(val_set_normalised[:,:-1])\n y_val_normalised = torch.cuda.FloatTensor(val_set_normalised[:,-1])\n\n if test == True: # combine train and val sets\n x_train_normalised = torch.cat((x_train_normalised, x_val_normalised), 0)\n y_train_normalised = torch.cat((y_train_normalised, y_val_normalised), 0)\n\n x_test = torch.cuda.FloatTensor(test_set[:,:-1])\n y_test = torch.cuda.FloatTensor(test_set[:,-1])\n\n # train the model, and print out the validation set log likelihood when training\n\n if test == True: # this is test time, no early stopping needed\n train(ensemble, x_train_normalised, y_train_normalised, x_test, y_test, train_mean, train_sd, validation=False, minibatch_size=minibatch_size, no_epochs=no_epochs)\n ensemble_MSE, ensemble_LL = evaluate(ensemble, x_test, y_test, train_mean, train_sd, validation=False, directory=results_dir, name=str(split)) \n return ensemble_MSE, ensemble_LL\n\n else: # this is validation time, do early stopping for hyperparam search\n results_dict_list = train(ensemble, x_train_normalised, y_train_normalised, x_val_normalised, y_val_normalised, train_mean, train_sd, validation=True, minibatch_size=minibatch_size, no_epochs=no_epochs, early_stopping=True)\n return results_dict_list",
"def main(config_path):\n # load the config file\n cfg = AttrDict.from_json_path(config_path)\n\n # Make Outputs directories\n out_path = os.path.join(cfg.path.output, cfg.exp_name)\n out_path_selfsup = os.path.join(out_path, 'classification_pretrain/')\n out_path_sup = os.path.join(out_path, 'supervised_train/')\n os.makedirs(out_path_selfsup, exist_ok=True)\n for k in range(cfg.Sup.split.n_fold):\n os.makedirs(os.path.join(out_path_sup, f'Fold_{k+1}/pred/'), exist_ok=True)\n\n # Initialize random seed\n if cfg.seed != -1:\n random.seed(cfg.seed)\n np.random.seed(cfg.seed)\n torch.manual_seed(cfg.seed)\n torch.cuda.manual_seed(cfg.seed)\n torch.cuda.manual_seed_all(cfg.seed)\n torch.backends.cudnn.deterministic = True\n\n # Set number of thread\n if cfg.n_thread > 0: torch.set_num_threads(cfg.n_thread)\n # set device\n if cfg.device:\n cfg.device = torch.device(cfg.device)\n else:\n cfg.device = get_available_device()\n\n ###################################################\n # Self-supervised training on Context Restoration #\n ###################################################\n # Initialize Logger\n logger = initialize_logger(os.path.join(out_path_selfsup, 'log.txt'))\n if os.path.exists(os.path.join(out_path_selfsup, f'checkpoint.pt')):\n logger.info('\\n' + '#'*30 + f'\\n Recovering Session \\n' + '#'*30)\n logger.info(f\"Experiment : {cfg.exp_name}\")\n\n # Load RSNA data csv\n df_rsna = pd.read_csv(os.path.join(cfg.path.data.SSL, 'slice_info.csv'), index_col=0)\n\n # Keep only fractions sample\n if cfg.SSL.dataset.n_data_0 >= 0:\n df_rsna_noICH = df_rsna[df_rsna.Hemorrhage == 0].sample(n=cfg.SSL.dataset.n_data_0, random_state=cfg.seed)\n else:\n df_rsna_noICH = df_rsna[df_rsna.Hemorrhage == 0]\n if cfg.SSL.dataset.n_data_1 >= 0:\n df_rsna_ICH = df_rsna[df_rsna.Hemorrhage == 1].sample(n=cfg.SSL.dataset.n_data_1, random_state=cfg.seed)\n else:\n df_rsna_ICH = df_rsna[df_rsna.Hemorrhage == 1]\n df_rsna = pd.concat([df_rsna_ICH, df_rsna_noICH], axis=0)\n\n # Split data to keep few for evaluation in a strafied way\n train_df, test_df = train_test_split(df_rsna, test_size=cfg.SSL.dataset.frac_eval, stratify=df_rsna.Hemorrhage, random_state=cfg.seed)\n logger.info('\\n' + str(get_split_summary_table(df_rsna, train_df, test_df)))\n\n # Make dataset : Train --> BinaryClassification, Test --> BinaryClassification\n train_RSNA_dataset = RSNA_dataset(train_df, cfg.path.data.SSL,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.SSL.dataset.augmentation.train.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size,\n mode='binary_classification')\n test_RSNA_dataset = RSNA_dataset(test_df, cfg.path.data.SSL,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.SSL.dataset.augmentation.eval.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size,\n mode='binary_classification')\n\n logger.info(f\"Data will be loaded from {cfg.path.data.SSL}.\")\n logger.info(f\"CT scans will be windowed on [{cfg.data.win_center-cfg.data.win_width/2} ; {cfg.data.win_center + cfg.data.win_width/2}]\")\n logger.info(f\"CT scans will be resized to {cfg.data.size}x{cfg.data.size}\")\n logger.info(f\"Training online data transformation: \\n\\n {str(train_RSNA_dataset.transform)}\\n\")\n logger.info(f\"Evaluation online data transformation: \\n\\n {str(test_RSNA_dataset.transform)}\\n\")\n\n # Make U-Net-Encoder architecture\n net_ssl = UNet_Encoder(**cfg.SSL.net).to(cfg.device)\n 
net_params = [f\"--> {k} : {v}\" for k, v in cfg.SSL.net.items()]\n logger.info(\"UNet like Binary Classifier \\n\\t\" + \"\\n\\t\".join(net_params))\n logger.info(f\"The Binary Classifier has {sum(p.numel() for p in net_ssl.parameters())} parameters.\")\n\n # Make Model\n cfg.SSL.train.model_param.lr_scheduler = getattr(torch.optim.lr_scheduler, cfg.SSL.train.model_param.lr_scheduler) # convert scheduler name to scheduler class object\n cfg.SSL.train.model_param.loss_fn = getattr(torch.nn, cfg.SSL.train.model_param.loss_fn) # convert loss_fn name to nn.Module class object\n w_ICH = train_df.Hemorrhage.sum() / len(train_df) # define CE weighting from train dataset\n cfg.SSL.train.model_param.loss_fn_kwargs['weight'] = torch.tensor([1 - w_ICH, w_ICH], device=cfg.device).float() # add weighting to CE kwargs\n\n classifier = BinaryClassifier(net_ssl, device=cfg.device, print_progress=cfg.print_progress, **cfg.SSL.train.model_param)\n\n train_params = [f\"--> {k} : {v}\" for k, v in cfg.SSL.train.model_param.items()]\n logger.info(\"Classifer Training Parameters \\n\\t\" + \"\\n\\t\".join(train_params))\n\n # Load weights if specified\n if cfg.SSL.train.model_path_to_load:\n model_path = cfg.SSL.train.model_path_to_load\n classifier.load_model(model_path, map_location=cfg.device)\n logger.info(f\"Classifer Model succesfully loaded from {cfg.SSL.train.model_path_to_load}\")\n\n # train if needed\n if cfg.SSL.train.model_param.n_epoch > 0:\n classifier.train(train_RSNA_dataset, valid_dataset=test_RSNA_dataset,\n checkpoint_path=os.path.join(out_path_selfsup, f'checkpoint.pt'))\n\n # evaluate\n auc, acc, recall, precision, f1 = classifier.evaluate(test_RSNA_dataset, save_tsne=False, return_scores=True)\n logger.info(f\"Classifier Test AUC : {auc:.2%}\")\n logger.info(f\"Classifier Test Accuracy : {acc:.2%}\")\n logger.info(f\"Classifier Test Recall : {recall:.2%}\")\n logger.info(f\"Classifier Test Precision : {precision:.2%}\")\n logger.info(f\"Classifier Test F1-score : {f1:.2%}\")\n\n # save model, outputs\n classifier.save_model(os.path.join(out_path_selfsup, 'pretrained_unet_enc.pt'))\n logger.info(\"Pre-trained U-Net encoder on binary classification saved at \" + os.path.join(out_path_selfsup, 'pretrained_unet_enc.pt'))\n classifier.save_outputs(os.path.join(out_path_selfsup, 'outputs.json'))\n logger.info(\"Classifier outputs saved at \" + os.path.join(out_path_selfsup, 'outputs.json'))\n test_df.reset_index(drop=True).to_csv(os.path.join(out_path_selfsup, 'eval_data_info.csv'))\n logger.info(\"Evaluation data info saved at \" + os.path.join(out_path_selfsup, 'eval_data_info.csv'))\n\n # delete any checkpoints\n if os.path.exists(os.path.join(out_path_selfsup, f'checkpoint.pt')):\n os.remove(os.path.join(out_path_selfsup, f'checkpoint.pt'))\n logger.info('Checkpoint deleted.')\n\n # get weights state dictionnary\n pretrained_unet_weights = classifier.get_state_dict()\n\n ###################################################################\n # Supervised fine-training of U-Net with K-Fold Cross-Validation #\n ###################################################################\n # load annotated data csv\n data_info_df = pd.read_csv(os.path.join(cfg.path.data.Sup, 'ct_info.csv'), index_col=0)\n patient_df = pd.read_csv(os.path.join(cfg.path.data.Sup, 'patient_info.csv'), index_col=0)\n\n # Make K-Fold spolit at patient level\n skf = StratifiedKFold(n_splits=cfg.Sup.split.n_fold, shuffle=cfg.Sup.split.shuffle, random_state=cfg.seed)\n\n # define scheduler and loss_fn as object\n 
cfg.Sup.train.model_param.lr_scheduler = getattr(torch.optim.lr_scheduler, cfg.Sup.train.model_param.lr_scheduler) # convert scheduler name to scheduler class object\n cfg.Sup.train.model_param.loss_fn = getattr(src.models.optim.LossFunctions, cfg.Sup.train.model_param.loss_fn) # convert loss_fn name to nn.Module class object\n\n # iterate over folds\n for k, (train_idx, test_idx) in enumerate(skf.split(patient_df.PatientNumber, patient_df.Hemorrhage)):\n # check if fold's results already exists\n if not os.path.exists(os.path.join(out_path_sup, f'Fold_{k+1}/outputs.json')):\n # initialize logger\n logger = initialize_logger(os.path.join(out_path_sup, f'Fold_{k+1}/log.txt'))\n if os.path.exists(os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt')):\n logger.info('\\n' + '#'*30 + f'\\n Recovering Session \\n' + '#'*30)\n logger.info(f\"Experiment : {cfg['exp_name']}\")\n logger.info(f\"Cross-Validation fold {k+1:02}/{cfg['Sup']['split']['n_fold']:02}\")\n\n # extract train/test slice dataframe\n train_df = data_info_df[data_info_df.PatientNumber.isin(patient_df.loc[train_idx,'PatientNumber'].values)]\n test_df = data_info_df[data_info_df.PatientNumber.isin(patient_df.loc[test_idx,'PatientNumber'].values)]\n # samples train dataframe to adjuste negative/positive fractions\n n_remove = int(max(0, len(train_df[train_df.Hemorrhage == 0]) - cfg.Sup.dataset.frac_negative * len(train_df[train_df.Hemorrhage == 1])))\n df_remove = train_df[train_df.Hemorrhage == 0].sample(n=n_remove, random_state=cfg.seed)\n train_df = train_df[~train_df.index.isin(df_remove.index)]\n logger.info('\\n' + str(get_split_summary_table(data_info_df, train_df, test_df)))\n\n # Make datasets\n train_dataset = public_SegICH_Dataset2D(train_df, cfg.path.data.Sup,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.Sup.dataset.augmentation.train.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size)\n test_dataset = public_SegICH_Dataset2D(test_df, cfg.path.data.Sup,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.Sup.dataset.augmentation.eval.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size)\n logger.info(f\"Data will be loaded from {cfg.path.data.Sup}.\")\n logger.info(f\"CT scans will be windowed on [{cfg.data.win_center-cfg.data.win_width/2} ; {cfg.data.win_center + cfg.data.win_width/2}]\")\n logger.info(f\"CT scans will be resized to {cfg.data.size}x{cfg.data.size}\")\n logger.info(f\"Training online data transformation: \\n\\n {str(train_dataset.transform)}\\n\")\n logger.info(f\"Evaluation online data transformation: \\n\\n {str(test_dataset.transform)}\\n\")\n\n # Make U-Net architecture\n unet_sup = UNet(**cfg.Sup.net).to(cfg.device)\n net_params = [f\"--> {k} : {v}\" for k, v in cfg.Sup.net.items()]\n logger.info(\"UNet-2D params \\n\\t\" + \"\\n\\t\".join(net_params))\n logger.info(f\"The U-Net2D has {sum(p.numel() for p in unet_sup.parameters())} parameters.\")\n\n # Make Model\n unet2D = UNet2D(unet_sup, device=cfg.device, print_progress=cfg.print_progress, **cfg.Sup.train.model_param)\n\n train_params = [f\"--> {k} : {v}\" for k, v in cfg.Sup.train.model_param.items()]\n logger.info(\"UNet-2D Training Parameters \\n\\t\" + \"\\n\\t\".join(train_params))\n\n # ????? 
load model if specified ?????\n\n # transfer weights learn with context restoration\n logger.info('Initialize U-Net2D with weights learned with context_restoration on RSNA.')\n unet2D.transfer_weights(pretrained_unet_weights, verbose=True)\n\n # Train U-net\n eval_dataset = test_dataset if cfg.Sup.train.validate_epoch else None\n unet2D.train(train_dataset, valid_dataset=eval_dataset, checkpoint_path=os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt'))\n\n # Evaluate U-Net\n unet2D.evaluate(test_dataset, save_path=os.path.join(out_path_sup, f'Fold_{k+1}/pred/'))\n\n # Save models and outputs\n unet2D.save_model(os.path.join(out_path_sup, f'Fold_{k+1}/trained_unet.pt'))\n logger.info(\"Trained U-Net saved at \" + os.path.join(out_path_sup, f'Fold_{k+1}/trained_unet.pt'))\n unet2D.save_outputs(os.path.join(out_path_sup, f'Fold_{k+1}/outputs.json'))\n logger.info(\"Trained statistics saved at \" + os.path.join(out_path_sup, f'Fold_{k+1}/outputs.json'))\n\n # delete checkpoint if exists\n if os.path.exists(os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt')):\n os.remove(os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt'))\n logger.info('Checkpoint deleted.')\n\n # save mean +/- 1.96 std Dice over Folds\n save_mean_fold_dice(out_path_sup, cfg.Sup.split.n_fold)\n logger.info('Average Scores saved at ' + os.path.join(out_path_sup, 'average_scores.txt'))\n\n # Save all volumes prediction csv\n df_list = [pd.read_csv(os.path.join(out_path_sup, f'Fold_{i+1}/pred/volume_prediction_scores.csv')) for i in range(cfg.Sup.split.n_fold)]\n all_df = pd.concat(df_list, axis=0).reset_index(drop=True)\n all_df.to_csv(os.path.join(out_path_sup, 'all_volume_prediction.csv'))\n logger.info('CSV of all volumes prediction saved at ' + os.path.join(out_path_sup, 'all_volume_prediction.csv'))\n\n # Save config file\n cfg.device = str(cfg.device)\n cfg.SSL.train.model_param.lr_scheduler = str(cfg.SSL.train.model_param.lr_scheduler)\n cfg.Sup.train.model_param.lr_scheduler = str(cfg.Sup.train.model_param.lr_scheduler)\n cfg.SSL.train.model_param.loss_fn = str(cfg.SSL.train.model_param.loss_fn)\n cfg.Sup.train.model_param.loss_fn = str(cfg.Sup.train.model_param.loss_fn)\n cfg.SSL.train.model_param.loss_fn_kwargs.weight = cfg.SSL.train.model_param.loss_fn_kwargs.weight.cpu().data.tolist()\n with open(os.path.join(out_path, 'config.json'), 'w') as fp:\n json.dump(cfg, fp)\n logger.info('Config file saved at ' + os.path.join(out_path, 'config.json'))\n\n # Analyse results\n analyse_supervised_exp(out_path_sup, cfg.path.data.Sup, n_fold=cfg.Sup.split.n_fold,\n config_folder=out_path, save_fn=os.path.join(out_path, 'results_supervised_overview.pdf'))\n logger.info('Results overview figure saved at ' + os.path.join(out_path, 'results_supervised_overview.pdf'))\n analyse_representation_exp(out_path_selfsup, save_fn=os.path.join(out_path, 'results_self-supervised_overview.pdf'))\n logger.info('Results overview figure saved at ' + os.path.join(out_path, 'results_self-supervised_overview.pdf'))",
"def ensemble_001():\n n_centroids = 3000\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_ensemble_001',\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n X = kmeans_generator.transform(images, save_to_file='data/data_ensemble_001.npy', memmap=True)\n Y = classes.train_solutions.data\n\n # Unload some objects\n del images\n gc.collect()\n\n # Get the input for the RF so that we can split together\n sampler = SampleTransformer(training=True, steps=2, step_size=20, n_jobs=-1)\n pX = sampler.transform()\n\n # manual split of train and test\n train_x, test_x, ptrain_x, ptest_x, train_y, test_y = train_test_split(X, pX, Y, test_size=0.5)\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 500}, n_jobs=-1)\n wrapper.fit(train_x, train_y)\n kmeans_preds = wrapper.predict(test_x)\n\n pWrapper = ModelWrapper(RandomForestRegressor, {'n_estimators': 500, 'verbose': 3}, n_jobs=-1)\n pWrapper.fit(ptrain_x, train_y)\n pixel_preds = pWrapper.predict(ptest_x)\n\n logger.info('Kmeans')\n classes.colwise_rmse(kmeans_preds, test_y)\n classes.rmse(kmeans_preds, test_y)\n logger.info('Pixel RF')\n classes.colwise_rmse(pixel_preds, test_y)\n classes.rmse(pixel_preds, test_y)\n\n logger.info(\"Ensembling predictions\")\n etrain_x = np.hstack((wrapper.predict(train_x), pWrapper.predict(ptrain_x)))\n etest_x = np.hstack((kmeans_preds, pixel_preds))\n eWrapper = ModelWrapper(RandomForestRegressor, {'n_estimators': 500, 'verbose': 3}, n_jobs=-1)\n eWrapper.fit(etrain_x, train_y)\n ensemble_preds = eWrapper.predict(etest_x)\n classes.colwise_rmse(ensemble_preds, test_y)\n classes.rmse(ensemble_preds, test_y)",
"def retrain_sub_model(self):\r\n \r\n self.sub_model = self.load_weights_to_sub_model()\r\n X = np.array(self.conv4_characters_list)\r\n X = np.reshape(X, (X.shape[0]*X.shape[1], X.shape[2]))\r\n y = np.repeat(np.arange(1283), 9)\r\n \r\n opt = optimizers.Adam(lr=0.001)\r\n self.sub_model.compile(optimizer=opt,loss='sparse_categorical_crossentropy',metrics=['accuracy'])\r\n print(\"***Start to creat new decision model***\")\r\n self.sub_model.fit(X, y, epochs=20)\r\n print(\"***Finish***\")",
"def sample_estimator(model, num_classes, train_loader):\n import sklearn.covariance\n\n model.eval()\n with torch.no_grad():\n group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)\n correct, total = 0, 0\n num_output = 1\n num_sample_per_class = np.empty(num_classes)\n num_sample_per_class.fill(0)\n list_features = []\n for i in range(num_output):\n temp_list = []\n for j in range(num_classes):\n temp_list.append(0)\n list_features.append(temp_list)\n\n for data, target in train_loader:\n total += data.size(0)\n data = data.cuda()\n data = Variable(data)\n output = model(data)['logits']\n # output, out_features = out['logits'], out['logits']\n\n out_features = output.view(output.size(0), output.size(1), -1)\n out_features = torch.mean(out_features, dim=2)\n\n # compute the accuracy\n pred = output.data.max(1)[1]\n equal_flag = pred.eq(target.cuda()).cpu()\n correct += equal_flag.sum()\n\n # construct the sample matrix\n for i in range(data.size(0)):\n label = target[i]\n out_count = 0\n if num_sample_per_class[label] == 0:\n list_features[out_count][label] = out_features[i].view(1, -1)\n else:\n list_features[out_count][label] \\\n = torch.cat((list_features[out_count][label], out_features[i].view(1, -1)), 0)\n num_sample_per_class[label] += 1\n\n sample_class_mean = []\n out_count = 0\n num_feature = num_classes\n temp_list = torch.Tensor(num_classes, num_feature).cuda()\n for j in range(num_classes):\n temp_list[j] = torch.mean(list_features[out_count][j], dim=0)\n sample_class_mean.append(temp_list)\n\n precision = []\n for k in range(num_output):\n X = 0\n for i in range(num_classes):\n if i == 0:\n X = list_features[k][i] - sample_class_mean[k][i]\n else:\n X = torch.cat((X, list_features[k][i] - sample_class_mean[k][i]), dim=0)\n\n # find inverse\n group_lasso.fit(X.cpu().numpy())\n temp_precision = group_lasso.precision_\n temp_precision = torch.from_numpy(temp_precision).float().cuda()\n precision.append(temp_precision)\n\n print('\\n Training Accuracy:({:.2f}%)\\n'.format(100. * correct / total))\n\n return sample_class_mean, precision",
"def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n base_path,\n dataset_param,\n train=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n batch_size,\n drop_last=True)\n train_data_loader = torch.utils.data.DataLoader(train_dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n batch_size,\n drop_last=True)\n val_data_loader = torch.utils.data.DataLoader(val_dataset,\n batch_sampler=val_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n # Fastforward the LR decayer\n start_epoch = TRAIN_CFG['start_epoch']\n 
max_epoch = TRAIN_CFG['max_epoch']\n for _ in range(0, -1):\n scheduler.step()\n\n # Start training\n print(\"======= Training for \" + str(max_epoch) + \"===========\")\n for epoch in range(start_epoch, int(max_epoch) + 1):\n if epoch % TRAIN_CFG['eval_every'] == 0:\n print(\"========= Evaluating Model ==========\")\n result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n if torch.distributed.get_rank() == 0:\n logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n result_dict))\n \n train_one_epoch(model, optimizer, train_data_loader,\n device, epoch, print_freq=1000)\n scheduler.step()\n if torch.distributed.get_rank() == 0:\n print(\"Saving model\")\n torch.save(model.state_dict(), osp.join(save_dir,\n TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))",
"def train(self,training_file,rare_thresh=100,clf_params=None,model_path=None,chosen_feats=None,tune_mode=None,size=None,as_text=False,multitrain=True,chosen_clf=None):\n\n\t\tif tune_mode is not None and size is None and tune_mode != \"hyperopt\":\n\t\t\tsize = 5000\n\t\t\tsys.stderr.write(\"o No sample size set - setting size to 5000\\n\")\n\n\t\tif not as_text:\n\t\t\ttrain = io.open(training_file,encoding=\"utf8\").read().strip().replace(\"\\r\",\"\") + \"\\n\"\n\t\telse:\n\t\t\ttrain = training_file\n\n\t\tif size is not None:\n\t\t\ttrain = shuffle_cut_conllu(train,size)\n\t\t#tagged = udpipe_tag(train,self.udpipe_model)\n\t\ttagged = tt_tag(train,self.lang,preserve_sent=True)\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tif clf_params is None:\n\t\t\t# Default classifier parameters\n\t\t\t#clf_params = {\"n_estimators\":125,\"min_samples_leaf\":1, \"max_depth\":15, \"max_features\":None, \"n_jobs\":4, \"random_state\":42, \"oob_score\":True, \"bootstrap\":True}\n\t\t\tclf_params = {\"n_estimators\":100,\"min_samples_leaf\":1, \"min_samples_split\":5, \"max_depth\":10, \"max_features\":None, \"n_jobs\":4, \"random_state\":42, \"oob_score\":True, \"bootstrap\":True}\n\n\t\tif chosen_clf is None:\n\t\t\tchosen_clf = RandomForestClassifier(n_jobs=4,oob_score=True, bootstrap=True)\n\t\t\tchosen_clf.set_params(**clf_params)\n\n\t\tcat_labels = [\"word\",\"first\",\"last\",\"genre\",\"pos\",\"cpos\"]\n\t\tnum_labels = [\"tok_len\",\"tok_id\"]\n\n\t\ttrain_feats, vocab, toks, firsts, lasts = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\tgold_feats, _, _, _, _ = read_conll(train,mode=\"sent\",as_text=True)\n\t\tgold_feats = [{\"wid\":0}] + gold_feats + [{\"wid\":0}] # Add dummies to gold\n\n\t\t# Ensure that \"_\" is in the possible values of first/last for OOV chars at test time\n\t\toov_item = train_feats[-1]\n\t\toov_item[\"first\"] = \"_\"\n\t\toov_item[\"last\"] = \"_\"\n\t\toov_item[\"lemma\"] = \"_\"\n\t\toov_item[\"word\"] = \"_\"\n\t\toov_item[\"pos\"] = \"_\"\n\t\toov_item[\"cpos\"] = \"_\"\n\t\toov_item[\"genre\"] = \"_\"\n\t\ttrain_feats.append(oov_item)\n\t\ttrain_feats = [oov_item] + train_feats\n\t\ttoks.append(\"_\")\n\t\ttoks = [\"_\"] + toks\n\n\t\tvocab = Counter(vocab)\n\t\ttop_n_words = vocab.most_common(rare_thresh)\n\t\ttop_n_words, _ = zip(*top_n_words)\n\n\t\theaders = sorted(list(train_feats[0].keys()))\n\t\tdata = []\n\n\t\tpreds = {}\n\n\t\tfor e in self.estimators:\n\t\t\tif multitrain and e.name in [\"LRSentencer\",\"DNNSentencer\"]:\n\t\t\t\tpred = e.predict_cached(tagged)\n\t\t\telse:\n\t\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\tpreds[e.name + \"_prob\"] = [0.0] + preds[e.name + \"_prob\"] + [0.0] # Add dummy wrap for items -1 and +1\n\t\t\theaders.append(e.name + \"_prob\")\n\t\t\tnum_labels.append(e.name + \"_prob\")\n\n\t\tfor i, item in enumerate(train_feats):\n\t\t\tif item[\"word\"] not in top_n_words:\n\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, cat_labels, num_labels = self.n_gram(data, headers, cat_labels, num_labels)\n\t\t# No need for n_gram feats for the following:\n\t\tif 
\"NLTKSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"NLTKSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"NLTKSentencer_prob_pls1\")\n\t\tif \"UDPipeSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"UDPipeSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"UDPipeSentencer_prob_pls1\")\n\t\tif \"LRSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"LRSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"LRSentencer_prob_pls1\")\n\t\tif \"RuleBasedSplitter_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"RuleBasedSplitter_prob_min1\")\n\t\t\tnum_labels.remove(\"RuleBasedSplitter_prob_pls1\")\n\t\tif \"DNNSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"DNNSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"DNNSentencer_prob_pls1\")\n\t\tif \"tok_id_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"tok_id_min1\")\n\t\t\tnum_labels.remove(\"tok_id_pls1\")\n\t\tif \"genre_min1\" in cat_labels:\n\t\t\tcat_labels.remove(\"genre_min1\")\n\t\t\tcat_labels.remove(\"genre_pls1\")\n\n\t\t# Use specific feature subset\n\t\tif chosen_feats is not None:\n\t\t\tnew_cat = []\n\t\t\tnew_num = []\n\t\t\tfor feat in chosen_feats:\n\t\t\t\tif feat in cat_labels:\n\t\t\t\t\tnew_cat.append(feat)\n\t\t\t\telif feat in num_labels:\n\t\t\t\t\tnew_num.append(feat)\n\t\t\tcat_labels = new_cat\n\t\t\tnum_labels = new_num\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded, multicol_dict = self.multicol_fit_transform(data, pd.Index(cat_labels))\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tdata_y = [int(t['wid'] == 1) for t in gold_feats]\n\n\t\tsys.stderr.write(\"o Learning...\\n\")\n\n\t\tif tune_mode is not None:\n\t\t\t# Randomize samples for training\n\t\t\tdata_x = data_encoded[cat_labels+num_labels+[\"label\"]].sample(frac=1,random_state=42)\n\t\t\tdata_y = np.where(data_x['label'] == \"_\", 0, 1)\n\t\t\tdata_x = data_x[cat_labels+num_labels]\n\n\t\t\t# Reserve 10% for validation\n\t\t\tval_x = data_x[int(len(data_y)/9):]\n\t\t\tval_y = data_y[int(len(data_y)/9):]\n\t\t\tdata_x = data_x[:int(len(data_y)/9)]\n\t\t\tdata_y = data_y[:int(len(data_y)/9)]\n\n\t\tif tune_mode == \"importances\":\n\t\t\tsys.stderr.write(\"o Measuring correlation of categorical variables\\n\")\n\t\t\ttheil_implications = report_theils_u(val_x,cat_labels)\n\t\t\tfor (var1, var2) in theil_implications:\n\t\t\t\tif var1 in cat_labels and var2 in cat_labels:\n\t\t\t\t\tdrop_var = var2\n\t\t\t\t\tu = theil_implications[(var1, var2)]\n\t\t\t\t\tsys.stderr.write(\"o Removed feature \" + drop_var + \" due to Theil's U \" + str(u)[:6] + \" of \" + var1 + \"->\" + var2 + \"\\n\")\n\t\t\t\t\tcat_labels.remove(drop_var)\n\n\t\t\tsys.stderr.write(\"o Measuring correlation of numerical variables\\n\")\n\t\t\tcor_mat = report_correlations(val_x[num_labels],thresh=0.95)\n\t\t\tfor (var1, var2) in cor_mat:\n\t\t\t\tif var1 in num_labels and var2 in num_labels:\n\t\t\t\t\tdrop_var = var2\n\t\t\t\t\tcorr_level = cor_mat[(var1, var2)]\n\t\t\t\t\tsys.stderr.write(\"o Removed feature \" + drop_var + \" due to correlation \" + str(corr_level) + \" of \" + var1 + \":\" + var2 + \"\\n\")\n\t\t\t\t\tnum_labels.remove(drop_var)\n\n\t\t\treturn cat_labels, num_labels\n\n\t\tif tune_mode in [\"paramwise\",\"full\"]:\n\t\t\tbest_params = {}\n\t\t\t# Tune individual params separately for speed, or do complete grid search if building final model\n\t\t\tparams_list = [{\"n_estimators\":[100,125,150]},\n\t\t\t\t\t\t {'max_depth': [10,15,20,None]},\n\t\t\t\t\t\t 
{\"min_samples_split\": [5, 10, 15]},\n\t\t\t\t\t\t {\"min_samples_leaf\":[1,2,3]},\n\t\t\t\t\t\t {\"max_features\":[None,\"sqrt\",\"log2\"]}]\n\t\t\tif tune_mode == \"full\":\n\t\t\t\t# Flatten dictionary if doing full CV\n\t\t\t\tparams_list = [{k: v for d in params_list for k, v in d.items()}]\n\t\t\tfor params in params_list:\n\t\t\t\tbase_params = copy.deepcopy(clf_params) # Copy default params\n\t\t\t\tfor p in params:\n\t\t\t\t\tif p in base_params: # Ensure base_params don't conflict with grid search params\n\t\t\t\t\t\tbase_params.pop(p)\n\t\t\t\tgrid = GridSearchCV(RandomForestClassifier(**base_params),params,cv=3,n_jobs=4,error_score=\"raise\",refit=False)\n\t\t\t\tgrid.fit(data_x,data_y)\n\t\t\t\tfor param in params:\n\t\t\t\t\tbest_params[param] = grid.best_params_[param]\n\t\t\twith io.open(\"best_params.tab\",'a',encoding=\"utf8\") as bp:\n\t\t\t\tcorpus = os.path.basename(training_file).split(\"_\")[0]\n\t\t\t\tbest_clf = RandomForestClassifier(**best_params)\n\t\t\t\tclf_name = best_clf.__class__.__name__\n\t\t\t\tfor k, v in best_params.items():\n\t\t\t\t\tbp.write(\"\\t\".join([corpus, clf_name, k, str(v)]))\n\t\t\t\tbp.write(\"\\n\")\n\t\t\treturn best_clf, best_params\n\t\telif tune_mode == \"hyperopt\":\n\t\t\tfrom hyperopt import hp\n\t\t\tfrom hyperopt.pyll.base import scope\n\t\t\tspace = {\n\t\t\t\t'n_estimators': scope.int(hp.quniform('n_estimators', 50, 150, 10)),\n\t\t\t\t'max_depth': scope.int(hp.quniform('max_depth', 5, 30, 1)),\n\t\t\t\t'min_samples_split': scope.int(hp.quniform('min_samples_split', 2, 10, 1)),\n\t\t\t\t'min_samples_leaf': scope.int(hp.quniform('min_samples_leaf', 1, 10, 1)),\n\t\t\t\t'max_features': hp.choice('max_features', [\"sqrt\", None, 0.5, 0.7, 0.9]),\n\t\t\t\t'clf': hp.choice('clf', [\"rf\",\"et\",\"gbm\"])\n\t\t\t}\n\t\t\t#space = {\n\t\t\t#\t'n_estimators': scope.int(hp.quniform('n_estimators', 50, 150, 10)),\n\t\t\t#\t'max_depth': scope.int(hp.quniform('max_depth', 3, 30, 1)),\n\t\t\t#\t'eta': scope.float(hp.quniform('eta', 0.01, 0.2, 0.01)),\n\t\t\t#\t'gamma': scope.float(hp.quniform('gamma', 0.01, 0.2, 0.01)),\n\t\t\t#\t'colsample_bytree': hp.choice('colsample_bytree', [0.4,0.5,0.6,0.7,1.0]),\n\t\t\t#\t'subsample': hp.choice('subsample', [0.5,0.6,0.7,0.8,1.0]),\n\t\t\t#\t'clf': hp.choice('clf', [\"xgb\"])\n\t\t\t#}\n\n\t\t\tbest_clf, best_params = hyper_optimize(data_x,data_y,cat_labels=cat_labels,space=space,max_evals=50)\n\t\t\treturn best_clf, best_params\n\t\telse:\n\t\t\tclf = chosen_clf\n\t\t\tclf.set_params(**clf_params)\n\t\t\tif clf.__class__.__name__ in [\"RandomForestClassifier\",\"ExtraTreesClassifier\",\"XGBClassifier\"]:\n\t\t\t\tclf.set_params(**{\"n_jobs\":3,\"random_state\":42,\"oob_score\":True,\"bootstrap\":True})\n\t\t\telse:\n\t\t\t\tclf.set_params(**{\"random_state\":42})\n\t\t\tclf.fit(data_x,data_y)\n\n\t\tfeature_names = cat_labels + num_labels\n\n\t\tzipped = zip(feature_names, clf.feature_importances_)\n\t\tsorted_zip = sorted(zipped, key=lambda x: x[1], reverse=True)\n\t\tsys.stderr.write(\"o Feature importances:\\n\\n\")\n\t\tfor name, importance in sorted_zip:\n\t\t\tsys.stderr.write(name + \"=\" + str(importance) + \"\\n\")\n\n\t\tif hasattr(clf, \"oob_score_\"):\n\t\t\tsys.stderr.write(\"\\no OOB score: \" + str(clf.oob_score_)+\"\\n\")\n\n\t\tsys.stderr.write(\"\\no Serializing model...\\n\")\n\n\t\tjoblib.dump((clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts), model_path, compress=3)",
"def _TestEnsemble(self, config):\n # Note that the initialization of the lattice must be the same across the\n # units dimension (otherwise the loss will be different).\n # We fix the random seed to make sure we get similar initialization.\n if self.disable_ensembles:\n return\n config = dict(config)\n config[\"num_training_epoch\"] = 3\n config[\"kernel_initializer\"] = \"constant\"\n losses = []\n for units, lattice_index in [(1, 0), (3, 0), (3, 2)]:\n config[\"units\"] = units\n config[\"lattice_index\"] = lattice_index\n tf.keras.utils.set_random_seed(42)\n losses.append(self._TrainModel(config))\n self.assertAlmostEqual(min(losses), max(losses), delta=self.loss_eps)",
"def QuickML_Ensembling(X_train, y_train, X_test, y_test='', modeltype='Regression',\r\n Boosting_Flag=False,\r\n scoring='', verbose=0):\r\n start_time = time.time()\r\n seed = 99\r\n FOLDS = 5\r\n model_dict = {}\r\n model_tuples = []\r\n if len(X_train) <= 100000 and X_train.shape[1] < 50:\r\n NUMS = 100\r\n else:\r\n try:\r\n X_train = X_train.sample(frac=0.30,random_state=99)\r\n y_train = y_train[X_train.index]\r\n except:\r\n pass\r\n NUMS = 200\r\n ###### This is where we start performing ensembling of multiple simpler models ###\r\n if modeltype == 'Regression':\r\n if scoring == '':\r\n scoring = 'neg_mean_squared_error'\r\n #scv = ShuffleSplit(n_splits=FOLDS,random_state=seed)\r\n scv = KFold(n_splits=FOLDS, shuffle=False)\r\n if Boosting_Flag is None:\r\n ## Create an ensemble model ####\r\n model5 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(\r\n random_state=seed, max_depth=1, min_samples_leaf=2\r\n ), n_estimators=NUMS, random_state=seed)\r\n model_tuples.append(('Adaboost',model5))\r\n elif not Boosting_Flag:\r\n model5 = LassoLarsCV(cv=scv)\r\n model_tuples.append(('LassoLarsCV',model5))\r\n else:\r\n model5 = LassoLarsCV(cv=scv)\r\n model_tuples.append(('LassoLarsCV',model5))\r\n if Boosting_Flag is None:\r\n model6 = BaggingRegressor(DecisionTreeRegressor(random_state=seed),\r\n n_estimators=NUMS,random_state=seed)\r\n model_tuples.append(('Bagging_Regressor',model6))\r\n elif not Boosting_Flag:\r\n model6 = LinearSVR()\r\n model_tuples.append(('Linear_SVR',model6))\r\n else:\r\n model6 = DecisionTreeRegressor(max_depth=5,min_samples_leaf=2)\r\n model_tuples.append(('Decision_Tree',model6))\r\n model7 = KNeighborsRegressor(n_neighbors=8)\r\n model_tuples.append(('KNN_Regressor',model7))\r\n if Boosting_Flag is None:\r\n #### If the Boosting_Flag is True, it means Boosting model is present.\r\n ### So choose a different kind of classifier here\r\n model8 = DecisionTreeRegressor(max_depth=5,min_samples_leaf=2)\r\n model_tuples.append(('Decision_Tree',model8))\r\n elif not Boosting_Flag:\r\n #### If the Boosting_Flag is True, it means Boosting model is present.\r\n ### So choose a different kind of classifier here\r\n model8 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(\r\n random_state=seed, max_depth=1, min_samples_leaf=2\r\n ), n_estimators=NUMS, random_state=seed)\r\n model_tuples.append(('Adaboost',model8))\r\n else:\r\n model8 = RandomForestRegressor(bootstrap = False,\r\n max_depth = 10,\r\n max_features = 'auto',\r\n min_samples_leaf = 2,\r\n n_estimators = 200,\r\n random_state=99)\r\n model_tuples.append(('RF_Regressor',model8))\r\n else:\r\n if scoring == '':\r\n scoring = 'accuracy'\r\n num_classes = len(np.unique(y_test))\r\n scv = StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=seed)\r\n if Boosting_Flag is None:\r\n ## Create an ensemble model ####\r\n model5 = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(\r\n random_state=seed, max_depth=1, min_samples_leaf=2\r\n ), n_estimators=NUMS, random_state=seed)\r\n model_tuples.append(('Adaboost',model5))\r\n elif not Boosting_Flag:\r\n model5 = LinearDiscriminantAnalysis()\r\n model_tuples.append(('Linear_Discriminant',model5))\r\n else:\r\n model5 = LogisticRegressionCV(Cs=[0.001,0.01,0.1,1,10,100], tol=0.01,\r\n solver='liblinear', random_state=seed)\r\n model_tuples.append(('Logistic_Regression_CV',model5))\r\n if Boosting_Flag is None:\r\n model6 = DecisionTreeClassifier(max_depth=5,min_samples_leaf=2)\r\n model_tuples.append(('Decision_Tree',model6))\r\n elif not 
Boosting_Flag:\r\n model6 = LinearSVC()\r\n model_tuples.append(('Linear_SVC',model6))\r\n else:\r\n model6 = DecisionTreeClassifier(max_depth=5,min_samples_leaf=2)\r\n model_tuples.append(('Decision_Tree',model6))\r\n if modeltype == 'Binary_Classification':\r\n model7 = GaussianNB()\r\n else:\r\n model7 = MultinomialNB()\r\n model_tuples.append(('Naive_Bayes',model7))\r\n if Boosting_Flag is None:\r\n #### If the Boosting_Flag is True, it means Boosting model is present.\r\n ### So choose a different kind of classifier here\r\n model8 = RandomForestClassifier(bootstrap = False,\r\n max_depth = 10,\r\n max_features = 'auto',\r\n min_samples_leaf = 2,\r\n n_estimators = 200,\r\n random_state=99)\r\n model_tuples.append(('Bagging_Classifier',model8))\r\n elif not Boosting_Flag:\r\n #### If the Boosting_Flag is True, it means Boosting model is present.\r\n ### So choose a different kind of classifier here\r\n sgd_best_model = SGDClassifier(alpha=1e-06,\r\n loss='log',\r\n max_iter=1000,\r\n penalty='l2',\r\n learning_rate = 'constant',\r\n eta0 = .1,\r\n random_state = 3,\r\n tol=None)\r\n model8 = OneVsRestClassifier(sgd_best_model)\r\n model_tuples.append(('One_vs_Rest_Classifier',model8))\r\n else:\r\n model8 = RandomForestClassifier(bootstrap = False,\r\n max_depth = 10,\r\n max_features = 'auto',\r\n min_samples_leaf = 2,\r\n n_estimators = 200,\r\n random_state=99)\r\n model_tuples.append(('Bagging_Classifier',model8))\r\n model_dict = dict(model_tuples)\r\n models, results = run_ensemble_models(model_dict, X_train, y_train, X_test, y_test,\r\n scoring, modeltype)\r\n return models, results",
"def __init__(self, table, tree_prior, config):\n logger.info('TreeCatTrainer of %d x %d data', table.num_rows,\n table.num_cols)\n assert isinstance(table, Table)\n N = table.num_rows # Number of rows.\n V = table.num_cols # Number of features, i.e. vertices.\n TreeTrainer.__init__(self, N, V, tree_prior, config)\n assert self._num_rows == N\n assert len(self._added_rows) == 0\n self._table = table\n self._assignments = np.zeros([N, V], dtype=np.int8)\n\n # These are useful dimensions to import into locals().\n E = V - 1 # Number of edges in the tree.\n K = V * (V - 1) // 2 # Number of edges in the complete graph.\n M = self._config['model_num_clusters'] # Clusters per latent.\n assert M <= 128, 'Invalid model_num_clusters > 128: {}'.format(M)\n self._VEKM = (V, E, K, M)\n\n # Use Jeffreys priors.\n self._vert_prior = 0.5\n self._edge_prior = 0.5 / M\n self._feat_prior = 0.5 / M\n self._meas_prior = self._feat_prior * np.array(\n [(table.ragged_index[v + 1] - table.ragged_index[v])\n for v in range(V)],\n dtype=np.float32).reshape((V, 1))\n self._gammaln_table = gammaln(\n np.arange(1 + N, dtype=np.float32) + self._edge_prior)\n assert self._gammaln_table.dtype == np.float32\n\n # Sufficient statistics are maintained always.\n self._vert_ss = np.zeros([V, M], np.int32)\n self._edge_ss = np.zeros([E, M, M], np.int32)\n self._feat_ss = np.zeros([table.ragged_index[-1], M], np.int32)\n self._meas_ss = np.zeros([V, M], np.int32)\n\n # Temporaries.\n self._vert_probs = np.empty(self._vert_ss.shape, np.float32)\n self._edge_probs = np.empty(self._edge_ss.shape, np.float32)\n self._feat_probs = np.empty(self._feat_ss.shape, np.float32)\n self._meas_probs = np.empty(self._meas_ss.shape, np.float32)\n\n # Maintain edge_probs.\n np.add(self._edge_ss, self._edge_prior, out=self._edge_probs)",
"def run_cv_tcga_ccle(train_data_model,\n test_data_model,\n identifier,\n num_folds,\n shuffle_labels,\n model='lr',\n lasso=True,\n lasso_penalty=None,\n params={}):\n results = {\n 'gene_metrics': [],\n 'gene_auc': [],\n 'gene_aupr': [],\n 'gene_coef': []\n }\n if model == 'mlp':\n results['gene_param_grid'] = []\n results['learning_curves'] = []\n\n signal = 'shuffled' if shuffle_labels else 'signal'\n\n # the \"folds\" here refer to choosing different validation datasets,\n # the test dataset is the same (all valid CCLE cell lines)\n # the validation splitting happens in the LASSO code\n for fold_no in range(num_folds):\n\n # train on TCGA data, test on CCLE data\n X_train_raw_df = train_data_model.X_df\n X_test_raw_df = test_data_model.X_df\n y_train_df = train_data_model.y_df\n y_test_df = test_data_model.y_df\n\n if shuffle_labels:\n if cfg.shuffle_by_cancer_type:\n # in this case we want to shuffle labels independently for each cancer type\n # (i.e. preserve the total number of mutated samples in each)\n original_ones = y_train_df.groupby('DISEASE').sum()['status']\n y_train_df.status = shuffle_by_cancer_type(y_train_df, train_data_model.seed)\n y_test_df.status = shuffle_by_cancer_type(y_test_df, test_data_model.seed)\n new_ones = y_train_df.groupby('DISEASE').sum()['status']\n # label distribution per cancer type should be the same before\n # and after shuffling (or approximately the same in the case of\n # continuous labels)\n assert (original_ones.equals(new_ones) or\n np.all(np.isclose(original_ones.values, new_ones.values)))\n else:\n # we set a temp seed here to make sure this shuffling order\n # is the same for each gene between data types, otherwise\n # it might be slightly different depending on the global state\n with temp_seed(train_data_model.seed):\n y_train_df.status = np.random.permutation(y_train_df.status.values)\n y_test_df.status = np.random.permutation(y_test_df.status.values)\n\n X_train_df, X_test_df = tu.preprocess_data(\n X_train_raw_df,\n X_test_raw_df,\n # gene_features should be the same for TCGA and CCLE,\n # just use the training data features\n train_data_model.gene_features,\n y_df=y_train_df,\n feature_selection=train_data_model.feature_selection,\n num_features=train_data_model.num_features,\n mad_preselect=train_data_model.mad_preselect,\n seed=train_data_model.seed,\n )\n\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # set the hyperparameters\n classifiers_list = {\n 'lr': clf.train_classifier,\n 'mlp': clf.train_mlp\n }\n train_model = classifiers_list[model]\n train_model_params = apply_model_params(train_model,\n lasso=lasso,\n lasso_penalty=lasso_penalty,\n model=model)\n if model == 'mlp':\n model_results = train_model_params(\n X_train=X_train_df,\n X_test=X_test_df,\n y_train=y_train_df,\n y_test=y_test_df,\n seed=train_data_model.seed,\n n_folds=cfg.mlp_folds,\n max_iter=cfg.mlp_max_iter,\n search_hparams=params\n )\n (net, cv_pipeline, labels, preds) = model_results\n (y_train_df,\n y_cv_df) = labels\n (y_pred_train,\n y_pred_cv,\n y_pred_test) = preds\n else:\n model_results = train_model_params(\n X_train=X_train_df,\n X_test=X_test_df,\n y_train=y_train_df,\n seed=train_data_model.seed,\n n_folds=cfg.folds,\n max_iter=cfg.max_iter\n )\n (cv_pipeline, labels, preds) = model_results\n (y_train_df,\n y_cv_df) = labels\n (y_pred_train,\n y_pred_cv,\n y_pred_test) = preds\n except ValueError:\n raise OneClassError(\n 'Only one class present in test set for identifier: {}\\n'.format(identifier)\n )\n\n if 
model != 'mlp':\n # get coefficients\n coef_df = extract_coefficients(\n cv_pipeline=cv_pipeline,\n feature_names=X_train_df.columns,\n signal=signal,\n seed=train_data_model.seed,\n name='classify'\n )\n coef_df = coef_df.assign(identifier=identifier)\n coef_df = coef_df.assign(fold=fold_no)\n else:\n coef_df = pd.DataFrame()\n # get parameter grid\n results['gene_param_grid'].append(\n generate_param_grid(cv_pipeline.cv_results_, fold_no)\n )\n results['learning_curves'].append(\n history_to_tsv(net, fold_no)\n )\n\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n metric_df, gene_auc_df, gene_aupr_df = clf.get_metrics(\n y_train_df, y_test_df, y_pred_cv, y_pred_train,\n y_pred_test, identifier, 'N/A', signal,\n train_data_model.seed, fold_no, y_cv_df\n )\n results['gene_metrics'].append(metric_df)\n results['gene_auc'].append(gene_auc_df)\n results['gene_aupr'].append(gene_aupr_df)\n results['gene_coef'].append(coef_df)\n except ValueError:\n raise OneClassError(\n 'Only one class present in test set for identifier: {}\\n'.format(identifier)\n )\n\n return results",
"def sample_estimator(model, num_classes, feature_list, train_loader):\n\n model.eval()\n group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)\n correct, total = 0, 0\n num_output = len(feature_list)\n num_sample_per_class = np.zeros(num_classes)\n list_features = []\n for i in range(num_output):\n temp_list = []\n for j in range(num_classes):\n temp_list.append(0)\n list_features.append(temp_list)\n\n for data, target, _ in train_loader:\n total += data.size(0)\n\n data = Variable(data.cuda())\n output, out_features = model.feature_list(data)\n\n # get hidden features\n for i in range(num_output):\n out_features[i] = out_features[i].view(\n out_features[i].size(0), out_features[i].size(1), -1)\n out_features[i] = torch.mean(out_features[i].data, 2)\n\n # compute the accuracy\n pred = output.data.max(1)[1]\n equal_flag = pred.eq(target.cuda()).cpu()\n correct += equal_flag.sum()\n\n # construct the sample matrix\n for i in range(data.size(0)):\n label = target[i]\n if num_sample_per_class[label] == 0:\n out_count = 0\n for out in out_features:\n list_features[out_count][label] = out[i].view(1, -1)\n out_count += 1\n else:\n out_count = 0\n for out in out_features:\n list_features[out_count][label] \\\n = torch.cat((list_features[out_count][label], out[i].view(1, -1)), 0)\n out_count += 1\n num_sample_per_class[label] += 1\n\n sample_class_mean = []\n out_count = 0\n for num_feature in feature_list:\n temp_list = torch.Tensor(num_classes, int(num_feature)).cuda()\n for j in range(num_classes):\n # print(f'|| Num classes: {num_classes}, j: {j}, temp_list: {temp_list}, list features: {list_features[out_count][j]}')\n temp_list[j] = torch.mean(list_features[out_count][j], 0)\n sample_class_mean.append(temp_list)\n out_count += 1\n\n precision = []\n for k in range(num_output):\n X = 0\n for i in range(num_classes):\n if i == 0:\n X = list_features[k][i] - sample_class_mean[k][i]\n else:\n X = torch.cat(\n (X, list_features[k][i] - sample_class_mean[k][i]), 0)\n\n # find inverse\n group_lasso.fit(X.cpu().numpy())\n temp_precision = group_lasso.precision_\n temp_precision = torch.from_numpy(temp_precision).float().cuda()\n precision.append(temp_precision)\n\n print('\\n Training Accuracy:({:.2f}%)\\n'.format(100. * correct / total))\n\n return sample_class_mean, precision",
"def train_model_warm(args):\n cfg, lbl_name = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n result = util.get_clus_reg_by_dir('models/%s/%s' % (uid, lbl_name))\n cls, regs = result[5] # we know this is bad but could possibly work\n cls_data = torch.load(cls)\n print(cls_data.keys())\n cls_net = cls_data['model']\n xmean, xstd = cls_data['xScale']\n print('xmean', xmean, 'xstd', xstd)\n # cls_net.extendXYScale((xmean, xstd))\n expert = Experts([[2, 60, 75]] * 5)\n run_the_training(args, cls_net, expert)",
"def train_linear_ensemble(x, y, alpha, max_iter, n_ensembles):\n # x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n x_train, y_train = x, y\n ensemble_models = []\n for i in range(n_ensembles):\n samples = sample_without_replacement(n_population=x_train.shape[0], n_samples=(x_train.shape[0]/5))\n x_seg_train = pd.DataFrame()\n y_seg_train = pd.Series()\n for sample in samples:\n x_seg_train = pd.concat([x_seg_train, x_train.iloc[[sample]]])\n y_seg_train = pd.concat([y_seg_train, y_train.iloc[[sample]]])\n\n model: Ridge = Ridge(alpha=alpha, normalize=True, max_iter=max_iter).fit(x_seg_train, y_seg_train)\n print(model.score(x_seg_train, y_seg_train))\n # print(model.score(x_test, y_test))\n ensemble_models.append(model)\n\n return ensemble_models",
"def test_dirichletensemble():\n np.random.seed(seed=2)\n X, y = make_blobs(n_samples=200, centers=2, n_features=2, cluster_std=4, \n random_state=2)\n n_train = 100\n trainX, testX = X[:n_train, :], X[n_train:, :]\n trainy, testy = y[:n_train], y[n_train:]\n n_members = 5\n stack = DirichletEnsemble(N=5000)\n for i in range(n_members):\n model = _get_fitted_random_model(trainX, trainy)\n train_batches = CustomIterator(trainX, trainy, 32)\n val_batches = CustomIterator(testX, testy, 32)\n m = KerasMember(keras_model=model, name=\"Model \" + str(i),\n train_batches=train_batches, val_batches=val_batches)\n stack.add_member(m)\n stack.fit()\n stack.describe()\n return True",
"def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)",
"def __init__(self,\n ensemble,\n p_vae,\n q_vae,\n latent_size=20):\n\n super().__init__()\n self.ensemble = ensemble\n self.p_vae = p_vae\n self.q_vae = q_vae\n self.latent_size = latent_size",
"def randomSplitLOOBaselineCV(ncvs, svm_mat, labels, subjarray, motorOutput=False, permutation=False, decoder='similarity', nproc=5, featsel=False):\n \n\n ntasks = len(np.unique(labels))\n nsamples = svm_mat.shape[0]\n nsubjs = nsamples/ntasks\n\n subjects = np.unique(subjarray)\n indices = np.arange(nsamples)\n \n #numsubjs_perfold = 8\n numsubjs_perfold = 24\n if nsubjs%numsubjs_perfold!=0: \n raise Exception(\"Error: Folds don't match number of subjects\")\n \n nfolds = nsubjs/numsubjs_perfold\n subj_array_folds = subjarray.copy()\n \n inputs = [] \n \n nfolds = int(nfolds)\n for fold in range(nfolds):\n# #test_subjs = np.random.choice(subj_array_folds,numsubjs_perfold,replace=False)\n# test_subjs = [subjects[fold]]\n# train_subjs_all = np.delete(subjects,test_subjs)\n ## run 8 fold CV\n test_subjs_ind = np.random.choice(np.arange(len(subj_array_folds)),numsubjs_perfold,replace=False)\n test_subjs = subj_array_folds[test_subjs_ind]\n # Training subjects are all subjects - those in test set\n train_subjs_all = np.delete(subjects,test_subjs)\n # For each fold, train on test subjects independently\n\n for test_subj in test_subjs:\n # Randomly sample half of train set subjects for each cv (CV bootstrapping)\n train_subjs = np.random.choice(train_subjs_all,\n int(len(train_subjs_all)),\n replace=True)\n# train_subjs = train_subjs_all\n\n train_ind = []\n for subj in train_subjs:\n train_ind.extend(np.where(subjarray==subj)[0])\n\n # Only obtain single test subject\n test_ind = np.where(subjarray==test_subj)[0]\n \n train_ind = np.asarray(train_ind)\n test_ind = np.asarray(test_ind)\n\n trainset = svm_mat[train_ind,:]\n testset = svm_mat[test_ind,:]\n\n # Normalize trainset and testset\n mean = np.mean(svm_mat[train_ind,:],axis=0)\n mean.shape = (1,len(mean))\n std = np.std(svm_mat[train_ind,:],axis=0)\n std.shape = (1,len(std))\n\n trainset = np.divide((trainset - mean),std)\n testset = np.divide((testset - mean),std)\n \n trainlabels = labels[train_ind].copy()\n testlabels = labels[test_ind].copy()\n\n\n if motorOutput:\n ## Feature selection and downsampling\n unique_labels = np.unique(labels)\n feat1_labs = np.where(trainlabels==unique_labels[0])[0]\n feat2_labs = np.where(trainlabels==unique_labels[1])[0]\n # Perform t-test\n t, p = stats.ttest_rel(trainset[feat1_labs,:],trainset[feat2_labs,:],axis=0)\n h0, qs = mc.fdrcorrection0(p)\n # h0 = p<0.1\n # Construct feature masks\n feat1_mask = np.multiply(t>0,h0).astype(bool)\n feat2_mask = np.multiply(t<0,h0).astype(bool)\n # feat1_mask = t>0\n # feat2_mask = t<0\n # Downsample training set into original vertices into 2 ROI signals\n trainset_downsampled = np.zeros((trainset.shape[0],2))\n trainset_downsampled[:,0] = np.nanmean(trainset[:,feat1_mask],axis=1)\n trainset_downsampled[:,1] = np.nanmean(trainset[:,feat2_mask],axis=1)\n #trainset_downsampled = trainset[:,h0]\n # Downsample test set into original vertices\n testset_downsampled = np.zeros((testset.shape[0],2))\n testset_downsampled[:,0] = np.nanmean(testset[:,feat1_mask],axis=1)\n testset_downsampled[:,1] = np.nanmean(testset[:,feat2_mask],axis=1)\n #testset_downsampled = testset[:,h0]\n\n ## permutation\n if permutation:\n np.random.shuffle(trainlabels)\n \n if np.sum(feat1_mask)==0 or np.sum(feat2_mask==0):\n inputs.append((trainset,testset,trainlabels,testlabels,decoder))\n else:\n inputs.append((trainset_downsampled,testset_downsampled,trainlabels,testlabels,decoder))\n \n elif featsel:\n #### Revision addition - select for vertices based on FDR-corrected p<0.05 for 
noncircular accuracy decoding of motor output\n unique_labels = np.unique(labels)\n feat1_labs = np.where(trainlabels==unique_labels[0])[0]\n feat2_labs = np.where(trainlabels==unique_labels[1])[0]\n # Perform t-test\n t, p = stats.ttest_rel(trainset[feat1_labs,:],trainset[feat2_labs,:],axis=0)\n h0, qs = mc.fdrcorrection0(p)\n # Construct feature masks\n #feat_mask = h0\n feat_mask = p<0.05\n # use fdr-corrected vertices for feature selection \n trainset = trainset[:,feat_mask]\n testset = testset[:,feat_mask]\n\n # if permutation\n if permutation:\n np.random.shuffle(trainlabels)\n\n inputs.append((trainset,testset,trainlabels,testlabels,decoder))\n else:\n ## permutation\n if permutation:\n np.random.shuffle(trainlabels)\n# trainlabels = labels[train_ind]\n# testlabels = labels[test_ind]\n# f, p = f_classif(trainset,trainlabels)\n# thresh = 0.1\n# feat_mask = p < thresh\n# inputs.append((trainset[:,feat_mask],testset[:,feat_mask],labels[train_ind],labels[test_ind])) \n \n inputs.append((trainset,testset,trainlabels,testlabels,decoder))\n\n \n \n subj_array_folds = np.delete(subj_array_folds,test_subjs)\n \n #print('trainset.shape:', trainset.shape)\n #print('testset.shape:', testset.shape)\n #print('trainlabels:', inputs[0][2])\n #print('testlabels:', inputs[0][3])\n #print('inputs[0]:', len(inputs[0]))\n pool = mp.Pool(processes=nproc)\n scores = pool.starmap_async(_decoding,inputs).get()\n pool.close()\n pool.join()\n \n# subj_acc = np.zeros((len(subjects),))\n# scount = 0\n# i = 0\n# for subj in subjects:\n# subjmean = []\n# for cv in range(ncvs):\n# subjmean.append(scores[i])\n# i += 1\n \n# subj_acc[scount] = np.mean(subjmean)\n \n# scount += 1\n\n# return subj_acc\n acc = []\n r_match = []\n r_mismatch = []\n confusion_mat = []\n for score in scores:\n acc.extend(score[0])\n r_match.append(score[1])\n r_mismatch.append(score[2])\n confusion_mat.append(score[3])\n \n confusion_mat = np.asarray(confusion_mat)\n confusion_mat = np.mean(confusion_mat,axis=0)\n\n return acc, r_match, r_mismatch, confusion_mat",
"def trainSN(options, epoch, device):\n \"\"\"-------------------------------CONFIG----------------------------------\"\"\"\n parser = argparse.ArgumentParser(description=\"PyTorch Regression GAN\")\n parser = general_parser(parser)\n opt = specific_parser(\n parser=parser, run_folder=options.log_dir, mode='train', tot_epochs=30, pretrained_GAN=options.checkpoint_dir,\n GAN_epoch=epoch, acc_log_freq=options.acc_log_freq, loss_log_freq=options.loss_log_freq,\n batch_size_SN=options.batch_size_SN, images_log_freq=options.images_log_freq,\n data_dir_train=options.data_dir_train2, data_dir_test=options.data_dir_test2,\n experiment_name='SN'+str(epoch), sar_c=options.sar_c, optical_c=options.optical_c,\n save_model_freq=1000, res_block_N=options.res_block_N)\n\n opt = config_routine(opt)\n\n \"\"\"-----------------------------DATA LOADER--------------------------------\"\"\"\n train_dataset = EUSARDataset(os.path.join(options.data_dir_train2), True, False, options.sar_c, options.optical_c)\n train_dataset = get_subset(train_dataset, options.prc_test)\n train_dataset = DataLoader(train_dataset, batch_size=options.batch_size_SN, shuffle=True,\n num_workers=options.workers, pin_memory=True, drop_last=False)\n\n test_dataset = EUSARDataset(os.path.join(options.data_dir_test2), True, False, options.sar_c, options.optical_c)\n test_dataset = get_subset(test_dataset, options.prc_test, True)\n test_dataset = DataLoader(test_dataset, batch_size=options.batch_size_SN, shuffle=False,\n num_workers=options.workers, pin_memory=True, drop_last=False)\n\n \"\"\"--------------------------------TRAIN-----------------------------------\"\"\"\n # Init model\n model = SN(opt, device)\n\n # set up tensorboard logging\n writer = SummaryWriter(log_dir=os.path.join(opt.tb_dir))\n # Model Training\n model.train(train_dataset, test_dataset, writer)",
"def __init__(self, data_provider, growth, depth,\n total_blocks,stages, keep_prob,\n weight_decay, nesterov_momentum, model_type, dataset,\n should_save_logs, should_save_model,\n renew_logs=False,\n reduction=1.0,\n bc_mode=False,\n **kwargs):\n self.data_provider = data_provider\n self.data_shape = data_provider.data_shape # (W,H,C)\n self.n_classes = data_provider.n_classes\n self.depth = depth\n\n #self.growth_rate = growth_rate\n # how many features will be received after first convolution\n # value the same as in the original Torch code\n self.growth = growth\n self.first_output_features = growth[0] * 2\n self.total_blocks = total_blocks\n self.stages = stages\n self.group_1x1 = kwargs['group_1x1']\n self.group_3x3 = kwargs['group_3x3']\n self.condense_factor = kwargs['condense_factor']\n self.bottleneck = kwargs['bottleneck']\n self.group_lasso_lambda= kwargs['group_lasso_lambda']\n\n #self.layers_per_block = (depth - (total_blocks + 1)) // total_blocks\n self.bc_mode = bc_mode\n # compression rate at the transition layers\n self.reduction = reduction\n '''\n if not bc_mode:\n print(\"Build %s model with %d blocks, \"\n \"%d composite layers each.\" % (\n model_type, self.total_blocks, self.layers_per_block))\n if bc_mode:\n self.layers_per_block = self.layers_per_block // 2\n print(\"Build %s model with %d blocks, \"\n \"%d bottleneck layers and %d composite layers each.\" % (\n model_type, self.total_blocks, self.layers_per_block,\n self.layers_per_block))\n '''\n print(\"Reduction at transition layers: %.1f\" % self.reduction)\n\n self.keep_prob = keep_prob\n self.weight_decay = weight_decay\n self.nesterov_momentum = nesterov_momentum\n self.model_type = model_type\n self.dataset_name = dataset\n self.should_save_logs = should_save_logs\n self.should_save_model = should_save_model\n self.renew_logs = renew_logs\n self.batches_step = 0\n\n self._stage = 0\n self._define_inputs()\n self._build_graph()\n self._initialize_session()\n self._count_trainable_params()",
"def build_model(X_train, Y_train):\n #Choosing a straighforward single tree model to make training tractable in terms of time\n DTC = DecisionTreeClassifier(random_state = 11)\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=DTC))\n ])\n\n parameters = {'clf__estimator__criterion': [\"gini\", \"entropy\"],\n 'clf__estimator__splitter': [\"best\", \"random\"],\n 'clf__estimator__max_depth': randint(3, 6),\n 'clf__estimator__min_samples_split': randint(2,6)}\n\n grid_obj = RandomizedSearchCV(pipeline,parameters,n_iter=5, cv=5 )\n grid_obj.fit(X_train, Y_train)\n\n return grid_obj.best_estimator_",
"def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y",
"def fit(self, X_train, y_train):\n for i in range(self.N):\n h = RandomDecisionTree(candidate_splits=self.candidate_splits, depth=self.max_depth)\n h = h.fit(*self.bootstrap(X_train, y_train))\n self.learners.append(h)",
"def train(self,trainset):\n \n self.n_classes = len(trainset.metadata['targets'])\n if self.n_classes > 2:\n raise ValueError('Invalid. Should have 2 classes.')\n \n features = np.zeros((len(trainset),trainset.metadata['input_size']))\n labels = np.zeros((len(trainset)),dtype='int')\n for i,xy in enumerate(trainset):\n x,y = xy\n features[i] = x\n labels[i] = y\n\n if self.criterion == 'information_gain':\n def criterion_fcn(labels0, labels1):\n return libmilk.supervised.tree.information_gain(labels0, labels1, include_entropy=self.include_entropy)\n elif self.criterion == 'z1_loss':\n def criterion_fcn(labels0, labels1):\n return libmilk.supervised.tree.z1_loss(labels0, labels1)\n else:\n raise ValueError('Invalid parameter: '+self.criterion+'. Should be either \\'information_gain\\' or \\'z1_loss\\'')\n\n learner = libmilk.supervised.tree_learner(criterion=criterion_fcn,min_split=self.min_split,return_label=True)\n #self.subsample = subsample\n #self.R = R\n model = learner.train(features, labels)\n \n self.tree = model",
"def train_ours(self, config, samples, gpu=False, batch_size=None):\n tic = time.time()\n\n gmm = GMM(**config)\n if batch_size is not None:\n samples = samples.chunk(int(math.ceil(samples.size(0) / batch_size)))\n history = gmm.fit(samples, gpu=gpu)\n\n toc = time.time()\n\n nll = history.neg_log_likelihood[-1]\n\n print(f\"Training with pycave took {toc-tic:.2f} seconds.\")\n print(f\" Number of iterations was: {len(history):,}\")\n print(f\" Negative log-likelihood was: {nll:.4f}\")\n\n return toc - tic",
"def train(self, training_data, chunk_size=100):\n # For some reason, for the SVM to work, the keys need to be in alphabetical order\n training_data = {k : training_data[k] for k in sorted(training_data)}\n\n # Compile all author texts into one large text to then be broken down\n for auth in training_data:\n training_data[auth] = '\\n\\n'.join(training_data[auth])\n\n self.auths = list(training_data.keys())\n self.chunk_size = chunk_size\n\n # Creates two lists, one of the texts and one of the corresponding author labels.\n labels = []\n texts = []\n for auth in training_data:\n lines = training_data[auth].split('\\n')\n for p in range( chunk_size, len(lines), chunk_size ):\n labels.append(auth) # authors per text in the training corpus\n texts.append('\\n'.join(lines[p-chunk_size : p])) # texts in the training corpus\n labels = array(labels)\n texts = array(texts)\n\n # Cleans the texts\n for i in range(len(texts)):\n texts[i] = self._clean(texts[i])\n\n # Generates the profiles from these tests\n profiles = zeros((len(texts), len(self.alph)**self.N))\n for i in range(len(texts)):\n profiles[i] = self._profile(texts[i])\n\n\n # Reduces the features and fits the model\n self.train_data = [profiles, labels]\n self._reduceFeatures()\n\n self.model = SVC(kernel='linear')\n self.model.probability = True\n self.model.fit(self.train_data[0], self.train_data[1])"
] | [
"0.5890479",
"0.5852123",
"0.5740387",
"0.56975913",
"0.5632382",
"0.5611414",
"0.5605243",
"0.559962",
"0.5548124",
"0.5522018",
"0.54916924",
"0.5475484",
"0.54413205",
"0.5394088",
"0.53637904",
"0.5348193",
"0.53204775",
"0.53113496",
"0.5287788",
"0.5278241",
"0.5258622",
"0.5257364",
"0.5255909",
"0.52536654",
"0.5247148",
"0.5239782",
"0.5236736",
"0.5233521",
"0.5230751",
"0.52259964"
] | 0.6676452 | 0 |
Prepare mock axis device. | def setup_mock_axis_device(mock_device):
def mock_constructor(host, username, password, port, web_proto):
"""Fake the controller constructor."""
mock_device.host = host
mock_device.username = username
mock_device.password = password
mock_device.port = port
return mock_device
mock_device.side_effect = mock_constructor
mock_device.vapix.params.system_serialnumber = MAC
mock_device.vapix.params.prodnbr = "prodnbr"
mock_device.vapix.params.prodtype = "prodtype"
mock_device.vapix.params.firmware_version = "firmware_version" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n serial_number = os.environ.get(\"ANDROID_SERIAL\", \"\")\n self.assertTrue(serial_number, \"$ANDROID_SERIAL is empty\")\n self._dut = utils.AndroidDevice(serial_number)\n self._temp_dir = tempfile.mkdtemp()",
"def setUp(self):\n # Initialize runtime and MDK:\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"something\")\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()\n self.session = self.mdk.session()",
"def setUp(self):\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"something\")\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()",
"def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)",
"def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")",
"def setUp(self):\n self.dbus_mock = MagicMock()\n self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n }\n self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.dbus_mock.Interface.return_value.Get = mock_get\n self.dbus_mock.Interface.return_value.Set = mock_set\n self.dbus_mock.Interface.return_value.GetAll = mock_get_all\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import adapter\n self.module_under_test = adapter\n self.adapter_device = 'hci0'\n self.adapter_name = 'linaro-alip'\n self.path = '/org/bluez/hci0'",
"def test_create_device_data(self):\n pass",
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass",
"def setUp(self):\n self.device_key = \"some_key\"\n self.factory = WAPMF(self.device_key)",
"def setUp(self):\n\n TrhphTestCase.setUp(self)\n\n def evt_callback(event):\n log.info(\"CALLBACK: %s\" % str(event))\n\n # needed by DriverTestMixin\n self.driver = TrhphInstrumentDriver(evt_callback)\n self.comms_config = {\n 'addr': self.device_address,\n 'port': self.device_port}",
"def test_create_device1(self):\n pass",
"def setUp(self):\n super().setUp()\n self.devices = _DEVICE_STRATEGY()\n command_line = [\"pool\", \"create\", self._POOLNAME] + self.devices\n RUNNER(command_line)",
"def prepare(self):\n super(Test200SmartSanityClear005, self).prepare()\n\n self.logger.info('Preconditions:')\n self.logger.info('1. Open Micro/WINr; ')\n self.logger.info('2. Set up connection with PLC;')\n self.logger.info('3. Download a project which has OB,DB,SDB;')\n self.MicroWIN.test_prepare('reset_factory_01.smart', False)\n # set cpu mode to run\n self.PLC['1'].set_plc_mode(1)\n self.memory_options = self.PLC['1'].find('memory_options')\n # force some value\n self.memory_options.force('v', 'byte', 0, value=self.force_value)\n time.sleep(5)\n self.PLC['1'].set_plc_mode(0)",
"async def test_device_not_accessible(hass):\n with patch.object(axis.device, \"get_device\", side_effect=axis.errors.CannotConnect):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}",
"def setUp(cls) -> None:\n cls.data = pd.read_csv(cls.data_location)\n cls.X = cls.data.drop(['y'], axis=1)\n cls.y = cls.data['y']\n\n cls.wml_credentials = get_wml_credentials()\n if not is_cp4d():\n cls.cos_credentials = get_cos_credentials()\n cls.cos_endpoint = cls.cos_credentials['endpoint_url']\n cls.bucket_name = cls.cos_credentials['bucket_name']\n cls.results_cos_path = cls.cos_credentials['results_cos_path']\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials.copy())",
"def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access",
"def setUp(self):\n self.mock_model = Mock()",
"async def setup_axis_integration(hass, config=ENTRY_CONFIG, options=ENTRY_OPTIONS):\n config_entry = MockConfigEntry(\n domain=AXIS_DOMAIN,\n data=deepcopy(config),\n options=deepcopy(options),\n version=3,\n unique_id=FORMATTED_MAC,\n )\n config_entry.add_to_hass(hass)\n\n with respx.mock:\n mock_default_vapix_requests(respx)\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n return config_entry",
"async def test_device_unknown_error(hass):\n with patch.object(axis.device, \"get_device\", side_effect=Exception):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}",
"def setUp(self):\n self.hass = get_test_home_assistant()\n with requests_mock.Mocker() as mock_req:\n self.setup_api(MOCK_DATA, mock_req)\n self.addCleanup(self.hass.stop)",
"def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.getcwd(), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)",
"def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.getcwd(), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)",
"def setUpClass(cls):\n cls.device = DeviceFactory.create()",
"def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.path.dirname(__file__), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)",
"def setUp (self):\n x_cryptos = ['ltc', 'xrp', 'xlm', 'eth']\n y_crypto = 'btc'\n kwargs = {'n_rolling_price':1, 'n_rolling_volume':2,\n 'x_assets':['SP500'], 'n_std_window':20}\n\n self.dm = cryp.DesignMatrix(x_cryptos=x_cryptos, y_crypto=y_crypto,\n **kwargs)",
"def test_add_device(self):\n\n pass",
"def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'rtutank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusRTU(devconfig['icsifaces'][0], points.values())\n self.server.start()",
"def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'tcptank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusTCP( devconfig['icsifaces'][0], points.values() )\n self.server.start()",
"def setUp(self):\n util.create_mocks()"
] | [
"0.6203854",
"0.6130477",
"0.61156213",
"0.6057996",
"0.60288376",
"0.5934885",
"0.5909151",
"0.5903352",
"0.5903352",
"0.5896112",
"0.5729406",
"0.572272",
"0.5654188",
"0.56205124",
"0.56069463",
"0.55872035",
"0.5541387",
"0.55159056",
"0.55146474",
"0.5496942",
"0.548481",
"0.5483693",
"0.5483693",
"0.5483194",
"0.54777694",
"0.54585314",
"0.54489946",
"0.54385465",
"0.54182905",
"0.5417248"
] | 0.7789957 | 0 |
Test that config flow fails on already configured device. | async def test_flow_fails_already_configured(hass):
await setup_axis_integration(hass)
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
mock_device = Mock()
mock_device.vapix.params.system_serialnumber = MAC
with patch(
"homeassistant.components.axis.config_flow.get_device",
return_value=mock_device,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_flow_fails_device_unavailable(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN, context={\"source\": \"user\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n with patch(\n \"homeassistant.components.axis.config_flow.get_device\",\n side_effect=config_flow.CannotConnect,\n ):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"1.2.3.4\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"errors\"] == {\"base\": \"device_unavailable\"}",
"async def test_failed_config_flow(hass, error_on_get_data):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=MOCK_CONFIG\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"errors\"] == {\"base\": \"auth\"}",
"async def test_setup_via_discovery_cannot_connect(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n await hass.async_block_till_done()\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n assert not result[\"errors\"]\n\n with _patch_discovery():\n result2 = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"form\"\n assert result2[\"step_id\"] == \"pick_device\"\n assert not result2[\"errors\"]\n\n with patch(\n \"homeassistant.components.wiz.wizlight.getBulbConfig\",\n side_effect=WizLightTimeOutError,\n ), _patch_discovery():\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_DEVICE: FAKE_MAC},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == \"abort\"\n assert result3[\"reason\"] == \"cannot_connect\"",
"async def test_abort_if_already_setup(hass: HomeAssistant) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n CONF_HOST: \"1.1.1.1\",\n CONF_PORT: 123,\n CONF_RESOURCES: [\"battery.voltage\"],\n },\n )\n config_entry.add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n mock_pynut = _get_mock_pynutclient(\n list_vars={\"battery.voltage\": \"voltage\"},\n list_ups={\"ups1\": \"UPS 1\"},\n )\n\n with patch(\n \"homeassistant.components.nut.PyNUTClient\",\n return_value=mock_pynut,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_HOST: \"1.1.1.1\",\n CONF_PORT: 123,\n },\n )\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.ABORT\n assert result2[\"reason\"] == \"already_configured\"",
"async def test_discovery_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY\n )\n\n with patch(\n \"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n side_effect=CannotConnectError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={},\n )\n\n assert result2[\"type\"] == \"abort\"\n assert result2[\"reason\"] == \"cannot_connect\"",
"async def test_sddp_exist(hass):\n _create_mock_config_entry(hass)\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_SSDP}, data=SSDP_DATA,\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_duplicate_error(hass: HomeAssistantType, requests_mock: Mocker) -> None:\n await setup_integration(hass, requests_mock, skip_entry_setup=True)\n\n mock_connection(requests_mock)\n\n user_input = {CONF_HOST: HOST}\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_IMPORT}, data=user_input\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n user_input = {CONF_HOST: HOST}\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data=user_input\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n discovery_info = {\n ATTR_UPNP_FRIENDLY_NAME: UPNP_FRIENDLY_NAME,\n ATTR_SSDP_LOCATION: SSDP_LOCATION,\n ATTR_UPNP_SERIAL: UPNP_SERIAL,\n }\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_setup_invalid_config(\n recorder_mock: Recorder, hass: HomeAssistant\n) -> None:\n with patch(\n \"homeassistant.components.sql.config_flow.sqlalchemy.create_engine\",\n ):\n assert not await async_setup_component(hass, DOMAIN, YAML_CONFIG_INVALID)\n await hass.async_block_till_done()",
"async def test_host_already_configured(hass: HomeAssistant, auth_error) -> None:\n\n entry = MockConfigEntry(domain=DOMAIN, data=DEMO_CONFIG_ENTRY)\n entry.add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=DEMO_USER_INPUT\n )\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_discovered_by_dhcp_connection_fails(\n hass: HomeAssistant, source, data\n) -> None:\n with patch(\n \"homeassistant.components.wiz.wizlight.getBulbConfig\",\n side_effect=WizLightTimeOutError,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"cannot_connect\"",
"async def test_on_connect_failed(hass: HomeAssistant, side_effect, error) -> None:\n flow_result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER, \"show_advanced_options\": True},\n )\n\n with PATCH_GET_HOST, patch(\n \"homeassistant.components.asuswrt.bridge.AsusWrtLegacy\"\n ) as asus_wrt:\n asus_wrt.return_value.connection.async_connect = AsyncMock(\n side_effect=side_effect\n )\n asus_wrt.return_value.async_get_nvram = AsyncMock(return_value={})\n asus_wrt.return_value.is_connected = False\n\n result = await hass.config_entries.flow.async_configure(\n flow_result[\"flow_id\"], user_input=CONFIG_DATA\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": error}",
"async def test_ssdp_discovery_dont_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"async def test_zeroconf_flow_already_configured(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"1.2.3.4\"",
"async def test_manual_configuration_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"1.2.3.4\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"def test_broken_config(broken_config):\n with pytest.raises(RuntimeError, match=\"Error reading config.yml\"):\n abcconfig.get_config(broken_config)",
"async def test_flow_link_cannot_connect(hass: HomeAssistant) -> None:\n disc_bridge = get_discovered_bridge()\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[disc_bridge],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n with patch.object(config_flow, \"create_app_key\", side_effect=CannotConnect):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"id\": disc_bridge.id}\n )\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"",
"def test_failure_config(self):\n resource_conf = {\n \"enable_dns_support\": \"true\"\n }\n scan_result = check.scan_resource_conf(conf=resource_conf)\n self.assertEqual(CheckResult.FAILED, scan_result)",
"async def test_import_invalid(hass):\n mocked_device = _create_mocked_device(True)\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"connection\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()",
"def test_setup_with_invalid_config(self):\n setup_component(self.hass, \"sensor\", INVALID_CONFIG_MINIMAL)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is None",
"def test_old_config_fails() -> None:\n with pytest.raises(SystemExit):\n fauxmo.main(config_path_str=\"tests/old-config-sample.json\")",
"def test_tap_config_raise_exception_if_invalid_config_yet_after_retries(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='config',\n invalid_file_contents=('', ' ', 'foo', '{\"foo\": 1')\n )",
"async def test_oppio_discovery_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n CONF_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_OPPIO},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_user_step_cannot_connect(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.async_discovered_service_info\",\n return_value=[LD2410_BLE_DISCOVERY_INFO],\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {}\n\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.LD2410BLE.initialise\",\n side_effect=BleakError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_ADDRESS: LD2410_BLE_DISCOVERY_INFO.address,\n },\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"step_id\"] == \"user\"\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}\n\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.LD2410BLE.initialise\",\n ), patch(\n \"homeassistant.components.ld2410_ble.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result2[\"flow_id\"],\n {\n CONF_ADDRESS: LD2410_BLE_DISCOVERY_INFO.address,\n },\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == FlowResultType.CREATE_ENTRY\n assert result3[\"title\"] == LD2410_BLE_DISCOVERY_INFO.name\n assert result3[\"data\"] == {\n CONF_ADDRESS: LD2410_BLE_DISCOVERY_INFO.address,\n }\n assert result3[\"result\"].unique_id == LD2410_BLE_DISCOVERY_INFO.address\n assert len(mock_setup_entry.mock_calls) == 1",
"async def test_manual_configuration_after_discovery_ResponseError(opp, aioclient_mock):\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=pydeconz.errors.ResponseError)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n assert not opp.config_entries.flow._progress[result[\"flow_id\"]].bridges",
"async def test_connection_error(hass: HomeAssistant, conn_error) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=DEMO_USER_INPUT\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}",
"def test_config_step_raises(self):\n\n run_step = self.ConfigStep.create({\n 'name': 'run_step',\n 'job_type': 'run_odoo',\n })\n\n create_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n })\n\n config = self.Config.create({'name': 'test_config'})\n\n # test that the run_odoo step has to be the last one\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 10, 'step_id': run_step.id}),\n (0, 0, {'sequence': 15, 'step_id': create_step.id}),\n ]\n })\n\n # test that the run_odoo step should be preceded by an install step\n with self.assertRaises(UserError):\n config.write({\n 'step_order_ids': [\n (0, 0, {'sequence': 15, 'step_id': run_step.id}),\n (0, 0, {'sequence': 10, 'step_id': create_step.id}),\n ]\n })",
"async def test_flow_ssdp_bad_discovery(opp, aioclient_mock):\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={ATTR_UPNP_MANUFACTURER_URL: \"other\"},\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"not_deconz_bridge\"",
"async def test_flow_fails_faulty_credentials(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN, context={\"source\": \"user\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n with patch(\n \"homeassistant.components.axis.config_flow.get_device\",\n side_effect=config_flow.AuthenticationRequired,\n ):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"1.2.3.4\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"errors\"] == {\"base\": \"faulty_credentials\"}",
"def test_invalid_adapter_opts(self):\n self.oslo_config_dict['heat'] = {\n 'interface': 'public',\n 'valid_interfaces': 'private',\n }\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): interface and \"\n \"valid_interfaces are mutually exclusive.\",\n )",
"def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')"
] | [
"0.74356383",
"0.73405707",
"0.72267395",
"0.71680635",
"0.71155447",
"0.70965385",
"0.7008849",
"0.6952568",
"0.6942624",
"0.68808776",
"0.681458",
"0.6776411",
"0.67762613",
"0.67712814",
"0.67666835",
"0.6760448",
"0.6755732",
"0.6752877",
"0.6749037",
"0.67176473",
"0.6717418",
"0.67016625",
"0.66985214",
"0.66920877",
"0.6674365",
"0.66468674",
"0.66174126",
"0.6608221",
"0.658323",
"0.65534765"
] | 0.7511206 | 0 |
Test that config flow fails on faulty credentials. | async def test_flow_fails_faulty_credentials(hass):
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch(
"homeassistant.components.axis.config_flow.get_device",
side_effect=config_flow.AuthenticationRequired,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
},
)
assert result["errors"] == {"base": "faulty_credentials"} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_wrong_credentials(hass: HomeAssistant, auth_error) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=DEMO_USER_INPUT\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\n CONF_USERNAME: \"invalid_auth\",\n CONF_PASSWORD: \"invalid_auth\",\n }",
"async def test_failed_config_flow(hass, error_on_get_data):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=MOCK_CONFIG\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"errors\"] == {\"base\": \"auth\"}",
"def test_tap_config_raise_exception_if_invalid_config_yet_after_retries(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='config',\n invalid_file_contents=('', ' ', 'foo', '{\"foo\": 1')\n )",
"def test_broken_config(broken_config):\n with pytest.raises(RuntimeError, match=\"Error reading config.yml\"):\n abcconfig.get_config(broken_config)",
"async def test_error_wrong_password_ssh(hass: HomeAssistant, config, error) -> None:\n config_data = CONFIG_DATA.copy()\n config_data.update(config)\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER, \"show_advanced_options\": True},\n data=config_data,\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": error}",
"def test_old_config_fails() -> None:\n with pytest.raises(SystemExit):\n fauxmo.main(config_path_str=\"tests/old-config-sample.json\")",
"def test_errors():\n client = TestClient()\n client.run(\"config get whatever\", assert_error=True)\n assert \"'whatever' is not a section of conan.conf\" in client.out\n\n client.run(\"config get whatever.what\", assert_error=True)\n assert \"'whatever' is not a section of conan.conf\" in client.out\n\n client.run(\"config get storage.what\", assert_error=True)\n assert \"'what' doesn't exist in [storage]\" in client.out\n\n client.run('config set proxies=https:', assert_error=True)\n assert \"You can't set a full section, please specify a section.key=value\" in client.out\n\n client.run('config set proxies.http:Value', assert_error=True)\n assert \"Please specify 'key=value'\" in client.out",
"async def test_invalid_auth(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_jellyfin: MagicMock,\n mock_client: MagicMock,\n) -> None:\n mock_client.auth.connect_to_address.return_value = await async_load_json_fixture(\n hass,\n \"auth-connect-address.json\",\n )\n mock_client.auth.login.return_value = await async_load_json_fixture(\n hass,\n \"auth-login-failure.json\",\n )\n\n mock_config_entry.add_to_hass(hass)\n assert not await hass.config_entries.async_setup(mock_config_entry.entry_id)",
"def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')",
"def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password",
"def test_login_empty_password_error(\n config,\n):\n with pytest.raises(YesssSMS.YesssSMS.MissingLoginCredentialsError):\n _ = YesssSMS.YesssSMS(\"0000000000\", None)",
"def test_nonexisting_config(self):\n try:\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"Config.yaml\")\n with self.assertRaises(easydms.config.ErrorNoConfiguration):\n easydms.config.Config(filename)\n\n with self.assertRaises(SystemExit) as cm:\n sys.argv = [\"prog\", \"-c\", filename]\n easydms.cli.main()\n self.assertNotEqual(cm.exception.code, 0)\n\n finally:\n shutil.rmtree(tempdir)",
"async def test_cannot_connect(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_connection_error(aioclient_mock)\n\n user_input = MOCK_USER_INPUT.copy()\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=user_input,\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}",
"async def test_invalid_auth(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_connection_invalid_auth(aioclient_mock)\n\n user_input = MOCK_USER_INPUT.copy()\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=user_input,\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"invalid_auth\"}",
"def test_scenarios_that_should_raise_errors(self, kwargs, auth):\n try:\n auth.load_creds(**kwargs)\n # raises ValueError (zero length field name in format) for python 2.6\n # OSError for the rest\n except (OSError, ValueError):\n pass\n except Exception as e:\n pytest.fail(\"Unexpected exception thrown: %s\" % e)\n else:\n pytest.fail(\"OSError exception not thrown.\")",
"async def test_connection_error(hass: HomeAssistant, conn_error) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=DEMO_USER_INPUT\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}",
"async def test_reauth_failed_conn_error(hass: HomeAssistant, conn_error) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n data=DEMO_USER_INPUT,\n )\n entry.add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\n \"source\": config_entries.SOURCE_REAUTH,\n \"entry_id\": entry.entry_id,\n },\n data=DEMO_USER_INPUT,\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"reauth_confirm\"\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_PASSWORD: \"test-wrong-password\",\n },\n )\n\n assert result2[\"type\"] == \"form\"\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}",
"def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']",
"def test_bad_config_recovery(mock_empty_os_environ):\n\n def check(d):\n if d and \"wrong\" in d:\n raise KeyError(\"Invalid config\")\n return d\n\n climate = core.Climate(prefix=\"this\", settings_file_suffix=\"suffix\", parser=check)\n assert dict(climate.settings) == {}\n\n # Try to set incorrect config\n with pytest.raises(KeyError):\n climate.update({\"wrong\": 2})\n assert dict(climate.settings) == {}, \"Setting should not have been updated\"\n assert climate._updates == [], \"No external data should have been set.\"\n\n # Updating with other fields will still trigger the error\n climate.update({\"right\": 2})\n assert dict(climate.settings) == {\"right\": 2}\n assert climate._updates == [{\"right\": 2}], \"External data should have been set.\"",
"def test_tap_config_raises_exception_if_config_is_none(self):\n with self.assertRaises(commands.RunCommandException) as command_exception:\n self._assert_tap_config(config=None, properties=self.valid_json_file, state=self.valid_json_file)\n\n self.assertEqual('Invalid json file for config: None', str(command_exception.exception))",
"def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})",
"def test_validate_credentials(self):\n pass",
"def test_invalid_config() -> None:\n config = {\"statsd\": {\"host1\": \"host1\"}}\n\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(None)\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(config)",
"async def test_form_cannot_connect(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n with patch(\n \"homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate\",\n side_effect=ClientResponseError(\n None, None, status=HTTPStatus.INTERNAL_SERVER_ERROR\n ),\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_EMAIL: TEST_EMAIL,\n CONF_PASSWORD: VALID_PASSWORD,\n },\n )\n\n assert result2[\"type\"] == \"form\"\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}",
"async def test_options_flow_auth_failure(hass):\n\n entry = await setup_platform(hass)\n\n with patch(\n \"aussiebb.asyncio.AussieBB.get_services\", side_effect=AuthenticationException()\n ):\n\n result1 = await hass.config_entries.options.async_init(entry.entry_id)\n assert result1[\"type\"] == RESULT_TYPE_ABORT\n assert result1[\"reason\"] == \"invalid_auth\"",
"async def test_form_invalid_auth(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n with patch(\n \"homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate\",\n side_effect=AuthenticationException,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_EMAIL: TEST_EMAIL,\n CONF_PASSWORD: WRONG_PASSWORD,\n },\n )\n\n assert result2[\"type\"] == \"form\"\n assert result2[\"errors\"] == {\"base\": \"invalid_auth\"}",
"async def test_config_entry_no_authentication(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n aioclient_mock.post(\n \"http://example.local:80/1234/JQ?Parameter=6224,6225,6226\",\n exc=aiohttp.ClientError,\n )\n\n entry = await init_integration_without_auth(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY",
"def test_credential_boolean_parsing_failure():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": \"bogus\"}\n with pytest.raises(CredentialError):\n Credentials(init_dict)",
"async def test_reauth_failed(hass: HomeAssistant, auth_error) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n data=DEMO_USER_INPUT,\n )\n entry.add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\n \"source\": config_entries.SOURCE_REAUTH,\n \"entry_id\": entry.entry_id,\n },\n data=DEMO_USER_INPUT,\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"reauth_confirm\"\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_PASSWORD: \"test-wrong-password\",\n },\n )\n\n assert result2[\"type\"] == \"form\"\n assert result2[\"errors\"] == {\n CONF_PASSWORD: \"invalid_auth\",\n }",
"def test_execute_with_invalid_config_get_exception():\n message = FakeMessage()\n message.raw_payload = json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_SINGLE_PAGE_GZIPPED)\n with aioresponses():\n with pytest.raises(Exception) as excinfo:\n worker.execute(message, TestData.RECEPTOR_CONFIG_INVALID, queue.Queue())\n assert \"token or username and password\" in str(excinfo.value)"
] | [
"0.7291825",
"0.7233659",
"0.72253406",
"0.713346",
"0.698576",
"0.69655895",
"0.6949671",
"0.6947241",
"0.6866848",
"0.68621117",
"0.67381537",
"0.6731365",
"0.67082274",
"0.67070335",
"0.6667081",
"0.66505986",
"0.6649751",
"0.66385955",
"0.66336507",
"0.6627732",
"0.66176826",
"0.66155046",
"0.65926325",
"0.6589185",
"0.65688187",
"0.65664047",
"0.656402",
"0.6558646",
"0.65516436",
"0.65247416"
] | 0.75161356 | 0 |
Test that config flow fails on device unavailable. | async def test_flow_fails_device_unavailable(hass):
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch(
"homeassistant.components.axis.config_flow.get_device",
side_effect=config_flow.CannotConnect,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
},
)
assert result["errors"] == {"base": "device_unavailable"} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_discovery_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY\n )\n\n with patch(\n \"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n side_effect=CannotConnectError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={},\n )\n\n assert result2[\"type\"] == \"abort\"\n assert result2[\"reason\"] == \"cannot_connect\"",
"async def test_discovered_by_dhcp_connection_fails(\n hass: HomeAssistant, source, data\n) -> None:\n with patch(\n \"homeassistant.components.wiz.wizlight.getBulbConfig\",\n side_effect=WizLightTimeOutError,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"cannot_connect\"",
"async def test_setup_via_discovery_cannot_connect(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n await hass.async_block_till_done()\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n assert not result[\"errors\"]\n\n with _patch_discovery():\n result2 = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"form\"\n assert result2[\"step_id\"] == \"pick_device\"\n assert not result2[\"errors\"]\n\n with patch(\n \"homeassistant.components.wiz.wizlight.getBulbConfig\",\n side_effect=WizLightTimeOutError,\n ), _patch_discovery():\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_DEVICE: FAKE_MAC},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == \"abort\"\n assert result3[\"reason\"] == \"cannot_connect\"",
"async def test_flow_ssdp_bad_discovery(opp, aioclient_mock):\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={ATTR_UPNP_MANUFACTURER_URL: \"other\"},\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"not_deconz_bridge\"",
"def test_setup_with_invalid_config(self):\n setup_component(self.hass, \"sensor\", INVALID_CONFIG_MINIMAL)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is None",
"async def test_zeroconf_flow_ignore_non_axis_device(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={CONF_HOST: \"169.254.3.4\", \"properties\": {\"macaddress\": \"01234567890\"}},\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"not_axis_device\"",
"async def test_import_invalid(hass):\n mocked_device = _create_mocked_device(True)\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"connection\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()",
"async def test_failed_config_flow(hass, error_on_get_data):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input=MOCK_CONFIG\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"errors\"] == {\"base\": \"auth\"}",
"def test_invalid_adapter_opts(self):\n self.oslo_config_dict['heat'] = {\n 'interface': 'public',\n 'valid_interfaces': 'private',\n }\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): interface and \"\n \"valid_interfaces are mutually exclusive.\",\n )",
"def test_failure_config(self):\n resource_conf = {\n \"enable_dns_support\": \"true\"\n }\n scan_result = check.scan_resource_conf(conf=resource_conf)\n self.assertEqual(CheckResult.FAILED, scan_result)",
"async def test_flow_link_cannot_connect(hass: HomeAssistant) -> None:\n disc_bridge = get_discovered_bridge()\n with patch(\n \"homeassistant.components.hue.config_flow.discover_nupnp\",\n return_value=[disc_bridge],\n ):\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n with patch.object(config_flow, \"create_app_key\", side_effect=CannotConnect):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={\"id\": disc_bridge.id}\n )\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"cannot_connect\"",
"async def test_zero_conf_failure(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.lidarr.config_flow.LidarrClient.async_try_zeroconf\",\n side_effect=exceptions.ArrZeroConfException,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=MOCK_USER_INPUT,\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"][\"base\"] == \"zeroconf_failed\"",
"async def test_setup_via_discovery_exception_finds_nothing(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n await hass.async_block_till_done()\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n assert not result[\"errors\"]\n\n with patch(\n \"homeassistant.components.wiz.discovery.find_wizlights\",\n side_effect=OSError,\n ):\n result2 = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == FlowResultType.ABORT\n assert result2[\"reason\"] == \"no_devices_found\"",
"async def test_setup_missing_config(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: {\"platform\": DOMAIN}}\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0",
"async def test_on_connect_failed(hass: HomeAssistant, side_effect, error) -> None:\n flow_result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER, \"show_advanced_options\": True},\n )\n\n with PATCH_GET_HOST, patch(\n \"homeassistant.components.asuswrt.bridge.AsusWrtLegacy\"\n ) as asus_wrt:\n asus_wrt.return_value.connection.async_connect = AsyncMock(\n side_effect=side_effect\n )\n asus_wrt.return_value.async_get_nvram = AsyncMock(return_value={})\n asus_wrt.return_value.is_connected = False\n\n result = await hass.config_entries.flow.async_configure(\n flow_result[\"flow_id\"], user_input=CONFIG_DATA\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": error}",
"def test_broken_config(broken_config):\n with pytest.raises(RuntimeError, match=\"Error reading config.yml\"):\n abcconfig.get_config(broken_config)",
"async def test_config_not_ready(hass):\n entry = MockConfigEntry(\n domain=DOMAIN,\n title=\"Home\",\n unique_id=\"55.55-122.12\",\n data={\n \"api_key\": \"foo\",\n \"latitude\": 55.55,\n \"longitude\": 122.12,\n \"name\": \"Home\",\n },\n )\n\n with patch(\"airly._private._RequestsHandler.get\", side_effect=ConnectionError()):\n entry.add_to_hass(hass)\n await hass.config_entries.async_setup(entry.entry_id)\n assert entry.state == ENTRY_STATE_SETUP_RETRY",
"async def test_manual_configuration_after_discovery_ResponseError(opp, aioclient_mock):\n aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=pydeconz.errors.ResponseError)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n assert not opp.config_entries.flow._progress[result[\"flow_id\"]].bridges",
"async def test_cannot_connect(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n mock_connection_error(aioclient_mock)\n\n user_input = MOCK_USER_INPUT.copy()\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=user_input,\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}",
"def test_verify_state_of_a_device_when_disconnected_from_the_device():",
"async def test_oppio_discovery_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n CONF_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_OPPIO},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_user_step_no_devices_found(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.async_discovered_service_info\",\n return_value=[NOT_LD2410_BLE_DISCOVERY_INFO],\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"no_devices_found\"",
"async def test_user_step_no_new_devices_found(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n CONF_ADDRESS: LD2410_BLE_DISCOVERY_INFO.address,\n },\n unique_id=LD2410_BLE_DISCOVERY_INFO.address,\n )\n entry.add_to_hass(hass)\n with patch(\n \"homeassistant.components.ld2410_ble.config_flow.async_discovered_service_info\",\n return_value=[LD2410_BLE_DISCOVERY_INFO],\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"no_devices_found\"",
"async def test_options_flow_network_failure(hass):\n\n entry = await setup_platform(hass)\n\n with patch(\n \"aussiebb.asyncio.AussieBB.get_services\", side_effect=ClientConnectionError()\n ):\n\n result1 = await hass.config_entries.options.async_init(entry.entry_id)\n assert result1[\"type\"] == RESULT_TYPE_ABORT\n assert result1[\"reason\"] == \"cannot_connect\"",
"async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")",
"def test_no_adapter_opts(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): no such option\",\n )",
"async def test_user_invalid(hass):\n mocked_device = _create_mocked_device(True)\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"connection\"}\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()",
"async def test_setup_invalid_config(\n recorder_mock: Recorder, hass: HomeAssistant\n) -> None:\n with patch(\n \"homeassistant.components.sql.config_flow.sqlalchemy.create_engine\",\n ):\n assert not await async_setup_component(hass, DOMAIN, YAML_CONFIG_INVALID)\n await hass.async_block_till_done()",
"async def test_ssdp_discovery_dont_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self._fail_network_list = True\n self.configuration.hgst_net = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self._fail_network_list = False"
] | [
"0.75799346",
"0.72134066",
"0.7075894",
"0.7028792",
"0.69954795",
"0.69336754",
"0.6879549",
"0.6869652",
"0.68623453",
"0.67691606",
"0.6765397",
"0.6748087",
"0.6701084",
"0.6691128",
"0.6680153",
"0.666488",
"0.6664693",
"0.66570556",
"0.66200376",
"0.66153383",
"0.6610612",
"0.65833575",
"0.65753543",
"0.65708786",
"0.65676826",
"0.65674746",
"0.65432084",
"0.6541227",
"0.6540991",
"0.6521396"
] | 0.7890586 | 0 |
Test that zeroconf doesn't set up already configured devices. | async def test_zeroconf_flow_already_configured(hass):
device = await setup_axis_integration(hass)
assert device.host == "1.2.3.4"
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
"hostname": "name",
"properties": {"macaddress": MAC},
},
context={"source": "zeroconf"},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert device.host == "1.2.3.4" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_zeroconf_flow_ignore_non_axis_device(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={CONF_HOST: \"169.254.3.4\", \"properties\": {\"macaddress\": \"01234567890\"}},\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"not_axis_device\"",
"async def test_setup_missing_config(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: {\"platform\": DOMAIN}}\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0",
"async def test_zeroconf_flow_updated_configuration(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n assert device.config_entry.data == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_USERNAME: \"username\",\n CONF_PASSWORD: \"password\",\n CONF_MAC: MAC,\n CONF_MODEL: MODEL,\n CONF_NAME: NAME,\n }\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"2.3.4.5\",\n CONF_PORT: 8080,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.config_entry.data == {\n CONF_HOST: \"2.3.4.5\",\n CONF_PORT: 8080,\n CONF_USERNAME: \"username\",\n CONF_PASSWORD: \"password\",\n CONF_MAC: MAC,\n CONF_MODEL: MODEL,\n CONF_NAME: NAME,\n }",
"async def test_oppio_discovery_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n CONF_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_OPPIO},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_bridge_zeroconf_already_exists(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(\n aioclient_mock, [(\"0.0.0.0\", \"ecb5faabcabc\"), (\"192.168.1.217\", \"ecb5faabcabc\")]\n )\n entry = MockConfigEntry(\n domain=\"hue\",\n source=config_entries.SOURCE_HOMEKIT,\n data={\"host\": \"0.0.0.0\"},\n unique_id=\"ecb5faabcabc\",\n )\n entry.add_to_hass(hass)\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[\"host\"] == \"192.168.1.217\"",
"def test_test_empty_config():\n\n testutils.deploy_config_raw(\"\")\n\n assert prop.test_prop('info', 'sdk') == 0\n\n testutils.undeploy()\n\n return 0",
"async def test_ssdp_discovery_dont_update_existing_oppio_configuration(\n opp, aioclient_mock\n):\n config_entry = await setup_deconz_integration(\n opp, aioclient_mock, source=SOURCE_OPPIO\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"async def test_ssdp_discovery_dont_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"def testInitEmpty():\n conf = naiveConf.NaiveConf()\n with pytest.raises(KeyError):\n print conf.x\n conf.x = 5\n assert conf.x == 5",
"async def test_discovery_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY\n )\n\n with patch(\n \"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n side_effect=CannotConnectError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={},\n )\n\n assert result2[\"type\"] == \"abort\"\n assert result2[\"reason\"] == \"cannot_connect\"",
"def test_no_adapter_opts(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): no such option\",\n )",
"async def test_zero_conf_failure(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.lidarr.config_flow.LidarrClient.async_try_zeroconf\",\n side_effect=exceptions.ArrZeroConfException,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=MOCK_USER_INPUT,\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"][\"base\"] == \"zeroconf_failed\"",
"def test_poweredUp(self):\n self.assertIdentical(\n IOneTimePadGenerator(self.store),\n self.store.findUnique(AMPConfiguration))",
"async def test_zeroconf_setup(hass):\n result = await hass.config_entries.flow.async_init(\n \"cast\", context={\"source\": \"zeroconf\"}\n )\n assert result[\"type\"] == \"form\"\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n\n users = await hass.auth.async_get_users()\n assert len(users) == 1\n assert result[\"type\"] == \"create_entry\"\n assert result[\"result\"].data == {\n \"known_hosts\": None,\n \"user_id\": users[0].id, # Home Assistant cast user\n }",
"async def test_setup_no_config(hass):\n assert await async_setup_component(hass, AXIS_DOMAIN, {})\n assert AXIS_DOMAIN not in hass.data",
"async def test_creating_entry_has_no_devices(opp):\n with patch(\n \"openpeerpower.components.gree.climate.async_setup_entry\", return_value=True\n ) as setup, patch(\n \"openpeerpower.components.gree.bridge.Discovery\", return_value=FakeDiscovery()\n ) as discovery, patch(\n \"openpeerpower.components.gree.config_flow.Discovery\",\n return_value=FakeDiscovery(),\n ) as discovery2:\n discovery.return_value.mock_devices = []\n discovery2.return_value.mock_devices = []\n\n result = await opp.config_entries.flow.async_init(\n GREE_DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n # Confirmation form\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n\n result = await opp.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_ABORT\n\n await opp.async_block_till_done()\n\n assert len(setup.mock_calls) == 0",
"async def test_not_configuring_sonos_not_creates_entry(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.sonos.async_setup_entry\",\n return_value=True,\n ) as mock_setup:\n await async_setup_component(hass, sonos.DOMAIN, {})\n await hass.async_block_till_done()\n\n assert len(mock_setup.mock_calls) == 0",
"async def test_device_not_accessible(hass):\n with patch.object(axis.device, \"get_device\", side_effect=axis.errors.CannotConnect):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}",
"def test_get_device_detects_none(hass, mock_openzwave):\n node = MockNode()\n value = MockValue(data=0, node=node)\n values = MockEntityValues(primary=value, node=node)\n\n device = cover.get_device(hass=hass, node=node, values=values, node_config={})\n assert device is None",
"def test_non_jaqcd_device(name_mock):\n _bad_aws_device(wires=2)",
"async def test_zeroconf_flow(hass):\n with patch.object(axis.device, \"get_device\", return_value=Mock()):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n with patch(\"axis.AxisDevice\") as mock_device:\n\n setup_mock_axis_device(mock_device)\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"1.2.3.4\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == f\"prodnbr - {MAC}\"\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n CONF_MAC: MAC,\n CONF_MODEL: \"prodnbr\",\n CONF_NAME: \"prodnbr 0\",\n }\n\n assert result[\"data\"][CONF_NAME] == \"prodnbr 0\"",
"def test_create_device1(self):\n pass",
"def test_no_such_conf_section(self):\n del self.oslo_config_dict['heat']\n self.assert_service_disabled(\n 'orchestration',\n \"No section for project 'heat' (service type 'orchestration') was \"\n \"present in the config.\",\n )",
"def test_deviceX_1():\n assert 0",
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass",
"async def test_no_binary_sensors(hass, aioclient_mock):\n await setup_deconz_integration(hass, aioclient_mock)\n assert len(hass.states.async_all()) == 0",
"def setUp(self):\n self.setup_start_servers = False\n super(ZeroConfigTest, self).setUp()",
"async def test_setup_fails_non_root(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n assert await async_setup_component(\n hass,\n DOMAIN,\n {},\n )\n await hass.async_block_till_done()\n\n with patch(\"os.geteuid\", return_value=10), patch(\n \"homeassistant.components.dhcp._verify_l2socket_setup\",\n side_effect=Scapy_Exception,\n ), patch(\"homeassistant.components.dhcp.DiscoverHosts.async_discover\"):\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)\n await hass.async_block_till_done()\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)\n await hass.async_block_till_done()\n\n assert \"Cannot watch for dhcp packets without root or CAP_NET_RAW\" in caplog.text",
"async def test_form_zeroconf(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.5\",\n addresses=[\"192.168.1.5\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=1234,\n properties={},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {}\n\n mock_pynut = _get_mock_pynutclient(\n list_vars={\"battery.voltage\": \"voltage\", \"ups.status\": \"OL\"}, list_ups=[\"ups1\"]\n )\n\n with patch(\n \"homeassistant.components.nut.PyNUTClient\",\n return_value=mock_pynut,\n ), patch(\n \"homeassistant.components.nut.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_USERNAME: \"test-username\", CONF_PASSWORD: \"test-password\"},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert result2[\"title\"] == \"192.168.1.5:1234\"\n assert result2[\"data\"] == {\n CONF_HOST: \"192.168.1.5\",\n CONF_PASSWORD: \"test-password\",\n CONF_PORT: 1234,\n CONF_USERNAME: \"test-username\",\n }\n assert result2[\"result\"].unique_id is None\n assert len(mock_setup_entry.mock_calls) == 1"
] | [
"0.73716295",
"0.67429394",
"0.6723391",
"0.6692479",
"0.66662824",
"0.6579568",
"0.6536504",
"0.6461837",
"0.6448547",
"0.6438222",
"0.640726",
"0.63816595",
"0.6362343",
"0.63232577",
"0.6290591",
"0.62892056",
"0.62886274",
"0.6288221",
"0.62845814",
"0.6267767",
"0.62584347",
"0.62402946",
"0.6236881",
"0.62353",
"0.62254876",
"0.62254876",
"0.6218688",
"0.6217824",
"0.6214028",
"0.62114424"
] | 0.70707625 | 1 |
Test that zeroconf updates the configuration with new parameters. | async def test_zeroconf_flow_updated_configuration(hass):
device = await setup_axis_integration(hass)
assert device.host == "1.2.3.4"
assert device.config_entry.data == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_MAC: MAC,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
}
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={
CONF_HOST: "2.3.4.5",
CONF_PORT: 8080,
"hostname": "name",
"properties": {"macaddress": MAC},
},
context={"source": "zeroconf"},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert device.config_entry.data == {
CONF_HOST: "2.3.4.5",
CONF_PORT: 8080,
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_MAC: MAC,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"",
"def test_update_reg_ex_config(self):\n pass",
"def testGetConf():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n\n copyConf = conf.getConf()\n assert conf == copyConf\n\n copyConf.x = None\n assert copyConf.x is None",
"def test_init_overwrite():\n client = TestClient()\n client.run('config init')\n dummy_content = 'DUMMY CONTENT. SHOULD BE REMOVED!'\n save_append(client.cache.conan_conf_path, dummy_content)\n save_append(client.cache.remotes_path, dummy_content)\n save_append(client.cache.settings_path, dummy_content)\n save_append(client.cache.default_profile_path, dummy_content)\n\n client.run('config init --force')\n assert dummy_content not in load(client.cache.conan_conf_path)\n assert dummy_content not in load(client.cache.conan_conf_path)\n assert dummy_content not in load(client.cache.settings_path)\n assert dummy_content not in load(client.cache.remotes_path)\n assert dummy_content not in load(client.cache.default_profile_path)",
"def test_config_overwrite(self):\n inc = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", False, True)\n ini = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", False, True)\n\n self.assertEquals(inc, ini)",
"def test_update_hyperflex_ucsm_config_policy(self):\n pass",
"def test_update_system(self):\n pass",
"def test_update_software_configuration_for_system_module(self):\n pass",
"async def test_form_zeroconf(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.5\",\n addresses=[\"192.168.1.5\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=1234,\n properties={},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {}\n\n mock_pynut = _get_mock_pynutclient(\n list_vars={\"battery.voltage\": \"voltage\", \"ups.status\": \"OL\"}, list_ups=[\"ups1\"]\n )\n\n with patch(\n \"homeassistant.components.nut.PyNUTClient\",\n return_value=mock_pynut,\n ), patch(\n \"homeassistant.components.nut.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_USERNAME: \"test-username\", CONF_PASSWORD: \"test-password\"},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert result2[\"title\"] == \"192.168.1.5:1234\"\n assert result2[\"data\"] == {\n CONF_HOST: \"192.168.1.5\",\n CONF_PASSWORD: \"test-password\",\n CONF_PORT: 1234,\n CONF_USERNAME: \"test-username\",\n }\n assert result2[\"result\"].unique_id is None\n assert len(mock_setup_entry.mock_calls) == 1",
"def test_update_configuration(self):\n\n ts_name = 'test-update-1'\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertTrue(configuration.default)\n self.assertEquals(configuration.retentions, granularities.RETENTIONS_GRANULARITY)\n self.assertEquals(configuration.timezone, granularities.DEFAULT_TIMEZONE)\n self.assertEquals(configuration.aggregation_method,\n aggregations.DEFAULT_AGGREGATION)\n\n custom_tz = 'America/New_York'\n custom_agg = aggregations.AGGREGATION_LAST\n custom_ret = granularities.RETENTIONS_GRANULARITY\n custom_ret[granularities.SECOND] = 3 * 365 * 12 * 30 * 24 * 60 * 60\n timeserie_configuration.update_timeserie_configuration(\n self.get_local_dynamo_cli(), ts_name, custom_tz, custom_agg, custom_ret)\n\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertFalse(configuration.default)\n self.assertEquals(configuration.retentions, custom_ret)\n self.assertEquals(configuration.timezone, custom_tz)\n self.assertEquals(configuration.aggregation_method, custom_agg)",
"def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig",
"def conf_update(self):\n pass",
"async def test_manual_configuration_update_configuration(hass):\n device = await setup_axis_integration(hass)\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN, context={\"source\": \"user\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n mock_device = Mock()\n mock_device.vapix.params.system_serialnumber = MAC\n\n with patch(\n \"homeassistant.components.axis.config_flow.get_device\",\n return_value=mock_device,\n ):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"2.3.4.5\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"2.3.4.5\"",
"def test_test_empty_config():\n\n testutils.deploy_config_raw(\"\")\n\n assert prop.test_prop('info', 'sdk') == 0\n\n testutils.undeploy()\n\n return 0",
"def testInitFromNaiveConf():\n conf = naiveConf.NaiveConf(exampleConfFname)\n dConf = naiveConf.NaiveConf(conf)\n assert conf == dConf\n conf['x'] = None\n assert type(dConf.x) == datetime.date",
"def test_update_hyperflex_sys_config_policy(self):\n pass",
"def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)",
"def testInitEmpty():\n conf = naiveConf.NaiveConf()\n with pytest.raises(KeyError):\n print conf.x\n conf.x = 5\n assert conf.x == 5",
"def test_config_overwrites():\n basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", '..'))\n temppath = '/tmp/'\n\n conf = core.Config(datapath=temppath)\n\n assert conf.basepath.lower() == basepath.lower()\n assert conf.datapath.lower() == temppath.lower()",
"def test_conf(self):\n self.TESTED_UNIT = 'ceph-fs/0'\n\n def _get_conf():\n \"\"\"get/parse ceph daemon response into dict for specified configs.\n\n :returns dict: conf options selected from configs\n :rtype: dict\n \"\"\"\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder\n\n @retry(wait=wait_exponential(multiplier=1, min=4, max=10),\n stop=stop_after_attempt(10))\n def _change_conf_check(mds_config):\n \"\"\"Change configs, then assert to ensure config was set.\n\n Doesn't return a value.\n \"\"\"\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))\n\n # ensure defaults are set\n _get_conf()\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)\n\n # change defaults\n mds_config = {'mds-cache-memory-limit': '8589934592',\n 'mds-cache-reservation': '0.10',\n 'mds-health-cache-threshold': '2'}\n _change_conf_check(mds_config)\n\n # Restore config to keep tests idempotent\n mds_config = {'mds-cache-memory-limit': '4294967296',\n 'mds-cache-reservation': '0.05',\n 'mds-health-cache-threshold': '1.5'}\n _change_conf_check(mds_config)",
"async def test_manual_configuration_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n aioclient_mock.get(\n pydeconz.utils.URL_DISCOVER,\n json=[],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN, context={\"source\": SOURCE_USER}\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"manual_input\"\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={CONF_HOST: \"2.3.4.5\", CONF_PORT: 80},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n aioclient_mock.post(\n \"http://2.3.4.5:80/api\",\n json=[{\"success\": {\"username\": API_KEY}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://2.3.4.5:80/api/{API_KEY}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"2.3.4.5\"",
"def test_config_device_reset(get_config, monkeypatch):\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', {'test': 'conf'})\n cfg = get_config(DeviceConfig, base_config)\n cfg.save()\n cfg.write_default()\n new_conf = cfg.load()\n\n assert cfg.data == cfg.minimal_essential_conf, 'failed to apply default config'\n assert new_conf == cfg.minimal_essential_conf, 'failed to load default config'",
"def test_config(setup_debug, tmp_path):\n os.chdir(tmp_path)\n \n ssh_tunnels = SSHTunnels(users=[\"bbeeson\"])\n c0 = (TEST_DATA / \"config\").read_text()\n # run and add 'queen'\n c1 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n # run and do nothing\n c2 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n assert len(c1) > len(c0)\n assert len(c1) == len(c2)\n \n # c_ref = (TEST_DATA / \"test_ssh_config2\").read_text()\n # should have just added queen\n #assert c2 == c_ref",
"def testDefault():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n oldX = conf.x\n conf.default('x', None)\n conf.default('Z', 5)\n\n assert conf.x == oldX\n assert conf.Z == 5",
"def test_update_hyperflex_vcenter_config_policy(self):\n pass",
"def test_update_hyperflex_node_config_policy(self):\n pass",
"def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")",
"async def test_zeroconf_flow(hass):\n with patch.object(axis.device, \"get_device\", return_value=Mock()):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n with patch(\"axis.AxisDevice\") as mock_device:\n\n setup_mock_axis_device(mock_device)\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"1.2.3.4\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"type\"] == \"create_entry\"\n assert result[\"title\"] == f\"prodnbr - {MAC}\"\n assert result[\"data\"] == {\n CONF_HOST: \"1.2.3.4\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n CONF_MAC: MAC,\n CONF_MODEL: \"prodnbr\",\n CONF_NAME: \"prodnbr 0\",\n }\n\n assert result[\"data\"][CONF_NAME] == \"prodnbr 0\"",
"def test_update_node_driveconfig(self):\n pass"
] | [
"0.74773693",
"0.68819016",
"0.68549144",
"0.67588663",
"0.6580001",
"0.6533839",
"0.65206283",
"0.6513456",
"0.6498106",
"0.64885044",
"0.6440535",
"0.63835275",
"0.6379493",
"0.6356896",
"0.63532597",
"0.6345493",
"0.6337596",
"0.63324106",
"0.6322648",
"0.6310894",
"0.62890005",
"0.62718725",
"0.6267374",
"0.62443435",
"0.6232257",
"0.62283397",
"0.6220784",
"0.6216884",
"0.621515",
"0.61930925"
] | 0.75051427 | 0 |
Test that zeroconf doesn't set up devices with link local addresses. | async def test_zeroconf_flow_ignore_non_axis_device(hass):
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={CONF_HOST: "169.254.3.4", "properties": {"macaddress": "01234567890"}},
context={"source": "zeroconf"},
)
assert result["type"] == "abort"
assert result["reason"] == "not_axis_device" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_zeroconf_flow_ignore_link_local_address(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={CONF_HOST: \"169.254.3.4\", \"properties\": {\"macaddress\": MAC}},\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"link_local_address\"",
"async def test_device_tracker_ignore_self_assigned_ips_before_start(\n hass: HomeAssistant,\n) -> None:\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"169.254.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"fd00::eeb5:faff:fe84:b17d\",\n addresses=[\"fd00::eeb5:faff:fe84:b17d\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"invalid_host\"",
"async def test_bridge_zeroconf_already_exists(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(\n aioclient_mock, [(\"0.0.0.0\", \"ecb5faabcabc\"), (\"192.168.1.217\", \"ecb5faabcabc\")]\n )\n entry = MockConfigEntry(\n domain=\"hue\",\n source=config_entries.SOURCE_HOMEKIT,\n data={\"host\": \"0.0.0.0\"},\n unique_id=\"ecb5faabcabc\",\n )\n entry.add_to_hass(hass)\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[\"host\"] == \"192.168.1.217\"",
"async def test_zeroconf_flow_already_configured(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"1.2.3.4\"",
"async def test_ssdp_discovery_dont_update_existing_oppio_configuration(\n opp, aioclient_mock\n):\n config_entry = await setup_deconz_integration(\n opp, aioclient_mock, source=SOURCE_OPPIO\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"async def test_device_tracker_hostname_and_macaddress_after_start_not_router(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: \"something_else\",\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"def test_connect(self, gateway):\n assert not gateway._devs",
"async def test_ssdp_discovery_dont_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"async def test_discovery_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY\n )\n\n with patch(\n \"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n side_effect=CannotConnectError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={},\n )\n\n assert result2[\"type\"] == \"abort\"\n assert result2[\"reason\"] == \"cannot_connect\"",
"def test_verify_list_of_devices_in_my_network():",
"async def test_device_tracker_hostname_and_macaddress_after_start_not_home(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_NOT_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"def test_noop(self):\n self.assertFalse(helpers.getBcastAddrforIPv4())\n self.assertIsNone(helpers.no_op())",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"async def test_zeroconf_flow_updated_configuration(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n assert device.config_entry.data == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_USERNAME: \"username\",\n CONF_PASSWORD: \"password\",\n CONF_MAC: MAC,\n CONF_MODEL: MODEL,\n CONF_NAME: NAME,\n }\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"2.3.4.5\",\n CONF_PORT: 8080,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.config_entry.data == {\n CONF_HOST: \"2.3.4.5\",\n CONF_PORT: 8080,\n CONF_USERNAME: \"username\",\n CONF_PASSWORD: \"password\",\n CONF_MAC: MAC,\n CONF_MODEL: MODEL,\n CONF_NAME: NAME,\n }",
"async def test_zeroconf_setup(hass):\n result = await hass.config_entries.flow.async_init(\n \"cast\", context={\"source\": \"zeroconf\"}\n )\n assert result[\"type\"] == \"form\"\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n\n users = await hass.auth.async_get_users()\n assert len(users) == 1\n assert result[\"type\"] == \"create_entry\"\n assert result[\"result\"].data == {\n \"known_hosts\": None,\n \"user_id\": users[0].id, # Home Assistant cast user\n }",
"async def test_device_tracker_hostname_and_macaddress_after_start_hostname_missing(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"async def test_oppio_discovery_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n CONF_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_OPPIO},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_device_tracker_registered_hostname_none(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerRegisteredWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n async_dispatcher_send(\n hass,\n CONNECTED_DEVICE_REGISTERED,\n {\"ip\": \"192.168.210.56\", \"mac\": \"b8b7f16db533\", \"host_name\": None},\n )\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()",
"def test_verify_connection_to_a_device():",
"async def test_bridge_homekit_already_configured(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"aabbccddeeff\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_setup_fails_non_root(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n assert await async_setup_component(\n hass,\n DOMAIN,\n {},\n )\n await hass.async_block_till_done()\n\n with patch(\"os.geteuid\", return_value=10), patch(\n \"homeassistant.components.dhcp._verify_l2socket_setup\",\n side_effect=Scapy_Exception,\n ), patch(\"homeassistant.components.dhcp.DiscoverHosts.async_discover\"):\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)\n await hass.async_block_till_done()\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)\n await hass.async_block_till_done()\n\n assert \"Cannot watch for dhcp packets without root or CAP_NET_RAW\" in caplog.text",
"def test_non_jaqcd_device(name_mock):\n _bad_aws_device(wires=2)",
"def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)",
"def test_init_no_shortlist(self):\n # Create an empty routing table.\n self.node.routing_table = RoutingTable(self.node.network_id)\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n self.assertEqual(True, lookup.done())\n self.assertRaises(RoutingTableEmpty, lookup.result)",
"async def test_device_tracker_hostname_and_macaddress_exists_before_start(\n hass: HomeAssistant,\n) -> None:\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"Connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )",
"def test_get_devices(self):\n pass",
"def test_get_devices(self):\n pass"
] | [
"0.78429693",
"0.6549009",
"0.6358573",
"0.6343826",
"0.6260517",
"0.61632603",
"0.6110315",
"0.60799307",
"0.60784996",
"0.6070055",
"0.60690004",
"0.60521615",
"0.60289437",
"0.6003446",
"0.6001084",
"0.5969774",
"0.59526634",
"0.5932165",
"0.5916638",
"0.5881482",
"0.58779",
"0.5876744",
"0.58453876",
"0.5796049",
"0.57933396",
"0.5779165",
"0.5778426",
"0.5767801",
"0.57537776",
"0.57537776"
] | 0.67138004 | 1 |
Test that zeroconf doesn't set up devices with link local addresses. | async def test_zeroconf_flow_ignore_link_local_address(hass):
result = await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={CONF_HOST: "169.254.3.4", "properties": {"macaddress": MAC}},
context={"source": "zeroconf"},
)
assert result["type"] == "abort"
assert result["reason"] == "link_local_address" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_zeroconf_flow_ignore_non_axis_device(hass):\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={CONF_HOST: \"169.254.3.4\", \"properties\": {\"macaddress\": \"01234567890\"}},\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"not_axis_device\"",
"async def test_device_tracker_ignore_self_assigned_ips_before_start(\n hass: HomeAssistant,\n) -> None:\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"169.254.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"async def test_bridge_zeroconf_ipv6(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"fd00::eeb5:faff:fe84:b17d\",\n addresses=[\"fd00::eeb5:faff:fe84:b17d\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"invalid_host\"",
"async def test_bridge_zeroconf_already_exists(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(\n aioclient_mock, [(\"0.0.0.0\", \"ecb5faabcabc\"), (\"192.168.1.217\", \"ecb5faabcabc\")]\n )\n entry = MockConfigEntry(\n domain=\"hue\",\n source=config_entries.SOURCE_HOMEKIT,\n data={\"host\": \"0.0.0.0\"},\n unique_id=\"ecb5faabcabc\",\n )\n entry.add_to_hass(hass)\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5faabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5faabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[\"host\"] == \"192.168.1.217\"",
"async def test_zeroconf_flow_already_configured(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"1.2.3.4\"",
"async def test_ssdp_discovery_dont_update_existing_oppio_configuration(\n opp, aioclient_mock\n):\n config_entry = await setup_deconz_integration(\n opp, aioclient_mock, source=SOURCE_OPPIO\n )\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"async def test_bridge_zeroconf(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"192.168.1.217\", \"ecb5fafffeabcabc\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.217\",\n addresses=[\"192.168.1.217\"],\n port=443,\n hostname=\"Philips-hue.local\",\n type=\"_hue._tcp.local.\",\n name=\"Philips Hue - ABCABC._hue._tcp.local.\",\n properties={\n \"_raw\": {\"bridgeid\": b\"ecb5fafffeabcabc\", \"modelid\": b\"BSB002\"},\n \"bridgeid\": \"ecb5fafffeabcabc\",\n \"modelid\": \"BSB002\",\n },\n ),\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"",
"async def test_device_tracker_hostname_and_macaddress_after_start_not_router(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: \"something_else\",\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"def test_connect(self, gateway):\n assert not gateway._devs",
"async def test_ssdp_discovery_dont_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n ATTR_SSDP_LOCATION: \"http://1.2.3.4:80/\",\n ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,\n ATTR_UPNP_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_SSDP},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_HOST] == \"1.2.3.4\"",
"def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_ip = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)",
"async def test_discovery_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY\n )\n\n with patch(\n \"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n side_effect=CannotConnectError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={},\n )\n\n assert result2[\"type\"] == \"abort\"\n assert result2[\"reason\"] == \"cannot_connect\"",
"def test_verify_list_of_devices_in_my_network():",
"async def test_device_tracker_hostname_and_macaddress_after_start_not_home(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_NOT_HOME,\n {\n ATTR_HOST_NAME: \"connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"def test_noop(self):\n self.assertFalse(helpers.getBcastAddrforIPv4())\n self.assertIsNone(helpers.no_op())",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"async def test_zeroconf_flow_updated_configuration(hass):\n device = await setup_axis_integration(hass)\n assert device.host == \"1.2.3.4\"\n assert device.config_entry.data == {\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_USERNAME: \"username\",\n CONF_PASSWORD: \"password\",\n CONF_MAC: MAC,\n CONF_MODEL: MODEL,\n CONF_NAME: NAME,\n }\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n CONF_HOST: \"2.3.4.5\",\n CONF_PORT: 8080,\n \"hostname\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": \"zeroconf\"},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.config_entry.data == {\n CONF_HOST: \"2.3.4.5\",\n CONF_PORT: 8080,\n CONF_USERNAME: \"username\",\n CONF_PASSWORD: \"password\",\n CONF_MAC: MAC,\n CONF_MODEL: MODEL,\n CONF_NAME: NAME,\n }",
"async def test_zeroconf_setup(hass):\n result = await hass.config_entries.flow.async_init(\n \"cast\", context={\"source\": \"zeroconf\"}\n )\n assert result[\"type\"] == \"form\"\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n\n users = await hass.auth.async_get_users()\n assert len(users) == 1\n assert result[\"type\"] == \"create_entry\"\n assert result[\"result\"].data == {\n \"known_hosts\": None,\n \"user_id\": users[0].id, # Home Assistant cast user\n }",
"async def test_device_tracker_hostname_and_macaddress_after_start_hostname_missing(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0",
"async def test_oppio_discovery_dont_update_configuration(opp, aioclient_mock):\n await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data={\n CONF_HOST: \"1.2.3.4\",\n CONF_PORT: 80,\n CONF_API_KEY: API_KEY,\n CONF_SERIAL: BRIDGEID,\n },\n context={\"source\": SOURCE_OPPIO},\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"",
"async def test_device_tracker_registered_hostname_none(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerRegisteredWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n async_dispatcher_send(\n hass,\n CONNECTED_DEVICE_REGISTERED,\n {\"ip\": \"192.168.210.56\", \"mac\": \"b8b7f16db533\", \"host_name\": None},\n )\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()",
"def test_verify_connection_to_a_device():",
"async def test_bridge_homekit_already_configured(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"aabbccddeeff\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"",
"async def test_setup_fails_non_root(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n assert await async_setup_component(\n hass,\n DOMAIN,\n {},\n )\n await hass.async_block_till_done()\n\n with patch(\"os.geteuid\", return_value=10), patch(\n \"homeassistant.components.dhcp._verify_l2socket_setup\",\n side_effect=Scapy_Exception,\n ), patch(\"homeassistant.components.dhcp.DiscoverHosts.async_discover\"):\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)\n await hass.async_block_till_done()\n hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)\n await hass.async_block_till_done()\n\n assert \"Cannot watch for dhcp packets without root or CAP_NET_RAW\" in caplog.text",
"def test_non_jaqcd_device(name_mock):\n _bad_aws_device(wires=2)",
"def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)",
"def test_init_no_shortlist(self):\n # Create an empty routing table.\n self.node.routing_table = RoutingTable(self.node.network_id)\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n self.assertEqual(True, lookup.done())\n self.assertRaises(RoutingTableEmpty, lookup.result)",
"async def test_device_tracker_hostname_and_macaddress_exists_before_start(\n hass: HomeAssistant,\n) -> None:\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"Connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )",
"def test_empty(self):\n self.assertFalse(isIPv6Address(\"\"))",
"def test_get_devices(self):\n pass"
] | [
"0.67150563",
"0.65487766",
"0.6360014",
"0.63443345",
"0.62614197",
"0.61634874",
"0.6111507",
"0.6079837",
"0.6078594",
"0.6070826",
"0.60689676",
"0.6052472",
"0.60288805",
"0.6003452",
"0.6002379",
"0.59703135",
"0.5953911",
"0.5933855",
"0.5916576",
"0.58817863",
"0.58778596",
"0.5876933",
"0.5845312",
"0.57963",
"0.57931226",
"0.57794386",
"0.5778493",
"0.5767817",
"0.5754636",
"0.5752961"
] | 0.7843846 | 0 |
Creates a request to get the trading balance. Requires the "Orderbook, History, Trading balance" API key Access Right. | def create_get_trading_balance_request(self) -> Request:
method = "GET"
path = "/trading/balance"
url = URL(self._create_url(path))
params: Params = {}
url = url.with_query(params)
headers = self._auth.sign(
method=method, url_path=url.path, url_query=url.query_string)
return Request(method=method, url=url, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_balance(self, custom_id=None, **params):\n self.conn.send('getTradingBalance', custom_id=custom_id, **params)",
"def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"async def handle_get_trading_balance_response(self, response: RequesterResponse\n ) -> HitbtcTradingCurrencyBalances:",
"async def fetch_balance(self, params={}):\n # self api call does not return the 'used' amount - use the v1 version instead(which also returns zero balances)\n # there is a difference between self and the v1 api, namely trading wallet is called margin in v2\n await self.load_markets()\n accountsByType = self.safe_value(self.options, 'v2AccountsByType', {})\n requestedType = self.safe_string(params, 'type', 'exchange')\n accountType = self.safe_string(accountsByType, requestedType, requestedType)\n if accountType is None:\n keys = list(accountsByType.keys())\n raise ExchangeError(self.id + ' fetchBalance() type parameter must be one of ' + ', '.join(keys))\n isDerivative = requestedType == 'derivatives'\n query = self.omit(params, 'type')\n response = await self.privatePostAuthRWallets(query)\n result = {'info': response}\n for i in range(0, len(response)):\n balance = response[i]\n type = self.safe_string(balance, 0)\n currencyId = self.safe_string_lower(balance, 1, '')\n start = len(currencyId) - 2\n isDerivativeCode = currencyId[start:] == 'f0'\n # self will only filter the derivative codes if the requestedType is 'derivatives'\n derivativeCondition = (not isDerivative or isDerivativeCode)\n if (accountType == type) and derivativeCondition:\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['total'] = self.safe_string(balance, 2)\n account['free'] = self.safe_string(balance, 4)\n result[code] = account\n return self.safe_balance(result)",
"async def fetch_balance(self, params={}):\n await self.load_markets()\n request = {\n 'currency': 'all',\n }\n response = await self.privateGetUserMargin(self.extend(request, params))\n #\n # [\n # {\n # \"account\":1455728,\n # \"currency\":\"XBt\",\n # \"riskLimit\":1000000000000,\n # \"prevState\":\"\",\n # \"state\":\"\",\n # \"action\":\"\",\n # \"amount\":263542,\n # \"pendingCredit\":0,\n # \"pendingDebit\":0,\n # \"confirmedDebit\":0,\n # \"prevRealisedPnl\":0,\n # \"prevUnrealisedPnl\":0,\n # \"grossComm\":0,\n # \"grossOpenCost\":0,\n # \"grossOpenPremium\":0,\n # \"grossExecCost\":0,\n # \"grossMarkValue\":0,\n # \"riskValue\":0,\n # \"taxableMargin\":0,\n # \"initMargin\":0,\n # \"maintMargin\":0,\n # \"sessionMargin\":0,\n # \"targetExcessMargin\":0,\n # \"varMargin\":0,\n # \"realisedPnl\":0,\n # \"unrealisedPnl\":0,\n # \"indicativeTax\":0,\n # \"unrealisedProfit\":0,\n # \"syntheticMargin\":null,\n # \"walletBalance\":263542,\n # \"marginBalance\":263542,\n # \"marginBalancePcnt\":1,\n # \"marginLeverage\":0,\n # \"marginUsedPcnt\":0,\n # \"excessMargin\":263542,\n # \"excessMarginPcnt\":1,\n # \"availableMargin\":263542,\n # \"withdrawableMargin\":263542,\n # \"timestamp\":\"2020-08-03T12:01:01.246Z\",\n # \"grossLastValue\":0,\n # \"commission\":null\n # }\n # ]\n #\n return self.parse_balance(response)",
"def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalanceV2(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # \"AVAILABLE_ETH\": 0.0,\n # \"ETH\": 0.0,\n # \"LOCKED_ETH\": 0.0,\n # \"AVAILABLE_BCHSV\": 0.0,\n # \"BCHSV\": 0.0,\n # \"LOCKED_BCHSV\": 0.0,\n # \"AVAILABLE_BCHABC\": 0.0,\n # \"BCHABC\": 0.0,\n # \"LOCKED_BCHABC\": 0.0,\n # \"AVAILABLE_LTC\": 0.0,\n # \"LTC\": 0.0,\n # \"LOCKED_LTC\": 0.0,\n # \"AVAILABLE_ETC\": 0.0,\n # \"ETC\": 0.0,\n # \"LOCKED_ETC\": 0.0,\n # \"AVAILABLE_BTG\": 0.0,\n # \"BTG\": 0.0,\n # \"LOCKED_BTG\": 0.0,\n # \"AVAILABLE_GRIN\": 0.0,\n # \"GRIN\": 0.0,\n # \"LOCKED_GRIN\": 0.0,\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchabcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BtgNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcBtc\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchsvNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"GrinNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0}\n # }\n # }\n #\n return self.parse_balance(response)",
"def balance():\n address = request.args.get(\"address\")\n balance = p2p.query(\"/balance\", address=address)[\"balance\"]\n payload = jsonpickle.encode({\"balance\": balance})\n return payload, 200, {\"Content-Type\": \"application/json\"}",
"def balance(self):\n url = self.base_url + 'account/balance'\n self.session.headers.update(self.sign(url))\n resp = self.session.get(url)\n try:\n data = resp.json()\n data['amount'] = float(data['amount'])\n return pd.Series(data)\n except:\n return resp",
"def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result",
"def create_get_trades_request(self, symbol: str,\n limit: Optional[int] = None\n ) -> Request:",
"def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)",
"def get_account_balances(self):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('GetAccountBalances', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_balance(data.get('data', {})) if data.get('data') else {}",
"def withdraw(self, currency, amount, address):\n return self.api_query('withdraw', {\"currency\": currency, \"amount\": amount, \"address\": address})",
"def get_wallet_balances(self):\r\n method = self.wallet_endpoints['balances']['method']\r\n url = self.base_url + self.wallet_endpoints['balances']['url']\r\n req = requests.request(method, url, headers=self.get_auth_headers())\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def make_transaction():\n account_id = request.json['account_id']\n aux_account = [account for account in accounts if account['id'] == account_id]\n if len(aux_account) == 0:\n abort(404)\n account_balance = Decimal(aux_account[0].get('balance')).quantize(Decimal('0.00'))\n transaction = request.json['transaction']\n transaction_amount = Decimal(abs(request.json['amount'])).quantize(Decimal('0.00'))\n\n if not request.json:\n abort(400)\n if transaction not in ['withdrawal', 'deposit']:\n abort(400, f'Invalid transaction name: {transaction}')\n if transaction == 'withdrawal':\n transaction_amount = transaction_amount*-1\n\n # the user can't withdraw more than the account has\n validation_sum = (account_balance + transaction_amount).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n if validation_sum >= 0:\n for real_account in accounts:\n if real_account.get('id') == account_id:\n real_account['balance'] = round(float(validation_sum),2)\n else:\n abort(400, {'error':'Not enough funds for this transaction'})\n\n return json.dumps({f'{transaction.capitalize()} Done. New balance': str(validation_sum)}, ensure_ascii=False), 200",
"def getBalance(self, currency=''):\n\n if self.app.getExchange() == 'binance':\n if self.mode == 'live':\n model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret())\n df = model.getAccount()\n if isinstance(df, pd.DataFrame):\n if currency == '':\n # retrieve all balances\n return df\n else:\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n else:\n return 0.0\n else:\n # return dummy balances\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n if self.app.getExchange() == 'binance':\n self.balance = self.balance.replace('QUOTE', currency)\n else: \n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n else:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty:\n self.balance.loc[len(self.balance)] = [currency, 0, 0, 0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n\n else:\n if self.mode == 'live':\n # if config is provided and live connect to Coinbase Pro account portfolio\n model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())\n if currency == '':\n # retrieve all balances\n return model.getAccounts()[['currency', 'balance', 'hold', 'available']]\n else:\n df = model.getAccounts()\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n \n else:\n # return dummy balances\n\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n elif currency in ['BCH','BTC','ETH','LTC','XLM']:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty == True:\n self.balance.loc[len(self.balance)] = [currency,0,0,0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return 
balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))",
"def new_get_buys_transaction_history(self, cb_account):\n date: datetime = now()\n if cb_account == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": str(date + timedelta(days=-1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 1,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": str(date + timedelta(days=1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 0.5,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()",
"def map_to_trading_currency_balance(self, raw_balance: HitbtcRawTradingCurrencyBalanceModel\n ) -> HitbtcTradingCurrencyBalanceModel:\n\n currency = raw_balance[\"currency\"]\n available = Decimal(raw_balance[\"available\"])\n reserved = Decimal(raw_balance[\"reserved\"])\n res = HitbtcTradingCurrencyBalanceModel(\n currency=currency,\n available=available,\n reserved=reserved)\n return res",
"async def fetch_balance(self, params={}):\n await self.load_markets()\n response = await self.privateGetUserAssets(params)\n #\n # {\n # \"success\": \"1\",\n # \"data\": {\n # \"assets\": [\n # {\n # \"asset\": \"jpy\",\n # \"amount_precision\": \"4\",\n # \"onhand_amount\": \"0.0000\",\n # \"locked_amount\": \"0.0000\",\n # \"free_amount\": \"0.0000\",\n # \"stop_deposit\": False,\n # \"stop_withdrawal\": False,\n # \"withdrawal_fee\": {\n # \"threshold\": \"30000.0000\",\n # \"under\": \"550.0000\",\n # \"over\": \"770.0000\"\n # }\n # },\n # {\n # \"asset\": \"btc\",\n # \"amount_precision\": \"8\",\n # \"onhand_amount\": \"0.00000000\",\n # \"locked_amount\": \"0.00000000\",\n # \"free_amount\": \"0.00000000\",\n # \"stop_deposit\": False,\n # \"stop_withdrawal\": False,\n # \"withdrawal_fee\": \"0.00060000\"\n # },\n # ]\n # }\n # }\n #\n return self.parse_balance(response)",
"def get_balance(self, ticker):\n return self.trading_client.account_balance(ticker, 'usd')",
"def test_get_balance_between_stocktakings_missing_params(self):\n # Do a request without any params\n url = \"/stocktakingcollections/balance\"\n res = self.get(url, role=\"admin\")\n self.assertException(res, exc.InvalidData)\n\n # Do a request with only the start id given.\n params = {\"start_id\": 1}\n res = self.get(url, role=\"admin\", params=params)\n self.assertException(res, exc.InvalidData)\n\n # Do a request with only the end id given.\n params = {\"end_id\": 1}\n res = self.get(url, role=\"admin\", params=params)\n self.assertException(res, exc.InvalidData)",
"def get_transactions_trc20():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions/trc20\".format(wallet) # noqa: E501\n\n response = requests.request(\"GET\", url)\n\n print(response.text)",
"def do_balance(self,args):\n \"\"\"Can show total, available(available for trading), or reserved(reserved in open orders)\"\"\"\n \"\"\"usage: balance [available/reserved](optional)\"\"\"\n args = stripoffensive(args)\n if 'available' in args:\n btc,usd = available() \n elif 'reserved' in args:\n btc,usd = reserved()\n else:\n btc,usd = bal()\n word = args if args else \"total\"\n print 'Your %s balance is %.8f BTC and $%.2f USD ' % (word,btc,usd)\n if word == \"total\":\n last = D(bitstamp.ticker()['last'])\n print 'Account Value: $%.2f @ Last BTC Price of $%.2f' % (btc*last+usd,last)",
"def trades(Symbol='tBTCUSD', **params):\n endpoint = f'trades/{Symbol}/hist'\n return request(authenticate=False, version=2, endpoint=endpoint, method='GET', query_params=params)",
"async def get_balance(sochain_url:str, network:str, address:str):\n try:\n balance = await sochain_api.get_balance(sochain_url, network, address)\n if balance == None:\n raise Exception(\"Invalid Address\")\n return balance\n except Exception as err:\n raise Exception(str(err))",
"def get_balance_sheet(api_key, ticker, period, ftype):\n \n settings.set_apikey(api_key)\n df = cv.balance_sheet(ticker = ticker, period = period, ftype = ftype)\n return df",
"def balances():\n loop.run_until_complete(app.exchanges.fetch_balances())\n print(app.exchanges.balances_str)",
"def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()",
"def get_transfer_bid(self):\n api_uri = self._uri_dict.get('getTransferBid')\n data = {}\n r_data = self._post(api_uri, data)\n return r_data",
"async def fetch_ledger(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {\n # 'start': 123,\n }\n #\n # if since is not None:\n # # date-based pagination not supported\n # }\n #\n if limit is not None:\n request['count'] = limit\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n response = await self.privateGetUserWalletHistory(self.extend(request, params))\n #\n # [\n # {\n # transactID: \"69573da3-7744-5467-3207-89fd6efe7a47\",\n # account: 24321,\n # currency: \"XBt\",\n # transactType: \"Withdrawal\", # \"AffiliatePayout\", \"Transfer\", \"Deposit\", \"RealisedPNL\", ...\n # amount: -1000000,\n # fee: 300000,\n # transactStatus: \"Completed\", # \"Canceled\", ...\n # address: \"1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9\",\n # tx: \"3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA\",\n # text: \"\",\n # transactTime: \"2017-03-21T20:05:14.388Z\",\n # walletBalance: 0, # balance after\n # marginBalance: null,\n # timestamp: \"2017-03-22T13:09:23.514Z\"\n # }\n # ]\n #\n return self.parse_ledger(response, currency, since, limit)"
] | [
"0.7478715",
"0.646901",
"0.63921934",
"0.6359884",
"0.62434274",
"0.6177274",
"0.59944075",
"0.5980043",
"0.59507793",
"0.5931746",
"0.58371425",
"0.5812701",
"0.581053",
"0.5784304",
"0.57724154",
"0.5724659",
"0.57242286",
"0.56600153",
"0.56398743",
"0.5591215",
"0.5589447",
"0.55737424",
"0.5561727",
"0.55543685",
"0.55485326",
"0.5548174",
"0.55462325",
"0.55315834",
"0.5528095",
"0.55000436"
] | 0.80611444 | 0 |
Creates a request for the get active orders endpoint. Requires the "Place/cancel orders" API key Access Right. | def create_get_active_orders_request(self, symbol: Optional[Symbol] = None) -> Request:
method = "GET"
path = "/order"
params: Params = {}
if symbol is not None:
params["symbol"] = symbol
url = URL(self._create_url(path))
url = url.with_query(params)
headers = self._auth.sign(
method=method, url_path=url.path, url_query=url.query_string)
return Request(method=method, url=url, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_active_orders(self, custom_id=None, **params):\n self.conn.send('getOrders', custom_id=custom_id, **params)",
"def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")",
"def get_orders(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params={**kwargs})",
"def open_orders(self, **params):\n return self._get('openOrders', signed=True, params=params)",
"def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()",
"def get_pending_orders(self):\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'pending', ''), auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"async def handle_get_active_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:",
"def open_orders(self, **params):\n return self._get('option/openOrders', signed=True, params=params, version=None)",
"def create_get_active_order_request(self, client_order_id: str,\n wait: Optional[int] = None\n ) -> Request:\n\n method = \"GET\"\n path = f\"/order/{client_order_id}\"\n url = URL(self._create_url(path))\n\n params: Params = {}\n if wait is not None:\n params[\"wait\"] = str(wait)\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)",
"def get_open_orders(self):\n url = 'https://coincheck.com/api/exchange/orders/opens'\n headers = make_header(url, access_key=self.access_key, secret_key=self.secret_key)\n r = requests.get(url, headers=headers, timeout=self.timeout)\n return json.loads(r.text)",
"async def get_open_orders(self, symbol=None):\n uri = \"/v3/spot/order/current\"\n params = {}\n if symbol:\n params[\"symbol\"] = symbol\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error",
"def query_orders(self):\n return self._call_txtrader_api('query_orders', {})",
"def get_orders(self, oid=None, include_expired=False, orderid=None):\n return self.request(\n 'get',\n '%sorders/%s%s' % (\n safeformat('objects/{:int}/', oid) if oid else \"\",\n \"all/\" if include_expired else \"\",\n safeformat('{:int}', orderid) if orderid else \"\"\n )\n )",
"def get_open_orders(self, asset=None):\n try:\n self.ask_request()\n response = self._request('orders', None)\n order_statuses = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_statuses:\n raise ExchangeRequestError(\n error='Unable to retrieve open orders: {}'.format(\n order_statuses['message'])\n )\n\n orders = []\n for order_status in order_statuses:\n order, executed_price = self._create_order(order_status)\n if asset is None or asset == order.sid:\n orders.append(order)\n\n return orders",
"def make_order_request(self, page):\n return api_methods.Orders(\n page=page,\n per_page=self.PER_PAGE,\n from_date=self.from_date,\n start_date=self.start_date,\n end_date=self.end_date,\n deal_id=self.deal_id,\n ).call()",
"async def handle_get_active_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:",
"def get(self):\n return sync.get_open_orders()",
"def rest_open_order(self, orderID=None):\n if orderID:\n o = self.client.Order.Order_getOrders(filter=json.dumps({\"open\": True, \"orderID\": orderID})).result()\n else:\n o = self.client.Order.Order_getOrders(filter=json.dumps({\"open\": True}))\n if o:\n return o.result()[0]\n return None",
"async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {}\n market = None\n response = None\n if symbol is None:\n response = await self.privatePostAuthROrders(self.extend(request, params))\n else:\n market = self.market(symbol)\n request['symbol'] = market['id']\n response = await self.privatePostAuthROrdersSymbol(self.extend(request, params))\n #\n # [\n # [\n # 95408916206, # Order ID\n # null, # Group Order ID\n # 1653322349926, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653322349926, # Created Timestamp in milliseconds\n # 1653322349927, # Updated Timestamp in milliseconds\n # -10, # Amount remaining(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Order type\n # null, # Previous Order Type\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Flags, see parseOrderFlags()\n # \"ACTIVE\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.11, # Price\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Hidden(0 if False, 1 if True)\n # 0, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"$F7\":1} # additional meta information about the order( $F7 = IS_POST_ONLY(0 if False, 1 if True), $F33 = Leverage(int))\n # ],\n # ]\n #\n return self.parse_orders(response, market, since, limit)",
"def list_orders(self, symbol):\r\n param = {}\r\n param['symbol'] = self.__transfer_symbol(symbol)\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/processing-orders', param, self.timeout)",
"async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n if limit is not None:\n request['count'] = limit\n if since is not None:\n request['since'] = self.parse_to_int(since / 1000)\n response = await self.privateGetUserSpotActiveOrders(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n orders = self.safe_value(data, 'orders', [])\n return self.parse_orders(orders, market, since, limit)",
"def rest_open_order(self, orderID):\n o = self.client.Order.Order_getOrders(filter=json.dumps({\"open\": True, \"orderID\": orderID})).result()\n if o[0].__len__():\n return o[0][0]\n return None",
"def list_order(self, orderNo):\r\n param = {}\r\n param['orderNo'] = orderNo\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/order', param, self.timeout)",
"def account_order(self, orderid):\n return self.get(f'orders/{orderid}', auth=True)",
"def get_orders(request):\n close_old_connections()\n try:\n # Give all orders maded on the given date.\n return Order.objects.filter(\n date__date=request.GET['date']).order_by('-date')\n except MultiValueDictKeyError:\n # Give all orders today.\n return Order.objects.filter(\n date__date=datetime.now().date()).order_by('-date')",
"def getOrders(self, market='', action='', status='all'):\n\n # validate market is syntactically correct\n self._checkMarketSyntax(market)\n\n if action != '':\n # validate action is either a buy or sell\n if not action in ['buy', 'sell']:\n raise ValueError('Invalid order action.')\n\n # validate status is open, pending, done, active or all\n if not status in ['open', 'pending', 'done', 'active', 'all', 'filled']:\n raise ValueError('Invalid order status.')\n\n if self.app.getExchange() == 'binance':\n if self.mode == 'live':\n # if config is provided and live connect to Binance account portfolio\n model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIURL())\n # retrieve orders from live Binance account portfolio\n self.orders = model.getOrders(market, action, status)\n return self.orders\n else:\n # return dummy orders\n if market == '':\n return self.orders\n else:\n return self.orders[self.orders['market'] == market] \n if self.app.getExchange() == 'coinbasepro':\n if self.mode == 'live':\n # if config is provided and live connect to Coinbase Pro account portfolio\n model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())\n # retrieve orders from live Coinbase Pro account portfolio\n self.orders = model.getOrders(market, action, status)\n return self.orders\n else:\n # return dummy orders\n if market == '':\n return self.orders\n else:\n return self.orders[self.orders['market'] == market]",
"async def get_open_orders(self, symbol):\n params = {\n \"symbol\": symbol,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", \"/api/v3/openOrders\", params=params, auth=True)\n return success, error",
"def create_cancel_orders_request(self, symbol: Optional[Symbol] = None) -> Request:\n\n method = \"DELETE\"\n path = f\"/order\"\n url = URL(self._create_url(path))\n\n params: Params = {}\n if symbol is not None:\n params[\"symbol\"] = symbol\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)",
"async def get_open_orders(self, symbol):\n uri = \"/fapi/v1/openOrders\"\n params = {\n \"symbol\": symbol,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", uri, params=params, auth=True)\n return success, error",
"def orders ( self, block: bool = True ):\n\tresult = OutstandingOrders(\n\t\tauth\t\t= self.auth,\n\t\taccount_nbr = self.account_nbr,\n\t\tblock\t\t= block\n\t).request()\n\n\treturn result"
] | [
"0.7395109",
"0.70004517",
"0.6780944",
"0.6571053",
"0.6547748",
"0.6519316",
"0.6504723",
"0.64073473",
"0.63162017",
"0.6253162",
"0.6105293",
"0.6073689",
"0.6062765",
"0.5981852",
"0.59570843",
"0.5870386",
"0.5839957",
"0.58313495",
"0.5808574",
"0.58009",
"0.5772646",
"0.57606024",
"0.5757387",
"0.57391393",
"0.5728366",
"0.57261664",
"0.5710306",
"0.5709969",
"0.57076544",
"0.56861794"
] | 0.7347726 | 1 |
Creates a request for the cancel orders endpoint. Requires the "Place/cancel orders" API key Access Right. | def create_cancel_orders_request(self, symbol: Optional[Symbol] = None) -> Request:
method = "DELETE"
path = f"/order"
url = URL(self._create_url(path))
params: Params = {}
if symbol is not None:
params["symbol"] = symbol
url = url.with_query(params)
headers = self._auth.sign(
method=method, url_path=url.path, url_query=url.query_string)
return Request(method=method, url=url, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_cancel_order_request(self, client_order_id: str) -> Request:\n\n method = \"DELETE\"\n path = f\"/order/{client_order_id}\"\n url = URL(self._create_url(path))\n\n params: Params = {}\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)",
"def cancel_order(self, **kwargs):\n return self.client.execute(\"order/refund\", \"POST\", kwargs)",
"async def handle_cancel_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:",
"def test_cancel_order(self):\n # create a order to cancel\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # cancel the order\n new_order = resp.get_json()\n new_order['status'] = 'Cancelled'\n resp = self.app.put('/orders/{}/cancel'.format(new_order['id']),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n cancelled_order = resp.get_json()\n self.assertEqual(cancelled_order['status'], 'Cancelled')",
"def _cancel_order_parameters(self, cancel_order_action):\n parameters = {\n 'order_id': cancel_order_action.order_id\n }\n return parameters",
"def cancel_order(self, order):\r\n method = self.private_endpoints['cancel_order']['method']\r\n url = self.base_url + self.private_endpoints['cancel_order']['url'].format(orderId=order)\r\n req = requests.request(method, url, headers=self.get_auth_headers(nonce=True))\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return True\r\n else:\r\n return res",
"def market_cancel(self, orderid):\n return self.delete(f'orders/{orderid}', auth=True)",
"def test_cancel_order_with_order_id():\n\n client = Client(key, secret)\n response = client.cancel_oco_order(**params)\n response.should.equal(mock_item)",
"def test_cancel_oco_order_without_symbol():\n\n client = Client(key, secret)\n client.cancel_oco_order.when.called_with(\"\").should.throw(ParameterRequiredError)",
"async def handle_cancel_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:",
"def cancel_aws_order(order_id):\n mws_shipments = mws.OutboundShipments(\n access_key=MWS_ACCESS_KEY, secret_key=MWS_SECRET_KEY,\n account_id=MWS_MERCHANT_ID, region=\"FR\")\n\n data = dict(Action=\"CancelFulfillmentOrder\", SellerFulfillmentOrderId=order_id)\n return mws_shipments.make_request(data, \"POST\")",
"def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument('orderId', type=int, required=True,\n help='Order ID to cancel')\n args = parser.parse_args()\n return sync.cancel_order(args['orderId'])",
"def cancel_order(self, walletId, orderId):\n return",
"def cancel(self, uuid):\n\n result = self.api_query('CancelOrder', {'order_id': uuid})\n return result",
"def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):\n request = {\n 'id': id,\n }\n return self.privatePostOrderCancelOrder(self.extend(request, params))",
"def cancel_order(self, symbol, orderId):\n payload = {\n 'symbol': symbol,\n 'orderId': orderId\n }\n\n return self.signed_request('DELETE', '/api/v3/order', **payload)",
"def cancel_order(self, custom_id=None, **params):\n self.conn.send('cancelOrder', custom_id=custom_id, **params)",
"def test_cancel_order_failure(self):\n # create a order to cancel\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # cancel the order\n new_order = resp.get_json()\n new_order['status'] = 'Cancelled'\n resp = self.app.put('/orders/{}/cancel'.format(23),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_cancel_oco_order_without_orderListId():\n\n client = Client(key, secret)\n client.cancel_oco_order.when.called_with(\"ETHBTC\", orderListId=\"\").should.throw(\n ClientError\n )",
"def cancel(self):\r\n self.require_item()\r\n\r\n url = '{0}/cancel'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n request.use_xml = False\r\n\r\n return request, parsers.parse_empty",
"async def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):\n cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id\n request = None\n if cid is not None:\n cidDate = self.safe_value(params, 'cidDate') # client order id date\n if cidDate is None:\n raise InvalidOrder(self.id + \" canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')\")\n request = {\n 'cid': cid,\n 'cid_date': cidDate,\n }\n params = self.omit(params, ['cid', 'clientOrderId'])\n else:\n request = {\n 'id': int(id),\n }\n response = await self.privatePostAuthWOrderCancel(self.extend(request, params))\n order = self.safe_value(response, 4)\n return self.parse_order(order)",
"def test_cancel_order_authentication(self):\n # Test with admin token\n response = self.client.put(\n 'api/v2/parcels/100/cancel', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(\n data, {message: 'You are not authorized to perform this operation'})\n self.assertEqual(response.status_code, 403)",
"def cancel_payment_by_idempotency_key(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/payments/cancel')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()",
"def test_cancel_order_authentication(self):\n # Test with admin token\n response = self.client.put(\n 'api/v1/parcels/100/cancel', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(data, {message: 'Cannot perform this operation'})\n self.assertEqual(response.status_code, 401)",
"def cancel(self, currency_pair, order_number):\n return self.api_query('cancelOrder', {\"currencyPair\": currency_pair, \"orderNumber\": order_number})",
"def cancelOrder(self, order_number):\n pass",
"def cancel_pending_orders(self):\n raise NotImplementedError(\"Broker must implement \\\n `cancel_pending_orders()`\")",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]"
] | [
"0.6977676",
"0.6641356",
"0.6572583",
"0.6490388",
"0.6354798",
"0.6248779",
"0.6243973",
"0.6240855",
"0.6232378",
"0.62219054",
"0.6180212",
"0.61791486",
"0.61666495",
"0.61602354",
"0.6125321",
"0.6097856",
"0.60727555",
"0.60603887",
"0.60504097",
"0.60332423",
"0.60234696",
"0.60017943",
"0.5988855",
"0.596842",
"0.5966445",
"0.59489137",
"0.5915871",
"0.59060997",
"0.59060997",
"0.59060997"
] | 0.74875134 | 0 |
Creates a request for the cancel order endpoint. Requires the "Place/cancel orders" API key Access Right. | def create_cancel_order_request(self, client_order_id: str) -> Request:
method = "DELETE"
path = f"/order/{client_order_id}"
url = URL(self._create_url(path))
params: Params = {}
url = url.with_query(params)
headers = self._auth.sign(
method=method, url_path=url.path, url_query=url.query_string)
return Request(method=method, url=url, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_cancel_orders_request(self, symbol: Optional[Symbol] = None) -> Request:\n\n method = \"DELETE\"\n path = f\"/order\"\n url = URL(self._create_url(path))\n\n params: Params = {}\n if symbol is not None:\n params[\"symbol\"] = symbol\n url = url.with_query(params)\n\n headers = self._auth.sign(\n method=method, url_path=url.path, url_query=url.query_string)\n return Request(method=method, url=url, headers=headers)",
"def cancel_order(self, **kwargs):\n return self.client.execute(\"order/refund\", \"POST\", kwargs)",
"def test_cancel_order(self):\n # create a order to cancel\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # cancel the order\n new_order = resp.get_json()\n new_order['status'] = 'Cancelled'\n resp = self.app.put('/orders/{}/cancel'.format(new_order['id']),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n cancelled_order = resp.get_json()\n self.assertEqual(cancelled_order['status'], 'Cancelled')",
"def cancel_order(self, order):\r\n method = self.private_endpoints['cancel_order']['method']\r\n url = self.base_url + self.private_endpoints['cancel_order']['url'].format(orderId=order)\r\n req = requests.request(method, url, headers=self.get_auth_headers(nonce=True))\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return True\r\n else:\r\n return res",
"def _cancel_order_parameters(self, cancel_order_action):\n parameters = {\n 'order_id': cancel_order_action.order_id\n }\n return parameters",
"def test_cancel_oco_order_without_symbol():\n\n client = Client(key, secret)\n client.cancel_oco_order.when.called_with(\"\").should.throw(ParameterRequiredError)",
"def cancel_order(self, symbol, orderId):\n payload = {\n 'symbol': symbol,\n 'orderId': orderId\n }\n\n return self.signed_request('DELETE', '/api/v3/order', **payload)",
"def cancel_order(self, walletId, orderId):\n return",
"def test_cancel_order_with_order_id():\n\n client = Client(key, secret)\n response = client.cancel_oco_order(**params)\n response.should.equal(mock_item)",
"async def handle_cancel_order_response(self, response: RequesterResponse\n ) -> HitbtcOrderModel:",
"def cancel_order(self, custom_id=None, **params):\n self.conn.send('cancelOrder', custom_id=custom_id, **params)",
"async def handle_cancel_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:",
"def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument('orderId', type=int, required=True,\n help='Order ID to cancel')\n args = parser.parse_args()\n return sync.cancel_order(args['orderId'])",
"def cancel(self, uuid):\n\n result = self.api_query('CancelOrder', {'order_id': uuid})\n return result",
"def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):\n request = {\n 'id': id,\n }\n return self.privatePostOrderCancelOrder(self.extend(request, params))",
"def market_cancel(self, orderid):\n return self.delete(f'orders/{orderid}', auth=True)",
"def cancel_aws_order(order_id):\n mws_shipments = mws.OutboundShipments(\n access_key=MWS_ACCESS_KEY, secret_key=MWS_SECRET_KEY,\n account_id=MWS_MERCHANT_ID, region=\"FR\")\n\n data = dict(Action=\"CancelFulfillmentOrder\", SellerFulfillmentOrderId=order_id)\n return mws_shipments.make_request(data, \"POST\")",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel(self, currency_pair, order_number):\n return self.api_query('cancelOrder', {\"currencyPair\": currency_pair, \"orderNumber\": order_number})",
"def cancel_payment_by_idempotency_key(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/payments/cancel')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()",
"async def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):\n cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id\n request = None\n if cid is not None:\n cidDate = self.safe_value(params, 'cidDate') # client order id date\n if cidDate is None:\n raise InvalidOrder(self.id + \" canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')\")\n request = {\n 'cid': cid,\n 'cid_date': cidDate,\n }\n params = self.omit(params, ['cid', 'clientOrderId'])\n else:\n request = {\n 'id': int(id),\n }\n response = await self.privatePostAuthWOrderCancel(self.extend(request, params))\n order = self.safe_value(response, 4)\n return self.parse_order(order)",
"def cancel(self):\r\n self.require_item()\r\n\r\n url = '{0}/cancel'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n request.use_xml = False\r\n\r\n return request, parsers.parse_empty",
"def test_cancel_oco_order_without_orderListId():\n\n client = Client(key, secret)\n client.cancel_oco_order.when.called_with(\"ETHBTC\", orderListId=\"\").should.throw(\n ClientError\n )",
"def CancelOperation(\n self,\n request: google.longrunning.operations_pb2.CancelOperationRequest,\n context: grpc.ServicerContext,\n ) -> google.protobuf.empty_pb2.Empty:",
"def test_cancel_order_failure(self):\n # create a order to cancel\n test_order = OrderFactory()\n resp = self.app.post('/orders',\n json=test_order.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n\n # cancel the order\n new_order = resp.get_json()\n new_order['status'] = 'Cancelled'\n resp = self.app.put('/orders/{}/cancel'.format(23),\n json=new_order,\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def cancelOrder(self, order_number):\n pass",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)"
] | [
"0.74667805",
"0.6814739",
"0.65435386",
"0.64910775",
"0.63818395",
"0.6327267",
"0.63135856",
"0.63040006",
"0.63021636",
"0.62851006",
"0.6268942",
"0.6267777",
"0.62616944",
"0.62519264",
"0.62420887",
"0.62206006",
"0.622041",
"0.62126046",
"0.62126046",
"0.62126046",
"0.6195313",
"0.61858726",
"0.61843413",
"0.6150678",
"0.61063075",
"0.6088923",
"0.608522",
"0.6066239",
"0.6061288",
"0.6061288"
] | 0.71531564 | 1 |
Container to configure 3D plot, i.e. colormapping | def plot_config_3d(view, trace, marker_names):
if view == "3D Plot":
with st.expander("3D Plot Configuration", expanded=True):
col_plot_type, col_grid_res, col_fill, col_interp = st.columns(4)
col_col_type, col_choice, col_preview, col_overlay = st.columns(4)
trace["Chart_Type"] = col_plot_type.selectbox("Plot Type", ["Contour","3D Scatter","Surface","Heatmap"], key = "Chart_Type")
col_col_type.selectbox('Color Map Type', ['Sequential','Diverging'], key="Color_Set_Type")
if st.session_state["Color_Set_Type"] == 'Sequential':
color_map = list(sequential_color_dict().keys())
else:
color_map = list(diverging_color_dict().keys())
color_set = col_choice.selectbox("Color Map", color_map)
if st.session_state["Color_Set_Type"] == 'Sequential':
st.session_state['Color_Palette'] = sequential_color_dict().get(color_set)
else:
st.session_state['Color_Palette'] = diverging_color_dict().get(color_set)
colormap_preview = plot_color_set(st.session_state['Color_Palette'], color_set, view)
col_preview.image(colormap_preview, use_column_width = True)
if trace["Chart_Type"] != '3D Scatter':
trace["Grid_Res"] = col_grid_res.number_input("Grid Resolution", min_value=0.0, max_value=100000.0, value=50.0, step=0.5, key="Grid_Res")
trace["Fill_Value"] = col_fill.selectbox("Fill Value", ["nan",0], help="fill missing data with the selected value", key = "Fill_Value")
trace["Interp_Method"] = col_interp.selectbox("Interpolation Method", ["linear","nearest","cubic"], key = "Interp_Method")
else:
trace["Fill_Value"] = None
trace["Interp_Method"] = None
trace["Grid_Res"] = None
st.session_state["Overlay"] = col_overlay.checkbox("Overlay Original Data", help="Display scatter of original data overlayed on chart")
if st.session_state["Overlay"] == True:
st.subheader("Overlay")
col_overlay_alpha, col_overlay_marker, col_overlay_color = st.columns(3)
overlay_alpha = col_overlay_alpha.slider("Opacity",value=0.5,min_value=0.0, max_value=1.0, step=0.01, key = "Overlay_Alpha")
overlay_marker = col_overlay_marker.selectbox("Style", marker_names, help="https://plotly.com/python/marker-style/", key = "Overlay Marker")
overlay_color = col_overlay_color.color_picker('Pick a color ', '#000000', key = "Overlay Color")
else:
overlay_alpha = None
overlay_marker = None
overlay_color = None
else:
trace["Chart_Type"] = None
st.session_state['Color_Palette'] = None
trace["Fill_Value"] = None
trace["Interp_Method"] = None
trace["Grid_Res"] = None
return trace["Chart_Type"], trace["Fill_Value"], trace["Interp_Method"], trace["Grid_Res"], st.session_state['Color_Palette'], st.session_state["Overlay"], overlay_alpha, overlay_marker, overlay_color | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_plot():\n fig = plt.figure(constrained_layout=True, figsize=(7,9), dpi=130)\n gs = fig.add_gridspec(5, 1)\n ax2 = fig.add_subplot(gs[:1, :])\n ax1 = fig.add_subplot(gs[1:, :], projection='3d')\n\n tick_color = (0.2, 0.2, 0.2, 1.0)\n pane_color = (0.12, 0.12, 0.12, 1.0)\n ax1.w_xaxis.set_pane_color(pane_color)\n ax1.w_yaxis.set_pane_color(pane_color)\n ax1.w_zaxis.set_pane_color(pane_color)\n\n ax1.tick_params(axis='x', colors=tick_color)\n ax1.tick_params(axis='y', colors=tick_color)\n ax1.tick_params(axis='z', colors=tick_color)\n ax1.view_init(elev=90, azim=180)\n\n ax1.set_xlim3d(0, 80)\n ax1.set_zlim3d(-2, 5)\n \n return (ax1, ax2)",
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()",
"def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)",
"def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()",
"def heatmap3d(xL, yL ,zL, valueL, grid=True, color='cool',\n size=100, marker='o',alpha=0.8,save=False, savepath='./'):\n from mpl_toolkits.mplot3d import Axes3D\n #Normalize valueL into 0 to 1\n normalizedValueL = list( (valueL - min(valueL)) / (max(valueL) - min(valueL)) )\n\n if color=='hot':\n colors = plt.cm.hot_r(normalizedValueL)\n # For color bar display\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hot_r)\n elif color=='cool':\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n elif color=='hsv':\n colors = plt.cm.hsv_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hsv_r)\n elif color=='jet':\n colors = plt.cm.jet_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.jet_r)\n elif color=='gray':\n colors = plt.cm.gray_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.gray_r)\n elif color=='spring':\n colors = plt.cm.spring_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.spring_r)\n elif color=='summer':\n colors = plt.cm.summer_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.summer_r)\n elif color=='autumn':\n colors = plt.cm.autumn_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.autumn_r)\n elif color=='winter':\n colors = plt.cm.winter_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.winter_r)\n else:\n print('Since there is no color, it will be the default cool')\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n\n colmap.set_array(valueL)\n\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Set the grid on of off\n if not grid:\n ax.grid(False)\n\n ax.scatter(xL,yL,zL, s =size, c=colors, marker=marker, alpha=alpha)\n # For color bar display\n cb = fig.colorbar(colmap)\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n if save==True:\n date = datetime.datetime.now()\n plt.savefig(savepath+'3Dheatmap_'+str(date.year)+'_'+ str(date.month)+ \\\n '_'+str(date.day)+'_'+str(date.hour)+'_'+ \\\n str(date.minute)+'_'+str(date.second), dpi=150)\n plt.show()",
"def init_render(self):\n plt.ion() # interactive plot mode, panning, zooming enabled\n self.fig = plt.figure(figsize=(9,7)) # create figure object\n self.ax = self.fig.add_subplot(111, projection=\"3d\") # attach z-axis to plot\n # set axe limits and labels\n self.ax.set_xlim([-self.l1max, self.l1max])\n self.ax.set_ylim([-self.l1max, self.l1max])\n self.ax.set_zlim([-self.l1max, self.l1max])\n self.ax.set_xlabel(\"X\")\n self.ax.set_ylabel(\"Y\")\n self.ax.set_zlabel(\"Z\")\n # add 3 arrows of coordinate base frame\n ax_base = Arrow3D([0.0, self.arrow_len], [0.0, 0.0], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n ay_base = Arrow3D([0.0, 0.0], [0.0, self.arrow_len], [0.0, 0.0],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n az_base = Arrow3D([0.0, 0.0], [0.0, 0.0], [0.0, self.arrow_len],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n self.ax.add_artist(ax_base)\n self.ax.add_artist(ay_base)\n self.ax.add_artist(az_base)\n plt.show(block=False) # display figure and bring focus (once) to plotting window\n self.fig.tight_layout() # fits the plot to window size",
"def plot3d(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',**kwargs):\n from enthought.mayavi import mlab as M\n from operator import isMappingType\n\n if data == 'auto':\n if self.data:\n data = self.data[:2]\n else:\n data = None\n\n if data: #TODO:correct coord conv\n xd,yd = data[0][0],data[0][1]\n if datarange is None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(data[1]),np.min(data[1]))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n maxmind = None\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n# if maxmind:\n# norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n# else:\n# norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n M.clf()\n\n M.mesh(grid[0],grid[1],res)\n\n if cb:\n if isMappingType(cb):\n M.colorbar(**cb)\n else:\n M.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n zd = data[1]\n zres = zd-self((xd,yd))\n kwscat.setdefault('scale_mode','none')\n kwscat.setdefault('scale_factor','auto')\n g = M.points3d(xd,yd,zd,zres,**kwscat)\n if kwscat['scale_factor'] == 'auto':\n g.glyph.glyph.scale_factor /= 2\n\n #M.xlim(datarange[0],datarange[1])\n #M.ylim(datarange[2],datarange[3])",
"def add_satellite(ax, coo_x, coo_y, coo_z):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n tr = np.transpose(np.vstack((coo_x.cartesian.xyz.value, coo_y.cartesian.xyz.value, coo_z.cartesian.xyz.value)))\n\n alpha_czti = 0.5\n alpha_radiator = 0.5\n alpha_sat = 0.3\n\n color_czti = 'yellow'\n color_radiator = 'black'\n color_sat = 'green'\n\n c_w2 = 0.15 # czti half-width\n c_h = 0.30 # czti height\n c_hr = 0.40 # czti radiator height\n sat_w = 0.6\n\n # For each surface, do the following:\n # verts = []\n # verts.append([tuple(tr.dot(np.array[cx, cy, cz]))])\n # surf = Poly3DCollection(verts)\n # surf.set_alpha()\n # surf.set_color()\n # ax.add_collection3d(surf)\n \n # +x rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n \n # +y rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n\n # -y rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n \n # -x radiator plate\n verts = []\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, c_hr]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, c_hr]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_radiator)\n surf.set_color(color_radiator)\n ax.add_collection3d(surf)\n\n # # Bottom CZTI only\n # verts = []\n # verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n # surf = Poly3DCollection([verts])\n # surf.set_alpha(alpha_czti)\n # surf.set_color(color_czti)\n # ax.add_collection3d(surf)\n\n # Satellite top\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite bottom\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n\n ax.add_collection3d(surf)\n\n # Satellite 
back (radiator side)\n verts = []\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite front (opposite radiator side)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n #dpix_mask Satellite right (-y, common to czti)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite left (+y)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n return",
"def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return",
"def plot_3D_compare(true_lab, pred_lab):\n ref_shape = [true_lab.shape[1], true_lab.shape[2], true_lab.shape[3]]\n true_loc = np.where(true_lab == 1)\n pred_loc = np.where(pred_lab == 1)\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()",
"def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))",
"def setup_figure(self):\n # How many data plots are we dealing with in each dimension?\n plots_x = self._dims[0] # Number of columns\n plots_y = self._dims[1] if len(self._dims) > 1 else 1 # Number of rows\n\n # Set up our base row count\n num_rows = plots_y + 1 # Add one more row for the update number\n height_ratios = [1] * plots_y + [0.25]\n num_cols = plots_x + 1 # Add one more column for the colorbar\n width_ratios = [1] * plots_x + [0.10]\n\n if self._is_multi:\n # If we have multiple resources, add another row for the resource legend\n num_rows += 1\n height_ratios.append(0.1)\n\n has_descr = True if len(self._env_str + self._event_str) > 0 else False\n if has_descr:\n # if we need to print some descriptive text, add another at the bottom\n # change this height ratio to make it larger\n num_rows += 1\n height_ratios.append(0.35)\n\n # Create our grid layout\n gs = mpl.gridspec.GridSpec(num_rows, num_cols,\n height_ratios=height_ratios,\n\n width_ratios=width_ratios)\n\n # Plot our category heatmaps\n ndx = 0 # Index into our experiment\n plots = [] # Plots from our experiment\n for col in range(plots_x):\n for row in range(plots_y):\n ax = plt.subplot(gs[row,col])\n base_cmap = self._cmap if not self._is_multi else ColorMaps.gray\n plot = plt.imshow(np.zeros(self._grid_shape), cmap=base_cmap,\n origin='upper', interpolation='nearest',\n vmin=self._vmin, vmax=self._vmax)\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n if self._is_left_edge(ndx):\n ax.set_ylabel(self._fact2label(ndx,1))\n if self._is_bottom_edge(ndx):\n ax.set_xlabel(self._fact2label(ndx,0))\n plots.append(plot)\n pa = []\n for pp in self._post_plot:\n pa.append(pp.blit_build(ax, ax_ndx=ndx))\n ndx = ndx+1\n\n # Plot the colorbar\n norm = mpl.colors.Normalize(self._vmin, self._vmax)\n cax = plt.subplot( gs[0:plots_y,-1] ) # Across data rows, last column\n if not self._is_multi:\n cbar = mpl.colorbar.ColorbarBase(cax, cmap=self._cmap, norm=norm, orientation='vertical')\n else:\n cbar = mpl.colorbar.ColorbarBase(cax, cmap=ColorMaps.gray, norm=norm, orientation='vertical')\n cbar.set_label('Abundance')\n\n # Plot the update\n ax = plt.subplot(gs[plots_y,0:plots_x]) # The row after the data plots, across all data plot columns\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n ax.set_ylim(0,1)\n ax.set_xlim(0,1)\n update = ax.text(0.5,0.25,'Update n/a', ha='center', va='bottom')\n\n # Plot the category legend if needed\n if self._is_multi:\n ax = plt.subplot(gs[plots_y+1,:-1]) # The row after the update axis, acros all data plot columns\n legend_handles = []\n for ndx,cat_name in enumerate(self._categories):\n legend_handles.append(mpl.patches.Patch(color=self._colors[ndx], label=cat_name))\n plt.legend(handles=legend_handles, loc='center', frameon=False, ncol=len(legend_handles))\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n\n # If we have an environment and event strings, plot them in the final row across all columns\n if has_descr:\n ax = plt.subplot(gs[-1,:])\n desc = self._env_str + '\\n\\n' + self._event_str + '\\n\\n' + f'World: {self._world_size[0]} x {self._world_size[1]}'\n env = ax.text(0.05, 1, desc, ha='left', va='top', fontsize=7)\n ax.tick_params(axis='both', bottom='off', labelbottom='off',\n left='off', labelleft='off')\n ax.set_frame_on(False)\n\n # Title the figure\n plt.suptitle(self._title)\n\n # Store what we 
need to redraw each frame for blitting.\n # The values in this dictionary may be either a single element\n # or an iterable.\n self._to_draw = {'plots':plots, 'update':update, 'post_plot':pa}",
"def plot3d(self, data, axis2, axis3, mesh, data_type='solution', colormap='blue-red', axes=False,\n cartesian_coordinates=False, interp_size=None, ax_names=None, style=0, *args, **kwargs):\n # if type(axis2) is not Axis1d or type(axis3) is not Axis1d:\n # raise NotImplementedError(\"3D plots with such combination of axes are not supported.\")\n # x_grid, y_grid, z_grid = self.cartesian_coordinates(axis2, axis3)\n # Title\n if data_type == 'solution':\n title = util.text.solution_caption(cartesian_coordinates, self, axis2, axis3).replace('$', '') \\\n .replace('{', '').replace('}', '')\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n title = ' ' + re.sub('[${}]', '', util.text.detector_caption(mesh, data_type, cartesian_coordinates))\n if axes:\n if ax_names is None:\n axes = ('{}, {}'.format(self.name, self.units),\n '{}, {}'.format(axis2.name, axis2.units),\n '{}, {}'.format(axis3.name, axis3.units))\n else:\n axes = ax_names\n # Voxel style\n if style == 0:\n if cartesian_coordinates:\n vertices, faces = self.cell_edges3d_cartesian(axis2, axis3)\n else:\n vertices, faces = self.cell_edges3d(axis2, axis3)\n vertices = np.array(vertices)\n if data_type == 'solution':\n x = []\n y = []\n z = []\n new_data = []\n new_faces = []\n shift = 0\n for i, a1 in enumerate(vertices):\n for j, a2 in enumerate(a1):\n for k, a3 in enumerate(a2):\n vert_faces = np.array(faces[i][j][k]) + shift\n for f in vert_faces:\n new_faces.append(f)\n for vert in a3:\n x.append(vert[0])\n y.append(vert[1])\n z.append(vert[2])\n new_data.append(data[i][j][k])\n shift += 1\n plot3d.voxel_plot(new_data, x, y, z, new_faces, title=title, axes=axes, colormap=colormap,\n *args, **kwargs)\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n new_data = []\n for det in data:\n x = []\n y = []\n z = []\n det_data = []\n new_faces = []\n shift = 0\n for i, a1 in enumerate(vertices):\n for j, a2 in enumerate(a1):\n for k, a3 in enumerate(a2):\n vert_faces = np.array(faces[i][j][k]) + shift\n for f in vert_faces:\n new_faces.append(f)\n for vert in a3:\n x.append(vert[0])\n y.append(vert[1])\n z.append(vert[2])\n det_data.append(det[i][j][k])\n shift += 1\n new_data.append(det_data)\n plot3d.detector_voxel_plot(new_data, x, y, z, new_faces, title=title, axes=axes, colormap=colormap,\n *args, **kwargs)\n else:\n raise ValueError('data type {} is unknown'.format(data_type))\n return 0, 0\n\n if cartesian_coordinates:\n x_grid, y_grid, z_grid = self.cartesian_coordinates(axis2, axis3)\n else:\n coord = [self.coordinates, axis2.coordinates, axis3.coordinates]\n x_grid, y_grid, z_grid = np.array(np.meshgrid(*coord, indexing='ij'))\n\n if data_type == 'solution':\n # irregular or non-cartesian axes\n if not all((self.regular, axis2.regular, axis3.regular)) or \\\n (cartesian_coordinates and not all(type(x) == cartesian.Axis1d for x in (self, axis2, axis3))):\n if interp_size is None:\n interp_size = 50\n warnings.warn(\"Since axes are not regular, linear interpolation with {} points used. 
\"\n \"You can change interpolation size with interp_size attribute.\"\n .format(interp_size ** 3))\n x_grid, y_grid, z_grid, new_data = \\\n util.geometry3d_basic.make_regular(data, x_grid, y_grid, z_grid, interp_size)\n new_data = np.nan_to_num(new_data)\n new_data = np.clip(new_data, np.amin(data), np.amax(data))\n mask = mesh.is_in_grid(self.from_cartesian([x_grid, y_grid, z_grid], axis2, axis3), self, axis2, axis3)\n new_data *= mask\n else:\n new_data = data\n # plot\n plot3d.contour3d(new_data, x_grid, y_grid, z_grid,\n title=title, colormap=colormap, axes=axes, style=style, *args, **kwargs)\n\n elif data_type == 'detector_geometry' or data_type == 'detector_geometry_n':\n # irregular axes\n if not all((self.regular, axis2.regular, axis3.regular)) or \\\n (cartesian_coordinates and not all(type(x) == cartesian.Axis1d for x in (self, axis2, axis3))):\n if interp_size is None:\n interp_size = 50\n warnings.warn(\"Since axes are not regular, linear interpolation with {} points used. \"\n \"You can change interpolation size with interp_size attribute.\"\n .format(interp_size ** 3))\n x_grid_n, y_grid_n, z_grid_n = x_grid, y_grid, z_grid\n new_data = np.zeros((data.shape[0], interp_size, interp_size, interp_size))\n # interpolate data for each detector\n print(\"Start interpolation.\")\n mask = None\n for i, d in enumerate(data):\n x_grid, y_grid, z_grid, new_data[i] \\\n = util.geometry3d_basic.make_regular(d, x_grid_n, y_grid_n, z_grid_n, interp_size)\n if mask is None:\n mask = mesh.is_in_grid(self.from_cartesian([x_grid, y_grid, z_grid], axis2, axis3), self, axis2,\n axis3)\n new_data[i] = np.nan_to_num(new_data[i])\n new_data[i] = np.clip(new_data[i], np.amin(data[i]), np.amax(data[i]))\n new_data[i] *= mask\n print('\\r', end='')\n print(\"...\", str((i + 1) * 100 // data.shape[0]) + \"% complete\", end='')\n print('\\r \\r', end='')\n\n else:\n new_data = data\n plot3d.detector_contour3d(new_data, x_grid, y_grid, z_grid,\n title=title, colormap=colormap, axes=axes, style=style, *args, **kwargs)\n else:\n raise ValueError('data type {} is unknown'.format(data_type))\n\n return 0, 0",
"def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)",
"def plot_unit(x,y,z,elements,data,is2d,isplanet,datatype,options,fig,axgrid,gridindex):\n\t#if we are plotting 3d replace the current axis\n\tif not is2d:\n\t\taxgrid[gridindex].axis('off')\n\t\tax=inset_locator.inset_axes(axgrid[gridindex],width='100%',height='100%',loc=3,borderpad=0,axes_class=Axes3D)\n\t\tax.set_axis_bgcolor((0.7,0.7,0.7))\n\telse:\n\t\tax=axgrid[gridindex]\n\n\t#edgecolor\n\tedgecolor=options.getfieldvalue('edgecolor','None')\n\n\t# colormap\n\t# {{{ give number of colorlevels and transparency\n\tcolorlevels=options.getfieldvalue('colorlevels',128)\n\talpha=options.getfieldvalue('alpha',1)\n\t# }}}\n\t# {{{ define wich colormap to use \n\ttry:\n\t\tdefaultmap=plt.cm.get_cmap('viridis',colorlevels)\n\texcept AttributeError:\n\t\tprint(\"Viridis can't be found (probably too old Matplotlib) reverting to gnuplot colormap\")\n\t\tdefaultmap=truncate_colormap('gnuplot2',0.1,0.9,colorlevels)\n\tcmap=options.getfieldvalue('colormap',defaultmap)\n\tif options.exist('cmap_set_over'):\n\t\tover=options.getfieldvalue('cmap_set_over','0.5')\n\t\tcmap.set_over(over)\n\tif options.exist('cmap_set_under'):\n\t\tunder=options.getfieldvalue('cmap_set_under','0.5')\n\t\tcmap.set_under(under)\n\toptions.addfield('colormap',cmap)\n\t# }}}\t\n\t# {{{ if plotting only one of several layers reduce dataset, same for surface\n\tif options.getfieldvalue('layer',0)>=1:\n\t\tplotlayer=options.getfieldvalue('layer',0)\n\t\tif datatype==1:\n\t\t\tslicesize=np.shape(elements)[0]\n\t\telif datatype in [2,3]:\n\t\t\tslicesize=len(x)\n\t\tdata=data[(plotlayer-1)*slicesize:plotlayer*slicesize]\n\t# }}}\n\t# {{{ Get the colormap limits\n\tif options.exist('clim'):\n\t\tlims=options.getfieldvalue('clim',[np.amin(data),np.amax(data)])\n\telif options.exist('caxis'):\n\t\tlims=options.getfieldvalue('caxis',[np.amin(data),np.amax(data)])\n\telse:\n\t\tif np.amin(data)==np.amax(data):\n\t\t\tlims=[np.amin(data)-0.5,np.amax(data)+0.5]\n\t\telse:\n\t\t\tlims=[np.amin(data),np.amax(data)]\n\t# }}}\n\t# {{{ Set the spread of the colormap (default is normal\n\tif options.exist('log'):\n\t\tnorm = mpl.colors.LogNorm(vmin=lims[0], vmax=lims[1])\n\telse:\n\t\tnorm = mpl.colors.Normalize(vmin=lims[0], vmax=lims[1])\n\tif options.exist('log'):\n\t\tnorm = mpl.colors.LogNorm(vmin=lims[0], vmax=lims[1])\n\telse:\n\t\tnorm = mpl.colors.Normalize(vmin=lims[0], vmax=lims[1])\n\toptions.addfield('colornorm',norm)\n\t# }}}\n\t\n\t# Plot depending on the datatype\n\t# {{{ data are on elements\n\tif datatype==1:\n\t\tif is2d:\n\t\t\tif options.exist('mask'):\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements,data.mask)\n\t\t\telse:\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements)\n\t\t\ttri=ax.tripcolor(triangles,data,colorlevels,cmap=cmap,norm=norm,alpha=alpha,edgecolors=edgecolor)\n\t\telse:\n\n\n\t\t\t#first deal with colormap\n\t\t\tloccmap = plt.cm.ScalarMappable(cmap=cmap)\n\t\t\tloccmap.set_array([min(data),max(data)])\n\t\t\tloccmap.set_clim(vmin=min(data),vmax=max(data))\n\n\t\t\t#dealing with prism sides\n\t\t\trecface=np.vstack((elements[:,0],elements[:,1],elements[:,4],elements[:,3])).T\n\t\t\teltind=np.arange(0,np.shape(elements)[0])\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,1],elements[:,2],elements[:,5],elements[:,4])).T))\n\t\t\teltind=np.hstack((eltind,np.arange(0,np.shape(elements)[0])))\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,2],elements[:,0],elements[:,3],elements[:,5])).T))\n\t\t\teltind=np.hstack((eltind,np.arange(0,np.shape(elements)[0])))\n\t\t\ttmp = 
np.ascontiguousarray(np.sort(recface)).view(np.dtype((np.void, recface.dtype.itemsize * recface.shape[1])))\n\t\t\t_, idx, recur = np.unique(tmp, return_index=True, return_counts=True)\n\t\t\trecel= recface[idx[np.where(recur==1)]]\n\t\t\trecindex=eltind[idx[np.where(recur==1)]]\n\t\t\tfor i,rectangle in enumerate(recel):\n\t\t\t\trec=zip(x[rectangle],y[rectangle],z[rectangle])\n\t\t\t\tpl3=Poly3DCollection([rec])\n\t\t\t\tcolor=loccmap.to_rgba(data[recindex[i]])\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\n\t\t\t#dealing with prism bases\n\t\t\ttriface=np.vstack((elements[:,0:3],elements[:,3:6]))\n\t\t\teltind=np.arange(0,np.shape(elements)[0])\n\t\t\teltind=np.hstack((eltind,np.arange(0,np.shape(elements)[0])))\n\t\t\ttmp = np.ascontiguousarray(triface).view(np.dtype((np.void, triface.dtype.itemsize * triface.shape[1])))\n\t\t\t_, idx,recur = np.unique(tmp, return_index=True,return_counts=True)\n\t\t\t#we keep only top and bottom elements\n\t\t\ttriel= triface[idx[np.where(recur==1)]]\n\t\t\ttriindex=eltind[idx[np.where(recur==1)]]\n\t\t\tfor i,triangle in enumerate(triel):\n\t\t\t\ttri=zip(x[triangle],y[triangle],z[triangle])\n\t\t\t\tpl3=Poly3DCollection([tri])\n\t\t\t\tcolor=loccmap.to_rgba(data[triindex[i]])\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\t\n\t\t\tax.set_xlim([min(x),max(x)])\n\t\t\tax.set_ylim([min(y),max(y)])\n\t\t\tax.set_zlim([min(z),max(z)])\n\n\t\t\t#raise ValueError('plot_unit error: 3D element plot not supported yet')\n\t\treturn \n\t# }}}\n\t# {{{ data are on nodes\n\telif datatype==2:\n\t\tif is2d:\n\t\t\tif np.ma.is_masked(data):\n\t\t\t\tEltMask=np.asarray([np.any(np.in1d(index,np.where(data.mask))) for index in elements])\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements,EltMask)\n\t\t\telse:\n\t\t\t\ttriangles=mpl.tri.Triangulation(x,y,elements)\n\t\t\ttri=ax.tricontourf(triangles,data,colorlevels,cmap=cmap,norm=norm,alpha=alpha)\n\t\t\tif edgecolor != 'None':\n\t\t\t\tax.triplot(x,y,elements,color=edgecolor)\n\t\telse:\n\t\t\t#first deal with the colormap\n\t\t\tloccmap = plt.cm.ScalarMappable(cmap=cmap)\n\t\t\tloccmap.set_array([min(data),max(data)])\n\t\t\tloccmap.set_clim(vmin=min(data),vmax=max(data))\n\t\t\t\n\t\t\t#deal with prism sides\n\t\t\trecface=np.vstack((elements[:,0],elements[:,1],elements[:,4],elements[:,3])).T\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,1],elements[:,2],elements[:,5],elements[:,4])).T))\n\t\t\trecface=np.vstack((recface,np.vstack((elements[:,2],elements[:,0],elements[:,3],elements[:,5])).T))\n\t\t\ttmp = np.ascontiguousarray(np.sort(recface)).view(np.dtype((np.void, recface.dtype.itemsize * recface.shape[1])))\n\t\t\t_, idx, recur = np.unique(tmp, return_index=True, return_counts=True)\n\t\t\trecel= recface[idx[np.where(recur==1)]]\n\t\t\tfor rectangle in recel:\n\t\t\t\trec=zip(x[rectangle],y[rectangle],z[rectangle])\n\t\t\t\tpl3=Poly3DCollection([rec])\n\t\t\t\tcolor=loccmap.to_rgba(np.mean(data[rectangle]))\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\t\t\t\t\n\t\t\t#deal with prism faces\n\t\t\ttriface=np.vstack((elements[:,0:3],elements[:,3:6]))\n\t\t\ttmp = np.ascontiguousarray(triface).view(np.dtype((np.void, triface.dtype.itemsize * triface.shape[1])))\n\t\t\t_, idx,recur = np.unique(tmp, return_index=True,return_counts=True)\n\t\t\t#we keep only top and bottom elements\n\t\t\ttriel= triface[idx[np.where(recur==1)]]\n\t\t\tfor triangle 
in triel:\n\t\t\t\ttri=zip(x[triangle],y[triangle],z[triangle])\n\t\t\t\tpl3=Poly3DCollection([tri])\n\t\t\t\tcolor=loccmap.to_rgba(np.mean(data[triangle]))\n\t\t\t\tpl3.set_edgecolor(color)\n\t\t\t\tpl3.set_color(color)\n\t\t\t\tax.add_collection3d(pl3)\n\t\t\t\t\n\t\t\tax.set_xlim([min(x),max(x)])\n\t\t\tax.set_ylim([min(y),max(y)])\n\t\t\tax.set_zlim([min(z),max(z)])\n\t\t\t#raise ValueError('plot_unit error: 3D element plot not supported yet')\n\t\treturn\n\t# }}}\n\t# {{{ plotting quiver\n\telif datatype==3:\n\t\tif is2d:\n\t\t\tQ=plot_quiver(x,y,data,options,ax)\n\t\telse:\n\t\t\traise ValueError('plot_unit error: 3D node plot not supported yet')\n\t\treturn\n\t\n\t# }}}\n\t# {{{ plotting P1 Patch (TODO)\n\n\telif datatype==4:\n\t\tprint 'plot_unit message: P1 patch plot not implemented yet'\n\t\treturn\n\n\t# }}}\n\t# {{{ plotting P0 Patch (TODO)\n\n\telif datatype==5:\n\t\tprint 'plot_unit message: P0 patch plot not implemented yet'\n\t\treturn\n\n\t# }}}\n\telse:\n\t\traise ValueError('datatype=%d not supported' % datatype)",
"def layer_show_3D(layers, width, accuracys, title, path):\n fig = plt.figure(dpi=120, figsize=(8, 6))\n ax = Axes3D(fig)\n fit = inp.interp2d(layers, width, accuracys)\n y_n = np.linspace(min(layers), max(layers), 5120)\n x_n = np.linspace(min(width), max(width), 5120)\n epoches_n = fit(y_n, x_n)\n surf = ax.plot_surface(y_n, x_n, epoches_n, cmap=cm.rainbow)\n # plt.title(title)\n ax.set_xlabel('layers number')\n ax.set_ylabel('kernel width')\n ax.set_zlabel('accuracy')\n fig.colorbar(surf, shrink=0.5, aspect=5)\n # plt.tight_layout()\n plt.savefig(path)",
"def plot_3D(self, title=None, fig_size=None, close=True):\r\n # TODO ajouter des titres\r\n combs = list(itertools.combinations(np.arange(self.features.shape[1]), 3))\r\n idx_plot = 1\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n if len(combs) % 2 == 1:\r\n n_col, n_row = (int((len(combs) + 1) / 2), int(len(combs) / 2))\r\n else:\r\n n_col, n_row = (int(len(combs) / 2), int(len(combs) / 2))\r\n for x, y, z in combs:\r\n ax = fig.add_subplot(n_row, n_col, idx_plot, projection='3d')\r\n for target in self.targets:\r\n idx = np.where(self.labels == target)\r\n ax.scatter(self.features[idx, x], self.features[idx, y], self.features[idx, z], label=str(target))\r\n if self.features_names is not None:\r\n ax.set_xlabel(str(self.features_names[x]))\r\n ax.set_ylabel(str(self.features_names[y]))\r\n ax.set_zlabel(str(self.features_names[z]))\r\n if title is not None:\r\n ax.set_title(title[idx_plot - 1])\r\n idx_plot += 1\r\n plt.legend(fontsize='small')\r\n if close:\r\n plt.show()\r\n else:\r\n return fig",
"def configureXrenderers( self ):\n init = ' ' * 8 + \"r%s = new X.renderer3D();\" + '\\n' + ' ' * 8 + \"r%s.container = 'r%s';\\n\"+ ' ' * 8 + 'r%s.init();' + '\\n'\n configuredInit = ''\n div = ' ' * 8 + '<div id=\"r%s\" style=\"background-color: %s; width: %s; height: %s;%s\"></div>' + '\\n'\n configuredDiv = ''\n render = ' ' * 8 + '%sr%s.add(scene);' + '\\n'\n render += ' ' * 8 + 'r%s.camera.position = %s;' + '\\n'\n render += ' ' * 8 + 'r%s.camera.up = %s;' + '\\n'\n render += ' ' * 8 + 'r%s.render();%s' + '\\n\\n'\n configuredRender = ''\n\n # check the current layout\n renderers = []\n\n\n if slicer.app.layoutManager().layout == 15:\n # dual 3d\n renderers.append( 0 )\n renderers.append( 1 )\n elif slicer.app.layoutManager().layout == 19:\n # triple 3d\n renderers.append( 0 )\n renderers.append( 1 )\n renderers.append( 2 )\n else:\n # always take just the main 3d view\n renderers.append( 0 )\n\n threeDViews = slicer.app.layoutManager().threeDViewCount\n\n\n for r in xrange( threeDViews ):\n # grab the current 3d view background color\n threeDWidget = slicer.app.layoutManager().threeDWidget( r )\n threeDView = threeDWidget.threeDView()\n\n if not threeDView.isVisible():\n continue\n\n mrmlViewNode = threeDView.mrmlViewNode()\n bgColor = threeDView.backgroundColor.name() + ';'\n\n # grab the current camera position and up vector\n cameraNodes = slicer.util.getNodes( 'vtkMRMLCamera*' )\n cameraNode = None\n\n for c in cameraNodes.items():\n cameraNode = c[1]\n if cameraNode.GetActiveTag() == mrmlViewNode.GetID():\n # found the cameraNode\n break\n\n if not cameraNode:\n raise Exception( 'Something went terribly wrong..' )\n\n camera = cameraNode.GetCamera()\n cameraPosition = str( list(camera.GetPosition()) )\n cameraUp = str( list(camera.GetViewUp()) )\n\n width = '100%'\n height = '100%'\n float = ''\n begin = '';\n end = '';\n\n if ( len( renderers ) == 2 ):\n # dual 3d\n width = '49.35%'\n if threeDWidget.x == 0:\n # this is the left one\n float += 'position:absolute;left:0;bottom:0;'\n else:\n begin = 'r0.onShowtime = function() {'\n end = '}'\n float += 'position:absolute;right:0;bottom:0;'\n elif ( len( renderers ) == 3 ):\n height = '49.25%'\n # triple 3d\n if r != 0:\n # this is the second row\n width = '49.35%'\n if threeDWidget.x == 0:\n # this is the left one\n begin = ' ' * 8 + 'r0.onShowtime = function() {'\n float += 'position:absolute;left:0;bottom:0;'\n else:\n end = ' ' * 8 + '};'\n float += 'position:absolute;right:0;bottom:0;'\n\n configuredInit += init % ( r, r, r, r )\n configuredRender += render % ( begin, r, r, cameraPosition, r, cameraUp, r, end )\n configuredDiv += div % ( r, bgColor, width, height, float )\n\n\n # .. and configure the X.renderer\n header = self.__header % ( configuredInit )\n footer = self.__footer % ( configuredRender, configuredDiv )\n\n return [header, footer]",
"def viewer(\n self, units='nm', \n draw_edges=True, draw_vertices=True,\n color_by='radius'\n ):\n try:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D \n from matplotlib import cm\n except ImportError:\n print(\"Skeleton.viewer requires matplotlib. Try: pip install matplotlib --upgrade\")\n return\n\n RADII_KEYWORDS = ('radius', 'radii', 'r')\n COMPONENT_KEYWORDS = ('component', 'components', 'c')\n\n fig = plt.figure(figsize=(10,10))\n ax = Axes3D(fig)\n ax.set_xlabel(units)\n ax.set_ylabel(units)\n ax.set_zlabel(units)\n\n # Set plot axes equal. Matplotlib doesn't have an easier way to\n # do this for 3d plots.\n X = self.vertices[:,0]\n Y = self.vertices[:,1]\n Z = self.vertices[:,2]\n\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n mid_x = (X.max()+X.min()) * 0.5\n mid_y = (Y.max()+Y.min()) * 0.5\n mid_z = (Z.max()+Z.min()) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n ### END EQUALIZATION CODE ###\n\n component_colors = ['k', 'deeppink', 'dodgerblue', 'mediumaquamarine', 'gold' ]\n\n def draw_component(i, skel):\n component_color = component_colors[ i % len(component_colors) ]\n\n if draw_vertices:\n xs = skel.vertices[:,0]\n ys = skel.vertices[:,1]\n zs = skel.vertices[:,2]\n\n if color_by in RADII_KEYWORDS:\n colmap = cm.ScalarMappable(cmap=cm.get_cmap('rainbow'))\n colmap.set_array(skel.radii)\n\n normed_radii = skel.radii / np.max(skel.radii)\n yg = ax.scatter(xs, ys, zs, c=cm.rainbow(normed_radii), marker='o')\n cbar = fig.colorbar(colmap)\n cbar.set_label('radius (' + units + ')', rotation=270)\n elif color_by in COMPONENT_KEYWORDS:\n yg = ax.scatter(xs, ys, zs, color=component_color, marker='.')\n else:\n yg = ax.scatter(xs, ys, zs, color='k', marker='.')\n\n if draw_edges:\n for e1, e2 in skel.edges:\n pt1, pt2 = skel.vertices[e1], skel.vertices[e2]\n ax.plot( \n [ pt1[0], pt2[0] ],\n [ pt1[1], pt2[1] ],\n zs=[ pt1[2], pt2[2] ],\n color=(component_color if not draw_vertices else 'silver'),\n linewidth=1,\n )\n\n if color_by in COMPONENT_KEYWORDS:\n for i, skel in enumerate(self.components()):\n draw_component(i, skel)\n else:\n draw_component(0, self)\n\n plt.show()",
"def init_fig():\r\n # Set the axis and plot titles\r\n orbit, = ax.plot([], [], [])\r\n satellite, = ax.plot([], [], [], 'o', color='red')\r\n earth, = ax.plot([], [], [], 'o', color='green')\r\n time_text.set_text('')\r\n ax.set_title(Title_3D, fontsize=22)\r\n ax.set_xlim3d([-lim, lim])\r\n ax.set_xlabel('I\\n[km]')\r\n ax.set_ylim3d([-lim, lim])\r\n ax.set_ylabel('J\\n[km]')\r\n ax.set_zlim3d([-lim, lim])\r\n ax.set_zlabel('K\\n[km]')\r\n # plot Earth\r\n\r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = R_moon * np.outer(np.cos(u), np.sin(v))\r\n y = R_moon * np.outer(np.sin(u), np.sin(v))\r\n z = R_moon * np.outer(np.ones(np.size(u)), np.cos(v))\r\n ax.plot_wireframe(x, y, z, color=\"grey\", label=\"Moon\", linewidth=0.3, rstride=7, cstride=7)\r\n # Must return the list of artists, but we use a pass\r\n # through so that they aren't created multiple times\r\n return orbit, satellite, earth, time_text",
"def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])",
"def plot3d(data):\n assert span1 == span2\n span = span1\n # ---------------------- create the figure and axes ---------------------- #\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n # -- discretize the definition space and compute the function's images --- #\n X, Y = discretise_space([defspace1, defspace2], n=span)\n Z = data\n\n # ----------------------- appearance and plotting ------------------------ #\n ax.set_zlim(np.min(Z) - 0.5, np.max(Z) + 0.5)\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set(xlabel='$W\\_C$', ylabel='$W\\_W$', zlabel=\"Utilité\")#,\n # title='Utilité à {} ticks en fonction de W_W et W_C'.format(ticks))\n\n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, alpha=0.8, #, cmap='binary'\n linewidth=0, antialiased=False, zorder=1)\n\n plt.show()",
"def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()",
"def scatter3d(self, x, y, z, cs, labels, ptype, colorsMap='jet'):\n cm = plt.get_cmap(colorsMap)\n cNorm = matplotlib.colors.Normalize(vmin=min(cs), vmax=max(cs))\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)\n fig = plt.figure(figsize=(9, 7))\n\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x, y, z, c=scalarMap.to_rgba(cs), edgecolor='none')\n scalarMap.set_array(cs)\n cb = fig.colorbar(scalarMap)\n cb.set_label(labels[3])\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n ax.set_zlabel(labels[2])\n fig.suptitle(os.path.basename(self.conf['General']['sim_dir']))\n if self.conf['General']['save_plots']:\n name = labels[-1] + '_' + ptype + '.png'\n path = os.path.join(self.conf['General']['sim_dir'], name)\n fig.savefig(path)\n if self.conf['General']['show_plots']:\n plt.show()\n plt.close(fig)",
"def cell_edges3d_cartesian(self, axis2, axis3):",
"def plot_3d_B(B, data_name=\"B\"):\n size_label = 15\n size_title = 20\n size_legend = 15\n size_tick = 12\n sns.set_style(\"white\")\n\n pca = PCA(n_components=3)\n pca.fit(B.T)\n B_pca = pca.transform(B.T)\n\n idxp = [idx*2 for idx in range(B.shape[1]/2)]\n idxm = [idx*2+1 for idx in range(B.shape[1]/2)]\n\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n for idx in range(len(idxp)):\n ax.plot3D(\n [B_pca[idxp[idx],0],B_pca[idxm[idx],0]],\n [B_pca[idxp[idx],1],B_pca[idxm[idx],1]],\n [B_pca[idxp[idx],2],B_pca[idxm[idx],2]],\n \"gray\",alpha=0.5)\n\n ax.plot3D(B_pca[idxp,0], B_pca[idxp,1], B_pca[idxp,2], \"2\",label=\"Primary\", markersize=10)\n ax.plot3D(B_pca[idxm,0], B_pca[idxm,1], B_pca[idxm,2],\"1\",label=\"Metastatic\", markersize=10)\n\n ax.legend(fancybox=True, framealpha=0.5, prop={\"size\":size_legend})\n\n plt.xlabel(\"PCA 1\", fontsize=size_label)\n plt.ylabel(\"PCA 2\", fontsize=size_label)\n ax.set_zlabel(\"PCA 3\", fontsize=size_label)\n plt.tick_params(labelsize=size_tick)\n plt.title(data_name, fontsize=size_title)\n plt.xlim([-15,10])\n plt.ylim([0,15])\n ax.set_zlim(-8,4)\n ##fig.savefig(\"figures/fig12pcapathway.pdf\", bbox_inches=\"tight\")",
"def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y = np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))",
"def plot3dproj(x, y, z, *args, color=(0,0,0), shadow_dist=1.0, color_proj=None, \n elev_azim=(39,-47), show_labels=False, **kwargs):\n\n if not color_proj:\n color_proj = lighter(color, .6)\n\n\n if np.isscalar(shadow_dist) == 1:\n sdist_x = shadow_dist\n sdist_y = shadow_dist\n sdist_z = shadow_dist\n else:\n sdist_x, sdist_y, sdist_z = shadow_dist\n\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection= '3d')\n \n ax.plot(x, z, *args, zdir='y', zs=sdist_y*np.max(y), color=color_proj, **kwargs)\n ax.plot(y, z, *args, zdir='x', zs=sdist_x*np.min(x), color=color_proj, **kwargs)\n ax.plot(x, y, *args, zdir='z', zs=sdist_z*np.min(z), color=color_proj, **kwargs)\n ax.plot(x, y, z, *args, color=color, **kwargs)\n\n ax.view_init(elev=elev_azim[0], azim=elev_azim[1])\n ax.set_aspect('auto', adjustable='box') \n \n# ratio = 1.0\n# xvals, yvals = ax.get_xlim(), ax.get_ylim()\n# xrange = xvals[1]-xvals[0]\n# yrange = yvals[1]-yvals[0]\n# ax.set_aspect(ratio*(xrange/yrange), adjustable='box')\n fixed_aspect_ratio(1.0)\n\n if not show_labels:\n ax.set_xticklabels([]) \n ax.set_yticklabels([]) \n ax.set_zticklabels([])\n #plt.show()\n\n return ax",
"def dynamic_figure(r=None,z=None,t=None):\n global yt, t0, nt, zcut, theta, cnt, main_title\n\n if r is None:\n RunTimeError(\"r is None\")\n if z is None:\n RunTimeError(\"z is None\")\n if t is None:\n RunTimeError(\"t is None\")\n \n theta = linspace( 0.0, 2.0*scipy.pi, num=100)\n \n R, Z = meshgrid(r,z)\n \n fu = FuModel(nt=100)\n \n ycube = fu.FuCon(R, Z, t) # ycube[:,:,:] DATA CUBE!!!\n\n #-----------------------------FIGURES------------------------------\n fig = figure(figsize=(6,8.5))\n \n t0 = 0\n yt = ycube[:,:,t0]\n \n titulo = 'Fu drug diffusion model: t = %07.2f' % t[t0]\n main_title=fig.text(0.25, 0.95, titulo,\n horizontalalignment='left',\n fontproperties=FontProperties(size=16))\n \n cmap = cm.cool\n subplots_adjust(hspace=0.0)\n \n subplot(211)\n ax = gca()\n im1 = imshow(yt,\n interpolation='bilinear',\n cmap=cmap,\n origin='lower',\n extent=(0,1.0,z[0],z[-1]),\n aspect = 0.23\n )\n \n xlabel(r'R = r/a', fontsize=14)\n ylabel(r'Z = z/l', fontsize=14)\n\n subplot(212,aspect='equal')\n ax = gca()\n\n #\n #--- cylindrical cross-section\n \n RAD, THETA = meshgrid(r,theta)\n Xpos = RAD*cos(THETA)\n Ypos = RAD*sin(THETA)\n \n #\n # top cap of cylinder\n #\n #zcut = len(z)-1\n zcut = 0\n if z[0] != 0.0:\n zcut = int(len(z)/2+0.5)\n title('Z = %f'%z[zcut],fontsize=16)\n #\n # Take a cross-section of the cylinder at some point Z ==> radius array of length len(r) \n # -----\n # Trick\n # -----\n # Now create a matrix[len(theta),len(r)] by replicating the above array len(theta) times\n # along dimension 0\n #\n \n zx = outer( ones(len(theta)), yt[zcut,:] )\n \n im2 = pcolormesh(Xpos, Ypos,\n zx,\n shading='flat',\n cmap=cmap)\n \n xlabel(r'X/a', fontsize=14)\n ylabel(r'Y/a', fontsize=14)\n \n #\n #--- new axis for colorbar\n #\n norm = colors.Normalize(vmin=0.0, vmax=1.0)\n im1.set_norm(norm)\n im2.set_norm(norm)\n im1.add_observer(im2)\n \n pos = ax.get_position()\n l, b, w, h = getattr(pos, 'bounds', pos)\n cax = axes([l+w+0.015, b, 0.025, h]) # setup colorbar axes\n colorbar(im1, cax, orientation='vertical') \n \n manager = get_current_fig_manager()\n \n cnt = 0\n files = []\n nt = len(t)\n #\n #------------------------------------------------------------------\n def updatefig(*args):\n global yt, t0, nt, zcut, theta, cnt, main_title\n \n t0 += 1\n if t0 > nt-1:\n return False\n \n yt = ycube[:,:,t0]\n zx = outer( ones(len(theta)), yt[zcut,:] )\n Nx, Ny = yt.shape\n im1.set_array(yt)\n im2.set_array(ravel(zx[0:Nx-1,0:Ny-1]))\n titulo = 'Fu drug diffusion model: t = %07.2f' % t[t0]\n main_title.set_text(titulo)\n manager.canvas.draw()\n \n #fname = '_tmp%03d.jpg' % t0\n fname = '_tmp%03d.png' % t0\n savefig(fname)\n files.append(fname)\n \n cnt += 1\n return True\n #\n #------------------------------------------------------------------\n\n cnt = 0\n gobject.idle_add(updatefig)\n\n ioff()\n show()\n\n command = \"ffmpeg -r 10 -sameq -i _tmp%03d.png test.mp4\"\n #command = \"mencoder -ovc xvid -xvidencopts \" + \\\n # \"pass=2:bitrate=15999:max_bframes=0 \" + \\\n # \"-oac copy -mf fps=10:type=jpeg 'mf://_tmp*.jpg' -vf harddup -ofps 10 \" + \\\n # \"-noskip -of avi -o outputfile.avi\"\n os.system(command)\n for fname in files: os.remove(fname) #clean up\n\n return True"
] | [
"0.70101243",
"0.6881151",
"0.6811236",
"0.67459273",
"0.66721",
"0.6604098",
"0.6516716",
"0.64925385",
"0.6487245",
"0.64215606",
"0.6397657",
"0.636553",
"0.63541204",
"0.63450056",
"0.6343613",
"0.62795347",
"0.6222476",
"0.6220117",
"0.62002784",
"0.6191841",
"0.6178734",
"0.61262375",
"0.6116756",
"0.61112016",
"0.610366",
"0.61008066",
"0.60609025",
"0.60354537",
"0.6008917",
"0.60067827"
] | 0.6949409 | 1 |
Returns True if there's a repeat in column n | def col_repeat(self, board, n):
this_col = []
for i, x in enumerate(board):
if i % 12 == n:
this_col.append(x)
# println("col "+str(n))
# println(this_col)
for letter in 'abcdef':
if this_col.count(letter) > 1:
# println(this_col)
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_col(sudoku):\r\n for col in range(9):\r\n for row in range(8):\r\n test = sudoku[row][col]\r\n for i in range(row+1,9):\r\n if sudoku[i][col] == test:\r\n return True #returns True is there is more than two of the same numbers in a column\r",
"def check_row(sudoku):\r\n for row in range(len(sudoku)):\r\n for col in range(len(sudoku)):\r\n if sudoku[row].count(sudoku[row][col]) != 1:\r\n return True #returns True is there is more than two of the same numbers in a row\r",
"def is_column_duplicate(self, values, column, value):\n for row in range(0, Nd):\n if values[row][column] == value:\n return True\n return False",
"def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False",
"def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True",
"def in_row(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x != col and n == grid[row][x]:\n return True\n return False",
"def check_norepeat(grid):\n\n def conflict(gr, row, col):\n char = gr[row][col]\n r = gr[row]\n c = [row[col] for row in gr]\n\n rowcount = r.count(char)\n colcount = c.count(char)\n\n if (rowcount != 1) or (colcount != 1):\n return True\n\n return False\n\n norepeat = True\n\n for row in range(len(grid)):\n for col in range(len(grid)):\n if conflict(grid, row, col):\n norepeat = False\n\n return norepeat",
"def block_repeat(self, board, n):\r\n this_block = []\r\n row_start = 3 * (n // 4)\r\n col_start = 3 * (n % 4)\r\n for r in range(3):\r\n this_row = board[(12 * (row_start + r) + col_start):(12 * (row_start + r) + col_start + 3)]\r\n for x in this_row:\r\n this_block.append(x)\r\n # println(n)\r\n # println(this_block)\r\n for letter in 'abcdef':\r\n if this_block.count(letter) > 1:\r\n # println(this_block)\r\n return True\r\n return False",
"def is_repetition(self, num: int = 3) -> bool:\n\n if sum(self.history_board == self.board_fen()) == num:\n return True\n return False",
"def _is_repeating(password):\r\n n = 1\r\n while n < len(password):\r\n if password[n] == password[n-1]:\r\n return True\r\n n += 1\r\n return False",
"def is_fivefold_repetition(self) -> bool:\n return self.is_repetition(3)",
"def full():\r\n\r\n count = 0\r\n for slot in board:\r\n if slot not in '012345678':\r\n count += 1\r\n return count == 9",
"def multipleQueensAlongColumns(board):\n (rows, columns) = (len(board), len(board[0]))\n\n for column in range(columns):\n count = 0\n\n for row in range(rows):\n if board[row][column] == 1:\n count += 1\n if count > 1:\n return True\n\n return False",
"def TestColumn(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[j][i]==SudokuGrid[k][i]:\r\n return False\r\n return True",
"def match_in_a_row(s, n=5, c=None):\n for i, v in enumerate(s[:-(n - 1)]):\n if v != c:\n continue\n if all(v == s[i + j] for j in range(1, n)):\n return True\n return False",
"def is_four_in_column(board, row, column):\n sequence = [board[row][column] for j in range(4)]\n column = [board[i][column] for i in range(len(board))]\n if is_subset(sequence, column):\n return True\n else:\n return False",
"def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True",
"def check_columns(self):\n\t\ti=0\n\t\tfor i in range(len(self.board[i])):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board)):\n\t\t\t\tif self.board[j][i] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True",
"def check_uniqueness_in_rows(board: list):\n for i in range(0, len(board)):\n check_number_in_str = any(map(str.isdigit, board[i]))\n if check_number_in_str:\n numbers = []\n for j in board[i]:\n try:\n numbers.append(int(j))\n except:\n pass\n if len(numbers) != len(set(numbers)):\n result = False\n break\n else:\n result = True\n return result",
"def check_occ(seats:List[str], i: int, j: int) -> bool:\n occupied_count = (seats[i - 1][j] == \"#\") +\\\n (seats[i - 1][j - 1] == \"#\") +\\\n (seats[i - 1][j + 1] == \"#\") +\\\n (seats[i + 1][j] == \"#\") +\\\n (seats[i + 1][j - 1] == \"#\") +\\\n (seats[i + 1][j + 1] == \"#\") +\\\n (seats[i][j - 1] == \"#\") +\\\n (seats[i][j + 1] == \"#\")\n\n return occupied_count >= 4",
"def is_duplicates(trajs):\n if len(trajs) < 2:\n return False \n for j in range(len(trajs)-1):\n for i in range(j+1, len(trajs)):\n R = (trajs[i].get_slice()[:,:2]==trajs[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass\n return False",
"def has_n_same(string, n):\n all_chars = {}\n for char in string: # sum up count of each char\n all_chars.setdefault(char, 0)\n all_chars[char] += 1\n for char, count in all_chars.items(): # check how many appeared n times\n if count == n:\n return True\n return False",
"def _all_elem_same(self, row):\n return len(set(row)) == 1 and row[0] != self.BLANK_CELL_CHAR",
"def isEmptyColumn(self, j, rowns ):\n for i in range(0,rowns) :\n if not self.isEmpty(i,j):\n return False\n return True",
"def repetition_happened(self):\n repetition = False\n if len(self.moves) >= 12:\n if self.moves[-1][0] == self.moves[-5][0] == self.moves[-9][0] and \\\n self.moves[-1][1] == self.moves[-5][1] == self.moves[-9][1] and \\\n self.moves[-2][0] == self.moves[-6][0] == self.moves[-10][0] and \\\n self.moves[-2][1] == self.moves[-6][1] == self.moves[-10][1] and \\\n self.moves[-3][0] == self.moves[-7][0] == self.moves[-11][0] and \\\n self.moves[-3][1] == self.moves[-7][1] == self.moves[-11][1] and \\\n self.moves[-4][0] == self.moves[-8][0] == self.moves[-12][0] and \\\n self.moves[-4][1] == self.moves[-8][1] == self.moves[-12][1]:\n repetition = True\n\n return repetition",
"def col_win(board):\n\tfor col in range(3):\n\t\tif board[0][col] != EMPTY and board[0][col] == board[1][col] == board[2][col]:\n\t\t\treturn True\n\treturn False",
"def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True",
"def multipleQueensAlongRow(board):\n\n (rows, columns) = (len(board), len(board[0]))\n\n for row in range(rows):\n if board[row].count(1) > 1:\n return True\n\n return False",
"def check_rows(self):\n\t\tfor i in range(len(self.board)):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tif self.board[i][j] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True",
"def check_columns(self, win: list) -> bool:\r\n for row in range(self.size):\r\n column = [self.tags[x][row] for x in range(self.size)]\r\n for j in range(len(column) - len(win) + 1):\r\n if win == column[j:j+self.win_condition]:\r\n return True"
] | [
"0.7043869",
"0.6976969",
"0.6894629",
"0.6694764",
"0.661783",
"0.6513339",
"0.64967775",
"0.64636016",
"0.6400933",
"0.6364203",
"0.6359629",
"0.6325058",
"0.63167566",
"0.6282769",
"0.62749267",
"0.62481886",
"0.62461656",
"0.624436",
"0.619371",
"0.618974",
"0.6165413",
"0.61237854",
"0.6105286",
"0.61016023",
"0.6099468",
"0.60930026",
"0.60568005",
"0.6041918",
"0.6028532",
"0.6002102"
] | 0.7761181 | 0 |
Returns True if there's a repeat in 3x3 block n | def block_repeat(self, board, n):
this_block = []
row_start = 3 * (n // 4)
col_start = 3 * (n % 4)
for r in range(3):
this_row = board[(12 * (row_start + r) + col_start):(12 * (row_start + r) + col_start + 3)]
for x in this_row:
this_block.append(x)
# println(n)
# println(this_block)
for letter in 'abcdef':
if this_block.count(letter) > 1:
# println(this_block)
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_fivefold_repetition(self) -> bool:\n return self.is_repetition(3)",
"def answer_ok(a):\n (rightly_positioned, permutated) = a\n if (rightly_positioned + permutated > number_of_positions) \\\n or (rightly_positioned + permutated < len(colours) - number_of_positions):\n return False\n if rightly_positioned == 3 and permutated == 1:\n return False\n return True",
"def McNuggets(n):\n a=0\n b=0\n c=0\n while 6*a + 9*b + 20*c < n:\n for a in range((n//6)+1):\n for b in range((n//9)+1):\n for c in range ((n//20)+1):\n if 6*a + 9*b + 20*c == n:\n return print(True)\n if 6*a + 9*b + 20*c != n:\n return print(False)",
"def check_if_legal(row, blocks):\n counter = 0\n compare_lst = []\n for square in row:\n if square == Black:\n counter += 1\n else:\n if counter > 0:\n compare_lst.append(counter)\n counter = 0\n if counter > 0:\n compare_lst.append(counter)\n if compare_lst == blocks:\n return True\n return False",
"def check_norepeat(grid):\n\n def conflict(gr, row, col):\n char = gr[row][col]\n r = gr[row]\n c = [row[col] for row in gr]\n\n rowcount = r.count(char)\n colcount = c.count(char)\n\n if (rowcount != 1) or (colcount != 1):\n return True\n\n return False\n\n norepeat = True\n\n for row in range(len(grid)):\n for col in range(len(grid)):\n if conflict(grid, row, col):\n norepeat = False\n\n return norepeat",
"def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False",
"def block(array):\r\n grid = []\r\n for z in range(0,7,3): #0,3,6\r\n #vertical down 3\r\n for n in range(0,7,3): #0,3,6\r\n #horiz across 3\r\n line = []\r\n for i in range(3):\r\n for j in range(3):\r\n vert,hor = i+z,j+n\r\n line.append(array[vert][hor])\r\n grid.append(line)\r\n won = True\r\n for i in range(len(grid)):\r\n if won == True:\r\n if len(grid[i]) != len(set(grid[i])):\r\n won = False\r\n else:\r\n pass\r\n else:\r\n break\r\n return won",
"def corrected_clump_tester(clump):\n tester = True\n for block in clump:\n if len(block) >= 3: # Fixed block!\n tester = False\n break\n return tester",
"def McNuggets(n):\n a = 0\n b = 0\n c = 0\n\n while 6*a + 9*b + 20*c < n:\n while 6*a + 9*b + 20*c < n:\n while 6*a + 9*b + 20*c < n:\n c += 1\n if 6*a + 9*b + 20*c == n:\n print a, b, c\n return True\n c = 0\n b += 1\n if 6*a + 9*b + 20*c == n:\n print a, b, c\n return True\n b = 0\n a += 1\n\n\n return False",
"def full():\r\n\r\n count = 0\r\n for slot in board:\r\n if slot not in '012345678':\r\n count += 1\r\n return count == 9",
"def McNuggets(n):\n # Your Code Here\n\n high = n//6+1\n\n if n != 0:\n for i in range(high):\n for j in range(high):\n for k in range(high):\n if 6*k + 9*j + 20*i == n:\n return True\n\n return False\n\n else:\n return False",
"def test_only_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], False, True))\n\n self.assertFalse( f( 1, 0, [0], False, True))\n self.assertTrue( f( 1, 1, [0], False, True))\n self.assertTrue( f( 1, 0, [3], False, True))\n self.assertTrue( f( 1, 3, [0], False, True))\n\n self.assertFalse( f( 1, 1, [2], False, True))\n self.assertFalse( f( 1, 1, [3], False, True))\n self.assertFalse( f( 1, 1, [4], False, True))\n\n self.assertTrue( f( 2, 2, [0], False, True))\n self.assertTrue( f( 2, 1, [3], False, True))\n self.assertTrue( f( 2, 3, [3], False, True))\n self.assertTrue( f( 2, 6, [0], False, True))\n self.assertTrue( f( 2, 0, [6], False, True))\n self.assertFalse( f( 2, 4, [3], False, True))\n\n self.assertFalse( f( 3, 1, [], False, True))\n self.assertFalse( f( 3, 2, [], False, True))\n self.assertFalse( f( 3, 0, [3], False, True))\n self.assertFalse( f( 3, 0, [6], False, True))\n self.assertTrue( f( 3, 3, [], False, True))\n self.assertTrue( f( 3, 2, [3], False, True))\n self.assertTrue( f( 3, 3, [6], False, True))\n self.assertTrue( f( 3, 1, [6], False, True))\n self.assertTrue( f( 3, 0, [9], False, True))\n\n self.assertTrue( f(13,13, [], False, True))\n self.assertTrue( f(13,39, [], False, True))\n self.assertTrue( f(13, 0, [39], False, True))\n self.assertTrue( f(13,15, [24], False, True))\n self.assertTrue( f(13,15, [], False, True))\n self.assertTrue( f(13,12, [3], False, True))\n self.assertFalse( f(13,14, [], False, True))\n\n self.assertFalse( f( 6, 1, [3,6,9], False, True))\n self.assertTrue( f( 7, 1, [3,6,9], False, True))\n self.assertFalse( f( 8, 1, [3,6,9], False, True))",
"def is_repeat(self, state):\n\t\tif not self.state.repeats():\n\t\t\treturn False\n\t\treturn state.repeated_rep() in self.visitedStates",
"def valid(n, board, row, col):\n for i in range(col):\n if board[row][i] == 1:\n return False\n x = row\n y = col\n while x >= 0 and y >= 0:\n if board[x][y] == 1:\n return False\n x -= 1\n y -= 1\n x = row\n y = col\n while x < n and y >= 0:\n if board[x][y] == 1:\n return False\n x += 1\n y -= 1\n return True",
"def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True",
"def repetition_happened(self):\n repetition = False\n if len(self.moves) >= 12:\n if self.moves[-1][0] == self.moves[-5][0] == self.moves[-9][0] and \\\n self.moves[-1][1] == self.moves[-5][1] == self.moves[-9][1] and \\\n self.moves[-2][0] == self.moves[-6][0] == self.moves[-10][0] and \\\n self.moves[-2][1] == self.moves[-6][1] == self.moves[-10][1] and \\\n self.moves[-3][0] == self.moves[-7][0] == self.moves[-11][0] and \\\n self.moves[-3][1] == self.moves[-7][1] == self.moves[-11][1] and \\\n self.moves[-4][0] == self.moves[-8][0] == self.moves[-12][0] and \\\n self.moves[-4][1] == self.moves[-8][1] == self.moves[-12][1]:\n repetition = True\n\n return repetition",
"def is_repetition(self, num: int = 3) -> bool:\n\n if sum(self.history_board == self.board_fen()) == num:\n return True\n return False",
"def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def check_row(sudoku):\r\n for row in range(len(sudoku)):\r\n for col in range(len(sudoku)):\r\n if sudoku[row].count(sudoku[row][col]) != 1:\r\n return True #returns True is there is more than two of the same numbers in a row\r",
"def col_repeat(self, board, n):\r\n this_col = []\r\n for i, x in enumerate(board):\r\n if i % 12 == n:\r\n this_col.append(x)\r\n # println(\"col \"+str(n))\r\n # println(this_col)\r\n for letter in 'abcdef':\r\n if this_col.count(letter) > 1:\r\n # println(this_col)\r\n return True\r\n return False",
"def McNuggets(n):\n \n '''if n == 0:\n return True\n for i in (6, 9, 20):\n if n >= i and McNuggets(n - i):\n return True\n return False\n '''\n \n for a in range(0,n):\n for b in range(0,n):\n for c in range(0,n):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def blocks_are_equal(i, j, types, text, n):\n while i < n and j < n:\n if text[i] == text[j]:\n if is_lms(i, types) and is_lms(j, types):\n return True\n else:\n i += 1\n j += 1\n else:\n return False\n return False",
"def org_clump_tester(clump):\n tester = True\n for block in clump:\n if len(clump) >= 3: # clump should be block!\n tester = False\n break\n return tester",
"def _is_repeating(password):\r\n n = 1\r\n while n < len(password):\r\n if password[n] == password[n-1]:\r\n return True\r\n n += 1\r\n return False",
"def sudoku_isready(A):\r\n x = isqrt(len(A))\r\n if x*x == len(A):\r\n return True\r\n return False",
"def is_correct(sudoku):\n\n # Check for repeated numbers on each row\n for row in sudoku:\n if DIGITS - set(row):\n return False\n\n # Check for repeated numbers on each column\n for column_index in range(9):\n if DIGITS - set([row[column_index] for row in sudoku]):\n return False\n\n # Check for repeated numbers on each box\n for box_number in range(9):\n seen_in_box = set([])\n box_row_base = (box_number / 3) * 3\n box_col_base = (box_number % 3) * 3\n for box_index in range(9):\n seen_in_box.add(sudoku[box_row_base + box_index / 3][box_col_base + box_index % 3])\n if DIGITS - seen_in_box:\n return False\n\n # If none of the previous checks failed, the Sudoku is correct\n return True",
"def is_sequential(self):\n counter = 1\n for r in range(0, 4):\n for c in range(0, 4):\n if counter == 16:\n return True\n elif self.get((r, c)) != counter:\n return False\n counter += 1",
"def is_block_duplicate(self, values, row, column, value):\n i = 3 * (int(row / 3))\n j = 3 * (int(column / 3))\n\n if ((values[i][j] == value)\n or (values[i][j + 1] == value)\n or (values[i][j + 2] == value)\n or (values[i + 1][j] == value)\n or (values[i + 1][j + 1] == value)\n or (values[i + 1][j + 2] == value)\n or (values[i + 2][j] == value)\n or (values[i + 2][j + 1] == value)\n or (values[i + 2][j + 2] == value)):\n return True\n else:\n return False",
"def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))",
"def isthmus1D(cube):\n \n return countComponents26(cube) >= 2;"
] | [
"0.660592",
"0.6549456",
"0.6492034",
"0.6472273",
"0.64570206",
"0.64001465",
"0.6373682",
"0.6357246",
"0.63565445",
"0.6317953",
"0.6310353",
"0.63013834",
"0.62700784",
"0.62675667",
"0.62305206",
"0.6215564",
"0.6173477",
"0.61486995",
"0.6148334",
"0.61291665",
"0.61206055",
"0.609784",
"0.608324",
"0.6074367",
"0.6068296",
"0.60306334",
"0.6008515",
"0.600537",
"0.59904104",
"0.59900564"
] | 0.7840495 | 0 |
Test number of records in Department table | def test_department_model(self):
self.assertEqual(Department.query.count(), 3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(test_id=0)\r\n assert q.count() == 4",
"def test_number_of_records_for_spell(self):\n records = self.admitgen.data.findall('record')\n self.assertEqual(9, len(records),\n 'Incorrect number of records generated')",
"def test_employee_model(self):\n self.assertEqual(Employee.query.count(), 2)",
"def testGetEmpCount(self):\n\tself.assertEqual(Employee.getEmpCount(),1) # test getEmpCount() whether return correct answer",
"def count_records(self, conn, when='After insert') -> None:\n cur = conn.cursor()\n cur.execute(f\"SELECT COUNT(*) FROM {self.table}\")\n print(f\"{when}, the table {self.table} contains {cur.fetchall()[0][0]} records.\")",
"def count_support(projection):\n\tprev_id = -1\n\tsize = 0\n\tfor p in projection:\n\t\tif prev_id != p.id:\n\t\t\tprev_id = p.id\n\t\t\tsize += 1\n\treturn size",
"def test_count(self):\r\n assert TestModel.objects.count() == 12\r\n\r\n q = TestModel.objects(test_id=0)\r\n assert q.count() == 4",
"def test_data_source_soaps_id_dynamic_datas_count_get(self):\n pass",
"def test_query_expression_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n assert q.count() == 4",
"def test_create_deck_count(self):\n create_deck()\n self.assertEqual(Card.objects.count(), 52)",
"def __len__(self):\n with SessionContext(self.SessionClass) as session:\n return session.query(PAW2_DBObject).count()",
"def get_num_records(self):\n return self.__num_records",
"def test_count(db_4_tasks):\n assert(tasks.count() == 4)",
"def employees_count(self, obj):\n return obj.employees_count()",
"def valid_record_count(self) -> int:\n return pulumi.get(self, \"valid_record_count\")",
"def field_length(self, fieldname):\n\t\t# todo: is this right?\n\t\tquery = {fieldname: {'$exists': 1}}\n\t\treturn self.index.collection.find(query).count()",
"def test_asset_assignee_is_created_when_a_department_is_saved(self):\n department = Department.objects.create(name=\"Success\")\n self.assertEqual(len(AssetAssignee.objects.filter(department=department)), 1)",
"def generic_record_count(data_df: Optional[DataFrame]) -> int:\n return len(data_df)",
"def dataCount(self, collectionName):\n count = collectionName.find().count()\n return count",
"def check_row_counts(self):\n\n df_len = len(self.df)\n sql = \"select count(*) from clock_staging;\"\n result = self.session.execute(sql).fetchone()[0]\n if df_len != result:\n raise ValueError(\n \"Count of Staging Table (clock_staging) does not match the CSV file!\"\n )",
"def check_for_diaries():\n if len(DiaryModel.diary) >=1:\n return (len(DiaryModel.diary))",
"def test_cantidad_alumnos_pendientes(self):\n alumnos = self.manipulator.filtrar_alumnos_de_materia_periodo(\n self.dataframe, '01045', '2001-01-01', '2020-10-10')\n cantidad = self.manipulator.cantidad_alumnos_pendientes(\n alumnos, '01045')\n self.assertEqual(cantidad, 1)",
"def testArticleCount(self):\n\n self.articleCount(17)",
"def cantidad_de_entregas(self):\r\n return self.entrega_set.count()",
"def test_b_count_id(self):\n storage = FileStorage()\n count = storage.count(Amenity)\n self.assertEqual(1, count)\n count = storage.count(State)\n self.assertEqual(1, count)\n count = storage.count(City)\n self.assertEqual(1, count)\n count = storage.count(User)\n self.assertEqual(1, count)\n count = storage.count(Place)\n self.assertEqual(1, count)\n count = storage.count(Review)\n self.assertEqual(1, count)",
"def __len__(self):\n return self.dbms.get_nb_fields(self.table, self.db)",
"def count(cls):\n return DBSESSION.query(cls).count()",
"def size(self):\n return len(self.records)",
"def test_query_expression_count(self):\r\n assert TestModel.objects.count() == 12\r\n\r\n q = TestModel.objects(TestModel.test_id == 0)\r\n assert q.count() == 4",
"def test_default_num_products(self):\n products = acme_report.generate_products()\n self.assertEqual(len(products), 30)"
] | [
"0.62203896",
"0.5982845",
"0.59480697",
"0.5862384",
"0.58320177",
"0.5715334",
"0.56990075",
"0.5692022",
"0.566264",
"0.5654501",
"0.56526834",
"0.5624333",
"0.5583349",
"0.55637956",
"0.5535726",
"0.5500543",
"0.5476498",
"0.5445695",
"0.5443733",
"0.5439879",
"0.5430316",
"0.5404104",
"0.5382737",
"0.5379343",
"0.5377401",
"0.53708494",
"0.536368",
"0.5358898",
"0.53586197",
"0.5349976"
] | 0.7332401 | 0 |
Test number of records in Employee table | def test_employee_model(self):
self.assertEqual(Employee.query.count(), 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testGetEmpCount(self):\n\tself.assertEqual(Employee.getEmpCount(),1) # test getEmpCount() whether return correct answer",
"def employees_count(self, obj):\n return obj.employees_count()",
"def test_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(test_id=0)\r\n assert q.count() == 4",
"def count_records(self, conn, when='After insert') -> None:\n cur = conn.cursor()\n cur.execute(f\"SELECT COUNT(*) FROM {self.table}\")\n print(f\"{when}, the table {self.table} contains {cur.fetchall()[0][0]} records.\")",
"def display_count():\n print \"There are %d employees\" % Employee.count",
"def number_of_employees(self) -> object:\n return self._number_of_employees",
"def test_query_expression_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n assert q.count() == 4",
"def test_number_of_records_for_spell(self):\n records = self.admitgen.data.findall('record')\n self.assertEqual(9, len(records),\n 'Incorrect number of records generated')",
"def employee_count(self, employee_count):\n\n self._employee_count = employee_count",
"def generic_record_count(data_df: Optional[DataFrame]) -> int:\n return len(data_df)",
"def get_num_records(self):\n return self.__num_records",
"def test_get_state_comparison_stats_employee_count(self):\n\n key = 'Employee count'\n\n # 1 employee in Kentucky\n for _ in range(1):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.kentucky)\n\n # 2 employees in Texas\n for _ in range(2):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.texas)\n\n # 3 employees in Mississippi\n for _ in range(3):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.mississippi)\n\n expected_output = [('Mississippi', 3), ('Texas', 2)]\n\n stats = get_state_comparison_stats(number=2)\n top_states = self.get_state_stats_for_key(stats, key)\n\n self.assertListEqual(top_states, expected_output,\n f\"'{key}' should contain states with the top x employee counts\")",
"def count_employees(self, count=0):\n if self.children:\n for child in self.children:\n count += 1 + child.count_employees() # recursion: count_employees(child)\n\n if not self.children: # base case but that is already covered in line 48\n return count\n\n return count",
"def test_count(self):\r\n assert TestModel.objects.count() == 12\r\n\r\n q = TestModel.objects(test_id=0)\r\n assert q.count() == 4",
"def test_new_model_creation(self):\n initial_count = Employer.objects.count()\n self.new_employer.save()\n new_count = Employer.objects.count()\n self.assertNotEqual(initial_count, new_count)\n\n self.name2 = 'employe223'\n self.new_employee = Employee(\n name=self.name2, employer=self.new_employer)\n self.new_employee.save()\n self.assertEqual(len(Employee.objects.all()), 1)",
"def number_of_employees(self, number_of_employees: object):\n\n self._number_of_employees = number_of_employees",
"def check_row_counts(self):\n\n df_len = len(self.df)\n sql = \"select count(*) from clock_staging;\"\n result = self.session.execute(sql).fetchone()[0]\n if df_len != result:\n raise ValueError(\n \"Count of Staging Table (clock_staging) does not match the CSV file!\"\n )",
"def test_count(self):\n mapper(User, users)\n q = create_session().query(User)\n self.assert_(q.count()==3)\n self.assert_(q.count(users.c.user_id.in_([8,9]))==2)",
"def test_number_of_nulls(self):\n self.assertEqual(em.number_of_nulls(self.test_df), 3)",
"def test_department_model(self):\n self.assertEqual(Department.query.count(), 3)",
"def test_query_expression_count(self):\r\n assert TestModel.objects.count() == 12\r\n\r\n q = TestModel.objects(TestModel.test_id == 0)\r\n assert q.count() == 4",
"def _check_employee(self):\n\n for record in self:\n\n if record.nik_number:\n # find duplicate nik\n employee_ids = self.search([('id', 'not in', self.ids), ('nik_number', '=', record.nik_number)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Employee Identity Number.\")\n raise ValidationError(error_msg)\n\n # check nik format. it required base_indonesia\n if not record._check_nik(record):\n error_msg = _(\"NIK did not match with Company Code.\")\n raise ValidationError(error_msg)\n\n if record.identification_id:\n employee_ids = self.search([('id', 'not in', self.ids), ('identification_id', '=', record.identification_id)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Identification Number.\")\n raise ValidationError(error_msg)\n\n return True",
"def getNumRows(self) -> int:\n ...",
"def __len__(self):\n with SessionContext(self.SessionClass) as session:\n return session.query(PAW2_DBObject).count()",
"def test_max_number_of_records(self):\n self._config['Number of examples'] = '2'\n result = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertLen(result, 2)",
"def __len__(self, schema):\r\n raise NotImplementedError",
"def test_count(database):\n assert len(database.credentials) == 2",
"def valid_record_count(self) -> int:\n return pulumi.get(self, \"valid_record_count\")",
"def test_count(db_4_tasks):\n assert(tasks.count() == 4)",
"def test_count(self):\n self._test_count_func(count)"
] | [
"0.6884496",
"0.68361723",
"0.6434314",
"0.6396647",
"0.63903815",
"0.6369274",
"0.61661106",
"0.6060245",
"0.5996377",
"0.5972735",
"0.58855087",
"0.58750457",
"0.5851834",
"0.58493793",
"0.5823841",
"0.57912415",
"0.5771921",
"0.57699734",
"0.57473636",
"0.5741941",
"0.57115287",
"0.56397575",
"0.5610063",
"0.5610032",
"0.5605454",
"0.55849236",
"0.5580544",
"0.5576475",
"0.5573339",
"0.5558682"
] | 0.72737366 | 0 |
Test API can create a department (POST request) | def test_department_creation(self):
res = self.client().post(service_url, json={"dep_name": "test dep 4", "description": "testing department 4"})
self.assertEqual(res.status_code, 201)
self.assertIn('dep 4', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post():\n\n logger.debug('Catch POST request by URL /api/departments.')\n args = department_args.parse_args()\n try:\n id_ = ds.add(name=args['name'], email=args['email'])\n created_department = ds.get(id_)\n except IntegrityError:\n return {'message': f\"Department with name {args['name']} already \"\n \"exists.\"}, 404\n except Exception:\n return {'message': \"Can't post department.\"}, 404\n return marshal_departments(created_department), 201",
"def add_department():\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel(department_name).save()\n return Serializer.serialize(response, 201, \"Department added successfully\")",
"def test_perform_create(self):\n data = {\n 'name': 'Jane Joe',\n 'crm': 1234,\n 'email': '[email protected]',\n 'phone': '+55998754128'\n }\n response = self.unath_client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def post(id_=None):\n\n logger.debug('Catch POST request by URL /api/departments/%i.', id_)\n return abort(405)",
"def test_create_valid(self):\n url = '/api/users/'\n username = str(uuid1())[:8]\n data = {\n 'EmailAddress': '{}@dbca.wa.gov.au'.format(username),\n 'DisplayName': 'Doe, John',\n 'SamAccountName': username,\n 'DistinguishedName': 'CN={},OU=Users,DC=domain'.format(username),\n 'AccountExpirationDate': datetime.now().isoformat(),\n 'Enabled': True,\n 'ObjectGUID': str(uuid1()),\n 'GivenName': 'John',\n 'Surname': 'Doe',\n 'Title': 'Content Creator',\n 'Modified': datetime.now().isoformat(),\n }\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n # A DepartmentUser with that email should now exist.\n self.assertTrue(DepartmentUser.objects.filter(email=data['EmailAddress']).exists())",
"def test_create_valid_alt(self):\n url = '/api/users/'\n username = str(uuid1())[:8]\n data = {\n 'email': '{}@dbca.wa.gov.au'.format(username),\n 'name': 'Doe, John',\n 'username': username,\n 'ad_dn': 'CN={},OU=Users,DC=domain'.format(username),\n 'expiry_date': datetime.now().isoformat(),\n 'active': True,\n 'ad_guid': str(uuid1()),\n 'given_name': 'John',\n 'surname': 'Doe',\n 'title': 'Content Creator',\n 'date_ad_updated': datetime.now().isoformat(),\n }\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertTrue(DepartmentUser.objects.filter(email=data['email']).exists())",
"def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))",
"def test_office_creation(self):\n url = '/api/v1/consultorios/'\n data = {\n \"hospital\": \"Angeles Roma\",\n \"office\": \"306\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def test_api_can_create_a_post(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def add_department():\n logger.debug('Routed to /departments/add')\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.add(name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t add department with name %s and email \"%s\". '\n 'Exception: %s', name, email, str(exception))\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n return redirect(url_for('department.show_all_departments'))\n\n titles = ['Name', 'E-mail']\n return render_template('add_department.html',\n title='Add department',\n table_title='Adding new department',\n headers=titles)",
"def test_department_list_view_does_not_require_login(self):\n\n FireDepartment.objects.create(name='Test db', population=0)\n c = Client()\n response = c.get('/departments')\n self.assertEqual(response.status_code, 200)",
"def test_pacient_creation(self):\n url = '/api/v1/pacientes/'\n data = {\n \"name\": \"Victor\",\n \"last_name\": \"Herver\",\n \"mother_name\": \"Segura\",\n \"father_name\": \"Guadalupe Segura Delgado\",\n \"phone\": \"5515336643\",\n \"birthdate\": \"2016-07-16\",\n \"gender\": \"M\",\n \"email\": \"[email protected]\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def test_department_deletion(self):\n res = self.client().delete(service_url, json={\"id_dep\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url+'/1')\n self.assertEqual(result.status_code, 400)",
"def test_create_patient(self):\n url = reverse('patient:patient-list')\n data = {\n \"birth_date\": \"1980-05-21\",\n \"patient_name\": \"testpatient2\",\n \"status\": \"A\",\n \"gender\": \"M\",\n \"patient_contact\" : \"+12342134523\"\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Patient.objects.count(), 2)",
"def test_create_account(self):\n url = reverse('hospital_list')\n data = {'name': 'DabApps','mobile': 846800258}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Hospital.objects.count(), 1)\n self.assertEqual(Hospital.objects.get().name, 'DabApps')\n self.assertEqual(Hospital.objects.get().mobile, 846800258)",
"def add_department():\n form = AddDepartment()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_department = Department(name=form.name.data)\n db.session.add(new_department)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Department already exists!', 'warning')\n return redirect(url_for('add_department'))\n\n flash(f'Department {form.name.data} created!', 'success')\n return redirect(url_for('home'))\n\n flash('Name not defined.', 'warning')\n return render_template('department/department_add.html', form=form)",
"def add_department():\r\n check_admin()\r\n\r\n add_department = True\r\n\r\n form = DepartmentForm()\r\n if form.validate_on_submit():\r\n department = Department(name=form.name.data,\r\n description=form.description.data)\r\n try:\r\n # add department to the database\r\n db.session.add(department)\r\n db.session.commit()\r\n flash('You have successfully added a new department.')\r\n except:\r\n # in case department name already exists\r\n flash('Error: department name already exists.',category='error')\r\n\r\n # redirect to departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n # load department template\r\n return render_template('admin/departments/department.html', action=\"Add\",\r\n add_department=add_department, form=form,\r\n title=\"Add Department\")",
"def test_create_doctor(self):\n test_password = 'ooooooooooooooooooooooo'\n username = faker.first_name()\n data = {'username': username, 'email': faker.email(), 'password1': test_password, 'password2': test_password, 'is_doctor': True}\n response = self.client.post(self.url, data, format='json')\n # import pudb; pudb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Doctor.objects.count(), 1)\n # self.assertEqual(Account.objects.get().name, 'DabApps')",
"def add_department():\n\tcheck_admin()\n\n\tadd_department = True\n\n\tform = DepartmentForm()\n\tif form.validate_on_submit():\n\t\tdepartment = Department(name=form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add department to the database\n\t\t\tdb.session.add(department)\n\t\t\tdb.session.commit()\n\t\t\tflash(\"You have successsfully added a new department.\")\n\t\texcept:\n\t\t\t#incase the department already exists\n\t\t\tflash(\"Error: department already exists.\")\n\t#once the admin creates a new department,they will be redirected to the departments page\n\treturn render_template('admin/departments/department.html',action=\"Add\", add_department= add_department,form=form,title = \"Add Department\")",
"def test_api_can_post(self):\n\n # check person created\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)\n\n # check studio created\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)\n \n # check film created\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_duo_account_post(self):\n pass",
"def test_create_organization(self):\n self.test_login_user()\n url = reverse('MGA:create_organization')\n data = {'name': \"event\"}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_post_adventure(self):\n body = Adventure()\n headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n response = self.client.open(\n \"/adventure\",\n method=\"POST\",\n headers=headers,\n data=json.dumps(body),\n content_type=\"application/json\",\n )\n self.assert200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))",
"def test_perform_create(self):\n\n response = self.client.post(reverse('action-list'), data=self.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['name'], self.data['name'])\n self.assertTrue(len(response.data['institution']), self.data['institution'])",
"def test_create_a_post(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"post\",\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # test with invalid data\n response = self.make_a_request(\n kind=\"post\",\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"Both title and body are required to add a song\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_task(self):\n response =self.client.post(reverse('todos'),self.data,format=\"json\")\n self.assertEqual(201,response.status_code)",
"def test_api_can_get_all_departments(self):\n res = self.client().get(service_url)\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))\n self.assertIn('dep 2', str(res.data))\n self.assertIn('dep 3', str(res.data))",
"def test_pediatrician_creation(self):\n url = '/api/v1/pediatras/'\n data = {\n \"name\": \"Norma\",\n \"last_name\": \"Rodiguez\",\n \"mother_name\": \"Mendoza\",\n \"phone\": \"5598765432\",\n \"mail\": \"[email protected]\",\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)"
] | [
"0.801832",
"0.7346861",
"0.7263449",
"0.7247853",
"0.7096167",
"0.70810187",
"0.6990688",
"0.68851185",
"0.68181944",
"0.6713173",
"0.6675945",
"0.6634122",
"0.66043496",
"0.65878016",
"0.6573157",
"0.6558932",
"0.65565145",
"0.6529389",
"0.65058947",
"0.6498868",
"0.6477474",
"0.6449006",
"0.6408143",
"0.6389381",
"0.6368895",
"0.6361736",
"0.6349649",
"0.6346692",
"0.63282865",
"0.6312194"
] | 0.8781946 | 0 |
Test API can get a list of all departments(GET request) | def test_api_can_get_all_departments(self):
res = self.client().get(service_url)
self.assertEqual(res.status_code, 200)
self.assertIn('dep 1', str(res.data))
self.assertIn('dep 2', str(res.data))
self.assertIn('dep 3', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def getDepartments(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getDepartments()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def get():\n\n logger.debug('Catch GET request by URL /api/departments.')\n departments = ds.get_all()\n return marshal_departments(departments)",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def get_departments():\n\n term = '201931' # Get current term from somewhered\n maxCount = 300\n\n # Call getsubjects\n params = {\n 'dataType': 'json',\n 'term': term,\n 'offset': 1,\n 'max': maxCount\n }\n\n r = requests.get(BASE_URL, params=params)\n\n json = ''\n # Attempt to convert it to JSON\n try:\n json = r.json()\n except:\n print('Error converting depts to JSON')\n\n return json",
"def get_departments() -> list:\n return Department.query.all()",
"def get_departments(self) -> list:\n return self.client.departments.get_all()",
"def departments():\n # gather data from db about all departments\n return render_template(\"departments.html\")",
"def test_agencies_search_list(self):\n\n query = \"department\"\n response = self.client.get(reverse('agencies') + \"?query=\" + query)\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Department of Homeland Security' in content)\n self.assertTrue('Department of Commerce' in content)\n self.assertTrue('Patent and Trademark Office' not in content)",
"def show_all_departments():\n\n logger.debug('Function show_all_departments(). Routed to /departments')\n titles = ['Name', 'Average Salary', 'Employees']\n departments = ds.get_all()\n logger.info('Get list of departments, length is %i', len(departments))\n return render_template('departments.html',\n title='Departments',\n table_title='List of Departments',\n headers=titles,\n departments=departments)",
"def departments(request):\n if 'selected_package' in request.session:\n del request.session['selected_package']\n assert isinstance(request, HttpRequest)\n status, result = api.show_departments()\n return render(\n request,\n 'app/departments.html',\n {\n 'title': 'แผนกและแพ็คเกจ',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )",
"def get(id_):\n\n logger.debug('Catch GET request by URL /api/departments/%i.', id_)\n try:\n department = ds.get(id_)\n if not department.id:\n raise Exception\n except Exception:\n logger.error('There is no department with id %i', id_)\n return {'message': f'There is no department with {id_}.'}, 404\n return marshal_departments(department)",
"def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))",
"def test_department_list_view_does_not_require_login(self):\n\n FireDepartment.objects.create(name='Test db', population=0)\n c = Client()\n response = c.get('/departments')\n self.assertEqual(response.status_code, 200)",
"def list_departments():\n \t check_admin()\n\n #check all the departments in the database and assign them to a variable.departments \n \t departments = Department.query.all()\n\n \t return render_template('admin/departments/departments.html',departments = departments,title = \"Departments\")",
"async def test_list_fleet(client):\n group_param = {}\n params = [('access_token', 'access_token_example'),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('limit', 56)]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))",
"async def getApplicationDepartmentListing(self, page_no=None, page_size=None, q=None):\n payload = {}\n \n if page_no:\n payload[\"page_no\"] = page_no\n \n if page_size:\n payload[\"page_size\"] = page_size\n \n if q:\n payload[\"q\"] = q\n \n\n # Parameter validation\n schema = CatalogValidator.getApplicationDepartmentListing()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. Default is 12.\",\"schema\":{\"type\":\"integer\",\"default\":12},\"required\":false},{\"in\":\"query\",\"name\":\"q\",\"description\":\"Search query with brand name.Use this parameter to search department by name.\",\"schema\":{\"type\":\"string\"},\"required\":false}],\"query\":[{\"in\":\"query\",\"name\":\"page_no\",\"description\":\"The page number to navigate through the given set of results\",\"schema\":{\"type\":\"integer\"},\"required\":false},{\"in\":\"query\",\"name\":\"page_size\",\"description\":\"Number of items to retrieve in each page. Default is 12.\",\"schema\":{\"type\":\"integer\",\"default\":12},\"required\":false},{\"in\":\"query\",\"name\":\"q\",\"description\":\"Search query with brand name.Use this parameter to search department by name.\",\"schema\":{\"type\":\"string\"},\"required\":false}],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", page_no=page_no, page_size=page_size, q=q)\n query_string = await create_query_string(page_no=page_no, page_size=page_size, q=q)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department\", page_no=page_no, page_size=page_size, q=q), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def parseDepartments(self, response):\n sel = Selector(response)\n departments = sel.xpath('//li[@data-school=\"Engineering\" and @data-type=\"department\"]/a')\n for d in departments:\n item = CourseItem(response.request.meta[\"item\"])\n item['department'] = d.xpath('span/text()').get()\n href = d.xpath('@href').get().strip()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseCourses,meta={'item':item}, dont_filter=True)",
"def test_data_org_unit(self):\n url = '/api/options/?list=org_unit'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Org unit names will be present in the response.\n self.assertContains(response, self.dept.name)\n self.assertContains(response, self.div1.name)\n self.assertContains(response, self.div2.name)",
"async def test_get_fleet_maintenance_list(client):\n group_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/maintenance/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_pacient_list(self):\n url = '/api/v1/pacientes/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def test_get_list(self):\n #Validate the response\n resp = self.client.get('/api/v1/purchase-order/', format='json')\n self.assertEqual(resp.status_code, 200)\n \n #Validate the returned data\n resp = resp.data\n self.assertIsInstance(resp, list)\n self.assertEqual(len(resp), 1)",
"def display_departmentlist():\n\tdeptid = 0\n\tprint\n\tprint '[*] Fetching departments list'\n\n\t# call the api function\n\tsupportdepartments = whmcs.getsupportdepartments()\n\tif supportdepartments == None:\n\t\tprint '[x] WHMCS getsupportdepartments API function call failed.'\n\t\tprint '[!] exiting.'\n\t\t_exit(0)\n\n\t# reconnect if ssl or url error orccured\n\twhile supportdepartments == 'sslerror' or supportdepartments == 'urlerror':\n\t\tprint '[!] Re-establishing connection after 5 seconds'\n\t\ttry: time.sleep(5)\n\t\texcept KeyboardInterrupt: print '\\n[!] exiting.'; _exit()\n\t\tsupportdepartments = whmcs.getsupportdepartments()\n\n\tresult = supportdepartments.get('result')\n\ttotalresults = supportdepartments.get('totalresults')\n\tif result != 'success' or totalresults == 0:\n\t\tprint '[x] Unable to find any support departments on (%s).' % (parser.get('whmcs', 'server'))\n\t\tprint '[x] %s.' % supportdepartments.get('message')\n\t\t_exit()\n\n\t#############################\n\t## Display Department List ##\n\t#############################\n\t# Eg: {'departments': { 'department': [{'id': ,'name': ,'awaitingreply': ,'opentickets': ,}, {...}]}}\n\n\tdepartments = supportdepartments.get('departments').get('department')\n\trowformat = '| %-5s | %-20s | %-15s | %-15s |'\n\theader = ('ID', 'Department', 'Awaiting Reply', 'Open Tickets')\n\ttitle = rowformat % header\n\tprint '-' * len(title)\n\tprint title\n\tprint '-' * len(title)\n\tdeptlist = []\n\tfor department in departments:\n\t\tdeptid = department['id']\n\t\tdeptlist.append(deptid)\n\t\tdeptname=department['name']\n\t\tif len(deptname) > 20:\n\t\t\tdeptname = deptname[:20-4]+'...'\n\t\tprint rowformat % (deptid, deptname, department.get('awaitingreply'), department.get('opentickets'))\n\t\tprint '-' * len(title)\n\n\t# Display department ID selection prompt\n\twhile 1:\n\t\ttry:\n\t\t\tdeptid = raw_input('[+] Select Department ID: ')\n\t\texcept KeyboardInterrupt:\n\t\t\tprint '\\n[!] exiting.cleanly.'\n\t\t\texit()\n\n\t\tif type(deptid) != int and deptid not in deptlist:\n\t\t\tprint '[!] Invalid Department ID (%s).' % deptid\n\t\telse:\n\t\t\tbreak\n\treturn deptid",
"def test_pediatrician_list(self):\n url = '/api/v1/pediatras/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def get_data(department_id):\n url='https://covidstats.com.ar/ws/evolucion?comprimido=1&departamentos[]={}'\n with urllib.request.urlopen(url.format(department_id)) as req:\n return json.loads(req.read().decode())",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def get(self, request, department):\n try:\n dep = DepartmentsModel.objects.get(name__iexact=department)\n except DepartmentsModel.DoesNotExist:\n return HttpResponse('Department not found')\n result = {'department': dep.name,\n 'questions': list(),\n 'grades': list(),\n 'stages': list(),\n 'sections': list()}\n\n for grade in list(GradesModel.objects.all()):\n result['grades'].append({'name': grade.name})\n\n for question in list(dep.questions.order_by('id').all()):\n result['questions'].append({'name': question.name, 'stages': question.f_stage.name, 'hint': question.hint})\n result['stages'].append({'name': question.f_stage.name, 'section': question.f_stage.f_section.name})\n result['sections'].append({'name': question.f_stage.f_section.name})\n\n # Remove duplicates from dict\n result['stages'] = [dict(names) for names in set(tuple(item.items()) for item in result['stages'])]\n result['sections'] = [dict(names) for names in set(tuple(item.items()) for item in result['sections'])]\n return JsonResponse(result)",
"def test_office_list(self):\n url = '/api/v1/consultorios/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def test_get_all(self):\n response = self.client.get('/api/v1/parcels')\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"Success\", msg = \"No orders to retrieve\")\n self.assertEqual(response.status_code, 200)",
"def test_department_deletion(self):\n res = self.client().delete(service_url, json={\"id_dep\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url+'/1')\n self.assertEqual(result.status_code, 400)"
] | [
"0.7658562",
"0.76109993",
"0.7608823",
"0.7466871",
"0.72229016",
"0.7188552",
"0.71461266",
"0.7073292",
"0.7052737",
"0.6924505",
"0.68709964",
"0.6787654",
"0.67171353",
"0.6716329",
"0.6634516",
"0.64955044",
"0.6487888",
"0.64635545",
"0.64385515",
"0.641409",
"0.64051193",
"0.63999325",
"0.6362599",
"0.63419676",
"0.63289267",
"0.6321439",
"0.63127273",
"0.62647706",
"0.62446356",
"0.6240759"
] | 0.880523 | 0 |
Test API can get a single department by it's id | def test_api_can_get_department_by_id(self):
res = self.client().get(service_url+'/1')
self.assertEqual(res.status_code, 200)
self.assertIn('dep 1', str(res.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(id_):\n\n logger.debug('Catch GET request by URL /api/departments/%i.', id_)\n try:\n department = ds.get(id_)\n if not department.id:\n raise Exception\n except Exception:\n logger.error('There is no department with id %i', id_)\n return {'message': f'There is no department with {id_}.'}, 404\n return marshal_departments(department)",
"def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)",
"def get_department_by_id(department_id):\n return Department.query.get(department_id)",
"def get(self, department_id):\n department = get_department_by_id(department_id)\n employees = set_employees_by_id(department_id)\n department_obj = {\n \"id\": department.id,\n \"name\": department.name,\n \"employees\": [\n {\n \"id\": employee.id,\n \"name\": employee.name,\n \"salary\": employee.salary,\n }\n for employee in employees\n ],\n }\n return department_obj, 200",
"def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())",
"def department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n employees = Employee.query.filter_by(department_id=department_id)\n return render_template('department/department.html',\n department=department_obj, employees=employees)",
"def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))",
"def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)",
"def test_department_deletion(self):\n res = self.client().delete(service_url, json={\"id_dep\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url+'/1')\n self.assertEqual(result.status_code, 400)",
"def get_data(department_id):\n url='https://covidstats.com.ar/ws/evolucion?comprimido=1&departamentos[]={}'\n with urllib.request.urlopen(url.format(department_id)) as req:\n return json.loads(req.read().decode())",
"def test_api_can_get_all_departments(self):\n res = self.client().get(service_url)\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))\n self.assertIn('dep 2', str(res.data))\n self.assertIn('dep 3', str(res.data))",
"def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))",
"def delete(id_=None):\n\n logger.debug('Catch DELETE request by URL /api/departments/%i.', id_)\n ds.delete(id_)\n return '', 204",
"def set_department_by_id(department_id):\n return Department.query.filter(id=department_id).one()",
"def test_department_detail_view_does_not_require_login(self):\n\n fd = FireDepartment.objects.create(name='Test db', population=0)\n c = Client()\n response = c.get(fd.get_absolute_url())\n self.assertEqual(response.status_code, 200)",
"def get_dessert_by_id(dessert_id: int):\n return get_data_by_id(\"Desserts\", dessert_id)",
"def departments_no_id():\n if request.method == 'GET':\n all_departments = storage.all('Department')\n all_departments = [obj.to_json() for obj in all_departments.values()]\n return jsonify(all_departments)\n\n if request.method == 'POST':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n if req_json.get(\"name\") is None:\n abort(400, 'Missing name')\n Department = CNC.get(\"Department\")\n new_object = Department(**req_json)\n new_object.save()\n return jsonify(new_object.to_json()), 201",
"def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))",
"def post(id_=None):\n\n logger.debug('Catch POST request by URL /api/departments/%i.', id_)\n return abort(405)",
"def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200",
"def test_department_list_view_does_not_require_login(self):\n\n FireDepartment.objects.create(name='Test db', population=0)\n c = Client()\n response = c.get('/departments')\n self.assertEqual(response.status_code, 200)",
"def test_get_one_period(self):\n url = reverse_lazy('api:admin-get-periods', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employee)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('length'), self.test_period.length, response_json)\n self.assertEqual(response_json.get('length_type'), self.test_period.length_type, response_json)\n self.assertIsInstance(response_json.get('employer'), dict, response_json)\n self.assertEqual(response_json.get('employer').get('id'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('employer').get('title'), self.test_employer.title, response_json)\n self.assertEqual(response_json.get('employer').get('picture'), self.test_employer.picture, response_json)\n self.assertIsInstance(response_json.get('payments'), list, response_json)\n self.assertEqual(len(response_json.get('payments')), 3, response_json)",
"def update_department(id_: int):\n logger.debug('Routed to /departments/%i/update', id_)\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.update(id_, name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t update department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n\n logger.info(\n 'Successfully updated department with id %i. It\\'s name = %s, '\n 'email = %s', id_, name, email)\n return redirect(url_for(\"department.show_department\", id_=id_))\n\n department = None\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't get department with id %i\", id_)\n abort(404)\n\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n logger.info('Get department %s', department.name)\n return render_template('edit_department.html',\n title='Update department',\n table_title=f'Updating department: '\n f'{department.name}',\n headers=titles,\n department=department)",
"def test_api_can_get_expense_by_id(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(rv.status_code, 201)\n result_in_json = json.loads(rv.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n results = self.client().get(\n '/expenses/{}'.format(result_in_json['id']), headers=dict(Authorization=\"Bearer \" + access_token))\n res = json.loads(results.data)\n self.assertEqual(results.status_code, 200)\n self.assertEqual('snacks', str(res['name']))",
"async def getDepartments(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getDepartments()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def get():\n\n logger.debug('Catch GET request by URL /api/departments.')\n departments = ds.get_all()\n return marshal_departments(departments)",
"def delete(self, department_id):\n department = get_department_by_id(department_id)\n db.session.delete(department)\n db.session.commit()\n return {}, 204",
"def post():\n\n logger.debug('Catch POST request by URL /api/departments.')\n args = department_args.parse_args()\n try:\n id_ = ds.add(name=args['name'], email=args['email'])\n created_department = ds.get(id_)\n except IntegrityError:\n return {'message': f\"Department with name {args['name']} already \"\n \"exists.\"}, 404\n except Exception:\n return {'message': \"Can't post department.\"}, 404\n return marshal_departments(created_department), 201",
"def department(self) -> object:\n return self._department",
"def delete_department(id):\r\n check_admin()\r\n\r\n department = Department.query.get_or_404(id)\r\n db.session.delete(department)\r\n db.session.commit()\r\n flash('You have successfully deleted the department.')\r\n\r\n # redirect to the departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n return render_template(title=\"Delete Department\")"
] | [
"0.78734773",
"0.7596805",
"0.7467394",
"0.73101795",
"0.72399706",
"0.7006971",
"0.69550437",
"0.6916523",
"0.677674",
"0.67633426",
"0.6730946",
"0.660801",
"0.6550366",
"0.6519978",
"0.6404605",
"0.6393788",
"0.6373831",
"0.63526964",
"0.6323746",
"0.6293206",
"0.62138426",
"0.61168003",
"0.61145467",
"0.6053989",
"0.6043597",
"0.6028237",
"0.6006021",
"0.5999709",
"0.59471494",
"0.5941597"
] | 0.8889061 | 0 |
Test API can edit an existing department (PUT request) | def test_department_can_be_edited(self):
res = self.client().put(service_url, json={"id_dep": 1, "dep_name": "", "description": "this is a new description"})
self.assertEqual(res.status_code, 204)
results = self.client().get(service_url+'/1')
self.assertIn('is a new', str(results.data))
self.assertIn('dep 1', str(results.data)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200",
"def put(self, department_id):\n department = get_department_by_id(department_id)\n department.name = request.json[\"name\"]\n db.session.commit()\n return {}, 200",
"def put():\n\n logger.debug('Catch PUT request by URL /api/departments.')\n return abort(405)",
"def test_update_office(self):\n url = '/api/v1/consultorios/{}/'.format(self.app_client.id)\n\n data = {\n \"hospital\": \"Hospital 2\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def test_update(self):\n doctor = DoctorFactory.create(id=21)\n data = {'name': 'Joe'}\n self.assertNotEqual(doctor.name, data['name'])\n\n response = self.unath_client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_update(self):\n tz = pytz.timezone(settings.TIME_ZONE)\n self.assertFalse(self.user1.o365_licence)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Surname': 'Lebowski',\n 'title': 'Bean Counter',\n 'o365_licence': True,\n\n 'email' : '[email protected]' ,\n 'name' : 'Mike' ,\n 'username' : 'MikeLebowski' ,\n 'ad_guid' : '123',\n 'expiry_date' : '2019-03-12',\n 'given_name' : 'Mike',\n #'Enabled' :'True',\n 'active' : True,\n 'deleted' : False,\n\n\n\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertEqual(user.surname, data['Surname'])\n self.assertEqual(user.title, data['title'])\n\n self.assertEqual(user.name , data['name'])\n self.assertEqual(user.email, data['email'])\n self.assertEqual(user.username, data['username'])\n\n #self.assertEqual(user.expiry_date, data['expiry_date'])\n\n self.assertEqual(user.ad_guid, data['ad_guid'])\n\n self.assertEqual(user.expiry_date, tz.localize(parse(data['expiry_date'])))\n\n self.assertEqual(user.given_name, data['given_name'])\n #self.assertEqual(user.active, data['Enabled'])\n self.assertEqual(user.active, data['active'])\n self.assertEqual(user.ad_deleted, data['deleted'])\n\n self.assertTrue(user.o365_licence)\n self.assertTrue(user.in_sync)",
"def test_edit_office(self):\n access_token = self.generate_admin_token()\n self.create_office()\n update_data = {\n \"name\":\"Office of the president\",\n \"Type\": \"federal\"\n }\n response=self.client.patch(\n \"api/v2/admin/offices/1\",\n data=json.dumps(update_data),\n headers={\"content-type\":\"application/json\",\n \"Authorization\": f\"Bearer {access_token}\"}\n )\n \n self.assertEqual(response.status_code, 200)",
"def update_department(department_id):\n details = request.get_json()\n errors = check_department_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n department_name = details['department_name']\n if DepartmentsModel().get_department_name(department_name):\n return raise_error(\n 400,\n \"{} department already exists\".format(department_name))\n response = DepartmentsModel().edit_department(department_name,\n department_id)\n if response:\n return Serializer.serialize(response, 200,\n 'Department updated successfully')\n return raise_error(404, \"Department not found\")",
"def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))",
"def update_department(id_: int):\n logger.debug('Routed to /departments/%i/update', id_)\n\n if request.method == 'POST':\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n\n try:\n ds.update(id_, name, email)\n except IntegrityError as exception:\n logger.error('Can\\'t update department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n\n session['name'] = name\n session['email'] = email\n flash(f'Department with name {name} already exists.')\n return redirect(request.referrer)\n\n except Exception as exception:\n logger.error('Can\\'t add department with name %s and email %s. '\n 'Exception: %s', name, email, str(exception))\n abort(404)\n\n logger.info(\n 'Successfully updated department with id %i. It\\'s name = %s, '\n 'email = %s', id_, name, email)\n return redirect(url_for(\"department.show_department\", id_=id_))\n\n department = None\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't get department with id %i\", id_)\n abort(404)\n\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n logger.info('Get department %s', department.name)\n return render_template('edit_department.html',\n title='Update department',\n table_title=f'Updating department: '\n f'{department.name}',\n headers=titles,\n department=department)",
"def test_update(self):\n payload = {\n 'name': 'Pecho inclinado',\n 'description': \"New description\",\n 'muscle_group': \"pecho\"\n }\n response = self.client.put(\n '/exercises/{}/'.format(self.exer1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Exercise.objects.get(id=self.exer1.id).name, payload['name'])",
"def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def test_update_pacient(self):\n url = '/api/v1/pacientes/{}/'.format(self.app_client.id)\n\n data = {\n \"name\": \"Ernesto\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)",
"def test_dietitian_edit_account(self):\n\n data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"street-address\": \"33 Blue St\", \n \"city\": \"San Francisco\", \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n result = self.client.post(\"/dietitian/1/account/edit\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully updated\", result.data)",
"def departments_with_id(department_id=None):\n department_obj = storage.get('Department', department_id)\n if department_obj is None:\n abort(404, 'Not found')\n\n if request.method == 'GET':\n return jsonify(department_obj.to_json())\n\n if request.method == 'DELETE':\n department_obj.delete()\n del department_obj\n return jsonify({})\n\n if request.method == 'PUT':\n req_json = request.get_json()\n if req_json is None:\n abort(400, 'Not a JSON')\n department_obj.bm_update(req_json)\n return jsonify(department_obj.to_json())",
"def test_update(self):\n payload = {\n 'id': self.rout1.id,\n 'name': 'Tuesday routine',\n 'exercises': [self.exer1.id]\n }\n response = self.client.put(\n '/routines/{}/'.format(self.rout1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Routine.objects.get(id=self.rout1.id).name, payload['name'])",
"def update_department(department_id):\n\n form = UpdateDepartment()\n department_obj = Department.query.get_or_404(department_id)\n if request.method == 'POST':\n if form.validate_on_submit():\n department_obj.name = form.name.data\n try:\n db.session.commit()\n except IntegrityError:\n flash('Department with this name already exists.', 'warning')\n db.session.rollback()\n return redirect(url_for('update_department',\n department_id=department_obj.id))\n flash('Department name successfully changed!', 'success')\n return redirect(url_for('home'))\n return render_template('department/department_update.html',\n form=form, department=department_obj)",
"def test_editing_patient_goals(self):\n\n data = {\"goal-body\": \"Edited goal body.\"}\n result = self.client.post(\"/goal/1/edit.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Edited goal\", result.data)",
"async def updateAppDepartment(self, department_uid=None, body=\"\"):\n payload = {}\n \n if department_uid:\n payload[\"department_uid\"] = department_uid\n \n\n # Parameter validation\n schema = CatalogValidator.updateAppDepartment()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import ApplicationDepartmentJson\n schema = ApplicationDepartmentJson()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department/{department_uid}\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"Id of the company associated to department custom json.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"application id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"department_uid\",\"description\":\"department id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"Id of the company associated to department custom json.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"application id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"department_uid\",\"description\":\"department id for which the custom_json is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", department_uid=department_uid)\n query_string = await create_query_string(department_uid=department_uid)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"PATCH\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"patch\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/department/{department_uid}\", department_uid=department_uid), query_string, headers, body, exclude_headers=exclude_headers), data=body)",
"def test_patient_edit_account(self):\n\n data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \"email\": \"[email protected]\",\n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n result = self.client.post(\"/patient/1/account/edit\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully updated\", result.data)",
"def test_update_pet(self):\n body = Pet()\n response = self.client.open(\n '/pet',\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_update():\n payload = {'age': 99}\n sample_uuid = get_sample_id()\n response = requests.put(f'http://localhost:5000/api/persons/{sample_uuid}', json=payload)\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data",
"def test_edit_post(api_client):\n post_id = 1\n body = {\n \"id\": 1,\n \"title\": \"new_title\",\n \"body\": \"bar\",\n \"userId\": 1\n }\n r = api_client.put(path=f\"/posts/{post_id}\", data=body).json()\n assert r[\"title\"] == body[\"title\"]",
"def test_patient_edit_account_as_patient(self):\n\n data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \"email\": \"[email protected]\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\", \n \"birthdate\":\"1984-05-05\"}\n\n result = self.client.post(\"/patient/1/account/edit\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully updated\", result.data)",
"def test_partial_update(self):\n doctor = DoctorFactory.create(id=22)\n data = {'name': 'Joe'}\n self.assertNotEqual(doctor.name, data['name'])\n\n response = self.unath_client.patch(reverse('doctor-detail', args=[22]), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.patch(reverse('doctor-detail', args=[22]), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))",
"def test_department_deletion(self):\n res = self.client().delete(service_url, json={\"id_dep\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url+'/1')\n self.assertEqual(result.status_code, 400)",
"def test_successful_article_edit(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['message'], \"Article has been successfully updated.\")",
"def test_update_client(self):\n url = '/api/v1/pediatras/{}/'.format(self.app_client.id)\n\n data = {\n \"name\": \"Ernesto\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)"
] | [
"0.7635879",
"0.75541437",
"0.7310086",
"0.71458596",
"0.7124142",
"0.682343",
"0.6818235",
"0.6771202",
"0.6658555",
"0.66222185",
"0.6613502",
"0.6584068",
"0.6572079",
"0.65327775",
"0.6495819",
"0.6454379",
"0.6428005",
"0.64252895",
"0.6404784",
"0.639821",
"0.63961864",
"0.638543",
"0.638503",
"0.6330922",
"0.6312269",
"0.6300279",
"0.6288513",
"0.6266745",
"0.6253429",
"0.62250084"
] | 0.86693555 | 0 |
Test API can delete an existing department. (DELETE request) | def test_department_deletion(self):
res = self.client().delete(service_url, json={"id_dep": 1})
self.assertEqual(res.status_code, 204)
# Test to see if it exists, should return a 400
result = self.client().get(service_url+'/1')
self.assertEqual(result.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(id_=None):\n\n logger.debug('Catch DELETE request by URL /api/departments/%i.', id_)\n ds.delete(id_)\n return '', 204",
"def delete(self, department_id):\n department = get_department_by_id(department_id)\n db.session.delete(department)\n db.session.commit()\n return {}, 204",
"def test_employee_deletion(self):\n res = self.client().delete(service_url_emp, json={\"id_emp\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url_emp+'/1')\n self.assertEqual(result.status_code, 400)",
"def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)",
"def test_department_creation(self):\n res = self.client().post(service_url, json={\"dep_name\": \"test dep 4\", \"description\": \"testing department 4\"})\n self.assertEqual(res.status_code, 201)\n self.assertIn('dep 4', str(res.data))",
"def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))",
"def test_delete(self):\n query = {\"id\":0}\n result = self.app.delete('/testParaDelete', query_string=query)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, 'ok')",
"def test_destroy(self):\n DoctorFactory.create(id=15)\n response = self.unath_client.get(reverse('doctor-detail', args=[15]))\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.get(reverse('doctor-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(len(response.data), 1)\n\n response = self.client.delete(reverse('doctor-detail', args=[15]))\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n response = self.client.get(reverse('doctor-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)",
"def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200",
"def delete_department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n db.session.delete(department_obj)\n db.session.commit()\n flash(f'Department {department_obj.name} successfully deleted.', 'success')\n return redirect(url_for('home'))",
"def test_delete_organization(self):\n pass",
"def delete_department(id):\r\n check_admin()\r\n\r\n department = Department.query.get_or_404(id)\r\n db.session.delete(department)\r\n db.session.commit()\r\n flash('You have successfully deleted the department.')\r\n\r\n # redirect to the departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n return render_template(title=\"Delete Department\")",
"def test_deleting_patient_goals(self):\n\n data = {\"goal\": 1}\n result = self.client.post(\"/delete-goal\", data=data)\n goal = Goal.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(goal)",
"def test_department_can_be_edited(self):\n res = self.client().put(service_url, json={\"id_dep\": 1, \"dep_name\": \"\", \"description\": \"this is a new description\"})\n self.assertEqual(res.status_code, 204)\n results = self.client().get(service_url+'/1')\n self.assertIn('is a new', str(results.data))\n self.assertIn('dep 1', str(results.data))",
"def test_delete_pet(self):\n headers = [('api_key', 'api_key_example')]\n response = self.client.open(\n '/pet/{petId}'.format(pet_id=789),\n method='DELETE',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_duo_account_delete(self):\n pass",
"def delete_department(department_id):\n response = DepartmentsModel().get_department_by_id(department_id)\n if response:\n DepartmentsModel().delete(department_id)\n return Serializer.serialize(response, 200,\n \"Department deleted successfully\")\n return raise_error(404, 'Department not found')",
"def test_deleting_patient_posts(self):\n\n data = {\"post\": 1}\n result = self.client.post(\"/delete-post\", data=data)\n post = Post.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(post)",
"def test_calendar_view_delete(self):\n # delete calendar\n request = self.factory.post('/module/calendar/del/1/', follow=True)\n request.user = self.user\n request.session = {}\n response = calendar_del(request, 1)\n self.assertEqual(response.status_code, 302)\n\n request = self.factory.post('/module/calendar/del/', {'select': '1'})\n request.user = self.user\n request.session = {}\n response = calendar_del(request, 0)\n self.assertEqual(response.status_code, 302)",
"def test_delete(self):\n pass",
"def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )",
"def test_delete(self):\n activity = Activity.objects.first()\n url, parsed = self.prepare_urls('v1:activity-detail', subdomain=self.company.subdomain, kwargs={'pk': activity.id})\n \n response = self.client.delete(url, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.delete(url, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n url, parsed = self.prepare_urls('v1:activity-list', subdomain=self.company.subdomain)\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(self.activities_count-1, content['count']) # deleted 1 activity",
"def test_delete_patient(self):\n response = self.client.delete(\n reverse('patient:patient-detail', kwargs={'pk': Patient.objects.get().id}))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Patient.objects.count(), 0)",
"def test_category_delete(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.delete('/api/v2/categories/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Deleted!')\n self.assertEqual(res.status_code, 200)",
"def test_delete_a_todo(self):\n # hit the API endpoint\n response = self.delete_a_todo(1)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n # test with invalid data\n response = self.delete_a_todo(100)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_delete_question(self):\n\n question = {\n \"title\" : \"Blue\",\n \"question\": \"How do I refactor tests with database?\"\n }\n\n self.app.post('/api/v1/questions',\n data=json.dumps(question),\n content_type='application/json'\n )\n question_id = id_generator(\"Blue\")\n res = self.app.delete('/api/v1/questions/'+str(question_id))\n self.assertEqual(res.status_code, 200)",
"def test_DELETE(self):\n if not self.url:\n return\n response = self.client.delete(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_calendar_user_view_delete(self):\n # delete calendar_setting\n request = self.factory.post('/module/calendar_user/del/4/', follow=True)\n request.user = self.user\n request.session = {}\n #response = calendar_user_del(request, 4)\n #self.assertEqual(response.status_code, 302)\n\n request = self.factory.post('/module/calendar_user/del/', {'select': '1'})\n request.user = self.user\n request.session = {}\n #response = calendar_user_del(request, 0)\n #self.assertEqual(response.status_code, 302)",
"def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass",
"def test_deleteorganizations_item(self):\n pass"
] | [
"0.7808423",
"0.7420233",
"0.73379195",
"0.7330726",
"0.7278499",
"0.7205929",
"0.72051007",
"0.71552616",
"0.71247154",
"0.70944077",
"0.7052728",
"0.7027502",
"0.6964331",
"0.6899749",
"0.6838181",
"0.6759945",
"0.67122805",
"0.66914827",
"0.66903144",
"0.6680656",
"0.6645842",
"0.6645624",
"0.66351134",
"0.6633428",
"0.66066366",
"0.6594867",
"0.6574351",
"0.656239",
"0.65587705",
"0.6554345"
] | 0.8739359 | 0 |