query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
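Each row below pairs a natural-language query (a docstring-style description) with its positive code document, a metadata objective block, 30 hard-negative code snippets and their similarity scores, plus the positive document's score and rank. A minimal sketch of iterating rows with this schema, assuming the data is exported as JSON Lines with exactly these field names (the file name below is a placeholder, not given in this dump):

```python
import json

# Placeholder file name: the dump above does not name the actual data file.
with open("rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        query = row["query"]                       # docstring-style description
        document = row["document"]                 # positive code snippet
        negatives = row["negatives"]               # 30 hard-negative code snippets
        negative_scores = [float(s) for s in row["negative_scores"]]
        document_score = float(row["document_score"])
        document_rank = int(row["document_rank"])  # 0 or 1 in the rows shown here
        break  # inspect only the first row
```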
Gets the end_action of this SubscriptionSuspensionCreate. When the suspension reaches the planned end date the end action will be carried out. This action is only executed when the suspension is ended automatically based on the end date.
|
def end_action(self):
    return self._end_action
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def end_action(self, end_action):\n if end_action is None:\n raise ValueError(\"Invalid value for `end_action`, must not be `None`\")\n\n self._end_action = end_action",
"def planned_end_date(self):\n return self._planned_end_date",
"def end(self):\n # type: () -> datetime\n return self._end",
"def end(self) -> datetime:\n return self._end",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self.__end_date",
"def end_date(self) -> str:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> str:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> str:\n return self._end_date",
"def end_date(self) -> Optional[str]:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> Optional[str]:\n return pulumi.get(self, \"end_date\")",
"def get_end(self):\n return self.start + timedelta(minutes=self.duration)",
"def get_outbound_statements_end_date(self):\n return self.get_text_from_element(self.outbound_statements_end_date_locator, True)",
"def get_end(self):\n return self._start + self._duration",
"def end_date_time(self) -> Optional[str]:\n return pulumi.get(self, \"end_date_time\")",
"def get_inbound_statements_end_date(self):\n return self.get_text_from_element(self.inbound_statements_end_date_locator, True)",
"def get_inbound_statement_details_end_date(self):\n return self.get_text_from_element(self.inbound_statements_details_end_date_locator, True)",
"def end_time(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"end_time\")",
"def end_time(self):\n return self._end_time",
"def end_time(self):\n return self._end_time",
"def end_time(self):\n return self._end_time",
"def end_time(self):\n return self._end_time",
"def end_time(self):\n return self._end_time",
"def end_time(self):\n return self._end_time",
"def end_time(self) -> str:\n return self._end_time",
"def end_time(self) -> Optional[str]:\n return pulumi.get(self, \"end_time\")",
"def end(self):\n return self.properties.get(\"end\", DateTimeTimeZone())",
"def cal_end(self):\n return self.datetime_end"
] |
[
"0.6143122",
"0.5793038",
"0.5673183",
"0.55080134",
"0.5461245",
"0.5461245",
"0.5461245",
"0.545847",
"0.54330206",
"0.54330206",
"0.5424896",
"0.5364844",
"0.5364844",
"0.53592825",
"0.5262286",
"0.5255115",
"0.5213438",
"0.5212324",
"0.51485586",
"0.5137694",
"0.51369447",
"0.51369447",
"0.51369447",
"0.51369447",
"0.51369447",
"0.51369447",
"0.5121121",
"0.51137406",
"0.5097673",
"0.5084742"
] |
0.6580785
|
0
|
Sets the end_action of this SubscriptionSuspensionCreate. When the suspension reaches the planned end date the end action will be carried out. This action is only executed when the suspension is ended automatically based on the end date.
|
def end_action(self, end_action):
    if end_action is None:
        raise ValueError("Invalid value for `end_action`, must not be `None`")
    self._end_action = end_action
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n self._end_date = end_date",
"def end_date(self, end_date):\n if end_date is None:\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\")\n\n self._end_date = end_date",
"def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time",
"def set_statement_end_date(self, end_date):\n end_date_to_set = None\n if end_date != \"\":\n end_date_to_set = end_date\n else:\n end_date_to_set = self.get_date(last_day_of_last_month=True)\n self.set_value_into_input_field(self.statement_end_date_locator, end_date_to_set)\n return end_date_to_set",
"def end(self, end):\n # type: (datetime) -> None\n\n if end is not None:\n if not isinstance(end, datetime):\n raise TypeError(\"Invalid type for `end`, type has to be `datetime`\")\n\n self._end = end",
"def end_action(self):\n return self._end_action",
"def end_date(self, end_date: str):\n if end_date is None:\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n\n self._end_date = end_date",
"def end(self, end):\n\n self._end = end",
"def end(self, end):\n\n self._end = end",
"def end(self, end):\n\n self._end = end",
"def end(self, end):\n if end is None:\n self._set('end', end)\n else:\n try:\n self._set('end', Timestamp.to_datetime(end))\n except (TypeError, ValueError) as e:\n raise ValidationError(e)",
"def end_date(self, end_date):\n if self.local_vars_configuration.client_side_validation and end_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n end_date is not None and len(end_date) < 1):\n raise ValueError(\"Invalid value for `end_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._end_date = end_date",
"def end_time(self, end_time):\n\n self._end_time = end_time",
"def end_time(self, end_time):\n\n self._end_time = end_time",
"def end_time(self, end_time):\n\n self._end_time = end_time",
"def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)",
"def end(self, end):\n if end is None:\n raise ValueError(\"Invalid value for `end`, must not be `None`\") # noqa: E501\n\n self._end = end",
"def end_time(self, end_time):\n self._end_time = end_time",
"def end_time(self, end_time):\n self._end_time = end_time",
"def end_time_stamp(self, end_time_stamp):\n\n self._end_time_stamp = end_time_stamp",
"def end(self, end):\n if self._configuration.client_side_validation and end is None:\n raise ValueError(\"Invalid value for `end`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n end is not None and end < 0): # noqa: E501\n raise ValueError(\"Invalid value for `end`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._end = end",
"def end_time(self, end_time):\n if end_time is None:\n raise ValueError(\"Invalid value for `end_time`, must not be `None`\") # noqa: E501\n\n self._end_time = end_time"
] |
[
"0.5936373",
"0.5936373",
"0.5936373",
"0.5936373",
"0.5936373",
"0.5936373",
"0.5936373",
"0.5936373",
"0.5866914",
"0.57698655",
"0.57370526",
"0.57332045",
"0.5727915",
"0.56884915",
"0.5685108",
"0.56798905",
"0.56798905",
"0.56798905",
"0.56483084",
"0.5638522",
"0.5519646",
"0.5519646",
"0.5519646",
"0.54898554",
"0.5466804",
"0.5454662",
"0.5454662",
"0.5422284",
"0.5296619",
"0.5288491"
] |
0.7567335
|
0
|
Gets the note of this SubscriptionSuspensionCreate. The note may contain some internal information for the suspension. The note will not be disclosed to the subscriber.
|
def note(self):
    return self._note
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_note(self):\r\n return self.__note",
"def get_note(self):\n return self.note",
"def note(self) -> str:\n return self.__note",
"def note(self) -> str:\n return self.__note",
"def note(self) -> str:\n return self.__note",
"def note(self):\n if self._simplecell:\n self.fetch()\n return self._note",
"def note(self):\n return self.title_str",
"def new_note(self, context, payload):\n\n note = PipedriveCRMNote( \n note_id= payload[\"current\"][\"id\"],\n content= payload[\"current\"][\"content\"],\n deal_id= payload[\"current\"][\"deal_id\"],\n pin_note_on_specified_deal= payload[\"current\"][\"pinned_to_deal_flag\"],\n organization_id= payload[\"current\"][\"org_id\"],\n pin_note_on_specified_organization= payload[\"current\"][\"pinned_to_organization_flag\"],\n person_id= payload[\"current\"][\"person_id\"],\n pin_note_on_specified_person= payload[\"current\"][\"pinned_to_person_flag\"],\n lead_id= payload[\"current\"][\"lead_id\"],\n pin_note_on_specified_lead= payload[\"current\"][\"pinned_to_lead_flag\"]\n )\n return note.__dict__",
"def getNote(self):\n return (self.noteName[0], self.accidental, self.octave)",
"def note(self, note_id):\r\n return notes.Note(self, note_id)",
"def note(self, key=None):\n if key is None:\n raise SimplenoteError('Unable to get note: Key not given')\n url = self.base_url + 'data/' + key\n note = self._process_query(url)\n return note",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Note':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = NoteArgs.__new__(NoteArgs)\n\n __props__.__dict__[\"attestation_authority\"] = None\n __props__.__dict__[\"base_image\"] = None\n __props__.__dict__[\"build_type\"] = None\n __props__.__dict__[\"compliance\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"deployable\"] = None\n __props__.__dict__[\"discovery\"] = None\n __props__.__dict__[\"dsse_attestation\"] = None\n __props__.__dict__[\"expiration_time\"] = None\n __props__.__dict__[\"kind\"] = None\n __props__.__dict__[\"long_description\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"note_id\"] = None\n __props__.__dict__[\"package\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"related_url\"] = None\n __props__.__dict__[\"sbom\"] = None\n __props__.__dict__[\"sbom_reference\"] = None\n __props__.__dict__[\"short_description\"] = None\n __props__.__dict__[\"spdx_file\"] = None\n __props__.__dict__[\"spdx_package\"] = None\n __props__.__dict__[\"spdx_relationship\"] = None\n __props__.__dict__[\"update_time\"] = None\n __props__.__dict__[\"upgrade\"] = None\n __props__.__dict__[\"vulnerability_assessment\"] = None\n __props__.__dict__[\"vulnerability_type\"] = None\n return Note(resource_name, opts=opts, __props__=__props__)",
"def _set_note(self):\n if self._report_data and self._report_data['note']:\n note = self._report_data['note']\n if note.get('createDateTime'):\n note['createDateTime'] = Report._to_report_datetime(note.get('createDateTime'))\n if note.get('expiryDateTime') and str(note['expiryDateTime']).startswith('0001-01-01'):\n note['expiryDateTime'] = ''\n elif note.get('expiryDateTime'):\n note['expiryDateTime'] = Report._to_report_datetime(note.get('expiryDateTime'), False)\n if note.get('effectiveDateTime'):\n note['effectiveDateTime'] = Report._to_report_datetime(note.get('effectiveDateTime'))\n if note.get('givingNoticeParty') and note['givingNoticeParty'].get('phoneNumber'):\n phone = note['givingNoticeParty'].get('phoneNumber')\n note['givingNoticeParty']['phoneNumber'] = phone[0:3] + '-' + phone[3:6] + '-' + phone[6:]",
"def getNoteName(self):\n return self.noteName",
"def notification(self):\n return self._notification",
"def note(self, note_id):\n return Note(self, note_id)",
"def create_a_note(self, data):\n return self.client._post(\"/notes\", json=data)",
"def create_note(self, owner, title, text, note_type, important):\r\n note = self.create(owner=owner, title=title, text=text, note_type=note_type, important=important)\r\n return note",
"def notes(self):\n return Notes(self)",
"def note(self, note_id):\r\n return TicketNote(self, note_id)",
"def note_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"note_id\")",
"def notes(self) -> Optional[str]:\n return pulumi.get(self, \"notes\")",
"def notes(self):\r\n return notes.Notes(self)",
"def notes(self):\r\n return notes.Notes(self)",
"def getNotes(self):\n return self.__notes",
"def notes(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"notes\")",
"def notes(self) -> str:\n return self._notes",
"def notes(self):\n return self.__notes",
"def __repr__(self):\n return str(self.notes)",
"def createNote(self, authenticationToken, note):\r\n self.send_createNote(authenticationToken, note)\r\n return self.recv_createNote()"
] |
[
"0.7117262",
"0.7055138",
"0.6737177",
"0.6737177",
"0.6737177",
"0.6518097",
"0.6388679",
"0.6200899",
"0.6169843",
"0.6135926",
"0.5952921",
"0.5938423",
"0.59052694",
"0.58353114",
"0.58256793",
"0.58082664",
"0.5805374",
"0.5785396",
"0.5724418",
"0.5721736",
"0.5707965",
"0.5658687",
"0.56440514",
"0.56440514",
"0.5596916",
"0.5585631",
"0.5581166",
"0.5551309",
"0.5541932",
"0.5538387"
] |
0.7211165
|
0
|
Sets the note of this SubscriptionSuspensionCreate. The note may contain some internal information for the suspension. The note will not be disclosed to the subscriber.
|
def note(self, note):
    if note is not None and len(note) > 300:
        raise ValueError("Invalid value for `note`, length must be less than or equal to `300`")
    self._note = note
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _set_note(self):\n if self._report_data and self._report_data['note']:\n note = self._report_data['note']\n if note.get('createDateTime'):\n note['createDateTime'] = Report._to_report_datetime(note.get('createDateTime'))\n if note.get('expiryDateTime') and str(note['expiryDateTime']).startswith('0001-01-01'):\n note['expiryDateTime'] = ''\n elif note.get('expiryDateTime'):\n note['expiryDateTime'] = Report._to_report_datetime(note.get('expiryDateTime'), False)\n if note.get('effectiveDateTime'):\n note['effectiveDateTime'] = Report._to_report_datetime(note.get('effectiveDateTime'))\n if note.get('givingNoticeParty') and note['givingNoticeParty'].get('phoneNumber'):\n phone = note['givingNoticeParty'].get('phoneNumber')\n note['givingNoticeParty']['phoneNumber'] = phone[0:3] + '-' + phone[3:6] + '-' + phone[6:]",
"def notes(self, notes):\n\n self._notes = notes",
"def notes(self, notes):\n\n self._notes = notes",
"def notes(self, notes):\n\n self._notes = notes",
"def notes(self, notes):\n\n self._notes = notes",
"def notes(self, notes):\n\n self._notes = notes",
"def notes(self, notes: str):\n self._notes = notes",
"def update_note(self, new_note):\r\n self.__note = new_note",
"def add_note(self, note):\n now = timezone.now()\n self.logger.info(\n \"(%s)(%s)(%s) - %s\" % (now, self.task_type, self.task.uuid, note)\n )\n note = \"%s - (%s)\" % (note, now)\n self.task.add_task_note(note)",
"def set_dispute_note(self, note_prefix):\n note_to_set = note_prefix + self.random_string_generator(6)\n self.set_value_into_input_field(self.new_note_text_field_locator, note_to_set)\n return note_to_set",
"def notes(self, notes):\n if (self.local_vars_configuration.client_side_validation and\n notes is not None and len(notes) > 255):\n raise ValueError(\"Invalid value for `notes`, length must be less than or equal to `255`\") # noqa: E501\n\n self._notes = notes",
"def notes(self, notes):\n if notes is None:\n raise ValueError(\"Invalid value for `notes`, must not be `None`\") # noqa: E501\n\n self._notes = notes",
"def addNote(self, note):\n logger.debug(\"Func: addNote\")\n\n if not self._currentBaseSceneName:\n logger.warning(\"No Base Scene file selected\")\n return\n if self._currentVersionIndex == -1:\n logger.warning(\"No Version selected\")\n return\n now = datetime.datetime.now().strftime(\"%d/%m/%Y-%H:%M\")\n self._currentNotes = \"%s\\n[%s] on %s\\n%s\\n\" % (self._currentNotes, self.currentUser, now, note)\n self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Note\"] = self._currentNotes\n self._dumpJson(self._currentSceneInfo, self._baseScenesInCategory[self._currentBaseSceneName])",
"def note(self) -> str:\n return self.__note",
"def note(self) -> str:\n return self.__note",
"def note(self) -> str:\n return self.__note",
"def note(self):\n return self._note",
"def create_note(self, owner, title, text, note_type, important):\r\n note = self.create(owner=owner, title=title, text=text, note_type=note_type, important=important)\r\n return note",
"def createNote(self, authenticationToken, note):\r\n pass",
"def note(self, note_id):\n return Note(self, note_id)",
"def setNotes(self, *args):\n return _libsbml.SBase_setNotes(self, *args)",
"def new_note(self, context, payload):\n\n note = PipedriveCRMNote( \n note_id= payload[\"current\"][\"id\"],\n content= payload[\"current\"][\"content\"],\n deal_id= payload[\"current\"][\"deal_id\"],\n pin_note_on_specified_deal= payload[\"current\"][\"pinned_to_deal_flag\"],\n organization_id= payload[\"current\"][\"org_id\"],\n pin_note_on_specified_organization= payload[\"current\"][\"pinned_to_organization_flag\"],\n person_id= payload[\"current\"][\"person_id\"],\n pin_note_on_specified_person= payload[\"current\"][\"pinned_to_person_flag\"],\n lead_id= payload[\"current\"][\"lead_id\"],\n pin_note_on_specified_lead= payload[\"current\"][\"pinned_to_lead_flag\"]\n )\n return note.__dict__",
"def display_note(self, note):\n\t\tself.canvas.itemconfig(self.note, text = note)",
"async def _add_note(\n self,\n ctx: Context,\n note: Union[discord.Message, str],\n *,\n reason: str = None\n ):\n\n if isinstance(note, discord.Message):\n content = note.clean_content\n author = str(note.author)\n channel = note.channel.mention\n jump_url = note.jump_url\n else:\n content = note\n author = None\n channel = None\n jump_url = None\n\n async with self.config.member(ctx.author).notes() as notes:\n notes.append({\n \"note\": content,\n \"reason\": reason or \"No reason\",\n \"author\": author,\n \"channel\": channel,\n \"jump_url\": jump_url\n })\n\n await ctx.message.add_reaction(CHECK_MARK)",
"def note(self, note_id):\r\n return notes.Note(self, note_id)",
"def create_a_note(self, data):\n return self.client._post(\"/notes\", json=data)",
"def get_note(self):\r\n return self.__note",
"def create_note(self, noteTitle, note, date):\n user = User.objects.create(username='userdemo')\n user.set_password('calnote24')\n user.save()\n Note.objects.create(noteTitle=noteTitle, note=note, date=date, user_id=user.id)",
"def createNote(self, authenticationToken, note):\r\n self.send_createNote(authenticationToken, note)\r\n return self.recv_createNote()",
"def note(self, note_id):\r\n return TicketNote(self, note_id)"
] |
[
"0.65763193",
"0.63949037",
"0.63949037",
"0.63949037",
"0.63949037",
"0.63949037",
"0.6318726",
"0.62110776",
"0.61995065",
"0.6052707",
"0.603377",
"0.59962285",
"0.59353656",
"0.5909708",
"0.5909708",
"0.5909708",
"0.59027666",
"0.5827151",
"0.5797912",
"0.5792845",
"0.57734096",
"0.57563525",
"0.57545245",
"0.5570765",
"0.5564968",
"0.5534592",
"0.552601",
"0.552294",
"0.5512301",
"0.5458033"
] |
0.6840734
|
0
|
Gets the planned_end_date of this SubscriptionSuspensionCreate. The planned end date of the suspension identifies the date on which the suspension will be ended automatically.
|
def planned_end_date(self):
    return self._planned_end_date
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def planned_end_date(self, planned_end_date):\n if planned_end_date is None:\n raise ValueError(\"Invalid value for `planned_end_date`, must not be `None`\")\n\n self._planned_end_date = planned_end_date",
"def planned_termination_date(self):\n return self._planned_termination_date",
"def end_date(self) -> Optional[str]:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> Optional[str]:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> str:\n return pulumi.get(self, \"end_date\")",
"def end_date(self) -> str:\n return pulumi.get(self, \"end_date\")",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self._end_date",
"def end_date(self):\n return self.__end_date",
"def get_inbound_statement_details_end_date(self):\n return self.get_text_from_element(self.inbound_statements_details_end_date_locator, True)",
"def get_inbound_statements_end_date(self):\n return self.get_text_from_element(self.inbound_statements_end_date_locator, True)",
"def end_date(self) -> str:\n return self._end_date",
"def get_bundle_end_date(self):\n if not self._bundle_end_date:\n max_date = self.engine.execute(\n \"\"\"\n SELECT\n MAX(end_date)\n FROM (\n SELECT\n end_date\n FROM\n equities\n UNION\n SELECT\n end_date\n FROM\n futures_contracts\n )\n \"\"\"\n ).scalar()\n self._bundle_end_date = pd.Timestamp(max_date, tz=\"UTC\")\n\n return self._bundle_end_date",
"def end_date_time(self) -> Optional[str]:\n return pulumi.get(self, \"end_date_time\")",
"def get_outbound_statements_end_date(self):\n return self.get_text_from_element(self.outbound_statements_end_date_locator, True)",
"def end_date(self, end_date):\n if self.local_vars_configuration.client_side_validation and end_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n end_date is not None and len(end_date) < 1):\n raise ValueError(\"Invalid value for `end_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._end_date = end_date",
"def roa_validity_end_date(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"roa_validity_end_date\")",
"def computed_enddate(self):\n if self.enddate:\n # you need to add a day to enddate if your dates are meant to be inclusive\n offset = datetime.timedelta(days=1 if self.inclusive else 0)\n return (self.enddate + offset)",
"def get_outbound_statements_grid_end_date(self):\n return self.get_specific_column_value_from_grid(self.outbound_statements_grid_div_id, self.outbound_statements_grid_row_count, self.end_date_column_name)",
"def get_inbound_statement_details_final_payment_due_date(self):\n return self.get_text_from_element(self.inbound_statements_details_final_payment_due_date_locator, True)",
"def roa_validity_end_date(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"roa_validity_end_date\")",
"def roa_validity_end_date(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"roa_validity_end_date\")",
"def get_outbound_statement_summary_end_date(self):\n return self.get_text_from_element(self.outbound_statements_summary_end_date_locator, False)",
"def scheduled_end_date_time(self):\n if \"scheduledEndDateTime\" in self._prop_dict:\n if isinstance(self._prop_dict[\"scheduledEndDateTime\"], OneDriveObjectBase):\n return self._prop_dict[\"scheduledEndDateTime\"]\n else :\n self._prop_dict[\"scheduledEndDateTime\"] = DateTimeTimeZone(self._prop_dict[\"scheduledEndDateTime\"])\n return self._prop_dict[\"scheduledEndDateTime\"]\n\n return None",
"def set_statement_end_date(self, end_date):\n end_date_to_set = None\n if end_date != \"\":\n end_date_to_set = end_date\n else:\n end_date_to_set = self.get_date(last_day_of_last_month=True)\n self.set_value_into_input_field(self.statement_end_date_locator, end_date_to_set)\n return end_date_to_set",
"def get_inbound_statements_grid_end_date(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_end_date_column_name)",
"def get_inbound_statements_grid_final_payment_due_date(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_final_payment_due_date_column_name)",
"def planned_termination_date(self, planned_termination_date):\n\n self._planned_termination_date = planned_termination_date",
"def set_non_recurring_charge_end_date(self, end_date):\n if end_date is None:\n statement_summary_end_date_element = self.wait().until(EC.presence_of_element_located(self.statement_details_end_date_locator), 'statement details end date locator not found before specified time out')\n non_recurring_charge_end_date = str(statement_summary_end_date_element.text)\n elif end_date == \"\":\n current_date = datetime.date.today()\n next_month = current_date.replace(day=28) + datetime.timedelta(days=4)\n last_day_of_current_month = next_month - datetime.timedelta(days=next_month.day)\n non_recurring_charge_end_date = \"%d/%d/%d\" % (last_day_of_current_month.month, last_day_of_current_month.day, last_day_of_current_month.year)\n else:\n non_recurring_charge_end_date = end_date\n self.set_value_into_input_field(self.non_recurring_charge_end_date_locator, non_recurring_charge_end_date)\n return non_recurring_charge_end_date"
] |
[
"0.67940325",
"0.6618622",
"0.6293864",
"0.6293864",
"0.6230565",
"0.6230565",
"0.6213361",
"0.6213361",
"0.6213361",
"0.6192062",
"0.6126461",
"0.6047898",
"0.6047435",
"0.6009267",
"0.5971236",
"0.596319",
"0.5941742",
"0.59058607",
"0.58910954",
"0.58020884",
"0.58003604",
"0.57550853",
"0.57550853",
"0.5728539",
"0.5698935",
"0.5688503",
"0.56545955",
"0.5450905",
"0.5445183",
"0.53358334"
] |
0.791573
|
0
|
Sets the planned_end_date of this SubscriptionSuspensionCreate. The planned end date of the suspension identifies the date on which the suspension will be ended automatically.
|
def planned_end_date(self, planned_end_date):
    if planned_end_date is None:
        raise ValueError("Invalid value for `planned_end_date`, must not be `None`")
    self._planned_end_date = planned_end_date
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_statement_end_date(self, end_date):\n end_date_to_set = None\n if end_date != \"\":\n end_date_to_set = end_date\n else:\n end_date_to_set = self.get_date(last_day_of_last_month=True)\n self.set_value_into_input_field(self.statement_end_date_locator, end_date_to_set)\n return end_date_to_set",
"def planned_termination_date(self, planned_termination_date):\n\n self._planned_termination_date = planned_termination_date",
"def planned_end_date(self):\n return self._planned_end_date",
"def end_date(self, end_date):\n if self.local_vars_configuration.client_side_validation and end_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n end_date is not None and len(end_date) < 1):\n raise ValueError(\"Invalid value for `end_date`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._end_date = end_date",
"def end_date(self, end_date):\n if end_date is None:\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\")\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n\n self._end_date = end_date",
"def end_date(self, end_date):\n self._end_date = end_date",
"def set_end_date(self, date):\n pass",
"def end_date(self, end_date):\n if end_date is None:\n end_date = datetime.utcnow()\n\n self._end_date = dt_utils.parse_date(end_date)",
"def end_date(self, end_date: str):\n if end_date is None:\n raise ValueError(\"Invalid value for `end_date`, must not be `None`\") # noqa: E501\n\n self._end_date = end_date",
"def change_end_date(self, new_end_date):\n self.end_date = new_end_date",
"def set_adjustment_charge_end_date(self, end_date):\n self.set_value_into_input_field(self.end_date_locator, end_date)",
"def end_date_time(self, end_date_time):\n\n self._end_date_time = end_date_time",
"def set_non_recurring_charge_end_date(self, end_date):\n if end_date is None:\n statement_summary_end_date_element = self.wait().until(EC.presence_of_element_located(self.statement_details_end_date_locator), 'statement details end date locator not found before specified time out')\n non_recurring_charge_end_date = str(statement_summary_end_date_element.text)\n elif end_date == \"\":\n current_date = datetime.date.today()\n next_month = current_date.replace(day=28) + datetime.timedelta(days=4)\n last_day_of_current_month = next_month - datetime.timedelta(days=next_month.day)\n non_recurring_charge_end_date = \"%d/%d/%d\" % (last_day_of_current_month.month, last_day_of_current_month.day, last_day_of_current_month.year)\n else:\n non_recurring_charge_end_date = end_date\n self.set_value_into_input_field(self.non_recurring_charge_end_date_locator, non_recurring_charge_end_date)\n return non_recurring_charge_end_date",
"def planned_purge_date(self, planned_purge_date):\n\n self._planned_purge_date = planned_purge_date",
"def planned_purge_date(self, planned_purge_date):\n\n self._planned_purge_date = planned_purge_date",
"def end_date(self, value):\n\n if not isinstance(value, datetime):\n raise TypeError(_pretty_message(\n '''\n end_date must be an instance of datetime.datetime, not %s\n ''',\n _type_name(value)\n ))\n\n self._end_date = value",
"def rating_end_date(self, rating_end_date):\n\n self._rating_end_date = rating_end_date",
"def appointment_end_date(end_date):\n\n session.attributes['end_date'] = str(end_date)\n msg = render_template('end_time')\n return question(msg)",
"def published_dts_end(self, published_dts_end):\n\n self._published_dts_end = published_dts_end",
"def set_bulk_edit_end_date(self, end_date, future_date):\n if future_date is True:\n end_date = self.get_date(future_date=True, number_of_days_to_add=30)\n self.set_value_into_input_field(self.bulk_edit_end_date_inputbox_locator, end_date)",
"def coverage_end_date(self, coverage_end_date):\n\n self._coverage_end_date = coverage_end_date",
"def coverage_end_date(self, coverage_end_date):\n\n self._coverage_end_date = coverage_end_date"
] |
[
"0.67294335",
"0.6711568",
"0.65507555",
"0.6507257",
"0.64512366",
"0.6432217",
"0.6432217",
"0.6432217",
"0.6432217",
"0.6432217",
"0.6432217",
"0.6432217",
"0.6432217",
"0.6384521",
"0.6304877",
"0.6233103",
"0.6215956",
"0.6067937",
"0.59753764",
"0.5912095",
"0.5822383",
"0.57173514",
"0.57173514",
"0.57072365",
"0.56989425",
"0.55967104",
"0.55741197",
"0.5523372",
"0.54861856",
"0.54861856"
] |
0.81324995
|
0
|
Sets the subscription of this SubscriptionSuspensionCreate.
|
def subscription(self, subscription):
    if subscription is None:
        raise ValueError("Invalid value for `subscription`, must not be `None`")
    self._subscription = subscription
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def subscription(self, subscription):\n\n self._subscription = subscription",
"def subscription_id(self, subscription_id):\n\n self._subscription_id = subscription_id",
"def subscription_id(self, subscription_id):\n\n self._subscription_id = subscription_id",
"def subscription_id(self, subscription_id):\n\n self._subscription_id = subscription_id",
"def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")",
"def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)",
"def setup_subscription(subscription, info: GraphQLResolveInfo, variables, complete_on_error=False):\n excluded_field_nodes = filter_selection_set(info)\n variables = frappe._dict(variables)\n subscription_id = frappe.generate_hash(f\"{subscription}-{frappe.session.user}\", length=8)\n\n subscription_data = frappe._dict(\n subscribed_at=now_datetime(),\n last_ping=now_datetime(),\n variables=variables,\n subscription_id=subscription_id,\n selection_set=excluded_field_nodes,\n user=frappe.session.user,\n complete_on_error=complete_on_error\n )\n\n frappe.cache().hset(\n get_subscription_redis_key(subscription), subscription_id, subscription_data)\n\n return frappe._dict(\n subscription_id=subscription_id\n )",
"def subscription(self):\r\n return SubscriptionResource(self)",
"def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)",
"def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()",
"def set_endpoint_subscription_id(\n *, login_manager: LoginManager, endpoint_id: str, subscription_id: Optional[str]\n) -> None:\n transfer_client = login_manager.get_transfer_client()\n\n res = transfer_client.put(\n f\"/endpoint/{endpoint_id}/subscription\",\n data={\"subscription_id\": subscription_id},\n )\n formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key=\"message\")",
"def create_subscription_in_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.create.subscription_does_not_exist\")\n return\n if subscription.status != QuerySubscription.Status.CREATING.value:\n metrics.incr(\"snuba.subscriptions.create.incorrect_status\")\n return\n if subscription.subscription_id is not None:\n metrics.incr(\"snuba.subscriptions.create.already_created_in_snuba\")\n # This mostly shouldn't happen, but it's possible that a subscription can get\n # into this state. Just attempt to delete the existing subscription and then\n # create a new one.\n try:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n except SnubaError:\n logger.exception(\"Failed to delete subscription\")\n\n subscription_id = _create_in_snuba(subscription)\n subscription.update(\n status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id\n )",
"def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response",
"def set_subscription_validator(self, function):\n self._pres_manager.set_subscription_validator(function)",
"def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)",
"def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)",
"def set_subscription_for_development_events_v0(self, subscription_id, **kwargs):\n # type: (str, **Any) -> Union[ApiResponse, object, BadRequestError_a8ac8b44, Error_d660d58]\n operation_name = \"set_subscription_for_development_events_v0\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'subscription_id' is set\n if ('subscription_id' not in params) or (params['subscription_id'] is None):\n raise ValueError(\n \"Missing the required parameter `subscription_id` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v0/developmentEvents/subscriptions/{subscriptionId}'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'subscription_id' in params:\n path_params['subscriptionId'] = params['subscription_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'update_subscription_request' in params:\n body_params = params['update_subscription_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=204, message=\"No content.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=401, message=\"The auth token is invalid/expired or doesn't have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=429, message=\"Exceed the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"PUT\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None",
"def resume_subscription(self,\n subscription_id,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions/{subscription_id}/resume')\n .http_method(HttpMethodEnum.POST)\n .template_param(Parameter()\n .key('subscription_id')\n .value(subscription_id)\n .should_encode(True))\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()",
"def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response",
"def pause_subscription(self,\n subscription_id,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions/{subscription_id}/pause')\n .http_method(HttpMethodEnum.POST)\n .template_param(Parameter()\n .key('subscription_id')\n .value(subscription_id)\n .should_encode(True))\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()",
"def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res",
"def _create_subscription(self):\n try:\n self.client.create_subscription(\n name=self.subscription_path, topic=self.topic_path\n )\n except NotFound:\n # suitable topic does not exist in the Pitt-Google project\n raise ValueError(\n (\n f\"A subscription named {self.subscription_name} does not exist\"\n \"in the Google Cloud Platform project \"\n f\"{settings.GOOGLE_CLOUD_PROJECT}, \"\n \"and one cannot be automatically create because Pitt-Google \"\n \"does not publish a public topic with the same name.\"\n )\n )\n else:\n self._log_and_print(f\"Created subscription: {self.subscription_path}\")",
"def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False",
"def set_subscription(request, token, subscribe): # pylint: disable=unused-argument\r\n try:\r\n username = UsernameCipher().decrypt(token.encode())\r\n user = User.objects.get(username=username)\r\n except UnicodeDecodeError:\r\n raise Http404(\"base64url\")\r\n except UsernameDecryptionException as exn:\r\n raise Http404(exn.message)\r\n except User.DoesNotExist:\r\n raise Http404(\"username\")\r\n\r\n if subscribe:\r\n UserPreference.objects.get_or_create(user=user,\r\n key=NOTIFICATION_PREF_KEY,\r\n defaults={\r\n \"value\": UsernameCipher.encrypt(user.username)\r\n })\r\n return render_to_response(\"resubscribe.html\", {'token': token})\r\n else:\r\n UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).delete()\r\n return render_to_response(\"unsubscribe.html\", {'token': token})",
"def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp",
"def subscription(self):\n return self._subscription",
"def subscription(self):\n return self._subscription",
"def select_subscription(profile=None, sub_name_or_id=None):\n if profile is None:\n profile = subscription_profile()\n\n if sub_name_or_id is None:\n sub_name_or_id = _prompt_sub_id_selection(profile)\n\n profile.set_active_subscription(sub_name_or_id)\n return profile"
] |
[
"0.73514724",
"0.65007716",
"0.65007716",
"0.65007716",
"0.6084177",
"0.6023973",
"0.60062104",
"0.5801773",
"0.57880664",
"0.55934745",
"0.5577292",
"0.5570897",
"0.5563788",
"0.54974383",
"0.54484826",
"0.54439104",
"0.54293483",
"0.5423903",
"0.5307691",
"0.5285355",
"0.52828723",
"0.52640617",
"0.52600384",
"0.5244101",
"0.5211094",
"0.51666397",
"0.5160047",
"0.5147003",
"0.5147003",
"0.5104674"
] |
0.71707314
|
1
|
String representation of the workspace
|
def __repr__(self):
    return "<Workspace({0})>".format(self.name)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def workspace(self) -> str:\n return self._workspace",
"def workspaceInfo(self):\n pass",
"def name(self):\n return self.attributes.workspace.name",
"def __str__(self):\n\t\treturn \"'SEW object with workdir %s'\" % (self.workdir)",
"def __str__(self):\n string = \"\"\"\n Project Factory:\\n\n Directory: {}\\n\n Size: {}\\n\n \"\"\".format(self._directory, len(self.projects))\n return string",
"def macro_str(self):\n str = '-' * (2 * self.SIZE + 1) + '\\n'\n for row in self.boards:\n str += ' '\n for board in row:\n str += board.state.value + ' '\n str += '\\n' + '-' * (2 * self.SIZE + 1) + '\\n'\n return str",
"def get_output_workspace_names(self):\n mode = ''\n if self._corrections_workspace != '' and self._can_workspace != '':\n mode = 'Correct_1'\n elif self._corrections_workspace != '':\n mode = 'Corrected'\n else:\n mode = 'Subtract_1'\n\n workspace_name_stem = 'irs1_graphite002_%s' % mode\n\n output_workspaces = {\n 'reduced_workspace': workspace_name_stem + '_red',\n 'rqw_workspace': workspace_name_stem + '_rqw',\n }\n\n if self._can_workspace != '':\n output_workspaces['result_workspace'] = workspace_name_stem + '_Result'\n\n return output_workspaces",
"def __str__(self):\n return '{}.{} >> {}'.format(self.scope, self.name,\n '/'.join(map(str, self.variables)))",
"def __repr__(self):\n string = self.__class__.__name__\n string += f'\\n\\tEnvSpec: {self.env_spec}'\n string += f'\\n\\tRecurrent: {self.recurrent}'\n \n return string",
"def __str__(self):\n to_print = '{} : {}\\n'.format('Name'.ljust(34),self.name)\n to_print = to_print + '{} : {}\\n'.format('Name'.ljust(34),self.pathloss.name)\n to_print = to_print + '{} : {}\\n'.format('Number of samples'.ljust(34),self.nsamples)\n to_print = to_print + '{} : {}\\n'.format('Sensor model'.ljust(34),self.sensor_model.name)\n to_print = to_print + '{} : {}\\n'.format('Motion model'.ljust(34),self.motion_model.name)\n return to_print",
"def __str__(self):\n return str(self.path.relative_to(os.getcwd()))",
"def __str__(self):\n rep=\"This system has \"+str(self.NL)+\" layers.\\n\"\n rep+=\"The parameters for the each layers are:\\n\"\n for i in range(self.NL-1):\n rep+=\"Layer no. \"+str(i)+\":\\t \"+str(self.layers[i])\n rep+=\"Coupled to the next layer with strength:\\t\"+str(self.couplings[i])+\"\\n\"\n rep+=\"Layer no. \"+str(self.NL-1)+\":\\t \"+str(self.layers[self.NL-1])\n \n return rep",
"def toString(self):\n st = \" \\n\"\n st += \"Title: \" +self.getTitle()+ \"\\n\"\n st += \"Path: \" +self.getPath()+ \"\\n\"\n st += \"Description: \" +self.getDescription()+ \"\\n\"\n return st",
"def __str__(self):\n\n instructions = []\n if self.directory:\n instructions.append(comment('OpenMPI'))\n else:\n instructions.append(comment(\n 'OpenMPI version {}'.format(self.version)))\n instructions.append(packages(ospackages=self.__ospackages))\n if self.directory:\n # Use source from local build context\n instructions.append(\n copy(src=self.directory,\n dest=os.path.join(self.__wd, self.directory)))\n instructions.append(shell(commands=self.__commands))\n if self.__environment_variables:\n instructions.append(environment(\n variables=self.__environment_variables))\n\n return '\\n'.join(str(x) for x in instructions)",
"def workspace_path(self):\n return self._path_temp",
"def get_workspace(self):\n wid = self._config[\"workspace\"]\n return sim_consts.workspace_origin[wid], sim_consts.workspace_size[wid]",
"def str(self):\n out = \"{0}:\".format(self.gtype) if self.gtype else \"\"\n out += \"{0}\".format(repr(self.coords))\n out += \"[{0}]\".format(str(self.goalPtr)) if self.goalPtr else \"\"\n return out",
"def __str__(self):\n return str(self._root)",
"def __str__(self):\n s = \"Scene Object: \" + self.name + \"\\n\"\n s += \" Wavelength: \" + str(np.min(self.wave)) + \":\" + str(self.bin_width) + \":\" + str(np.max(self.wave))\n s += \" nm\\n\"\n s += \" [Row, Col]: \" + str(self.shape) + \"\\n\"\n s += \" [Width, Height]: [\" + \"%.2g\" % self.width + \", \" + \"%.2g\" % self.height + \"] m\\n\"\n s += \" Horizontal field of view: \" + \"%.2g\" % self.fov + \" deg\\n\"\n s += \" Sample size: \" + \"%.2g\" % self.sample_size + \" meters/sample\\n\"\n s += \" Mean luminance: \" + \"%.2g\" % self.mean_luminance + \" cd/m2\"\n return s",
"def __str__( self ) :\n\n return( '%s with projectile \"%s\", target \"%s\", evaluation \"%s\", path \"%s\", standardTarget \"%s\" and standardEvaluation \"%s\".' % \n ( self.moniker, self.projectile, self.target, self.evaluation, self.path, self.standardTarget, self.standardEvaluation ) )",
"def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'",
"def __repr__(self):\r\n return f\"{self.name} {self.status_name} {self.window_start} {self.wiki_url} {self.pad_location} {self.image}\"",
"def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ '\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def __str__(self):\r\n out = (\r\n ' *** Grid dimensions ***\\n'\r\n ' Origin: ( {0.ox:f}, {0.oy:f}, {0.oz:f})\\n'\r\n ' Delta: ( {0.dx:f}, {0.dy:f}, {0.dz:f})\\n'\r\n ' Size: ( {0.lx:f}, {0.ly:f}, {0.lz:f})\\n'\r\n ' N: ( {0.nx:d}, {0.ny:d}, {0.nz:d})\\n'\r\n ' type: {0.gtype}\\n'\r\n ' points: {0.points}\\n'\r\n ' cells: {0.cells}\\n'\r\n ' name: {0.gname}\\n'\r\n ).format(self)\r\n\r\n return out",
"def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")"
] |
[
"0.73150843",
"0.7238596",
"0.6897872",
"0.6884405",
"0.6769728",
"0.66749203",
"0.6637215",
"0.6621079",
"0.64703304",
"0.6444655",
"0.6442687",
"0.6409415",
"0.64087206",
"0.639957",
"0.636927",
"0.63623023",
"0.6358274",
"0.630217",
"0.6301894",
"0.6288384",
"0.6274253",
"0.6263023",
"0.6259271",
"0.62560856",
"0.62560856",
"0.6222758",
"0.62152714",
"0.62141144",
"0.62141144",
"0.62141144"
] |
0.79356015
|
0
|
Removes preprocessing combinations that should not occur in a single pipeline.
|
def remove_incompatible_operations(pipelines):
    def find_duplicates(pipelines):
        for idx in range(len(pipelines)):
            for idx_ in range(idx + 1, len(pipelines)):
                if pipelines[idx] == pipelines[idx_]:
                    return idx
        return -1

    def _remove_illegal_combination(pipelines, combination):
        illegal_pipes = []
        pipelines_ = []
        for idx, pipeline in enumerate(pipelines):
            combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))
            actives = [pipeline[key] != None for key in pipeline if key in combination_]
            if sum(actives) > 1:
                illegal_pipes.append(idx)  # Store the index of bad combination
                for param in combination_:  # Generate substituting legal combinations
                    if pipeline[param] != None:  # we need to make new pipe
                        pipeline_ = pipeline.copy()
                        for param_ in combination_:  # Set ALL conflicting parameters to None
                            pipeline_[param_] = None
                        pipeline_[param] = pipeline[param]  # Set current parameter back to original value
                        pipelines_.append(pipeline_)
        new_pipelines = [i for j, i in enumerate(pipelines) if j not in illegal_pipes]
        # new_pipelines.extend(pipelines_)
        return new_pipelines, pipelines_

    illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],
                            ['SMOOTH', 'SAVGOL']]
    for combination in illegal_combinations:
        pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)
        pipelines.extend(new_pipes)
    pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}
    pipelines = [json.loads(item) for item in pipelines_set]
    return pipelines
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _build_preprocessing(self):\n\n # For now, do nothing\n pass",
"def preprocess(self, cfg_pipeline):\n return",
"def _eliminate_common_key_with_none(stages, context, can_pack=lambda s: True):\n # type: (Iterable[Stage], TransformContext, Callable[[str], Union[bool, int]]) -> Iterable[Stage]\n\n # Partition stages by whether they are eligible for common KeyWithNone\n # elimination, and group eligible KeyWithNone stages by parent and\n # environment.\n def get_stage_key(stage):\n if len(stage.transforms) == 1 and can_pack(stage.name):\n transform = only_transform(stage.transforms)\n if (transform.spec.urn == common_urns.primitives.PAR_DO.urn and\n len(transform.inputs) == 1 and len(transform.outputs) == 1):\n pardo_payload = proto_utils.parse_Bytes(\n transform.spec.payload, beam_runner_api_pb2.ParDoPayload)\n if pardo_payload.do_fn.urn == python_urns.KEY_WITH_NONE_DOFN:\n return (only_element(transform.inputs.values()), stage.environment)\n return None\n\n grouped_eligible_stages, ineligible_stages = _group_stages_by_key(\n stages, get_stage_key)\n\n # Eliminate stages and build the PCollection remapping dictionary.\n pcoll_id_remap = {}\n remaining_stages = []\n for sibling_stages in grouped_eligible_stages.values():\n if len(sibling_stages) > 1:\n output_pcoll_ids = [\n only_element(stage.transforms[0].outputs.values())\n for stage in sibling_stages\n ]\n parent = _parent_for_fused_stages(sibling_stages, context)\n for to_delete_pcoll_id in output_pcoll_ids[1:]:\n pcoll_id_remap[to_delete_pcoll_id] = output_pcoll_ids[0]\n del context.components.pcollections[to_delete_pcoll_id]\n sibling_stages[0].parent = parent\n sibling_stages[0].name = _make_pack_name(\n stage.name for stage in sibling_stages)\n only_transform(\n sibling_stages[0].transforms).unique_name = _make_pack_name(\n only_transform(stage.transforms).unique_name\n for stage in sibling_stages)\n\n remaining_stages.append(sibling_stages[0])\n\n # Remap all transforms in components.\n for transform in context.components.transforms.values():\n _remap_input_pcolls(transform, pcoll_id_remap)\n\n # Yield stages while remapping input PCollections if needed.\n stages_to_yield = itertools.chain(ineligible_stages, remaining_stages)\n for stage in stages_to_yield:\n transform = only_transform(stage.transforms)\n _remap_input_pcolls(transform, pcoll_id_remap)\n yield stage",
"def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()",
"def stop_procrastinating(self):\n for layer, l in sorted(self.postponed.items()):\n for fun, args, rgb in l:\n self.set_source_rgb(*rgb)\n fun(*args, procrastinate=0)\n self.postponed = {}",
"def dataset_NER_prepocess(dataset):\n preprocessed = []\n\n try:\n preprocessed = stop_word_remove(dataset)\n\n if not preprocessed:\n preprocessed = dataset\n\n preprocessed = adverb_remove(dataset)\n\n if not preprocessed:\n preprocessed = dataset\n\n preprocessed = verb_remove(dataset)\n\n if not preprocessed:\n preprocessed = dataset\n preprocessed = adjective_remove(dataset)\n\n if not preprocessed:\n preprocessed = dataset\n\n preprocessed = special_symbols_remove(dataset)\n\n except Exception as e:\n print(e)\n return None\n\n return preprocessed",
"def _remove_pre_post_1q(self, circ):\n dag = circuit_to_dag(circ)\n del_list = []\n for node in dag.topological_op_nodes():\n if len(node.qargs) > 1:\n break\n del_list.append(node)\n for node in reversed(list(dag.topological_op_nodes())):\n if len(node.qargs) > 1:\n break\n del_list.append(node)\n for node in del_list:\n dag.remove_op_node(node)\n return dag_to_circuit(dag)",
"def fixupProcess(self):\n # Make sure that for each output module the following parameters exist\n # in the PSet returned from the framework:\n # fileName\n # logicalFileName\n # dataset.dataTier\n # dataset.filterName\n if hasattr(self.process, \"outputModules\"):\n outputModuleNames = list(self.process.outputModules) \n elif hasattr(self.process, \"outputModules_\"):\n outputModuleNames = self.process.outputModules_()\n elif hasattr(self.process, \"_Process__outputmodules\"):\n outputModuleNames = list(self.process._Process__outputmodules)\n else:\n msg = \"Error loading output modules from process\"\n raise AttributeError(msg)\n\n for outMod in outputModuleNames:\n tweak = PSetTweak()\n self.logger.info(\"DEBUG output module = %s\", outMod)\n tweak.addParameter(\"process.options\", \"customTypeCms.untracked.PSet()\")\n tweak.addParameter(\"process.%s.dataset\" % outMod, \"customTypeCms.untracked.PSet(dataTier=cms.untracked.string(''), filterName=cms.untracked.string(''))\")\n self.applyPsetTweak(tweak, skipIfSet=True, cleanupTweak=True)\n #tweak.addParameter(\"process.%s.dataset.dataTier\" % outMod, \"customTypeCms.untracked.string('')\")\n #tweak.addParameter(\"process.%s.dataset.filterName\" % outMod, \"customTypeCms.untracked.string('')\")\n tweak.addParameter(\"process.%s.fileName\" % outMod, \"customTypeCms.untracked.string('')\")\n tweak.addParameter(\"process.%s.logicalFileName\" % outMod, \"customTypeCms.untracked.string('')\")\n self.applyPsetTweak(tweak, skipIfSet=True)\n\n return",
"def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = features_in_data - features_to_keep\n for feat in features_to_remove:\n language.pop(feat)\n self.features = sorted(list(self.features))",
"def preprocess(self):\n pass",
"def preprocess(self):\n pass",
"def preprocess(self):\n pass",
"def removeModulesNotOnAPathExcluding( process, keepList=() ):\n allMods=set((x for x in process.producers_().iterkeys()))\n allMods.update((x for x in process.filters_().iterkeys()))\n allMods.update((x for x in process.analyzers_().iterkeys()))\n allMods.update((x for x in process.outputModules_().iterkeys()))\n \n modulesOnPaths = set()\n for p in process.paths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames())) \n for p in process.endpaths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames()))\n\n notOnPaths = allMods.difference(modulesOnPaths)\n \n keepModuleNames = set( (x.label_() for x in keepList) )\n \n getRidOf = notOnPaths.difference(keepModuleNames)\n \n for n in getRidOf:\n delattr(process,n)",
"def applyMorphologicalCleaning(self, image):",
"def _unset_pipeline_cfg(self, field):",
"def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None",
"def construct_pipelines(config):\n\n\n def _get_argument_combinations(arguments):\n \"\"\" Utility to function to obtain all permutations of preprocessing arguments. \"\"\"\n arg_names = sorted(arguments)\n combinations = itertools.product(*(arguments[arg] for arg in arg_names))\n combinations = [dict(zip(arg_names, arg_values)) for arg_values in combinations]\n return combinations\n\n options = {}\n for key in config.keys():\n # 1. Check if we got also_skip\n if 'also_skip' in config[key] and config[key]['also_skip']:\n config[key].pop('also_skip')\n options[key] = _get_argument_combinations(config[key])\n options[key].append(None)\n else:\n options[key] = _get_argument_combinations(config[key])\n\n return _get_argument_combinations(options)",
"def reset(self):\n super().reset()\n whitelist = []\n for parent in self.cls.mro():\n whitelist.extend(getattr(parent, 'tab_whitelist', []))\n\n if getattr(parent, \"tab_component_names\", False):\n for cpt_name in parent.component_names:\n if getattr(parent, cpt_name).kind != Kind.omitted:\n whitelist.append(cpt_name)\n\n self._includes = set(whitelist)",
"def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in types:\n if any(i in attr for i in self.keep) or attr[0:2] == '__':\n continue\n else:\n x = getattr(self, attr)\n del x\n for molecule in self.values():\n molecule.strip_molecule(self.keep)\n exit()",
"def ignore_suboptimal_combinations(self,active_models):\n if self.transform =='max':\n not_trained = ['ElasticNetClassifier',\n 'PassiveAggressiveClassifier',\n 'RidgeClassifierCV',]\n elif self.transform =='normal':\n not_trained = ['ElasticNetClassifier',\n 'PassiveAggressiveClassifier',\n 'SDGClassifier',\n 'LinearSVC',\n 'RidgeClassifierCV',\n 'LogisticRegression',]\n elif self.transform == 'tfidf':\n not_trained = ['ElasticNetClassifier',\n 'PassiveAggressiveClassifier',\n 'RidgeClassifierCV',\n 'LogisticRegression',]\n else:\n not_trained = []\n active_models = [model for model in active_models if model not in not_trained]\n return active_models",
"def unusedFromKDOTDataPreparation():",
"def _strip_result(context_features):\n stripped = []\n processed = []\n for feature in context_features:\n keyword = feature['data'][0][1]\n if keyword not in processed:\n stripped.append(feature)\n processed.append(keyword)\n return stripped",
"def _filter_pipeline_parameters(dct):\n return {k: v for k, v in dct.items() if k not in non_pipeline_parameter_names and k != dynamic_param_name}",
"def stage_two_preprocessing(data: pd.Series) -> pd.Series:\n # designed to be run after remove_contractions\n data_ = data.dropna()\n data_ = remove_punctuation(data_)\n data_ = numbers_to_words(data_)\n data_ = remove_stopwords(data_)\n return data_",
"def get_non_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_rf = SecondaryNode('rf', nodes_from=[node_scaling])\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_logit, node_rf])\n pipeline = Pipeline(node_xgboost)\n return pipeline",
"def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)",
"def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)",
"def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)",
"def _remove_outputs_which_are_inputs(self, required_outputs, inputs):\n copy_required_outputs = set(required_outputs)\n for input_type in inputs:\n if input_type in copy_required_outputs:\n copy_required_outputs.remove(input_type)\n return copy_required_outputs",
"def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)"
] |
[
"0.61325556",
"0.59824824",
"0.56957024",
"0.56825066",
"0.5645361",
"0.5435082",
"0.5411837",
"0.53826714",
"0.5352468",
"0.5347234",
"0.5347234",
"0.5347234",
"0.53351414",
"0.5322323",
"0.53163415",
"0.53124636",
"0.5310005",
"0.53048676",
"0.53015953",
"0.5284387",
"0.52828795",
"0.5271139",
"0.5268921",
"0.524905",
"0.5235243",
"0.5223988",
"0.5223988",
"0.5223988",
"0.5210934",
"0.520771"
] |
0.61943585
|
0
|
Reads input_filename and output_filename from the command line. Creates an L4 object and returns the output if it always fired, as well as the output based on the distribution given by the sigmoid function. Returns the frequency with which different neurons fired over a number of runs. Creates an L23 object and returns what fired in a given run and the frequency with which neurons fired over a number of runs, without recurrence. Input parameters are described in the "usage" sections of the input file.
|
def main():
# -------- input ------------------------
# get command line arguments into args array
args = sys.argv[1:]
    if len(args) != 2:
print "usage: input_filename output_filename"
sys.exit(1)
input_file = open(args[0], 'r')
output_filename = args[1]
# get parameters separately
params = (input_file.readline()).split()
    if len(params) < 8 or len(params) > 10:
print "usage: n23 mean stdp_eta n4 L k beta theta [non-k] [random]"
sys.exit(1)
n23 = int(params[0])
mean = float(params[1])
stdp_eta = float(params[2])
n4 = int(params[3])
L = int(params[4])
k = int(params[5])
beta = float(params[6])
theta = float(params[7])
non_k = -1
rand = False
    if len(params) > 8:
        non_k = float(params[8])
    if len(params) > 9:
        rand = params[9]
# get stimuli into array
stimuli = input_file.readline()
stimuli = [int(i) for i in stimuli.split()]
stimuli = numpy.array(stimuli)
if len(stimuli) != L:
print "error! there must be %d stimuli" % L
sys.exit(1)
# -------- layer 4 testing ------------------------
# create L4 object
l4 = ldefs.L4(n4, L, k, randomize=rand, non_k=non_k)
# print output vector of neurons that fired:
exc_vec = lfuncs.excitation(l4, stimuli)
sig_out = lfuncs.sig_prob(exc_vec, beta, theta)
L4_spike_vec = lfuncs.probvec_to_spikevec(sig_out)
print "---- Layer 4: ---- "
print "Excitation: " + lfuncs.spikevec_toString(exc_vec)
print "Spiked: " + lfuncs.spikevec_toString(L4_spike_vec)
# test frequencies of firing (commented out for convenience)
# freq.L4_freqs(n4, sig_out)
# -------- layer 2/3 testing ------------------------
l23 = ldefs.L23(n23, n4, mean, stdp_eta)
spiking_input = lfuncs.spikevec_where(L4_spike_vec)
# give L4 input to get excitation, feed to sigmoid, use probabilities to get spikes
exc_vec = lfuncs.net_input_vector(l23.inputWeightMatrix, spiking_input)
sig_out = lfuncs.sig_prob(exc_vec, beta, theta)
L23_spike_vec = lfuncs.probvec_to_spikevec(sig_out)
# write outputs to file: columns for each spiking neuron j in input,
# summed excitation, and sigmoid probabilities
file = open(output_filename, 'a')
# commented out for convenience
# for j in lfuncs.spikevec_where(L4_spike_vec):
# j_out = [round(l23.inputWeightMatrix[i][j], 2) for i in range(len(l23.inputWeightMatrix))]
# file.write("\n\nj%d: %s" % (j, str(j_out)))
# file.write("\n\nexc " + str([round(i,2) for i in exc_vec]))
# file.write("\n\nsig " + str([round(i, 2) for i in sig_out]))
file.write(str([round(i, 2) for i in sig_out]))
file.close()
print "---- Layer 2/3: ---- "
print "Spiked: " + lfuncs.spikevec_toString(L23_spike_vec)
# test frequencies of firing (commented out for convenience)
# freq.L23_freqs(n23, output_filename, sig_out)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def run(config, workdir, files, output=None, n=1):\n file = None\n for i in range(1, n+1):\n Logger.info(f'Starting run {i}/{n}')\n\n if output:\n file = f'{output}/run_{i}.dat'\n\n # Make sure these directories are clean\n for file in glob(f'{workdir}/lut_h2o/*'):\n os.remove(file)\n for file in glob(f'{workdir}/output/*'):\n os.remove(file)\n\n profile(\n args = SimpleNamespace(\n input_radiance = files[0],\n input_loc = files[1],\n input_obs = files[2],\n working_directory = workdir,\n config_file = config,\n wavelength_path = None,\n log_file = None,\n logging_level = 'DEBUG',\n pressure_elevation = None\n ),\n output = file\n )",
"def main(filename, frames, batch_size, num_classes, input_length):\n # Get our data.\n X_train, _, y_train, _ = get_data(filename, frames, num_classes, input_length)\n\n # Get sizes.\n num_classes = len(y_train[0])\n\n # Get our network.\n net = get_network_wide(frames, input_length, num_classes)\n\n # Get our model.\n model = tflearn.DNN(net, tensorboard_verbose=0)\n model.load('checkpoints/rnn.tflearn')\n\n # Evaluate.\n print(model.evaluate(X_train, y_train))",
"def main(input_filepath, output_filepath):\n\n logging.info(\"reading %s\", input_filepath)\n train_test = pd.read_hdf(input_filepath, 'train_test')\n meta = pd.read_hdf(input_filepath, 'meta')\n meta_org = pd.read_hdf(input_filepath, 'meta_org')\n\n sel_series = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\\\n ['series_id'].unique()\n train_series, validate_series = train_test_split(sel_series, random_state=1)\n\n logging.info(\"calc train_test\")\n train_test = calc_final_features(train_test, meta, meta_org=meta_org, verbose=True)\n\n sel = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\n train = sel[sel.series_id.isin(train_series)]\n validate = sel[sel.series_id.isin(validate_series)]\n test = train_test[train_test.entry_type.isin(['test'])]\n\n logging.info(\"writing %s\", output_filepath)\n train.to_hdf(output_filepath, \"train\", mode=\"w\")\n validate.to_hdf(output_filepath, \"validate\", mode=\"a\")\n test.to_hdf(output_filepath, \"test\", mode=\"a\")\n for k in ['meta', 'submission']:\n df = pd.read_hdf(input_filepath, k)\n df.to_hdf(output_filepath, k, mode=\"a\")",
"def main(input=None):\r\n start = time.time()\r\n \r\n print(input)\r\n \r\n print(\"Working...\")\r\n \r\n # Parse file\r\n size, instructions, transType = inputParse(input)\r\n \r\n lightsCheck = lights.LightTest(size)\r\n \r\n lineCount = 0 \r\n for line in instructions:\r\n \r\n lightsCheck.apply(instructions, lineCount, transType)\r\n lineCount += 1\r\n\r\n print(\"Number of lights on:\", lightsCheck.lightsCount())\r\n print(\"Elapsed time:\", time.time() - start)\r\n return 0",
"def main():\n parser = argparse.ArgumentParser(\n description=\"making feature file argsurations.\")\n\n parser.add_argument(\n \"--waveforms\", default=None,\n help=\"directory or list of filename of input wavfile\")\n parser.add_argument(\n \"--hdf5dir\", default=None,\n help=\"directory to save hdf5\")\n parser.add_argument(\n \"--wavdir\", default=None,\n help=\"directory to save of preprocessed wav file\")\n parser.add_argument(\n \"--fs\", default=16000,\n type=int, help=\"Sampling frequency\")\n parser.add_argument(\n \"--shiftms\", default=5,\n type=float, help=\"Frame shift in msec\")\n parser.add_argument(\n \"--feature_type\", default=\"world\", choices=[\"world\", \"melspc\", \"mcep\"],\n type=str, help=\"feature type\")\n parser.add_argument(\n \"--mspc_dim\", default=80,\n type=int, help=\"Dimension of mel spectrogram\")\n parser.add_argument(\n \"--minf0\", default=40,\n type=int, help=\"minimum f0 for world analysis\")\n parser.add_argument(\n \"--maxf0\", default=400,\n type=int, help=\"maximum f0 for world analysis\")\n parser.add_argument(\n \"--fmin\", default=None, nargs=\"?\",\n type=int, help=\"minimum frequency for melspc\")\n parser.add_argument(\n \"--fmax\", default=None, nargs=\"?\",\n type=int, help=\"maximum frequency for melspc\")\n parser.add_argument(\n \"--mcep_dim\", default=24,\n type=int, help=\"Dimension of mel cepstrum\")\n parser.add_argument(\n \"--mcep_alpha\", default=0.41,\n type=float, help=\"Alpha of mel cepstrum\")\n parser.add_argument(\n \"--fftl\", default=1024,\n type=int, help=\"FFT length\")\n parser.add_argument(\n \"--highpass_cutoff\", default=70,\n type=int, help=\"Cut off frequency in lowpass filter\")\n parser.add_argument(\n \"--save_wav\", default=True,\n type=strtobool, help=\"Whether to save filtered wav file\")\n parser.add_argument(\n \"--n_jobs\", default=10,\n type=int, help=\"number of parallel jobs\")\n parser.add_argument(\n \"--verbose\", default=1,\n type=int, help=\"log message level\")\n\n args = parser.parse_args()\n\n # set log level\n if args.verbose == 1:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n else:\n logging.basicConfig(level=logging.WARNING,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S')\n logging.warning(\"logging is disabled.\")\n\n # show arguments\n for key, value in vars(args).items():\n logging.info(\"%s = %s\" % (key, str(value)))\n\n # read list\n if os.path.isdir(args.waveforms):\n file_list = sorted(find_files(args.waveforms, \"*.wav\"))\n else:\n file_list = read_txt(args.waveforms)\n logging.info(\"number of utterances = %d\" % len(file_list))\n\n # check directory existence\n if not os.path.exists(args.wavdir) and args.highpass_cutoff != 0 and args.save_wav:\n os.makedirs(args.wavdir)\n if not os.path.exists(args.hdf5dir):\n os.makedirs(args.hdf5dir)\n\n # divide list\n file_lists = np.array_split(file_list, args.n_jobs)\n file_lists = [f_list.tolist() for f_list in file_lists]\n\n # multi processing\n processes = []\n if args.feature_type == \"world\":\n target_fn = world_feature_extract\n elif args.feature_type == \"melspc\":\n target_fn = melspectrogram_extract\n else:\n target_fn = melcepstrum_extract\n for f in file_lists:\n p = 
mp.Process(target=target_fn, args=(f, args,))\n p.start()\n processes.append(p)\n\n # wait for all process\n for p in processes:\n p.join()",
"def _run(input_file_name, num_zenith_angle_bins, num_albedo_bins,\n num_shortwave_sfc_down_flux_bins, num_aod_bins, num_surface_temp_bins,\n num_longwave_sfc_down_flux_bins, num_longwave_toa_up_flux_bins,\n example_dir_name, output_dir_name):\n\n # Process input args.\n if num_zenith_angle_bins <= 0:\n num_zenith_angle_bins = None\n else:\n error_checking.assert_is_geq(num_zenith_angle_bins, 3)\n\n if num_albedo_bins <= 0:\n num_albedo_bins = None\n else:\n error_checking.assert_is_geq(num_albedo_bins, 3)\n\n if num_shortwave_sfc_down_flux_bins <= 0:\n num_shortwave_sfc_down_flux_bins = None\n else:\n error_checking.assert_is_geq(num_shortwave_sfc_down_flux_bins, 3)\n\n if num_aod_bins <= 0:\n num_aod_bins = None\n else:\n error_checking.assert_is_geq(num_aod_bins, 3)\n\n if num_surface_temp_bins <= 0:\n num_surface_temp_bins = None\n else:\n error_checking.assert_is_geq(num_surface_temp_bins, 3)\n\n if num_longwave_sfc_down_flux_bins <= 0:\n num_longwave_sfc_down_flux_bins = None\n else:\n error_checking.assert_is_geq(num_longwave_sfc_down_flux_bins, 3)\n\n if num_longwave_toa_up_flux_bins <= 0:\n num_longwave_toa_up_flux_bins = None\n else:\n error_checking.assert_is_geq(num_longwave_toa_up_flux_bins, 3)\n\n print('Reading data from: \"{0:s}\"...\\n'.format(input_file_name))\n prediction_dict = prediction_io.read_file(input_file_name)\n\n if num_zenith_angle_bins is not None:\n edge_zenith_angles_rad = numpy.linspace(\n 0, MAX_ZENITH_ANGLE_RAD, num=num_zenith_angle_bins + 1, dtype=float\n )\n min_zenith_angles_rad = edge_zenith_angles_rad[:-1]\n max_zenith_angles_rad = edge_zenith_angles_rad[1:]\n\n for k in range(num_zenith_angle_bins):\n this_prediction_dict = prediction_io.subset_by_zenith_angle(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_zenith_angle_rad=min_zenith_angles_rad[k],\n max_zenith_angle_rad=max_zenith_angles_rad[k]\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, zenith_angle_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with zenith angles {1:.4f}...{2:.4f} '\n 'rad) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_zenith_angles_rad[k], max_zenith_angles_rad[k],\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_albedo_bins is not None:\n edge_albedos = numpy.linspace(\n 0, 1, num=num_albedo_bins + 1, dtype=float\n )\n min_albedos = edge_albedos[:-1]\n max_albedos = edge_albedos[1:]\n\n # Split by albedo.\n for k in range(num_albedo_bins):\n this_prediction_dict = prediction_io.subset_by_albedo(\n 
prediction_dict=copy.deepcopy(prediction_dict),\n min_albedo=min_albedos[k], max_albedo=max_albedos[k]\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, albedo_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with albedos {1:.4f}...{2:.4f}) '\n 'to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_albedos[k], max_albedos[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n # Split by month.\n for k in range(1, 13):\n this_prediction_dict = prediction_io.subset_by_month(\n prediction_dict=copy.deepcopy(prediction_dict), desired_month=k\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, month=k,\n raise_error_if_missing=False\n )\n print('Writing {0:d} examples to: \"{1:s}\"...'.format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_shortwave_sfc_down_flux_bins is not None:\n edge_fluxes_w_m02 = numpy.linspace(\n 0, MAX_SHORTWAVE_SFC_DOWN_FLUX_W_M02,\n num=num_shortwave_sfc_down_flux_bins + 1, dtype=float\n )\n min_fluxes_w_m02 = edge_fluxes_w_m02[:-1]\n max_fluxes_w_m02 = edge_fluxes_w_m02[1:]\n max_fluxes_w_m02[-1] = numpy.inf\n\n for k in range(num_shortwave_sfc_down_flux_bins):\n this_prediction_dict = (\n prediction_io.subset_by_shortwave_sfc_down_flux(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_flux_w_m02=min_fluxes_w_m02[k],\n max_flux_w_m02=max_fluxes_w_m02[k]\n )\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, shortwave_sfc_down_flux_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing 
{0:d} examples (with shortwave surface downwelling '\n 'fluxes of {1:.4f}...{2:.4f} W m^-2) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_fluxes_w_m02[k], max_fluxes_w_m02[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_longwave_sfc_down_flux_bins is not None:\n edge_fluxes_w_m02 = numpy.linspace(\n 0, MAX_LONGWAVE_SFC_DOWN_FLUX_W_M02,\n num=num_longwave_sfc_down_flux_bins + 1, dtype=float\n )\n min_fluxes_w_m02 = edge_fluxes_w_m02[:-1]\n max_fluxes_w_m02 = edge_fluxes_w_m02[1:]\n max_fluxes_w_m02[-1] = numpy.inf\n\n for k in range(num_longwave_sfc_down_flux_bins):\n this_prediction_dict = (\n prediction_io.subset_by_longwave_sfc_down_flux(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_flux_w_m02=min_fluxes_w_m02[k],\n max_flux_w_m02=max_fluxes_w_m02[k]\n )\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, longwave_sfc_down_flux_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with longwave surface downwelling '\n 'fluxes of {1:.4f}...{2:.4f} W m^-2) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_fluxes_w_m02[k], max_fluxes_w_m02[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_longwave_toa_up_flux_bins is not None:\n edge_fluxes_w_m02 = numpy.linspace(\n 0, MAX_LONGWAVE_TOA_UP_FLUX_W_M02,\n num=num_longwave_toa_up_flux_bins + 1, dtype=float\n )\n min_fluxes_w_m02 = edge_fluxes_w_m02[:-1]\n max_fluxes_w_m02 = edge_fluxes_w_m02[1:]\n max_fluxes_w_m02[-1] = numpy.inf\n\n for k in range(num_longwave_toa_up_flux_bins):\n this_prediction_dict = (\n 
prediction_io.subset_by_longwave_toa_up_flux(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_flux_w_m02=min_fluxes_w_m02[k],\n max_flux_w_m02=max_fluxes_w_m02[k]\n )\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, longwave_toa_up_flux_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with longwave TOA upwelling '\n 'fluxes of {1:.4f}...{2:.4f} W m^-2) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_fluxes_w_m02[k], max_fluxes_w_m02[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_aod_bins is None and num_surface_temp_bins is None:\n return\n\n valid_times_unix_sec = example_utils.parse_example_ids(\n prediction_dict[prediction_io.EXAMPLE_IDS_KEY]\n )[example_utils.VALID_TIMES_KEY]\n\n example_file_names = example_io.find_many_files(\n directory_name=example_dir_name,\n first_time_unix_sec=numpy.min(valid_times_unix_sec),\n last_time_unix_sec=numpy.max(valid_times_unix_sec),\n raise_error_if_any_missing=False\n )\n\n example_id_strings = []\n aerosol_extinction_matrix_metres01 = numpy.array([])\n height_matrix_m_agl = numpy.array([])\n surface_temps_kelvins = numpy.array([])\n\n for this_file_name in example_file_names:\n print('Reading data from: \"{0:s}\"...'.format(this_file_name))\n this_example_dict = example_io.read_file(\n netcdf_file_name=this_file_name, exclude_summit_greenland=False,\n max_shortwave_heating_k_day01=numpy.inf,\n min_longwave_heating_k_day01=-1 * numpy.inf,\n max_longwave_heating_k_day01=numpy.inf\n )\n\n example_id_strings += this_example_dict[example_utils.EXAMPLE_IDS_KEY]\n\n if num_surface_temp_bins is not None:\n these_temps_kelvins = example_utils.get_field_from_dict(\n example_dict=this_example_dict,\n field_name=example_utils.SURFACE_TEMPERATURE_NAME\n )\n surface_temps_kelvins = numpy.concatenate(\n (surface_temps_kelvins, these_temps_kelvins), axis=0\n )\n\n if num_aod_bins is None:\n continue\n\n this_extinction_matrix_metres01 = example_utils.get_field_from_dict(\n example_dict=this_example_dict,\n field_name=example_utils.AEROSOL_EXTINCTION_NAME\n )\n\n if aerosol_extinction_matrix_metres01.size == 0:\n aerosol_extinction_matrix_metres01 = (\n this_extinction_matrix_metres01 + 0.\n )\n else:\n aerosol_extinction_matrix_metres01 = numpy.concatenate((\n aerosol_extinction_matrix_metres01,\n this_extinction_matrix_metres01\n ), axis=0)\n\n if (\n example_utils.HEIGHT_NAME in\n this_example_dict[example_utils.VECTOR_PREDICTOR_NAMES_KEY]\n ):\n this_height_matrix_m_agl = example_utils.get_field_from_dict(\n 
example_dict=this_example_dict,\n field_name=example_utils.HEIGHT_NAME\n )\n\n if height_matrix_m_agl.size == 0:\n height_matrix_m_agl = this_height_matrix_m_agl + 0.\n else:\n height_matrix_m_agl = numpy.concatenate(\n (height_matrix_m_agl, this_height_matrix_m_agl), axis=0\n )\n else:\n if height_matrix_m_agl.size == 0:\n height_matrix_m_agl = (\n this_example_dict[example_utils.HEIGHTS_KEY] + 0.\n )\n\n desired_indices = example_utils.find_examples(\n all_id_strings=example_id_strings,\n desired_id_strings=prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n allow_missing=False\n )\n del example_id_strings\n\n if num_surface_temp_bins is not None:\n surface_temps_kelvins = surface_temps_kelvins[desired_indices]\n\n edge_temps_kelvins = numpy.linspace(\n MIN_SURFACE_TEMP_KELVINS, MAX_SURFACE_TEMP_KELVINS,\n num=num_surface_temp_bins + 1, dtype=float\n )\n min_temps_kelvins = edge_temps_kelvins[:-1]\n max_temps_kelvins = edge_temps_kelvins[1:]\n min_temps_kelvins[0] = -numpy.inf\n max_temps_kelvins[-1] = numpy.inf\n\n for k in range(num_surface_temp_bins):\n these_indices = numpy.where(numpy.logical_and(\n surface_temps_kelvins >= min_temps_kelvins[k],\n surface_temps_kelvins <= max_temps_kelvins[k]\n ))[0]\n\n this_prediction_dict = prediction_io.subset_by_index(\n prediction_dict=copy.deepcopy(prediction_dict),\n desired_indices=these_indices\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, surface_temp_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with surface temperatures of '\n '{1:.4f}...{2:.4f} K) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_temps_kelvins[k], max_temps_kelvins[k],\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n if num_aod_bins is None:\n return\n\n aerosol_extinction_matrix_metres01 = (\n aerosol_extinction_matrix_metres01[desired_indices, :]\n )\n\n if len(height_matrix_m_agl.shape) == 2:\n height_matrix_m_agl = height_matrix_m_agl[desired_indices, :]\n num_examples = aerosol_extinction_matrix_metres01.shape[0]\n aerosol_optical_depths = numpy.full(num_examples, numpy.nan)\n print('\\n')\n\n for i in range(num_examples):\n if numpy.mod(i, 1000) == 0:\n print((\n 'Have computed aerosol optical depth for {0:d} of {1:d} '\n 'profiles...'\n ).format(\n i, num_examples\n ))\n\n aerosol_optical_depths[i] = simps(\n y=aerosol_extinction_matrix_metres01[i, :],\n x=height_matrix_m_agl[i, :],\n even='avg'\n )\n\n print((\n 'Have computed aerosol optical depth for all {0:d} profiles!\\n'\n ).format(\n num_examples\n ))\n else:\n aerosol_optical_depths = 
simps(\n y=aerosol_extinction_matrix_metres01, x=height_matrix_m_agl,\n axis=-1, even='avg'\n )\n\n edge_aerosol_optical_depths = numpy.linspace(\n 0, MAX_AEROSOL_OPTICAL_DEPTH,\n num=num_aod_bins + 1, dtype=float\n )\n min_aerosol_optical_depths = edge_aerosol_optical_depths[:-1]\n max_aerosol_optical_depths = edge_aerosol_optical_depths[1:]\n max_aerosol_optical_depths[-1] = numpy.inf\n\n for k in range(num_aod_bins):\n these_indices = numpy.where(numpy.logical_and(\n aerosol_optical_depths >= min_aerosol_optical_depths[k],\n aerosol_optical_depths <= max_aerosol_optical_depths[k]\n ))[0]\n\n this_prediction_dict = prediction_io.subset_by_index(\n prediction_dict=copy.deepcopy(prediction_dict),\n desired_indices=these_indices\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, aerosol_optical_depth_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with aerosol optical depths of '\n '{1:.4f}...{2:.4f}) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_aerosol_optical_depths[k],\n max_aerosol_optical_depths[k],\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )",
"def convert_to_lf(input: str, output_1: str, output_2: str = None):\n\n commands = {'predicates': ['jump', 'run', 'look', 'turn', 'walk'],\n 'directions': ['right', 'left'],\n 'manners': ['around', 'opposite'],\n 'connectives': ['and', 'after'],\n 'repetitions': ['twice', 'thrice']}\n\n examples_parsed = []\n\n all_possible_tokens = []\n for tokens in commands.values():\n all_possible_tokens += tokens\n with open(input, 'r') as f:\n for row in f:\n connective = None\n question = row.split('OUT:')[0].replace('IN:', '').strip()\n denotation = row.split('OUT:')[1].strip()\n parts = [question]\n for token in parts[0].split(' '):\n assert token in all_possible_tokens\n for connective_candidate in commands['connectives']:\n parts = parts[0].split(connective_candidate)\n if len(parts) > 1:\n connective = connective_candidate\n break\n inner_programs = []\n for i, part in enumerate(parts):\n inner_programs.append(get_inner_program(part.split(' '), commands))\n if not connective:\n assert len(inner_programs) == 1\n program = inner_programs[0]\n else:\n assert len(inner_programs) == 2\n program = '{} ( {} , {} )'.format(PREFIX+connective+SUFFIX, inner_programs[0],\n inner_programs[1])\n program = program.replace(' ', ' ')\n examples_parsed.append({'question': question, 'program': program, 'answer': denotation})\n if output_2 is not None: # take 20% for dev\n random.shuffle(examples_parsed)\n train_size = math.ceil(0.8 * len(examples_parsed))\n with open(output_1, 'w') as f_1:\n with open(output_2, 'w') as f_2:\n for i, ex in enumerate(examples_parsed):\n if i < train_size:\n json.dump(ex, f_1)\n f_1.write('\\n')\n else:\n json.dump(ex, f_2)\n f_2.write('\\n')\n else:\n with open(output_1, 'w') as f_1:\n for i, ex in enumerate(examples_parsed):\n json.dump(ex, f_1)\n f_1.write('\\n')",
"def main():\n argparser = ArgumentParser()\n argparser.add_argument('--case', type=int, required=True,\n help='case number to create observations e.g. 1 if 1.json')\n args = argparser.parse_args()\n\n case = args.case\n observation_file = os.path.join(OBSERVATION_DIR, '{}.json'.format(case))\n with open(observation_file, 'r') as f:\n observation_config = json.load(f)\n\n nodes = observation_config['nodes']\n edges = observation_config['edges']\n observations = observation_config['observations']\n\n # solution part\n parameters = _get_learned_parameters(nodes=nodes, edges=edges, observations=observations)\n # end solution part\n\n # json only recognises floats, not np.float, so we need to cast the values into floats.\n for node, node_params in parameters.items():\n for param, val in node_params.items():\n node_params[param] = float(val)\n parameters[node] = node_params\n\n if not os.path.exists(PREDICTION_DIR):\n os.makedirs(PREDICTION_DIR)\n prediction_file = os.path.join(PREDICTION_DIR, '{}.json'.format(case))\n\n with open(prediction_file, 'w') as f:\n json.dump(parameters, f, indent=1)\n print('INFO: Results for test case {} are stored in {}'.format(case, prediction_file))",
"def DNN_Spectral_Mapping(args):\r\n PATH_ROOT = os.getcwd()\r\n os.chdir(PATH_ROOT)\r\n\r\n # noisy_train ; input of DNN\r\n path_dnn_noisy_train = os.path.join(PATH_ROOT, args.input_noisy_train)\r\n dnn_magnitude_noisy_train,_,sr = perform_stft(path_dnn_noisy_train, args)\r\n # dnn_magnitude_noisy_train= splice_frames(dnn_magnitude_noisy_train.T, args.left_context, args.right_context).T\r\n\r\n # clean_train ; output of DNN\r\n path_dnn_clean_train = os.path.join(PATH_ROOT, args.input_clean_train)\r\n dnn_magnitude_clean_train,_,_ = perform_stft(path_dnn_clean_train, args)\r\n\r\n # noise_train\r\n path_noise = os.path.join(PATH_ROOT, args.input_noise)\r\n dnn_magnitude_noise_train,_,_ = perform_stft(path_noise, args)\r\n\r\n path_clean_test = os.path.join(PATH_ROOT , args.input_clean_test)\r\n (sr, clean_test) = wav.read(path_clean_test)\r\n\r\n # noisy_test\r\n path_noisy_test = os.path.join(PATH_ROOT, args.input_noisy_test)\r\n (sr, noisy_test) = wav.read(path_noisy_test)\r\n dnn_magnitude_noisy_test, dnn_phase_noisy_test, _ = perform_stft(path_noisy_test, args)\r\n # magnitude_noisy_test= splice_frames(magnitude_noisy_test.T, args.left_context, args.right_context).T\r\n\r\n X_train = np.log(dnn_magnitude_noisy_train.T**2)\r\n y_train = np.log(dnn_magnitude_clean_train.T**2)\r\n X_test = np.log(dnn_magnitude_noisy_test.T**2)\r\n\r\n # DNN training stage\r\n #####################################################################################\r\n k.clear_session()\r\n def get_dnn_model(X_train, y_train, args):\r\n # LeakyReLU, PReLU, ELU, ThresholdedReLU, SReLU\r\n model = Sequential()\r\n model.add(Dense(args.n_hidden, input_dim=X_train.shape[1], init='glorot_normal')) # glorot_normal,he_normal\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(args.n_hidden, init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(args.n_hidden, init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(units=y_train.shape[1], init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n model.add(Activation('linear'))\r\n\r\n model.compile(loss='mse',\r\n optimizer='adam',\r\n metrics=['mse'])\r\n # model.summary()\r\n return model\r\n\r\n model = get_dnn_model(X_train, y_train, args)\r\n with tf.device('/gpu:0'):\r\n model_info = model.fit(X_train, y_train, batch_size=args.n_batch, epochs=args.n_epoch)\r\n # plot_model_history(model_info)\r\n print(\"Training complete.\")\r\n\r\n # Enhancement stage\r\n #####################################################################################\r\n magnitude_estimated_clean = model.predict(X_test).T\r\n magnitude_estimated_clean = np.exp(np.sqrt(magnitude_estimated_clean))\r\n # magnitude_estimated_clean = magnitude_estimated_clean.astype('int16')\r\n\r\n # magnitude_estimated_clean=norm(magnitude_estimated_clean)\r\n #Reconstruction\r\n stft_reconstructed_clean = merge_magphase(magnitude_estimated_clean, dnn_phase_noisy_test)\r\n signal_reconstructed_clean =librosa.istft(stft_reconstructed_clean, hop_length=args.hop_size, window=args.window)\r\n signal_reconstructed_clean = signal_reconstructed_clean.astype('int16')\r\n 
#####################################################################################\r\n output_path_estimated_noisy_test = os.path.join(PATH_ROOT, args.output_file)\r\n wav.write(output_path_estimated_noisy_test,sr,signal_reconstructed_clean)\r\n\r\n # Display signals, spectrograms\r\n show_signal(clean_test,noisy_test,signal_reconstructed_clean,sr)\r\n show_spectrogram(clean_test,noisy_test, signal_reconstructed_clean, sr, args.num_FFT,args.hop_size)\r\n # =============================================================================\r\n # PESQ\r\n # =============================================================================\r\n # PATH_MATLAB='\"C:/Program Files/MATLAB/R2014a/bin/matlab.exe\"'\r\n\r\n # PATH_MATLAB1 = os.path.join(PATH_ROOT , 'PESQ_MATLAB/execute_pesq.m')\r\n # from pymatbridge import Matlab\r\n # mlab = Matlab()\r\n # mlab = Matlab(executable=PATH_MATLAB)\r\n # mlab.start()\r\n\r\n # #PATH_MATLAB1 = os.path.join(PATH_ROOT , \"PESQ_MATLAB\",\"execute_pesq.m\")\r\n # result_PESQ = mlab.run_func(PATH_MATLAB1, {'arg1': sr})\r\n # noisy_original_PESQ = result_PESQ['result'][0][0]\r\n # enhanced_PESQ = result_PESQ['result'][1][0]\r\n # mlab.stop()\r\n\r\n # snr=args.input_noisy_test\r\n # name=snr[53:-9]\r\n # print(\"[%s]\\n Original: %.2f\\n Spectral-Mapping\\t: %.2f\"%(name,noisy_original_PESQ,enhanced_PESQ))\r",
"def n_n(output_path):\n lef = {}\n rig = {}\n rellef = {}\n relrig = {}\n\n triple = open(Path(output_path, \"train2id.txt\"), \"r\")\n valid = open(Path(output_path, \"valid2id.txt\"), \"r\")\n test = open(Path(output_path, \"test2id.txt\"), \"r\")\n\n ls = triple.readlines()\n tot = len(ls) - 1\n\n # (int)(triple.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = triple.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n ls = valid.readlines()\n tot = len(ls) - 1\n # (int)(valid.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = valid.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n ls = test.readlines()\n tot = len(ls) - 1\n # (int)(test.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = test.readline()\n h, t, r = content.strip().split()\n if not (h, r) in lef:\n lef[(h, r)] = []\n if not (r, t) in rig:\n rig[(r, t)] = []\n lef[(h, r)].append(t)\n rig[(r, t)].append(h)\n if not r in rellef:\n rellef[r] = {}\n if not r in relrig:\n relrig[r] = {}\n rellef[r][h] = 1\n relrig[r][t] = 1\n\n test.close()\n valid.close()\n triple.close()\n\n f = open(Path(output_path, \"type_constrain.txt\"), \"w\")\n f.write(\"%d\\n\" % (len(rellef)))\n for i in rellef:\n f.write(\"%s\\t%d\" % (i, len(rellef[i])))\n for j in rellef[i]:\n f.write(\"\\t%s\" % (j))\n f.write(\"\\n\")\n f.write(\"%s\\t%d\" % (i, len(relrig[i])))\n for j in relrig[i]:\n f.write(\"\\t%s\" % (j))\n f.write(\"\\n\")\n f.close()\n\n rellef = {}\n totlef = {}\n relrig = {}\n totrig = {}\n\n for i in lef:\n if not i[1] in rellef:\n rellef[i[1]] = 0\n totlef[i[1]] = 0\n rellef[i[1]] += len(lef[i])\n totlef[i[1]] += 1.0\n\n for i in rig:\n if not i[0] in relrig:\n relrig[i[0]] = 0\n totrig[i[0]] = 0\n relrig[i[0]] += len(rig[i])\n totrig[i[0]] += 1.0\n\n s11 = 0\n s1n = 0\n sn1 = 0\n snn = 0\n f = open(Path(output_path, \"test2id.txt\"), \"r\")\n ls = f.readlines()\n tot = len(ls) - 1\n # tot = (int)(f.readline())\n for i in range(tot):\n content = ls[i + 1]\n # content = f.readline()\n h, t, r = content.strip().split()\n rign = rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if rign <= 1.5 and lefn <= 1.5:\n s11 += 1\n if rign > 1.5 and lefn <= 1.5:\n s1n += 1\n if rign <= 1.5 and lefn > 1.5:\n sn1 += 1\n if rign > 1.5 and lefn > 1.5:\n snn += 1\n f.close()\n\n f = open(Path(output_path, \"test2id.txt\"), \"r\")\n f11 = open(Path(output_path, \"1-1.txt\"), \"w\")\n f1n = open(Path(output_path, \"1-n.txt\"), \"w\")\n fn1 = open(Path(output_path, \"n-1.txt\"), \"w\")\n fnn = open(Path(output_path, \"n-n.txt\"), \"w\")\n fall = open(Path(output_path,\"test2id_all.txt\"), \"w\")\n fall = open(Path(output_path,\"test2id_all.txt\"), \"w\")\n\n ls = f.readlines()\n tot = len(ls) - 1\n\n # tot = (int)(f.readline())\n fall.write(\"%d\\n\" % (tot))\n f11.write(\"%d\\n\" % (s11))\n f1n.write(\"%d\\n\" % (s1n))\n fn1.write(\"%d\\n\" % (sn1))\n fnn.write(\"%d\\n\" % (snn))\n for i in range(tot):\n content = ls[i + 1]\n # content = f.readline()\n h, t, r = content.strip().split()\n rign = 
rellef[r] / totlef[r]\n lefn = relrig[r] / totrig[r]\n if rign <= 1.5 and lefn <= 1.5:\n f11.write(content)\n fall.write(\"0\" + \"\\t\" + content)\n if rign > 1.5 and lefn <= 1.5:\n f1n.write(content)\n fall.write(\"1\" + \"\\t\" + content)\n if rign <= 1.5 and lefn > 1.5:\n fn1.write(content)\n fall.write(\"2\" + \"\\t\" + content)\n if rign > 1.5 and lefn > 1.5:\n fnn.write(content)\n fall.write(\"3\" + \"\\t\" + content)\n fall.close()\n f.close()\n f11.close()\n f1n.close()\n fn1.close()\n fnn.close()",
"def main():\n\tif len(sys.argv) < 12 or len(sys.argv) > 13:\n\t\tprint(\"Input parameters must be: 'filename lambda mu C c0 Q theta L H simulation_time is_debug repeats(optionally)'\")\n\telse:\n\t\tstart_time = time.time()\n\n\t\tfile_name = sys.argv[1]\n\t\tlambd = float(sys.argv[2])\n\t\tmu = float(sys.argv[3])\n\t\tC = int(sys.argv[4])\n\t\tc0 = int(sys.argv[5])\n\t\tQ = int(sys.argv[6])\n\t\ttheta = float(sys.argv[7])\n\t\tL = int(sys.argv[8])\n\t\tH = int(sys.argv[9])\n\t\tsimulation_time = float(sys.argv[10]);\n\t\tis_debug = True if sys.argv[11] == \"True\" else False;\n\t\trepeats = int(sys.argv[12]) if len(sys.argv) == 13 else 1;\n\n\t\tprint(\"Simulation started for params: lambda =\", lambd,\n\t\t\t \", mu =\", mu,\n\t\t\t \", C =\", C,\n\t\t\t \", c0 =\", c0,\n\t\t\t \", Q =\", Q,\n\t\t\t \", theta =\", theta,\n\t\t\t \", L =\", L,\n\t\t\t \", H =\", H,\n\t\t\t \", repeats =\", repeats)\n\n\t\tblocked = 0\n\t\tserved = 0\n\t\tgenerated = 0\n\t\tB = 0\n\t\tN = 0\n\n\t\tsimulation = Simulation(\"m/m/c[c0]/r[l,h]\", lambd, mu, theta, C, c0, L, H, simulation_time, Q, is_debug)\n\t\tfor i in range(0, repeats):\n\t\t\tsimulation = Simulation(\"m/m/c[c0]/r[l,h]\", lambd, mu, theta, C, c0, L, H, simulation_time, Q, is_debug)\n\t\t\tsimulation.start()\n\t\t\tblocked += simulation.queue.blocked\n\t\t\tserved += simulation.served_count\n\t\t\tgenerated += simulation.flow.generated_count\n\t\t\tB += simulation.queue.blocked/(simulation.served_count+simulation.queue.blocked)\n\t\t\tN += simulation.served_count/simulation_time\n\t\tend_time = time.time()\n\n\t\tblocked = blocked/repeats\n\t\tserved = served/repeats\n\t\tgenerated = generated/repeats\n\t\tB = B/repeats\n\t\tN = N/repeats\n\n\t\tprint( \"\")\n\t\tprint( \"Summary results:\")\n\t\tprint( \"blocked=\", blocked, \" served=\", served, \", generated=\", generated)\n\t\tprint(\"B = \", B)\n\t\tprint(\"N = \", N)\n\t\tprint(\"Execution time = %s seconds\" % (end_time - start_time))\n\t\tprint( \"... to be implemented more summary ...\")\n\n\t\t# write stats to file\n\t\tabs_path = os.path.abspath(__file__)\n\t\tpath = os.path.relpath('stats', abs_path)\n\t\tpath = os.path.join(path, file_name + '-(%s,%s,%s,%s,%s,%s,%s,%s).csv' % (lambd,mu,theta,C,c0,L,H,simulation_time))\n\n\t\toutfile=open(path,'w')\n\t\toutput = csv.writer(outfile, delimiter=';')\n\t\toutput.writerow(['Request ID','Queue', 'Arrival_Time','Queue_Arrival_time','Server_Arrival_time','alpha','beta'])\n\n\t\ti=0\n\t\tfor request in simulation.served_requests:\n\t\t\ti=i+1\n\t\t\toutrow=[]\n\t\t\toutrow.append(request.ID)\n\t\t\toutrow.append(request.queue_size_at_serving)\n\t\t\toutrow.append(request.arrival_time)\n\t\t\toutrow.append(request.queue_arrival_time)\n\t\t\toutrow.append(request.server_arrival_time)\n\t\t\toutrow.append(request.alpha)\n\t\t\toutrow.append(request.beta)\n\t\t\toutput.writerow(outrow)\n\t\toutfile.close()\n\n\t\treturn simulation",
"def main(fileName='default.out', missPenalty=30):\n numTracks = 0\n flag1 = False\n flag2 = False\n flag3 = False\n flag4 = False\n flag5 = False\n flag6 = False\n flag7 = False\n flag8 = False\n flag9 = False\n flag10 = False\n flag11 = False\n flag12 = False\n flag13 = False\n flag14 = False\n flag15 = False\n flag16 = False\n flag17 = False\n flag18 = False\n flag19 = False\n flag20 = False\n flag21 = False\n flag22 = False\n flag23 = False\n flag24 = False\n flag25 = False\n flag26 = False\n flag27 = False\n flag28 = False\n flag29 = False\n flag30 = False\n flag31 = False\n if len(sys.argv) > 1:\n fileName = sys.argv[1]\n else:\n print \">python process.py <output file> <track 1> <track 2> [<track 3>] [<track 4>] [<track 5>]\"\n return 0\n if len(sys.argv) > 2:\n fileNameIn1 = sys.argv[2]\n flag1 = True\n else:\n print \"Not enough input files to create tracks...\"\n print \">python process.py <output file> <track 1> <track 2> [<track 3>] [<track 4>] [<track 5>]\"\n return 0 \n if len(sys.argv) > 3:\n fileNameIn2 = sys.argv[3]\n numTracks = numTracks + 1\n flag2 = True\n else:\n print \"Only one input file present, just use it as your track :) ...\"\n return 0 \n if len(sys.argv) > 4:\n fileNameIn3 = sys.argv[4]\n numTracks = numTracks + 1\n flag3 = True\n\n if len(sys.argv) > 5:\n fileNameIn4 = sys.argv[5]\n numTracks = numTracks + 1\n flag4 = True\n\n if len(sys.argv) > 6:\n fileNameIn5 = sys.argv[6]\n numTracks = numTracks + 1\n flag5 = True \n\n if len(sys.argv) > 7:\n fileNameIn6 = sys.argv[7]\n numTracks = numTracks + 1\n flag6 = True\n\n if len(sys.argv) > 8:\n fileNameIn7 = sys.argv[8]\n numTracks = numTracks + 1\n flag7 = True \n\n if len(sys.argv) > 9:\n fileNameIn8 = sys.argv[9]\n numTracks = numTracks + 1\n flag8 = True\n\n if len(sys.argv) > 10:\n fileNameIn9 = sys.argv[10]\n numTracks = numTracks + 1\n flag9 = True \n\n if len(sys.argv) > 11:\n fileNameIn10 = sys.argv[11]\n numTracks = numTracks + 1\n flag10 = True \n\n if len(sys.argv) > 12:\n fileNameIn11 = sys.argv[12]\n numTracks = numTracks + 1\n flag11 = True\n\n if len(sys.argv) > 13:\n fileNameIn12 = sys.argv[13]\n numTracks = numTracks + 1\n flag12 = True \n \n if len(sys.argv) > 14:\n fileNameIn13 = sys.argv[14]\n numTracks = numTracks + 1\n flag13 = True\n\n if len(sys.argv) > 15:\n fileNameIn14 = sys.argv[15]\n numTracks = numTracks + 1\n flag14 = True\n\n if len(sys.argv) > 16:\n fileNameIn15 = sys.argv[16]\n numTracks = numTracks + 1\n flag15 = True\n\n if len(sys.argv) > 17:\n fileNameIn16 = sys.argv[17]\n numTracks = numTracks + 1\n flag16 = True\n\n if len(sys.argv) > 18:\n fileNameIn17 = sys.argv[18]\n numTracks = numTracks + 1\n flag17 = True\n\n if len(sys.argv) > 19:\n fileNameIn18 = sys.argv[19]\n numTracks = numTracks + 1\n flag18 = True \n\n if len(sys.argv) > 20:\n fileNameIn19 = sys.argv[20]\n numTracks = numTracks + 1\n flag19 = True \n\n if len(sys.argv) > 21:\n fileNameIn20 = sys.argv[21]\n numTracks = numTracks + 1\n flag20 = True \n\n if len(sys.argv) > 22:\n fileNameIn21 = sys.argv[22]\n numTracks = numTracks + 1\n flag21 = True \n\n if len(sys.argv) > 23:\n fileNameIn22 = sys.argv[23]\n numTracks = numTracks + 1\n flag22 = True \n\n if len(sys.argv) > 24:\n fileNameIn23 = sys.argv[24]\n numTracks = numTracks + 1\n flag23 = True \n\n if len(sys.argv) > 25:\n fileNameIn24 = sys.argv[25]\n numTracks = numTracks + 1\n flag24 = True\n\n if len(sys.argv) > 26:\n fileNameIn25 = sys.argv[26]\n numTracks = numTracks + 1\n flag25 = True\n\n if len(sys.argv) > 27:\n fileNameIn26 = sys.argv[27]\n 
numTracks = numTracks + 1\n flag26 = True\n\n if len(sys.argv) > 28:\n fileNameIn27 = sys.argv[28]\n numTracks = numTracks + 1\n flag27 = True\n\n if len(sys.argv) > 29:\n fileNameIn28 = sys.argv[29]\n numTracks = numTracks + 1\n flag28 = True\n\n if len(sys.argv) > 30:\n fileNameIn29 = sys.argv[30]\n numTracks = numTracks + 1\n flag29 = True\n\n if len(sys.argv) > 31:\n fileNameIn30 = sys.argv[31]\n numTracks = numTracks + 1\n flag30 = True\n\n if len(sys.argv) > 32:\n fileNameIn31 = sys.argv[32]\n numTracks = numTracks + 1\n flag31 = True\n\n node0 = ReadNucTable(fileNameIn1)\n numAttributes = len(node0[0])\n node1 = ReadNucTable(fileNameIn2)\n if (flag3):\n node2 = ReadNucTable(fileNameIn3)\n if (flag4):\n node3 = ReadNucTable(fileNameIn4)\n if (flag5):\n node4 = ReadNucTable(fileNameIn5)\n if (flag6):\n node5 = ReadNucTable(fileNameIn6)\n if (flag7):\n node6 = ReadNucTable(fileNameIn7)\n if (flag8):\n node7 = ReadNucTable(fileNameIn8)\n if (flag9):\n node8 = ReadNucTable(fileNameIn9)\n if (flag10):\n node9 = ReadNucTable(fileNameIn10)\n if (flag11):\n node10 = ReadNucTable(fileNameIn11)\n if (flag12):\n node11 = ReadNucTable(fileNameIn12)\n if (flag13):\n node12 = ReadNucTable(fileNameIn13)\n if (flag14):\n node13 = ReadNucTable(fileNameIn14)\n if (flag15):\n node14 = ReadNucTable(fileNameIn15)\n if (flag16):\n node15 = ReadNucTable(fileNameIn16)\n if (flag17):\n node16 = ReadNucTable(fileNameIn17) \n if (flag18):\n node17 = ReadNucTable(fileNameIn18) \n if (flag19):\n node18 = ReadNucTable(fileNameIn19)\n if (flag20):\n node19 = ReadNucTable(fileNameIn20) \n if (flag21):\n node20 = ReadNucTable(fileNameIn21) \n if (flag22):\n node21 = ReadNucTable(fileNameIn22)\n if (flag23):\n node22 = ReadNucTable(fileNameIn23)\n if (flag24):\n node23 = ReadNucTable(fileNameIn24)\n if (flag25):\n node24 = ReadNucTable(fileNameIn25)\n if (flag26):\n node25 = ReadNucTable(fileNameIn26)\n if (flag27):\n node26 = ReadNucTable(fileNameIn27)\n if (flag28):\n node27 = ReadNucTable(fileNameIn28)\n if (flag29):\n node28 = ReadNucTable(fileNameIn29)\n if (flag30):\n node29 = ReadNucTable(fileNameIn30)\n if (flag31):\n node30 = ReadNucTable(fileNameIn31)\n #modifications to add more layers go here\n \n##this part for building solution\n##NB!!! 
It requires gpsol installed (no check for this) \n \n layer0 = hg.layerOfNodes(node0)\n layer1 = hg.layerOfNodes(node1)\n if (flag3):\n layer2 = hg.layerOfNodes(node2)\n if (flag4):\n layer3 = hg.layerOfNodes(node3)\n if (flag5):\n layer4 = hg.layerOfNodes(node4)\n if (flag6):\n layer5 = hg.layerOfNodes(node5)\n if (flag7):\n layer6 = hg.layerOfNodes(node6)\n if (flag8):\n layer7 = hg.layerOfNodes(node7)\n if (flag9):\n layer8 = hg.layerOfNodes(node8)\n if (flag10):\n layer9 = hg.layerOfNodes(node9)\n if (flag11):\n layer10 = hg.layerOfNodes(node10)\n if (flag12):\n layer11 = hg.layerOfNodes(node11)\n if (flag13):\n layer12 = hg.layerOfNodes(node12)\n if (flag14):\n layer13 = hg.layerOfNodes(node13)\n if (flag15):\n layer14 = hg.layerOfNodes(node14)\n if (flag16):\n layer15 = hg.layerOfNodes(node15)\n if (flag17):\n layer16 = hg.layerOfNodes(node16)\n if (flag18):\n layer17 = hg.layerOfNodes(node17)\n if (flag19):\n layer18 = hg.layerOfNodes(node18)\n if (flag20):\n layer19 = hg.layerOfNodes(node19)\n if (flag21):\n layer20 = hg.layerOfNodes(node20)\n if (flag22):\n layer21 = hg.layerOfNodes(node21)\n if (flag23):\n layer22 = hg.layerOfNodes(node22)\n if (flag24):\n layer23 = hg.layerOfNodes(node23)\n if (flag25):\n layer24 = hg.layerOfNodes(node24)\n if (flag26):\n layer25 = hg.layerOfNodes(node25)\n if (flag27):\n layer26 = hg.layerOfNodes(node26)\n if (flag28):\n layer27 = hg.layerOfNodes(node27)\n if (flag29):\n layer28 = hg.layerOfNodes(node28)\n if (flag30):\n layer29 = hg.layerOfNodes(node29)\n if (flag31):\n layer30 = hg.layerOfNodes(node30)\n #modifications to add more layers go here\n print 'layers processed'\n graph = hg.hyperGraph(layer0)\n graph.missPenalty = missPenalty\n graph.okil = missPenalty*2\n print \"layer0 done\"\n graph.AddLayer(layer1)\n print \"layer1 done\"\n if (flag3):\n graph.AddLayer(layer2)\n print \"layer2 done\"\n if (flag4):\n graph.AddLayer(layer3)\n print \"layer3 done\"\n if (flag5):\n graph.AddLayer(layer4)\n print \"layer4 done\"\n if (flag6):\n graph.AddLayer(layer5)\n print \"layer5 done\"\n if (flag7):\n graph.AddLayer(layer6)\n print \"layer6 done\"\n if (flag8):\n graph.AddLayer(layer7)\n print \"layer7 done\"\n if (flag9):\n graph.AddLayer(layer8)\n print \"layer8 done\"\n if (flag10):\n graph.AddLayer(layer9)\n print \"layer9 done\"\n if (flag11):\n graph.AddLayer(layer10)\n print \"layer10 done\"\n if (flag12):\n graph.AddLayer(layer11)\n print \"layer11 done\"\n if (flag13):\n graph.AddLayer(layer12)\n print \"layer12 done\"\n if (flag14):\n graph.AddLayer(layer13)\n print \"layer13 done\"\n if (flag15):\n graph.AddLayer(layer14)\n print \"layer14 done\"\n if (flag16):\n graph.AddLayer(layer15)\n print \"layer15 done\"\n if (flag17):\n graph.AddLayer(layer16)\n print \"layer16 done\"\n if (flag18):\n graph.AddLayer(layer17)\n print \"layer17 done\"\n if (flag19):\n graph.AddLayer(layer18)\n print \"layer18 done\"\n if (flag20):\n graph.AddLayer(layer19)\n print \"layer19 done\"\n if (flag21):\n graph.AddLayer(layer20)\n print \"layer20 done\"\n if (flag22):\n graph.AddLayer(layer21)\n print \"layer21 done\"\n if (flag23):\n graph.AddLayer(layer22)\n print \"layer22 done\"\n if (flag24):\n graph.AddLayer(layer23)\n print \"layer23 done\"\n if (flag25):\n graph.AddLayer(layer24)\n print \"layer24 done\"\n if (flag26):\n graph.AddLayer(layer25)\n print \"layer25 done\"\n if (flag27):\n graph.AddLayer(layer26)\n print \"layer26 done\"\n if (flag28):\n graph.AddLayer(layer27)\n print \"layer27 done\"\n if (flag29):\n graph.AddLayer(layer28)\n 
print \"layer28 done\"\n if (flag30):\n graph.AddLayer(layer29)\n print \"layer29 done\"\n if (flag31):\n graph.AddLayer(layer30)\n print \"layer30 done\"\n #modifications to add more layers go here\n\n graph.EdgeCostComputation()\n print 'done building graph'\n hg.CPLEXprint(graph, fileName+'_tmp.lp')\n # run linear solver gpsol and parse it's output\n print 'start linear solver'\n os.system('./runLS.sh '+fileName+'_tmp.lp')\n print 'linear solution done'\n lpSol = lp.ReadColumn(fileName+'_tmp.csv')\n print 'solution read'\n os.system('rm '+fileName+'_tmp.lp')\n os.system('rm '+fileName+'_tmp.csv')\n os.system('rm '+fileName+'_tmp.sol')\n table = graph.GetTrackStat(lpSol, numAttributes)\n print table\n np.savetxt(fileName,\n table, delimiter='\\t', fmt='%.2f')\n print \"Finally Done\"\n return 1",
"def main(args):\n input_file = args[1]\n output_occupations = args[2]\n output_states = args[3]\n\n print(\"Analyzing input file:\")\n summary = process_data.Summary(input_file)\n print(\"Reading input data\")\n summary.read_file()\n\n print(\"Computing summaries\")\n occupations = summary.get_results(input_format.Concept.SOC_NAME)\n states = summary.get_results(input_format.Concept.WORK_STATE)\n\n print(\"Writing results\")\n occupations.to_file(output_occupations)\n states.to_file(output_states)",
"def run(argv=sys.argv[1:]):\n clparser = argparse.ArgumentParser(description='Determine whether there' +\n ' are traces of helium in a given spectrum.')\n clparser.add_argument('-v', '--version', action='version',\n version='%(prog)s ' + __version__)\n clparser.add_argument('-a', '--plot-all', action='store_true',\n help='draw plot showing all the lines found in spectrum')\n clparser.add_argument('-p', '--plot', action='store_true',\n help='draw plot showing helium lines in spectrum')\n clparser.add_argument('filenames', nargs='+',\n help='spectrum files to process')\n clparser.add_argument('--verbose', action='store_true',\n help='verbose output (prints lines and signal to noise ratio)')\n clparser.add_argument('-t', '--threshold', nargs='?', type=float,\n const=1.0, default=1.0,\n help='a signal raises that many times above the background noise')\n args = clparser.parse_args(argv)\n\n for fname in args.filenames:\n find_helium(fname, plot=args.plot, plot_all=args.plot_all,\n verbose=args.verbose, threshold=args.threshold)",
"def read_examples(input_file, output_mode = 'classification'):\n examples = []\n labels = []\n toxicity = []\n weights = []\n unique_id = 0\n\n # Comments with the following indentities will have a higher wright in the loss\n identity_columns = [\n 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',\n 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']\n with open(input_file, \"r\", encoding='utf-8') as reader:\n csv_reader = csv.reader(reader, delimiter=',')\n for _i, line in enumerate(csv_reader):\n\n if _i == 0:\n # Get headers and look for identity columnns\n headers = list(line)\n # Stores its positions for futher use\n interesting_positions = [headers.index(interest_identity) for interest_identity in identity_columns]\n\n else:\n # Get toxicity ground truth\n target = float(line[1])\n if target >= 0.5:\n \tlabel = \"Toxic\"\n else:\n \tlabel = \"OK\"\n text = line[2]\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", text)\n if m is None:\n text_a = text\n else:\n print(text_a, text_b)\n text_a = m.group(1)\n text_b = m.group(2)\n # store class or float toxicity depending on mode\n if output_mode != 'classification':\n label = target\n\n # Calculate weight\n weight = 0.25\n # Subgroup:\n\n weight+= 0.25*(sum([float(line[interest])>=0.5 for interest in interesting_positions if line[interest] !=''])>=1)\n # Background Positive, Subgroup Negative\n weight+=0.25*((target>=0.5)*sum([float(line[interest])<0.5 for interest in interesting_positions if line[interest] !=''])>=1)\n # Background Negative, Subgroup Positive\n weight+= 0.25*((target<0.5)*sum([float(line[interest])>=0.5 for interest in interesting_positions if line[interest] !=''])>=1)\n # Original implementation\n '''\n # Overall\n weights = np.ones((len(x_train),)) / 4\n # Subgroup\n weights += (train[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) / 4\n # Background Positive, Subgroup Negative\n weights += (( (train['target'].values>=0.5).astype(bool).astype(np.int) +\n (train[identity_columns].fillna(0).values<0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4\n # Background Negative, Subgroup Positive\n weights += (( (train['target'].values<0.5).astype(bool).astype(np.int) +\n (train[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4\n \n '''\n examples.append(\n InputExample(guid=unique_id, text_a=text_a, text_b=text_b, label = label, weight = weight))\n labels.append(label)\n toxicity.append(target)\n unique_id += 1\n\n\n\n return examples, labels, toxicity",
"def main(input_filepath, output_model_filepath):\n logger = logging.getLogger(__name__)\n logger.info('training hotel cluster embeddings models')\n\n input_file = os.path.join(input_filepath, 'sentences.pkl')\n output_model_file = os.path.join(output_model_filepath, 'hotelcluster2vec.bin')\n\n train(input_file, output_model_file)",
"def main(argv):\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO, stream=sys.stdout)\n logger = logging.getLogger(\"demo4\")\n\n # Define some parameters we'll use below and make directories if needed.\n cat_file_name = os.path.join('input','galsim_default_input.asc')\n if not os.path.isdir('output'):\n os.mkdir('output')\n multi_file_name = os.path.join('output','multi.fits')\n\n random_seed = galsim.BaseDeviate(8241573).raw()\n sky_level = 1.e6 # ADU / arcsec^2\n pixel_scale = 1.0 # arcsec / pixel (size units in input catalog are pixels)\n gal_flux = 1.e6 # arbitrary choice, makes nice (not too) noisy images\n gal_g1 = -0.009 #\n gal_g2 = 0.011 #\n\n # the fraction of flux in each component\n # 40% is in the bulge, 60% in a disk. 70% of that disk light is placed\n # into point sources distributed as a random walk\n\n bulge_frac = 0.4\n disk_frac = 0.6\n knot_frac = 0.42\n smooth_disk_frac = 0.18\n\n # number of knots of star formation. To simulate a nice irregular (all the\n # flux is in knots) we find ~100 is a minimum number needed, but we will\n # just use 10 here to make the demo run fast.\n\n n_knots = 10\n\n xsize = 64 # pixels\n ysize = 64 # pixels\n\n logger.info('Starting demo script 4 using:')\n logger.info(' - parameters taken from catalog %r',cat_file_name)\n logger.info(' - Moffat PSF (parameters from catalog)')\n logger.info(' - pixel scale = %.2f',pixel_scale)\n logger.info(' - Bulge + Disc galaxies (parameters from catalog)')\n logger.info(' - 100 Point sources, distributed as random walk')\n logger.info(' - Applied gravitational shear = (%.3f,%.3f)',gal_g1,gal_g2)\n logger.info(' - Poisson noise (sky level = %.1e).', sky_level)\n\n # Read in the input catalog\n cat = galsim.Catalog(cat_file_name)\n\n\n # save a list of the galaxy images in the \"images\" list variable:\n images = []\n for k in range(cat.nobjects):\n # Initialize the (pseudo-)random number generator that we will be using below.\n # Use a different random seed for each object to get different noise realizations.\n # Using sequential random seeds here is safer than it sounds. We use Mersenne Twister\n # random number generators that are designed to be used with this kind of seeding.\n # However, to be extra safe, we actually initialize one random number generator with this\n # seed, generate and throw away two random values with that, and then use the next value\n # to seed a completely different Mersenne Twister RNG. The result is that successive\n # RNGs created this way produce very independent random number streams.\n rng = galsim.BaseDeviate(random_seed+k+1)\n\n # Take the Moffat beta from the first column (called 0) of the input catalog:\n # Note: cat.get(k,col) returns a string. 
To get the value as a float, use either\n # cat.getFloat(k,col) or float(cat.get(k,col))\n beta = cat.getFloat(k,0)\n # A Moffat's size may be either scale_radius, fwhm, or half_light_radius.\n # Here we use fwhm, taking from the catalog as well.\n fwhm = cat.getFloat(k,1)\n # A Moffat profile may be truncated if desired\n # The units for this are expected to be arcsec (or specifically -- whatever units\n # you are using for all the size values as defined by the pixel_scale).\n trunc = cat.getFloat(k,4)\n # Note: You may omit the flux, since the default is flux=1.\n psf = galsim.Moffat(beta=beta, fwhm=fwhm, trunc=trunc)\n\n # Take the (e1, e2) shape parameters from the catalog as well.\n psf = psf.shear(e1=cat.getFloat(k,2), e2=cat.getFloat(k,3))\n\n # Galaxy is a bulge + disk(+knots) with parameters taken from the catalog:\n\n # put some fraction of the disk light into knots of star formation\n\n disk_hlr = cat.getFloat(k,5)\n disk_e1 = cat.getFloat(k,6)\n disk_e2 = cat.getFloat(k,7)\n bulge_hlr = cat.getFloat(k,8)\n bulge_e1 = cat.getFloat(k,9)\n bulge_e2 = cat.getFloat(k,10)\n\n smooth_disk = galsim.Exponential(flux=smooth_disk_frac, half_light_radius=disk_hlr)\n\n knots = galsim.RandomKnots(n_knots, half_light_radius=disk_hlr, flux=knot_frac, rng=rng)\n\n disk = galsim.Add([smooth_disk, knots])\n disk = disk.shear(e1=disk_e1, e2=disk_e2)\n\n # the rest of the light goes into the bulge\n bulge = galsim.DeVaucouleurs(flux=bulge_frac, half_light_radius=bulge_hlr)\n bulge = bulge.shear(e1=bulge_e1, e2=bulge_e2)\n\n # The flux of an Add object is the sum of the component fluxes.\n # Note that in demo3.py, a similar addition was performed by the binary operator \"+\".\n gal = galsim.Add([disk, bulge])\n\n # This flux may be overridden by withFlux. The relative fluxes of the components\n # remains the same, but the total flux is set to gal_flux.\n gal = gal.withFlux(gal_flux)\n gal = gal.shear(g1=gal_g1, g2=gal_g2)\n\n # The center of the object is normally placed at the center of the postage stamp image.\n # You can change that with shift:\n gal = gal.shift(dx=cat.getFloat(k,11), dy=cat.getFloat(k,12))\n\n final = galsim.Convolve([psf, gal])\n\n # Draw the profile\n image = galsim.ImageF(xsize, ysize)\n final.drawImage(image, scale=pixel_scale)\n\n # Add Poisson noise to the image:\n image.addNoise(galsim.PoissonNoise(rng, sky_level * pixel_scale**2))\n\n logger.info('Drew image for object at row %d in the input catalog'%k)\n \n # Add the image to our list of images\n images.append(image)\n \n # Now write the images to a multi-extension fits file. Each image will be in its own HDU.\n galsim.fits.writeMulti(images, multi_file_name)\n logger.info('Images written to multi-extension fits file %r',multi_file_name)",
"def final_fmllr_est_func(\n log_path: str,\n dictionaries: List[str],\n feature_strings: Dict[str, str],\n model_path: str,\n fmllr_options: ConfigDict,\n trans_paths: Dict[str, str],\n spk2utt_paths: Dict[str, str],\n tmp_lat_paths: Dict[str, str],\n) -> None:\n with open(log_path, \"w\", encoding=\"utf8\") as log_file:\n for dict_name in dictionaries:\n feature_string = feature_strings[dict_name]\n trans_path = trans_paths[dict_name]\n temp_trans_path = trans_path + \".temp\"\n spk2utt_path = spk2utt_paths[dict_name]\n tmp_lat_path = tmp_lat_paths[dict_name]\n determinize_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-determinize-pruned\"),\n f\"--acoustic-scale={fmllr_options['acoustic_scale']}\",\n \"--beam=4.0\",\n f\"ark:{tmp_lat_path}\",\n \"ark:-\",\n ],\n stderr=log_file,\n stdout=subprocess.PIPE,\n env=os.environ,\n )\n\n latt_post_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-to-post\"),\n f\"--acoustic-scale={fmllr_options['acoustic_scale']}\",\n \"ark:-\",\n \"ark:-\",\n ],\n stdin=determinize_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n weight_silence_proc = subprocess.Popen(\n [\n thirdparty_binary(\"weight-silence-post\"),\n f\"{fmllr_options['silence_weight']}\",\n fmllr_options[\"sil_phones\"],\n model_path,\n \"ark:-\",\n \"ark:-\",\n ],\n stdin=latt_post_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n fmllr_proc = subprocess.Popen(\n [\n thirdparty_binary(\"gmm-est-fmllr\"),\n f\"--fmllr-update-type={fmllr_options['fmllr_update_type']}\",\n f\"--spk2utt=ark:{spk2utt_path}\",\n model_path,\n feature_string,\n \"ark,s,cs:-\",\n \"ark:-\",\n ],\n stdin=weight_silence_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n\n compose_proc = subprocess.Popen(\n [\n thirdparty_binary(\"compose-transforms\"),\n \"--b-is-affine=true\",\n \"ark:-\",\n f\"ark:{trans_path}\",\n f\"ark:{temp_trans_path}\",\n ],\n stderr=log_file,\n stdin=fmllr_proc.stdout,\n env=os.environ,\n )\n compose_proc.communicate()\n os.remove(trans_path)\n os.rename(temp_trans_path, trans_path)",
"def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"xTrain\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"yTrain\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"xTest\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"yTest\",\n help=\"filename for labels associated with the test data\")\n parser.add_argument(\"lr\", type=float, help=\"learning rate\")\n parser.add_argument(\"bs\", type=int, help=\"batch size\")\n parser.add_argument(\"epoch\", type=int, help=\"max number of epochs\")\n parser.add_argument(\"--seed\", default=334, \n type=int, help=\"default seed number\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = file_to_numpy(args.xTrain)\n yTrain = file_to_numpy(args.yTrain)\n xTest = file_to_numpy(args.xTest)\n yTest = file_to_numpy(args.yTest)\n\n # setting the seed for deterministic behavior\n np.random.seed(args.seed) \n model = SgdLR(args.lr, args.bs, args.epoch)\n trainStats = model.train_predict(xTrain, yTrain, xTest, yTest)\n print(trainStats)",
"def main(input_filepath: str = \"./data\",\n output_filepath: str = \"./data\") -> None:\n logger = logging.getLogger(__name__)\n logger.info(\"making final data set from raw data\")\n\n raw_data_dir = path.abspath(input_filepath)\n if path.isdir(raw_data_dir):\n\n processed_data_dir = path.abspath(output_filepath)\n\n logger.info(\"start\")\n filenames = [\"train.txt\", \"valid.txt\", \"test.txt\"]\n create_index(filenames, raw_data_dir, processed_data_dir)\n prepare_datasets(filenames, raw_data_dir, processed_data_dir)\n\n else:\n logger.info(\"File or directory does not exist\")\n\n logger.info(\"finished\")",
"def run_from_file(f):\n #set defaults\n x_loops=1;max_steps=0;display_on=True;max_fps=10;garden_size=13;tako_number=20\n pop_max=40;max_width=1800;max_height=900;collect_data=True;export_all=False\n rand_nets=False;max_gen=0;genetic_mode=\"Plain\";learning_on=False\n seeds=None;garden_mode=\"Diverse Static\";family_detection=None;family_mod=0\n record_inbreeding=True;inbreed_lim=1.1;filename=\"default file\"\n hla_genes=0;binary_health=0;carrier_percentage=40;two_envs=False\n diff_envs=False;migration_rate=0;phen_pref=False\n\n \n atr_dict = {\"x_loops\": x_loops, \"max_steps\": max_steps,\n \"display_on\": display_on, \"max_fps\": max_fps,\n \"garden_size\": garden_size,\n \"tako_number\": tako_number, \"pop_max\": pop_max,\n \"max_width\": max_width, \"max_height\": max_height,\n \"collect_data\": collect_data, \"export_all\": export_all,\n \"rand_nets\": rand_nets, \"max_gen\": max_gen,\n \"genetic_mode\": genetic_mode, \"learning_on\": learning_on,\n \"seeds\": seeds, \"garden_mode\": garden_mode,\n \"family_detection\": family_detection, \"family_mod\": family_mod,\n \"record_inbreeding\": record_inbreeding,\n \"inbreed_lim\": inbreed_lim, \"filename\": filename,\n \"hla_genes\": hla_genes, \"binary_health\": binary_health,\n \"carrier_percentage\": carrier_percentage,\n \"two_envs\": two_envs, \"diff_envs\": diff_envs,\n \"migration_rate\": migration_rate, \"phen_pref\": phen_pref}\n \n ints = [\"x_loops\", \"max_steps\", \"garden_size\", \"tako_number\", \"pop_max\",\n \"max_width\", \"max_height\", \"max_gen\", \"hla_genes\",\n \"binary_health\", \"carrier_percentage\", \"max_fps\"]\n floats = [\"family_mod\", \"inbreed_lim\", \"migration_rate\"]\n strs = [\"genetic_mode\", \"garden_mode\", \"filename\"]\n bools = [\"display_on\", \"collect_data\", \"export_all\", \"rand_nets\",\n \"learning_on\", \"record_inbreeding\", \"two_envs\", \"diff_envs\",\n \"phen_pref\"]\n\n #then sets all user-defined settings from the file f\n with open(f) as exp_file:\n for line in exp_file:\n #comments\n if line[0] == \"#\":\n pass\n #blank line = run what we have, then continue\n #to read the file for a new set of parameters\n elif line == \"\\n\":\n run_experiment(atr_dict[\"x_loops\"], atr_dict[\"max_steps\"],\n atr_dict[\"display_on\"], atr_dict[\"max_fps\"],\n atr_dict[\"garden_size\"],\n atr_dict[\"tako_number\"], atr_dict[\"pop_max\"],\n atr_dict[\"max_width\"], atr_dict[\"max_height\"],\n atr_dict[\"collect_data\"], atr_dict[\"export_all\"],\n atr_dict[\"rand_nets\"], atr_dict[\"max_gen\"],\n atr_dict[\"genetic_mode\"],\n atr_dict[\"learning_on\"],\n atr_dict[\"seeds\"], atr_dict[\"garden_mode\"],\n atr_dict[\"family_detection\"],\n atr_dict[\"family_mod\"],\n atr_dict[\"record_inbreeding\"],\n atr_dict[\"inbreed_lim\"],\n atr_dict[\"hla_genes\"], atr_dict[\"binary_health\"],\n atr_dict[\"carrier_percentage\"],\n atr_dict[\"filename\"],\n atr_dict[\"two_envs\"],\n atr_dict[\"diff_envs\"],\n atr_dict[\"migration_rate\"],\n atr_dict[\"phen_pref\"])\n #reset defaults\n atr_dict = {\"x_loops\": x_loops, \"max_steps\": max_steps,\n \"display_on\": display_on, \"max_fps\": max_fps,\n \"garden_size\": garden_size,\n \"tako_number\": tako_number, \"pop_max\": pop_max,\n \"max_width\": max_width, \"max_height\": max_height,\n \"collect_data\": collect_data, \"export_all\": export_all,\n \"rand_nets\": rand_nets, \"max_gen\": max_gen,\n \"genetic_mode\": genetic_mode, \"learning_on\": learning_on,\n \"seeds\": seeds, \"garden_mode\": garden_mode,\n \"family_detection\": family_detection,\n 
\"family_mod\": family_mod,\n \"record_inbreeding\": record_inbreeding,\n \"inbreed_lim\": inbreed_lim, \"filename\": filename,\n \"hla_genes\": hla_genes, \"binary_health\": binary_health,\n \"carrier_percentage\": carrier_percentage,\n \"two_envs\": two_envs, \"diff_envs\": diff_envs,\n \"migration_rate\": migration_rate, \"phen_pref\": phen_pref}\n else:\n #get rid of newline character\n line = line[:-1]\n line = line.split(\": \")\n if line[0] in ints:\n val = int(line[1])\n elif line[0] in floats:\n val = float(line[1])\n elif line[0] in bools:\n val = True if line[1] == \"True\" else False\n elif line[0] in strs:\n val = line[1]\n elif line[0] == \"family_detection\":\n if line[1] == \"None\":\n val = None\n else:\n val = line[1]\n elif line[0] == \"seeds\":\n val = line[1].split(\" \")\n atr_dict[line[0]] = val\n #run the last one in the file\n run_experiment(atr_dict[\"x_loops\"], atr_dict[\"max_steps\"],\n atr_dict[\"display_on\"], atr_dict[\"max_fps\"],\n atr_dict[\"garden_size\"],\n atr_dict[\"tako_number\"], atr_dict[\"pop_max\"],\n atr_dict[\"max_width\"], atr_dict[\"max_height\"],\n atr_dict[\"collect_data\"], atr_dict[\"export_all\"],\n atr_dict[\"rand_nets\"], atr_dict[\"max_gen\"],\n atr_dict[\"genetic_mode\"],\n atr_dict[\"learning_on\"],\n atr_dict[\"seeds\"], atr_dict[\"garden_mode\"],\n atr_dict[\"family_detection\"],\n atr_dict[\"family_mod\"],\n atr_dict[\"record_inbreeding\"],\n atr_dict[\"inbreed_lim\"], atr_dict[\"hla_genes\"],\n atr_dict[\"binary_health\"], atr_dict[\"carrier_percentage\"],\n atr_dict[\"two_envs\"], atr_dict[\"diff_envs\"],\n atr_dict[\"migration_rate\"], atr_dict[\"phen_pref\"],\n atr_dict[\"filename\"])",
"def evaluate_hmdb51_fusion():\n vlen = 0\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_hmdb51_pooled_python/'\n fv_root = '/home/syq/research_final/data/dense-traj/fv_hmdb51_python/'\n hmdb_splits = 'testTrainMulti_7030_splits/'\n categories = os.listdir(fv_root)\n weight = 1.0\n weights = [i / 20.0 for i in range(21)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(3)\n for splitnum in range(1,4):\n ts = time.time()\n trainfiles, testfiles = hmdb51_splits.loadsplit(categories,\n hmdb_splits,\n splitnum)\n print 'Have %d train files' % len(trainfiles)\n print 'Have %d test files' % len(testfiles)\n\n if not vlen:\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(trainfiles[0][0][:-4],\n ob_suffix)),\"rb\")\n vlen_ob = len(np.load(fp))\n fp.close()\n print \"OB vector length is %d\" % vlen_ob\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(trainfiles[0][0][:-4],\n fv_suffix)),\"rb\")\n vlen_fv = len(np.load(fp))\n fp.close()\n print \"IDTFV vector length is %d\" % vlen_fv\n\n Dtrain_ob = np.zeros( (len(trainfiles),vlen_ob), np.float32 )\n Dtrain_fv = np.zeros( (len(trainfiles),vlen_fv), np.float32 )\n\n Ytrain = np.ones ( (len(trainfiles) )) * -1000\n\n for fi,f in enumerate(trainfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtrain_ob[fi][:] = np.load(fp)\n fp.close()\n Ytrain[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtrain_fv[fi][:] = np.load(fp)\n fp.close()\n\n Dtest_ob = np.zeros( (len(testfiles),vlen_ob), np.float32 )\n Dtest_fv = np.zeros( (len(testfiles),vlen_fv), np.float32 )\n\n Ytest = np.ones ( (len(testfiles) )) * -1000\n\n for fi,f in enumerate(testfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtest_ob[fi][:] = np.load(fp)\n fp.close()\n Ytest[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtest_fv[fi][:] = np.load(fp)\n fp.close()\n\n \"\"\"\n Early fusion\n Dtrain = np.hstack((Dtrain_ob, Dtrain_fv))\n Dtest = np.hstack((Dtest_ob, Dtest_fv))\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n acc = clf.fit(Dtrain, Ytrain).score(Dtest, Ytest)\n \"\"\"\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # Late fusion\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', splitnum, 'late fusion acc', acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[splitnum-1] = acc\n acc_to_weights[weight] = accs\n\n print \"Mean accuracy: %.3f\" % 
accs.mean()\n with open(\"hmdb51_weight_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights\n\n \"\"\"\n with open('fv_hmdb51_accs.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))\n \"\"\"",
"def main():\n # Set hyperparameters: batch size, learning rate, hidden layers, activ. fn\n bs = 64\n epochs = 1000\n lr = 10 ** (-5)\n h_layers = [32, 16]\n a_fn = F.relu\n\n # Construct Dataset from file; form DataLoaders\n train_ds, valid_ds = form_datasets(DATA_PATH / SAMPLE_FILE)\n train_dl, valid_dl = form_dataloaders(train_ds, valid_ds, bs, preprocess)\n\n # Gather target inverse scaler fn\n t_inv_scaler = train_ds.target_scaler[\"stargazers\"]\n\n # Intialize model (w/ GPU support), optimization method, and loss function\n model = dff.DFF(D_in=21, D_hid=h_layers, D_out=1, a_fn=a_fn)\n model.to(DEV)\n opt = optim.Adam(model.parameters(), lr=lr)\n loss_func = F.mse_loss\n fit_args = (model, loss_func, opt, train_dl, valid_dl, t_inv_scaler)\n\n # Generate descriptive filename string for csv logs\n prefix = \"FINAL_\"\n model_str = dff.hyper_str(h_layers, lr, opt, a_fn, bs, epochs, prefix)\n print(model_str)\n\n # Train, validate, and store loss\n dff.fit(epochs, *fit_args, LOG_PATH, model_str)",
"def lfq_parser_2x():\n from tools import file_importer, file_outporter\n # from random import random\n from math import log10\n \n print(\"this is lfq parser_2x\")\n \n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_2.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n lfqColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"LFQ intensity\" in headerI:\n break\n else: lfqColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n for headI in inpList[lfqColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[lfqColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[lfqColCount + 1].split(\"|\")[-1] + \"\\n\")\n rowCount += 1\n continue\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\") \n inpLFQ = inpItems[lfqColCount].split(\"|\") + inpItems[lfqColCount + 1].split(\"|\") # get lfq intensity scores\n \n procLFQ = []\n for lfqi in inpLFQ:\n if lfqi == \"_\": procLFQ.append(0)\n else: procLFQ.append(int(lfqi))\n if sum(procLFQ[:3])<=sum(procLFQ[3:]): continue # get rid of contaminants in control sample\n \n sumOne = inpLFQ[1] + inpLFQ[2]\n sumTwo = inpLFQ[1] + inpLFQ[3]\n sumThree = inpLFQ[2] + inpLFQ[3]\n \n if sumOne == \"__\" or sumTwo == \"__\" or sumThree == \"__\": continue # test if protein is being detected in at least 2 OST samples\n \n outF.write(str(rowCount) + \",\")\n \n\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpLFQ = inpItems[lfqColCount].split(\"|\") + inpItems[lfqColCount + 1].split(\"|\") \n # print inpLFQ\n for lfqI in inpLFQ[:-1]: # write lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(0) + \",\") ################## try with log2 values this time\n else:\n try:\n outF.write(str(round(log10(int(lfqI)))) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpLFQ[-1] == \"_\" or inpLFQ[-1] == \"0\": outF.write(str(0) + \"\\n\")\n else: outF.write(str(round(log10(int(inpLFQ[-1])))) + \"\\n\")\n \n \n rowCount += 1\n\n print(\"lfq parser 2x finished successfully\")",
"def __init__(self, input_file):\r\n self.input_file = input_file\r\n self.no_process = 0\r\n self.ids = []\r\n self.weights = []",
"def readOutputfile(filename, verbose=False):\n\n # -----------------------------------------------------------------------------\n # Defining the classes for data structure\n T_Simulation = namedtuple('Simulation', ['step'])\n T_Step = namedtuple('Step', ['element', 'node'])\n\n T_Displacement = namedtuple('Displacement', ['ux', 'uy'])\n\n T_Element = namedtuple('Element', ['gp', 'avstrain', 'avstress', 'eqstrain'])\n T_GP = namedtuple('GP', ['stress', 'strain'])\n T_Stresses = namedtuple('Stresses', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n T_Strains = namedtuple('Strains', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n # -----------------------------------------------------------------------------\n\n nSteps = 0 # Simulation step counter\n\n SimData = T_Simulation(list())\n\n with open(filename) as f:\n line = f.readline() # Read in the first line of the input file\n while True: # Loop over all lines of the input file\n # Read the nodes displacements\n #line = f.readline()\n #print(line)\n if line == 'DofManager output:\\n': # String starts a list of nodes displacement information\n nSteps += 1 # The above string starts a new simulation step\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Nodes = list() # Initialize/clear list of nodes\n\n while line != '\\n' and line != 'Element output:\\n': # Strings that finish the list\n #\t\t\t\tnNode = int(line.strip().split()[1]) # Node id\n line = f.readline()\n dim1 = float(line.strip().split()[3]) # Displacement dim1\n line = f.readline()\n dim2 = float(line.strip().split()[3]) # Displacement dim2\n Nodes.append(\n T_Displacement(dim1, dim2)) # Append displacements of the current node to the node list\n line = f.readline()\n\n\n if verbose:\n print('Step {}: Dofs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n # Read the stresses an strains at Gauss points\n elif line == 'Element output:\\n': # String starts a list elements, GPs, strains and stresses\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Elements = list() # Initialize/clear list of elements\n\n while line != '\\n' and line != '\\tR E A C T I O N S O U T P U T:\\n': # Strings that finish the list\n #\t\t\t\t\tnElement = line.strip().split()[2] # Element id\n line = f.readline()\n GPs = T_Element(list(), 0, 0, 0) # List of Gauss points\n\n while line != '\\n' and line.strip().split()[0] == 'GP': # String that starts a new GP\n #\t\t\t\t\t\tnGP = int(line.strip().split()[1].split('.')[1]) # GP id\n tmp = [float(i) for i in line.strip().split()[4:10]] # Read the strains\n strain = T_Strains(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n line = f.readline()\n tmp = [float(i) for i in line.strip().split()[1:7]] # Read the stresses\n stress = T_Stresses(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n GPs.gp.append(\n T_GP(stress, strain)) # Append stresses and strains of the current GP to the GP list\n line = f.readline()\n\n\n Elements.append(GPs) # Append GP list of the current element to the element list\n\n if verbose:\n print('Step {}: GPs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n SimData.step.append(T_Step(Elements, Nodes)) # Append element and node list of the current step to the step list\n #print('the file input ends')\n #print(nSteps)\n # only needed with a while loop\n # Jump over the lines until we reach the next time step (Caught by if-clause)\n try:\n line = f.readline() # Will generate an error if files end is reached\n if line == \"\":\n raise 
EOFError\n except:\n if verbose: print(\"End of file reached.\\n\")\n break # Break the 'while True' loop\n\n # -----------------------------------------------------------------------------\n\n\n print('averaging the stress')\n # Averaging of strains and stress of GPs of each element\n for istep in range(len(SimData.step)):\n\n for ielement in range(len(SimData.step[istep].element)):\n print(len)\n # Initialization before each element\n stresses = np.array([0., 0., 0., 0., 0., 0.])\n strains = np.array([0., 0., 0., 0., 0., 0.])\n\n for igp in range(len(SimData.step[istep].element[ielement])):\n print(igp)\n # Add up all data of all GPs\n #stresses[:] += SimData.step[istep].element[ielement].gp[igp].stress[:]\n strains[:] += SimData.step[istep].element[ielement].gp[igp].strain[:]\n\n # Divide GP sum by number of GPs\n stresses /= len(SimData.step[istep].element[ielement])\n strains /= len(SimData.step[istep].element[ielement])\n # Replace the field (initialized with 0) with new information\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstress=T_Stresses(stresses[0], stresses[1], stresses[2], stresses[3], stresses[4], stresses[5]))\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstrain=T_Strains(strains[0], strains[1], strains[2], strains[3], strains[4], strains[5]))\n print('Analysis finished')\n return SimData",
"def encoder_proc(gen_filename, nature_filename, result, out_file, feature_size=41, frames=200):\n # extract features\n gen_features, nature_features = extract_feature(gen_filename, nature_filename, feature_size, frames)\n\n #logging.debug('gen features:raw {}'.format(gen_features))\n #logging.debug('nat features:raw {}'.format(nature_features))\n #logging.debug('result {}'.format(result))\n\n if result:\n # normal\n gen_mean, nat_mean, gen_std, nat_std = result\n\n gen_features = (gen_features - gen_mean) / gen_std\n nature_features = (nature_features - nat_mean) / nat_std\n #logging.debug('gen features:normal {}'.format(gen_features))\n #logging.debug('nat features:normal {}'.format(nature_features))\n\n assert (np.mean(gen_features, axis=0) - gen_mean).all()\n assert (np.std(gen_features, axis=0) - gen_std).all()\n assert (np.mean(nature_features, axis=0) - nat_mean).all()\n assert (np.std(nature_features, axis=0) - nat_std).all()\n\n # bank of 200 frame\n n_frames = int(gen_features.shape[0] / frames)\n logging.info('{} of {} frames'.format(n_frames, frames))\n\n # transpose axis\n gen_features = np.transpose(gen_features, (1,0)) # feature x frame\n nature_features = np.transpose(nature_features, (1,0)) # feature x frame\n\n # add new axis\n gen_features = gen_features[:, :, np.newaxis] # feature x frame x channel\n nature_features = nature_features[:, :, np.newaxis] # feature x frame x channel\n logging.info('features last shape={}'.format(gen_features.shape))\n\n # Example to TFRecords\n for n in tqdm(xrange(n_frames), desc='Write Example', ascii=True, leave=False):\n gen_list = gen_features[:, n*frames:(n+1)*frames, :].flatten()\n nat_list = nature_features[:, n*frames:(n+1)*frames, :].flatten()\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'depth': _int64_feature(1), # channels\n 'height': _int64_feature(feature_size), # feature_szie (41)\n 'width': _int64_feature(frames), # frames (200)\n 'encoding': _int64_feature(0), # no encoding, fix to 0\n 'image_raw': _floats_feature(gen_list), # gen_features\n 'label': _floats_feature(nat_list) # nature_features\n }))\n #logging.debug('One Example: {}'.format(example))\n out_file.write(example.SerializeToString())",
"def frequency(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['frequency']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE':\n distillate_label = label\n else:\n distillate_label = get_distillate_label([label])\n if 'ANG' not in distillate_label:\n continue\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_label = label\n dep_name = fields['deps'][0]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"FREQ\"\n params = [[param_section_name, param_section_value],\n [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label+\"_1-SEC\"] = emitted[-3][-36:]\n output_uuid_map[label+\"_C37\"] = emitted[-2][-36:]\n\n filename = \"{0}/FREQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map"
] |
[
"0.605931",
"0.605931",
"0.57579064",
"0.5723161",
"0.5692624",
"0.56384605",
"0.5591998",
"0.5568752",
"0.55570334",
"0.5528021",
"0.54544705",
"0.5442652",
"0.54408765",
"0.54330486",
"0.5428779",
"0.5377762",
"0.5329455",
"0.532752",
"0.53155154",
"0.5314784",
"0.53111833",
"0.5309282",
"0.5288081",
"0.5287893",
"0.52815115",
"0.5270923",
"0.5251785",
"0.5246032",
"0.5243188",
"0.5239161"
] |
0.65101767
|
0
|
Compute the probability of a connection at EVERY LOCATION in the matrix. Does not depend on the actual observed values of the data.
|
def compute_prob_matrix(tgt_latent, tgt_data, model_name='LogisticDistance'):
    # Per-block sufficient statistics, cluster assignments, and relation hyperparameters
    ss = tgt_latent['relations']['R1']['ss']
    ass = tgt_latent['domains']['d1']['assignment']
    hps = tgt_latent['relations']['R1']['hps']
    data_conn = tgt_data['relations']['R1']['data']
    N = data_conn.shape[0]
    pred = np.zeros((N, N))
    for i in range(N):
        for j in range(N):
            # The probability depends only on the block (c1, c2) the pair falls into,
            # never on the observed value at (i, j)
            c1 = ass[i]
            c2 = ass[j]
            c = ss[(c1, c2)]
            if model_name == "LogisticDistance":
                # Logistic decay with distance, rescaled into [p_min, p_max]
                dist = data_conn['distance'][i, j]
                y = irm.util.logistic(dist, c['mu'], c['lambda'])
                y = y * (hps['p_max'] - hps['p_min']) + hps['p_min']
            elif model_name == "BetaBernoulliNonConj":
                y = c['p']
            else:
                raise NotImplementedError()
            pred[i, j] = y
    return pred
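A minimal usage sketch (not part of the dataset entry): it assumes compute_prob_matrix and numpy are in scope and exercises only the BetaBernoulliNonConj branch, so the irm dependency is not needed; every name and value in the toy dictionaries below is invented for illustration.

import numpy as np

# Three nodes assigned to two clusters; per-block connection probabilities live in 'ss'
toy_latent = {
    'domains': {'d1': {'assignment': np.array([0, 0, 1])}},
    'relations': {'R1': {
        'ss': {(0, 0): {'p': 0.9}, (0, 1): {'p': 0.1},
               (1, 0): {'p': 0.2}, (1, 1): {'p': 0.8}},
        'hps': {},
    }},
}
# Only the shape of the data matrix matters for this branch
toy_data = {'relations': {'R1': {'data': np.zeros((3, 3))}}}

pred = compute_prob_matrix(toy_latent, toy_data, model_name='BetaBernoulliNonConj')
# pred[i, j] holds the block-level probability for the pair (i, j), e.g. pred[0, 2] == 0.1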
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def connection_probability(prev_props : EdgeProps, curr_props : EdgeProps) -> float:\n d = Delay.__compute_delay(\n arrival_time = prev_props['arr_time'],\n next_departure_time = curr_props['dep_time'], \n ttype = prev_props['ttype'], \n previous_route_id = prev_props['trip_id'],\n next_route_id = curr_props['trip_id']\n )\n proba = Delay.__connection_probability(\n delay = d, \n gamma_params = prev_props['gamma'], \n ttype = curr_props['ttype'], \n previous_route_id = prev_props['trip_id'],\n next_route_id = curr_props['trip_id']\n )\n return proba",
"def data_to_probability(matrix, filter=True):\n probability = matrix / np.sum(matrix)\n if filter:\n probability = uniform_filter(probability, 5)\n probability[probability < 0.0] = 0\n probability[matrix < 0.0] = 0\n return probability",
"def prob_distr(input_matrix):\n size = len(input_matrix)\n prob_matrix = np.zeros((size, size), dtype=complex)\n for m in range(size):\n for n in range(size):\n prob_matrix[m, n] = input_matrix[m, n, m, n]\n\n return prob_matrix",
"def probability(distances):\n v = [1.0/(d + 1) for d in distances]\n s = sum(v)\n return [i/s for i in v]",
"def mutate(self, perturbing_probability):\n for con in self.connections.values():\n if random() < perturbing_probability:\n con.weight *= random_gaussian()\n else:\n con.weight = random(-1, 1)",
"def mn_joint_dist(para_mat, para_val, oc):\n p = np.zeros(oc.shape[0])\n for i in np.arange(para_mat.shape[0]):\n ind = np.all(oc[:, para_mat[i, :]], axis=1)\n p[ind] = p[ind]+para_val[i]\n\n p = np.exp(p)\n if np.abs(np.sum(p)-1) > 1e-6:\n ValueError('Check your model: abs(sum(p)-1) > 1e-6.')\n else:\n p = p / np.sum(p)\n\n return p",
"def topology_random_connect(self, probability):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tif not (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability:\n\t\t\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\t\t\tself.sites[j].neighbors.append(self.sites[i])",
"def compute_neighbours_probability_matrix(n_matrix, src, d_matrix, sigma_neigh):\n\n np_matrix = np.zeros(n_matrix.shape, dtype=float)\n for i in range(src.shape[0]):\n n_neig = len(np.where(n_matrix[i] > -1)[0])\n np_matrix[i, 0:n_neig] = \\\n np.exp(-d_matrix[i, n_matrix[i, 0:n_neig]] ** 2\n / (2 * sigma_neigh ** 2))\n np_matrix[i] = np_matrix[i] / np.sum(np_matrix[i])\n return np_matrix",
"def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities",
"def probOfAllNodeObservations( self ):\n aLeaf = list( self._hyperGraph.leaves )[ 0 ]\n\n total = LogVar( 0 )\n for i in range( aLeaf.N ):\n _u = aLeaf.getFullJoint( i )\n total += _u\n return total",
"def check_probability(self, x, k_neighbours, expected_class, classifier, view = 0):\n match_number = 0\n distances, indexes = classifier.kneighbors(x.reshape(1, -1), k_neighbours)\n for idx in indexes:\n for element in idx:\n predict = classifier.predict(self.data[view][element].reshape(1, -1))\n if predict[0] and predict[0] == expected_class:\n match_number += 1\n return float(match_number)/k_neighbours",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def _convergence_criterion(self,points,_,log_prob_norm):\n return np.sum(log_prob_norm)",
"def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint",
"def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans",
"def test_equal_probability():\n from numpy import array, sqrt, count_nonzero\n\n energy = MagicMock()\n\n density = array([1, 0, 99])\n mc = MonteCarlo(energy, density)\n changes_at_zero = [\n (density - mc.change_density(density))[0] != 0 for i in range(10000)]\n assert count_nonzero(changes_at_zero) \\\n == approx(0.01 * len(changes_at_zero), 0.5 * sqrt(len(changes_at_zero)))",
"def compute_adj_matrix_fitness_CUDA(population, result, d_adj_matrix, length_outer, length_inner):\n\n i = cuda.grid(1) #computes grid index\n if i < length_outer:\n counter = 0.0\n for index in range(length_inner):\n waypoint1 = population[i][index - 1]\n waypoint2 = population[i][index]\n counter += d_adj_matrix[waypoint1, waypoint2]\n\n result[i] = counter",
"def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # Add one for bias to the first columns\n probs = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n probs = log_reg.logistic(z)\n ### END CODE\n assert probs.shape == (X.shape[0],)\n return probs",
"def getActionProb(self, canonicalBoard, temp=1):\n for i in range(self.args.numMCTSSims):\n dir_noise = (i == 0 and self.dirichlet_noise)\n self.search(canonicalBoard, dirichlet_noise=dir_noise)\n\n s = self.game.stringRepresentation(canonicalBoard)\n counts = [\n self.Nsa[(s, a)] if (s, a) in self.Nsa else 0\n for a in range(self.game.getActionSize())\n ]\n\n if temp == 0:\n bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()\n bestA = np.random.choice(bestAs)\n probs = [0] * len(counts)\n probs[bestA] = 1\n return probs\n\n counts = [x**(1. / temp) for x in counts]\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n return probs",
"def test_bp_example():\n signal = np.array([4, 7, 9, 10, 6, 11, 3])\n\n pe = permutation_entropy(signal, 2)\n\n assert 0.91 < pe < 0.92 # Should be approx 0.918.\n\n pe = permutation_entropy(signal, 3)\n\n assert 1.52 < pe < 1.53 # Should be approx 1.522.",
"def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)",
"def get_random_connections(dim, frac=0.5):\n connect = np.random.binomial(1, frac, size=(dim, dim))\n \"\"\"only keep upper triangle matrix (without diagonal)\"\"\"\n connect = np.triu(connect, 1)\n return connect",
"def joint_entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))",
"def pon(self):\n return self.table[1, 1] / (self.table[0, 1] + self.table[1, 1])",
"def specific_binding_fraction(matrix,n=10000):\n return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])",
"def col_prob(P, X=alph):\n\n return sum([P[x] ** 2 for x in X])",
"def effective_pairs(self):\n out = 0\n hdmat = self.hdmatrix()\n for i in xrange(len(hdmat[0])):\n for j in xrange(i+1, len(hdmat[0])): \n out += hdmat[i,j]**2\n return out",
"def self_loop_proportion(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return sum(u == v for u, v, k in Gu.edges) / len(Gu.edges)",
"def calcProbXY(data,allEdges):\n\tfor edge in allEdges:\n\t\tcount = 0\n\t\tcount1 = 0\n\t\tfor tumor in data:\n\t\t\tif ((edge[0] in tumor) or (edge[1] in tumor)):\n\t\t\t\tcount1 += 1\n\t\t\tif ((edge[0] in tumor) and (edge[1] in tumor)):\n\t\t\t\tcount += 1\n\t\tedge.append(count/len(data))\n\t\tedge.append(count/count1)\n\treturn allEdges",
"def probability_move_to_cell(self, animal, total_propensity):\n return self.propensity_to_move(animal) / total_propensity"
] |
[
"0.5911384",
"0.58029",
"0.57641166",
"0.575981",
"0.5756224",
"0.5751847",
"0.57237226",
"0.572289",
"0.56258786",
"0.5621492",
"0.5568151",
"0.55451894",
"0.5514418",
"0.55129284",
"0.55073756",
"0.5503898",
"0.5477835",
"0.5472042",
"0.5439989",
"0.5433168",
"0.54328525",
"0.542502",
"0.5424133",
"0.54207444",
"0.54169184",
"0.54130006",
"0.5366569",
"0.5365797",
"0.53601336",
"0.5358644"
] |
0.60475785
|
0
|
Assumes m is one of the movements 'Up', 'Down', 'Left', 'Right'; returns a Board resulting from the movement m.
|
def childAfterMove(self, m):
    # Tiles are a flat list of 9 entries (3x3 board); 0 marks the blank square
    indexOfZero = self.tiles.index(0)
    initial = self.tiles[:]
    child = Board(initial)
    if m == 'Up':
        # Blank must not be in the top row
        assert(indexOfZero > 2)
        temp = self.tiles[indexOfZero - 3]
        child.tiles[indexOfZero] = temp
        child.tiles[indexOfZero - 3] = 0
    elif m == 'Down':
        # Blank must not be in the bottom row
        assert(indexOfZero < 6)
        temp = self.tiles[indexOfZero + 3]
        child.tiles[indexOfZero] = temp
        child.tiles[indexOfZero + 3] = 0
    elif m == 'Left':
        # Blank must not be in the leftmost column
        assert(indexOfZero != 0 and indexOfZero != 3 and indexOfZero != 6)
        temp = self.tiles[indexOfZero - 1]
        child.tiles[indexOfZero] = temp
        child.tiles[indexOfZero - 1] = 0
    elif m == 'Right':
        # Blank must not be in the rightmost column
        assert(indexOfZero != 2 and indexOfZero != 5 and indexOfZero != 8)
        temp = self.tiles[indexOfZero + 1]
        child.tiles[indexOfZero] = temp
        child.tiles[indexOfZero + 1] = 0
    else:
        print(m + ' is not a legal movement')
    return child
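A short usage sketch (not part of the dataset entry), assuming the method lives on a Board class whose constructor takes the flat tile list, as the body above implies; the starting configuration is arbitrary.

start = Board([1, 2, 0,
               4, 5, 3,
               7, 8, 6])
child = start.childAfterMove('Down')   # tile below the blank (the 3) slides up into the gap
# child.tiles == [1, 2, 3, 4, 5, 0, 7, 8, 6]; the original board is left unchanged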
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getMove(self, board):\n pass",
"def get_move(moves):\n pass",
"def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board",
"def move(self, m):\n if m not in \"RLUD\":\n raise ValueError(\n (\"Not a legal move: '{}', should be one of \" +\n \"the 'RLUD'.\").format(m))\n if m not in self.legal_moves:\n raise ValueError(\n (\"Not a legal move at this state: '{}', \" +\n \"should be one of the '{}'.\").format(m, self.legal_moves))\n\n posdiff = (0, 0)\n if m == 'L':\n posdiff = (0, 1)\n elif m == 'R':\n posdiff = (0, -1)\n elif m == 'U':\n posdiff = (1, 0)\n elif m == 'D':\n posdiff = (-1, 0)\n\n empty_position = self.get_position(0)\n newpuz = self.swap((empty_position[0] - posdiff[0],\n empty_position[1] - posdiff[1]))\n return newpuz",
"def moved_board(board):\n return legal_move_on(board=board).map(\n lambda (start, end): board.move(start=start, end=end),\n )",
"def getMove(self, board):\r\n raise NotImplementedError(\"must be implemented in subclass\")",
"def board_from_moles(moles):\n board = 0\n for mole in moles:\n board = toggle(board, mole)\n return board",
"def get_moves(self):",
"def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves",
"def legalMoves(self):\n moves = []\n indexOfZero = self.tiles.index(0)\n \n if indexOfZero == 0:\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 1:\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 2:\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 3:\n moves.append('Up')\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 4:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 5:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 6:\n moves.append('Up')\n moves.append('Right')\n elif indexOfZero == 7:\n moves.append('Up')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 8:\n moves.append('Up')\n moves.append('Left')\n else:\n print('something wrong with board')\n return moves",
"def get_move(board, player):\n row, col = 0, 0\n return row, col",
"def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col",
"def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1",
"def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;",
"def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"",
"def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board",
"def move(self, action):\n \n currentState = self.state\n\n if action == \"up\":\n newState = (self.state[0] - 1, self.state[1])\n elif action == \"down\":\n newState = (self.state[0] + 1, self.state[1])\n elif action == \"right\":\n newState = (self.state[0], self.state[1] + 1)\n elif action == \"left\":\n newState = (self.state[0], self.state[1] - 1)\n else:\n raise NameError(action, 'This is not a valid action!')\n\n # Need to check if the new state is a legal move\n if (newState[0] >= 0) & (newState[0] <= 1) & (newState[1] >= 0) & (newState[1] <= 2):\n return newState\n else:\n print('This move takes you off the board, you have not moved!')\n return currentState",
"def moveDown(board):\n\tboard = roterVenstre(board)\n\tboard = moveRight(board)\n\tboard = roterHøjre(board)\n\treturn board",
"def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move",
"def getOpponentMove(move, playerBoard, oppBoard, playerSeeds, oppSeeds):\r\n pass",
"def domove(board, move):\n for m in getmovesfromoracle(board):\n if m['move'] == move:\n return(m['board'])\n return(None)",
"def get_moves(side_size, col, row):\n return {\n 'up': {\n 'rel_pos': -1*side_size,\n 'is_movable': row > 0\n },\n 'left': {\n 'rel_pos': -1,\n 'is_movable': col > 0\n },\n 'down': {\n 'rel_pos': side_size,\n 'is_movable': row < (side_size-1)\n },\n 'right': {\n 'rel_pos': 1,\n 'is_movable': col < (side_size-1)\n },\n }",
"def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None",
"def get_move(self, board):\n color = 1\n interval = [-math.inf, math.inf]\n if board.count(color) + board.count(-1 * color) < 6:\n self.step_count = 0\n self.step_count += 2 \n if self.step_count < 45:\n _, move = self._max(board, color, 0, *interval)\n else:\n _, move = self._max(board, color, -2, *interval)\n return move",
"def mm_move(board, player):\n result = board.check_win() # get result of the current board\n if result == None:\n move_list = board.get_empty_squares() # get the tree branches and possible next moves\n best = (None, (-1, -1))\n for step in move_list:\n bd_clone = board.clone()\n bd_clone.move(step[0], step[1], player) #make a move on a cloned board\n next_player = provided.switch_player(player)\n next_score = mm_move(bd_clone, next_player) #make a recursive call to mm_move() pasing the cloned board and the 'other' player\n if player == 3: #if it is oppo O--min\n if best[0] == None or (next_score[0] < best[0]):\n best = (next_score[0], step)\n #print best\n elif player ==2: #if it is X--max\n if best[0] == None or (next_score[0] > best[0]):\n best = (next_score[0], step)\n return best\n else:\n return SCORES[result], (-1, -1)",
"def make_random_move(self):\n # get copy of the empty board\n board = set([(i, j) for i in range(self.height) for j in range(self.width)])\n\n for move in board:\n if not move in self.moves_made and not move in self.mines:\n return move\n\n return None",
"def move(direction: str, board : list) -> list:\n board_length = len(board)\n x, y = find_empty_space(board)\n \n increment_x = 0 \n increment_y = 0\n\n if direction == Direction.Up:\n increment_x, increment_y = Coordinate.Up.value\n elif direction == Direction.Down:\n increment_x, increment_y = Coordinate.Down.value\n elif direction == Direction.Left:\n increment_x, increment_y = Coordinate.Left.value\n elif direction == Direction.Right:\n increment_x, increment_y = Coordinate.Right.value\n\n x_new = x + increment_x\n y_new = y + increment_y\n\n is_valid = is_valid_move(x_new, y_new, board_length)\n\n if is_valid: \n temp = board[x][y]\n board[x][y] = board[x_new][y_new]\n board[x_new][y_new] = temp\n return board\n return None",
"def moveUp(board):\n\tboard = roterVenstre(board)\n\tboard = moveLeft(board)\n\tboard = roterHøjre(board)\n\treturn board",
"def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves",
"def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards"
] |
[
"0.6781174",
"0.66301507",
"0.6560112",
"0.65389156",
"0.6438552",
"0.63536054",
"0.6336471",
"0.6326873",
"0.63040113",
"0.6285235",
"0.6271884",
"0.6262698",
"0.622721",
"0.6174536",
"0.61644375",
"0.61414325",
"0.612391",
"0.6094978",
"0.6090433",
"0.60757256",
"0.60735303",
"0.6048297",
"0.60456806",
"0.6022725",
"0.6012804",
"0.5999989",
"0.5997998",
"0.5995962",
"0.5989983",
"0.5972536"
] |
0.6940232
|
0
|
Returns True if (r, c) is on the board, False otherwise.
|
def is_on_board(self, r, c):
return 0 <= r <= 7 and 0 <= c <= 7
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_on_board(x: int, y: int) -> bool:\n return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT",
"def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols",
"def on_board(self, pos):\n i, j = pos\n return 0 <= i < COLS and 0 <= j < ROWS",
"def has_win(self, r, c, mark):\n row = r - 1\n col = COL_MAP[c]\n cnt = 0\n board_range = xrange(-1 * PADDING, PADDING + 1)\n\n # check vertical\n for dr in board_range:\n cnt = cnt + 1 if self.piece_at(row + dr, col) == mark else cnt\n if cnt == WIDTH:\n return True\n else:\n cnt = 0\n\n # check horizontal\n for dc in board_range:\n cnt = cnt + 1 if self.piece_at(row, col + dc) == mark else cnt\n if cnt == WIDTH:\n return True\n else:\n cnt = 0\n\n # check diagonal rightdown\n for dd in board_range:\n cnt = cnt + 1 if self.piece_at(row + dd, col + dd) == mark else cnt\n if cnt == WIDTH:\n return True\n else:\n cnt = 0\n\n # check diagonal rightup\n for dd in board_range:\n cnt = cnt + 1 if self.piece_at(row - dd, col + dd) == mark else cnt\n if cnt == WIDTH:\n return True\n\n return False",
"def on_board(self):\n if -1 < self._rank < 8 and \\\n -1 < self._file < 8:\n return True\n\n return False",
"def on_board(hexe):\n\n cube = axial_to_cube(hexe)\n\n # check each bound\n for axis in cube:\n if abs(axis) > BOARD_BOUND:\n return False\n return True",
"def is_cell_valid(board, r, c):\n return is_cell_row_valid(board, r, c) or is_cell_col_valid(board, r, c)",
"def col_win(board):\n\tfor col in range(3):\n\t\tif board[0][col] != EMPTY and board[0][col] == board[1][col] == board[2][col]:\n\t\t\treturn True\n\treturn False",
"def is_cell_on_board(cell, board_shape): # TODO: Remove\n return (0, 0) <= cell < board_shape",
"def __is_board_full(self):\r\n for row in self.__board:\r\n if {self.PLAYER1, self.PLAYER2} & set(row) != 0:\r\n return False\r\n return True",
"def has_clashes(the_board):\r\n for c in range(1, len(the_board)):\r\n if col_clashes(the_board, c):\r\n return True\r\n return False",
"def in_board(self,pos : np.ndarray) -> bool:\r\n if 0 > pos[0] or pos[0] >= BOARD_SIZE:\r\n return False\r\n if 0 > pos[1] or pos[1] >= BOARD_SIZE:\r\n return False\r\n\r\n return True",
"def game_won(self):\n\n # Makes sure every tile is colored,\n for column in self.board:\n for tile in column:\n if not tile.color:\n return False\n\n # Makes sure each color has a line.\n colors = set()\n for dot in self.dots:\n dot_tile = self.board[dot.x][dot.y]\n colors.add(dot.color)\n for dot in self.dots:\n dot_tile = self.board[dot.x][dot.y]\n # If we've already found a line for this color.\n if dot.color not in colors:\n continue\n # If this dot starts a line and ends at the other dot.\n if dot_tile.next and not dot_tile.line_end().is_dot:\n return False\n elif dot_tile.next:\n colors.remove(dot.color)\n # If colors isn't empty, not all colors have lines.\n return not colors",
"def check_won (grid):\r\n w=False\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]>=32:\r\n w=True\r\n break\r\n return w",
"def row_win(board):\n\tfor row in range(3):\n\t\tif board[row][0] != EMPTY and board[row][0] == board[row][1] == board[row][2]:\n\t\t\treturn True\n\treturn False",
"def is_board_valid(bd):\n return is_rows_valid(bd) and is_cols_valid(bd) and is_sqrs_valid(bd)",
"def _any_piece_in_way(self, from_row, from_col, dr, dc, dm, toRow=None, toCol=None):\n if toRow != None and toCol != None and (toRow == from_row):\n colDiff = abs(toCol - from_col)\n for i in range(1, colDiff):\n if self.board.squares[from_row][from_col + i * dc] != None:\n return False\n\n pass\n\n for i in range(1, dm):\n if self.board.squares[from_row + i * dr][from_col + i * dc] != None:\n return False\n return True",
"def is_winning(self):\n\n current_board = self.current_board\n\n # check rows\n for row in current_board:\n row = set(row)\n if (\"X\" not in row and \"-\" not in row) or (\"O\" not in row and \"-\" not in row):\n return True\n\n # check columns\n for i in range(len(current_board)):\n column_to_check = set()\n \n for j in range(len(current_board)):\n column_to_check.add(current_board[j][i])\n\n if (\"X\" not in column_to_check and \"-\" not in column_to_check) or (\"O\" not in column_to_check and \"-\" not in column_to_check):\n return True\n \n # check diagonals\n forward_diagonal_check = set()\n backward_diagonal_check = set()\n \n for i in range(len(current_board)):\n forward_diagonal_check.add(current_board[i][i])\n backward_diagonal_check.add(current_board[i][len(current_board)-1-i])\n\n if forward_diagonal_check == {\"X\"} or forward_diagonal_check == {\"O\"}:\n return True\n\n if backward_diagonal_check == {\"X\"} or backward_diagonal_check == {\"O\"}:\n return True",
"def check_won (grid):\r\n for i in range (4):\r\n for j in range (4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False",
"def _check_if_position_on_board(coord: tuple, board_size: int):\n in_row = coord[0] in range(board_size)\n in_col = coord[1] in range(board_size)\n return in_row and in_col",
"def has_clashes(the_board):\r\n for col in range(1,len(the_board)):\r\n if col_clashes(the_board, col):\r\n return True\r\n return False",
"def has_clashes(the_board):\r\n for col in range(1,len(the_board)):\r\n if col_clashes(the_board, col):\r\n return True\r\n return False",
"def check_won (grid):\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] >= 32:\r\n return True\r\n return False",
"def is_win(character: list, board: list) -> bool:\n if character[0] == board[-1][0] and character[1] == board[-1][1]:\n return True\n else:\n return False",
"def diag_win(board):\n\tif board[1][1] != EMPTY and (board[1][1] == board[0][2] == board[2][0] or board[1][1] == board[0][0] == board[2][2]):\n\t\treturn True\n\treturn False",
"def has_clashes(the_board):\n for col in range(1,len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False",
"def has_clashes(the_board):\n for col in range(1,len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False",
"def has_clashes(the_board):\n for col in range(1, len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False",
"def check_if_computer_can_win():\n position = row_wise_checking(computer)\n if position != -1:\n board[position] = computer\n else:\n position = column_wise_checking(computer)\n if position != -1:\n board[position] = computer\n else:\n position = diagonal_wise_checking(computer)\n if position != -1:\n board[position] = computer\n else:\n position = take_corner()\n if position != -1:\n board[position] = computer\n else:\n return -1",
"def is_in_board(self):\n return self.is_alive()"
] |
[
"0.7762859",
"0.75517225",
"0.75112927",
"0.74653816",
"0.7369882",
"0.73649544",
"0.73205835",
"0.7095848",
"0.70651495",
"0.70537746",
"0.70246136",
"0.7009773",
"0.6989529",
"0.6960644",
"0.6959121",
"0.6956892",
"0.69516766",
"0.6949291",
"0.691275",
"0.6909919",
"0.69020647",
"0.69020647",
"0.689185",
"0.6879753",
"0.687211",
"0.68718016",
"0.68718016",
"0.6851389",
"0.68509805",
"0.6838352"
] |
0.90845925
|
0
|
Returns the color value of self_color's opponent.
|
def get_opponent_color(self, self_color):
return abs(self_color - 1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_opponent_color(self, mycolor):\n if mycolor == ChessGame.BLACK:\n return ChessGame.WHITE\n elif mycolor == ChessGame.WHITE:\n return ChessGame.BLACK\n else:\n raise NotImplementedError()",
"def player(self):\n return self._color",
"def getTurn(self):\r\n return self.players[self.getCurrentPlayer()].getColor()",
"def getCurrentColor(self):\n if self.__currentnode__ is None:\n return None\n else:\n return self.__currentnode__.getPlayer().getColor()",
"def get_color(self):\n return self.color",
"def get_color(self):\n\n return self.color",
"def get_color(self):\r\n return self.__color",
"def opponent(self, player):\r\n # player = core.BLACK (can do this for any static var)\r\n if player == core.BLACK:\r\n return core.WHITE\r\n else:\r\n return core.BLACK",
"def opponent(player):\n return BLACK if player is WHITE else WHITE",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def get_color(self):\r\n return self._color",
"def get_color(self):\n return self._color",
"def get_color(self):\n return self._color",
"def getOpponent(self):\n return self.__opponent",
"def get_color(self):\n\n return self._color",
"def get_color(self) -> str:\n return self.color",
"def color(self):\n return self.__color",
"def getColor(self):\r\n return self.color",
"def get_color(self) -> str:\r\n return self.color",
"def get_colour(self):\n return self.colour",
"def getColor(self):\n return self.color",
"def color(self):\n return self._color",
"def color(self):\n return self._color",
"def get_color(self):\n return COLOR_DICT[self.element]",
"def getColor(self):\n return self.__color",
"def getColor(self):\n return self.__color"
] |
[
"0.75106376",
"0.74137187",
"0.7160636",
"0.7123983",
"0.69804937",
"0.6936204",
"0.6918651",
"0.6908823",
"0.6872566",
"0.6858778",
"0.684322",
"0.684322",
"0.684322",
"0.684322",
"0.6842772",
"0.6800654",
"0.6800654",
"0.6766684",
"0.66826147",
"0.6656713",
"0.6648412",
"0.66317856",
"0.66279787",
"0.66193944",
"0.6561174",
"0.65434057",
"0.65434057",
"0.6488967",
"0.64774084",
"0.64774084"
] |
0.8906139
|
0
|
Returns the number of self_color's discs on the board.
|
def get_disk_count(self, self_color, board):
count = 0
for r in range(8):
for c in range(8):
if board[r][c] == self_color:
count += 1
return count
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_num_black_pieces(self):\n return self.num_black_pieces",
"def count_discs(self, player: Player) -> int:\n count = 0\n player_disc = disc.get_disc(player)\n for i in range(self.size):\n for j in range(self.size):\n if self._grid[i][j] == player_disc:\n count += 1\n return count",
"def count_colors(board, color):\n n = 0\n for cell in board:\n if cell == color:\n n += 1\n elif cell == cinv(color):\n n -= 1\n return n",
"def get_num_explored(self):\n return self.__num_explored",
"def countDiff(self, color):\n count = 0\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n count += 1\n if self[x][y]==-color:\n count -= 1\n return count",
"def count_legal_moves(board, color):\n return len(legal_moves(board, color))",
"def get_num_white_pieces(self):\n return self.num_white_pieces",
"def get_sides_count(self):\r\n return self.__sides_count",
"def get_size(self):\n return len(self.board)",
"def score(self, board: Block) -> int:\n grid = _flatten(board)\n left = grid[0]\n right = grid[-1]\n top = [i[0] for i in grid]\n bottom = [i[-1] for i in grid]\n score0 = left.count(self.colour)\n score1 = right.count(self.colour)\n score2 = top.count(self.colour)\n score3 = bottom.count(self.colour)\n return score0 + score1 + score2 + score3",
"def amount(self):\n return len(self.circles)",
"def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number",
"def get_counts(self):\n counts = [0, 0]\n for i in range(self._num_rows):\n for j in range(self._num_cols):\n if self._board[i][j] == \"B\":\n counts[0] += 1\n elif self._board[i][j] == \"W\":\n counts[1] += 1\n return counts",
"def on_the_edge_without_neighbors(self, board, color):\n disks_on_the_edge_without_neighbors = 0\n disks_on_the_edge = self.get_on_edge(board, color)\n for disk_on_edge in disks_on_the_edge:\n if not self.get_opposite_neighbors_on_edge(board, disk_on_edge):\n disks_on_the_edge_without_neighbors += 1\n return disks_on_the_edge_without_neighbors",
"def value(self):\n black, white = 0, 0\n for sq in Othello.squares():\n piece = self.__board[sq]\n if piece == BLACK: black += 1\n elif piece == WHITE: white += 1\n if black == white:\n return 0.5\n elif black > white:\n return 1\n else:\n return 0",
"def __len__(self):\r\n return len(self.board)",
"def eval_board(self, board):\n\t\ts = 0\n\t\t\n\t\tfor i in board.columns:\n\t\t\tfor j in board.rows:\n\t\t\t\tif board[i+j] == self.color:\n\t\t\t\t\n\t\t\t\t\tif i in ['A', 'H'] or j in ['1', '8']:\n\t\t\t\t\t\tif i + j in ['A1', 'A8', 'H1', 'H8']:\n\t\t\t\t\t\t\ts += 4\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ts += 2\n\t\t\t\t\telse:\n\t\t\t\t\t\ts += 1\n\t\treturn s",
"def score(self, board: Block) -> int:\r\n score = 0\r\n flat = _flatten(board)\r\n\r\n perimeter = []\r\n perimeter.extend(flat[0][1:-1])\r\n perimeter.extend(flat[-1][1:-1])\r\n for i in range(1, len(flat) - 1):\r\n perimeter.append(flat[i][0])\r\n perimeter.append(flat[i][-1])\r\n\r\n if flat[0][0] == self.colour:\r\n score += 2\r\n if flat[0][-1] == self.colour:\r\n score += 2\r\n if flat[-1][0] == self.colour:\r\n score += 2\r\n if flat[-1][-1] == self.colour:\r\n score += 2\r\n\r\n for element in perimeter:\r\n if element == self.colour:\r\n score += 1\r\n\r\n return score",
"def rec_count(color : str) -> int:\n return sum(\n (1 + rec_count(child)) * count\n for child, count in contents[color].items()\n )",
"def count(self,color):\n count = 0\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n if(self.gameState[x,y]==color):\n count+=1\n return count",
"def get_num_moves(self, player: PlayerColor) -> int:\r\n player_squares: List[Square] = self.get_player_squares(player)\r\n count: int = 0\r\n for player_square in player_squares:\r\n adj_squares: List[Square] = \\\r\n self._get_adjacent_squares(player_square.pos)\r\n for adj_square in adj_squares:\r\n if (adj_square.state == SquareState.OPEN):\r\n count += 1\r\n elif(adj_square.state == SquareState.OCCUPIED):\r\n opposite_square: Square = \\\r\n self.squares.get(\r\n self._get_opposite_pos(player_square.pos,\r\n adj_square.pos))\r\n if (opposite_square is not None\r\n and opposite_square.state == SquareState.OPEN):\r\n count += 1\r\n\r\n return count",
"def queryNumberOfColors(self):\n self._numColorsInUse = \\\n self._readInt('How many colors are available', 2, len(self._palette))\n return self._numColorsInUse",
"def get_disc_count(self) -> Optional[int]:\n return self.disc_count",
"def get_num_pieces(self):\n return self.num_pieces",
"def color_depth(self):\n return self._color_depth",
"def getNumberPoints(self, move):\r\n (current_point_white, current_point_black) = self._board.get_nb_pieces()\r\n self._board.push(move)\r\n (new_point_white, new_point_black) = self._board.get_nb_pieces()\r\n self._board.pop()\r\n \r\n if(self._mycolor == 1): #black\r\n return (new_point_black-current_point_black) \r\n else:\r\n return (new_point_white-current_point_white)",
"def get_lab_col_cnt(self):\n return zip(self.get_labels(), self.colors, self.data)",
"def visual_len(self) -> int:\n return visual_len(self)",
"def currentScore(self, playerColor):\n total = 0\n for col in range(0, 8):\n for row in range(0, 8):\n if self.board[col][row].color == playerColor:\n total+=1\n return total",
"def columns(self) -> int:\n return self.__squares[0].__len__()"
] |
[
"0.66852605",
"0.66430324",
"0.6342966",
"0.6210851",
"0.6190696",
"0.6121007",
"0.610449",
"0.5924346",
"0.59230477",
"0.59166294",
"0.58878237",
"0.58751136",
"0.58189535",
"0.5802374",
"0.5773629",
"0.575617",
"0.57365835",
"0.5736171",
"0.5729498",
"0.5720513",
"0.5717215",
"0.5698251",
"0.5693359",
"0.5680232",
"0.5677649",
"0.56753623",
"0.5674731",
"0.566444",
"0.5654667",
"0.56359357"
] |
0.76983726
|
0
|
Checks whether self_color has a valid move in direction delta starting from coords.
|
def check_moves(self, board, self_color, coords, delta):
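        # Othello-style flank scan: walk outward from coords along delta.
        # If one or more opponent discs are crossed and then an empty square
        # is reached, that empty square is a legal move for self_color.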
found_opponent = False
for i in range(1, 8):
dr = coords[0] + i * delta[0]
dc = coords[1] + i * delta[1]
if self.is_on_board(dr, dc):
if board[dr][dc] == self_color:
break
elif board[dr][dc] == self.get_opponent_color(self_color):
found_opponent = True
elif board[dr][dc] == self.EMPTY:
if found_opponent:
return dr, dc
else:
break
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n # for red soldiers (who can only move downward or to the side)\n if self.get_color() == 'red':\n # if destination within the board and the move is strictly one downward or to the side\n if (to_col in range(9) and to_row in range(10) and\n ((abs(to_col - from_col) == 1 and to_row == from_row) or (to_col == from_col and to_row - from_row == 1))):\n return True\n # if moving diagonally within the blue palace\n if from_coordinates in [[3,7],[5,7]] and to_coordinates == [4,8]:\n return True\n if from_coordinates == [4,8] and to_coordinates in [[3,9],[5,9]]:\n return True\n\n return False\n\n # for blue soldiers (who can only move upward or to the side)\n if self.get_color() == 'blue':\n # if destination within the board and the move is strictly one upward or to the side\n if (to_col in range(9) and to_row in range(10) and\n ((abs(to_col - from_col) == 1 and to_row == from_row) or (to_col == from_col and to_row - from_row == -1))):\n return True\n # if moving diagonally within the red palace\n if from_coordinates in [[3, 2], [5, 2]] and to_coordinates == [4, 1]:\n return True\n if from_coordinates == [4, 1] and to_coordinates in [[3, 0], [5, 0]]:\n return True\n\n return False\n\n return False",
"def check_reached(self):\n m_x, m_y = self.destination.get_pos()\n m_radius = self.destination.radius\n distance_centre = math.sqrt((m_x - self.x)**2 + (m_y - self.y)**2)\n sum_radii = m_radius + self.radius\n if distance_centre < sum_radii:\n self.color = pygame.colordict.THECOLORS['green']\n self.has_reached = True",
"def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n if self._color == 'red':\n # if destination within the palace:\n if (to_col in range(3,6) and to_row in range(3) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col-from_col) == 1 and to_row-from_row == 0) or\n (to_col-from_col == 0 and abs(to_row-from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4,1] and to_coordinates in [[3,0],[3,2],[5,0],[5,2]]) or\n (from_coordinates in [[3,0],[3,2],[5,0],[5,2]] and to_coordinates == [4,1]))\n )\n ):\n return True\n else:\n return False\n\n if self._color == 'blue':\n # if destination within the palace:\n if (to_col in range(3,6) and to_row in range(7,10) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col-from_col) == 1 and to_row-from_row == 0) or\n (to_col-from_col == 0 and abs(to_row-from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4,8] and to_coordinates in [[3,7],[3,9],[5,7],[5,9]]) or\n (from_coordinates in [[3,7],[3,9],[5,7],[5,9]] and to_coordinates == [4,8]))\n )\n ):\n return True\n else:\n return False",
"def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n if self._color == 'red':\n # if destination within the palace:\n if (to_col in range(3, 6) and to_row in range(3) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col - from_col) == 1 and to_row - from_row == 0) or\n (to_col - from_col == 0 and abs(to_row - from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4, 1] and to_coordinates in [[3, 0], [3, 2], [5, 0], [5, 2]]) or\n (from_coordinates in [[3, 0], [3, 2], [5, 0], [5, 2]] and to_coordinates == [4, 1]))\n )\n ):\n return True\n else:\n return False\n\n if self._color == 'blue':\n # if destination within the palace:\n if (to_col in range(3, 6) and to_row in range(7, 10) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col - from_col) == 1 and to_row - from_row == 0) or\n (to_col - from_col == 0 and abs(to_row - from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4, 8] and to_coordinates in [[3, 7], [3, 9], [5, 7], [5, 9]]) or\n (from_coordinates in [[3, 7], [3, 9], [5, 7], [5, 9]] and to_coordinates == [4, 8]))\n )\n ):\n return True\n else:\n return False",
"def has_valid_move(self, cur_square, board):\n coords = cur_square.coords\n neighbor_list = [tuple(map(sum, zip(coords, offset))) for offset in self._offsets]\n return self.has_valid_move_in_list(coords, neighbor_list, board)",
"def verify_legal_move(self, direction):\n for b_x, b_y in self.get_block_positions(self.active_piece.FIGURE):\n\n if direction == \"LEFT\":\n b_x -= 1\n elif direction == \"RIGHT\":\n b_x += 1\n elif direction == \"DOWN\":\n b_y += 1\n else:\n raise ValueError\n\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True",
"def _isvalidmove(self, from_, to_):\n if self.board[from_].occupant is None:\n print(\"Moving from empty square\")\n return False\n piece = self.board[from_].occupant\n\n if piece.color != self.to_move:\n print(\"Wrong color\")\n return False\n\n if self.is_checked:\n if piece.notation != 'K':\n print(\"King is checked!\")\n return False\n\n diff = (\n to_cartesian(to_)[0] - to_cartesian(from_)[0],\n to_cartesian(to_)[1] - to_cartesian(from_)[1]\n )\n if not piece.hopping:\n if self.board.isblocked(from_, to_):\n print(\"Move blocked by other pieces\")\n return False\n\n if self.board[to_].occupant is not None:\n if piece.color == self.board[to_].occupant.color:\n print(\"Cannot capture friendly\")\n return False\n\n if diff not in piece.get_captures():\n print(\"Invalid piece capture\")\n return False\n\n if diff not in piece.get_moves():\n print(\"Invalid piece move\")\n return False\n\n return True",
"def is_queen_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same colored diagonal\n if abs(from_row - to_row) != abs(from_col - to_col):\n # if on same col? (like rook)\n if from_row != to_row and (from_col == to_col):\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n # elif on same row?\n elif from_col != to_col and (from_row == to_row):\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n else:\n # if not on same col or row\n return False\n else:\n # on same colored diagonal (moves like bishop)\n dr = 1 if to_row - from_row > 0 else -1\n dc = 1 if to_col - from_col > 0 else -1\n\n # check if any pieces are in the way of destination\n dm = abs(to_row - from_row)\n return self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)",
"def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n # Checking diagonals in the palace\n if cur_pos and new_pos in self._special:\n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking all possible diagonals\n elif new_row == cur_row + 1 and new_col == cur_col + 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row - 1 and new_col == cur_col - 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row + 2 and new_col == cur_col + 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_col - 2 and new_row == cur_col - 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True \n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n else:\n return False\n else:\n return False",
"def is_solved(self):\n colors = ['green', 'blue', 'red', 'orange', 'white', 'yellow']\n for row in range(3):\n for column in range(3):\n if self.front[row][column] != colors[0]:\n return False\n for row in range(3):\n for column in range(3):\n if self.back[row][column] != colors[1]:\n return False\n for row in range(3):\n for column in range(3):\n if self.right[row][column] != colors[2]:\n return False\n for row in range(3):\n for column in range(3):\n if self.left[row][column] != colors[3]:\n return False\n for row in range(3):\n for column in range(3):\n if self.up[row][column] != colors[4]:\n return False\n for row in range(3):\n for column in range(3):\n if self.down[row][column] != colors[5]:\n return False\n return True",
"def will_collide(self, direction=None):\n new_origin, new_positions = self.active_piece.try_move(direction=direction)\n for row, col in new_positions:\n y, x = new_origin[0] + row, new_origin[1] + col\n if y > 19 or y < 0 or x > 9 or x < 0 or self.board[y][x] == '#':\n return True\n return False",
"def check_legal(self, cur_pos, new_pos, board, state): \n if cur_pos and new_pos in self._special:\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n if new_pos in self._special: # if its in the palace\n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n # Checking if forward or backward movement is legal\n elif (new_row == cur_row - 1 or new_row == cur_row + 1) and (new_col == cur_col):\n return True\n # Checking if diagonal lines are possible\n elif cur_pos in self._corners:\n if (new_row == cur_row + 1 or new_row == cur_row - 1) and (new_col == cur_col - 1 or new_col == cur_col + 1):\n return True\n else:\n return False\n else:\n return False",
"def check_legal(self, cur_pos, new_pos, board, state):\n if cur_pos and new_pos in self._special:\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n if new_pos in self._special: # if its in the palace\n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n # Checking if forward or backward movement is legal\n elif (new_row == cur_row - 1 or new_row == cur_row + 1) and (new_col == cur_col):\n return True\n # Checking if diagonal lines are possible\n elif cur_pos in self._corners:\n if (new_row == cur_row + 1 or new_row == cur_row - 1) and (new_col == cur_col - 1 or new_col == cur_col + 1):\n return True\n else:\n return False\n else:\n return False",
"def is_valid_pos(self, pos_step):\n return not (self.pos.x % pos_step or self.pos.y % pos_step)",
"def is_valid_move(self, side_color, x, y):\n return self.disc_place(side_color, x, y, check_only=True)",
"def is_move_valid(self, direction, reference_board=None):\n # Verify a left move does not take you off the board.\n if (direction == \"l\"):\n if (self._current_loc.get_column() == 0):\n return False\n # Verify an up move does not take you off the board.\n elif (direction == \"u\"):\n # Verify the move does not take you off the board.\n if (self._current_loc.get_row() == 0):\n return False\n # Verify a right move does not take you off the board.\n elif (direction == \"r\"):\n current_row = self._current_loc.get_row()\n max_column_number = len(self._untraversed_board[current_row])\n if self._current_loc.get_column() + 1 == max_column_number:\n return False\n # Verify a down move does not take you off the board.\n elif (direction == \"d\"):\n if self._current_loc.get_row() + 1 == len(self._untraversed_board):\n return False\n else:\n assert False, \"Invalid move direction.\"\n\n # Get the new location for a move in the specified direction.\n new_location = self._calculate_move_location(direction)\n new_row = new_location.get_row()\n new_col = new_location.get_column()\n # Verify the space is available\n if(reference_board is None):\n return BoardPath._untraversed_board[new_row][new_col] != \"#\"\n else:\n return reference_board[new_row][new_col] != \"#\"",
"def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos",
"def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n \n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n # Check if you're in the palace\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False \n # Checking if the movement is left or right (one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n\n # Checking if forward movement is legal\n elif self._color == 'BLUE':\n print(\"this soldier is blue\")\n if new_row == cur_row - 1 and new_col == cur_col:\n print(\"The blue soldier is trying to move forward\")\n # cant take your own piece\n if self.piece_type(new_pos, board) is not None:\n print(\"There's a piece here\")\n if self.piece_type(new_pos, board).get_color == self._color:\n print(\"Trying to take it's own color piece\")\n return False\n return True\n elif self._color == 'RED':\n print(\"this soldier is red\")\n if new_row == cur_row + 1 and new_col == cur_col:\n print(\"The red soldier is trying to move forward\")\n if self.piece_type(new_pos, board) is not None:\n print(\"There's a piece here\")\n if self.piece_type(new_pos, board).get_color == self._color:\n print(\"Trying to take it's own color piece\")\n return False\n return True\n else:\n return False\n else:\n return False",
"def checkAdjacent(self, direction, maze):\n\n y = self.y\n x = self.x\n\n # Shift x or y depending on the given direction.\n if direction in NS:\n y = shift[direction](y)\n elif direction in EW:\n x = shift[direction](x)\n\n # Check new location for obstacle or unwanted direction\n if maze[y][x] == 1 or ([x, y] in self.fullpath()) or (self.moved() is False and direction in self.dead_end_direction()[-1]):\n return False\n else:\n return True",
"def color_check_mate(self, mycolor):\n\n if not self.color_in_check(mycolor):\n return False\n\n incheck = True\n for (x, y) in self.__players[mycolor]:\n moves = self._get_piece_moves(x, y)\n for to in moves:\n res, captured = self._make_move((x, y), to)\n if not self.color_in_check(mycolor):\n incheck = False\n\n self._unmake_move(to, (x, y), captured)\n if not incheck:\n return False\n\n return incheck",
"def available_moves(self) -> bool:\n has_move = False\n for i in range(self.col):\n if self.valid_column(i):\n has_move = True\n return has_move",
"def checkValidMove(self, move):\n boardCopy = copy.deepcopy(self)\n tilesChange = False\n if move == Move.UP:\n boardCopy.moveUp()\n elif move == Move.DOWN:\n boardCopy.moveDown()\n elif move == Move.LEFT:\n boardCopy.moveLeft()\n elif move == Move.RIGHT:\n boardCopy.moveRight()\n else:\n raise ValueError('Invalid Move was input')\n \n for i in range(4):\n for j in range(4):\n if boardCopy.getTile(i,j) != self.getTile(i,j):\n tilesChange = True\n del(boardCopy)\n return tilesChange",
"def checkLegalMove(self, initialPosition, destinationPosition, colorIndex):\n checkColor = self.grid.REPRESENTATION[colorIndex]\n otherColor = self.grid.REPRESENTATION[1-colorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n if self.grid[initialPosition] != checkColor:\n print 'The piece you are trying to move is not yours! Please reselect your move.'\n return False\n if self.grid[destinationPosition] != emptyColor:\n print 'The destination position of your move is not empty! Please reselect your move.'\n return False\n if initialPosition == destinationPosition:\n print 'The initial and destination position of your move are the same. Please reselect your move.'\n return False\n\n if initialPosition[0] == destinationPosition[0]:\n x = initialPosition[0]\n if (destinationPosition[1] - initialPosition[1]) %2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[1] < destinationPosition[1]:\n for i in range(initialPosition[1]+1, destinationPosition[1], 2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i+1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[1]-1, destinationPosition[1], -2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i-1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n elif initialPosition[1] == destinationPosition[1]:\n y = initialPosition[1]\n if (destinationPosition[0] - initialPosition[0])%2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[0] < destinationPosition[0]:\n for i in range(initialPosition[0]+1, destinationPosition[0], 2):\n if self.grid[(i, y)] != otherColor or self.grid[(i+1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[0]-1, destinationPosition[0], -2):\n if self.grid[(i, y)] != otherColor or self.grid[(i-1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n # make turns\n print 'Making turns is invalid move! Please reselect your move.'\n return False",
"def check_path(self, cur_pos, new_pos, board, state):\n\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n cannon_pieces = [Cannon('BLUE'), Cannon('RED')]\n \n # Ensures the range is always in the right order\n if new_row > cur_row: \n ran_r = range(cur_row + 1, new_row, 1)\n elif cur_row > new_row:\n ran_r = range(cur_row - 1, new_row, -1)\n \n elif new_col > cur_col:\n ran_c = range(cur_col + 1, new_col, 1)\n elif cur_col > new_col:\n ran_c = range(cur_col - 1, new_col, -1)\n else:\n return False\n \n # Checking if the movement is left or right is legal\n if new_row == cur_row:\n print(\"it's in the new_row == cur_row\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n print(counter)\n for col_spot in ran_c:\n if board[cur_row][col_spot] is not None:\n counter += 1\n\n if counter == 0: \n print(\"jump!\")\n return True\n \n # Checking if the movement vertical is legal\n if new_col == cur_col:\n print(\"it's in the new_col == cur_col\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n for row_spot in ran_r:\n if board[row_spot][cur_col] is not None:\n counter += 1\n print(board[row_spot][cur_col])\n print(counter)\n if counter == 0:\n print(\"jump!\")\n return True",
"def is_rook_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same column or row\n if ((from_row != to_row and from_col != to_col) or\n (from_row == to_row and from_col == to_col)):\n return False\n\n # check if any pieces are in the way of destination\n if from_row != to_row:\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n if from_col != to_col:\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n dm = abs(to_row - from_row)\n\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)\n\n # Casting: Rook invalidation\n if retVal and (from_row == 0 or from_row == 7):\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n if piece_color == \"white\":\n if from_col == 0:\n self.whiteCanCastleQside = False\n elif from_col == 7:\n self.whiteCanCastleKside = False\n else:\n if from_col == 0:\n self.blackCanCastleQside = False\n elif from_col == 7:\n self.blackCanCastleKside = False\n\n return retVal",
"def _valid_move_exists(self):\n lst = []\n for i_row in range(self._num_rows):\n for i_col in range(self._num_cols):\n if self._valid_placement(i_row, i_col)[0]:\n lst.append((i_row, i_col))\n\n return lst != [] #If lst != [], then the list has elements -> valid move(s) exist",
"def can_move(self):\r\n for wall in self.app.walls:\r\n if vec(self.grid_pos+self.direction) == wall:\r\n return False\r\n return True",
"def is_knight_move_valid(self, from_row, from_col, to_row, to_col):\n # check for valid move\n if ((abs(from_row - to_row) == 1 and abs(from_col - to_col) == 2) or\n (abs(from_row - to_row) == 2 and abs(from_col - to_col) == 1)):\n return True\n return False",
"def move_is_valid(self, pos):\n\n if (not isinstance(pos, tuple) or len(pos) != 2 or \n not isinstance(pos[0], int) or not isinstance(pos[1], int)):\n return False\n y, x = pos\n if (y >= 0 and y < self.size and x >= 0 and x < self.size and \n self.board[pos] == HexBoard.EMPTY):\n return True\n else:\n return False",
"def is_move_valid(self, from_row, from_col, to_row, to_col):\n # check is taking own piece?\n if self._is_taking_own_piece(from_row, from_col, to_row, to_col):\n return False\n\n piece = self.board.squares[from_row][from_col]\n if piece == ChessPiece.W_ROOK or piece == ChessPiece.B_ROOK:\n return self.is_rook_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_KNIGHT or piece == ChessPiece.B_KNIGHT:\n return self.is_knight_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_BISHOP or piece == ChessPiece.B_BISHOP:\n return self.is_bishop_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_QUEEN or piece == ChessPiece.B_QUEEN:\n return self.is_queen_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_KING or piece == ChessPiece.B_KING:\n return self.is_king_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_PAWN or piece == ChessPiece.B_PAWN:\n return self.is_pawn_move_valid(from_row, from_col,\n to_row, to_col)"
] |
[
"0.6963955",
"0.6874568",
"0.6862489",
"0.6853493",
"0.673482",
"0.6734331",
"0.6709162",
"0.6708506",
"0.6673571",
"0.6617804",
"0.66103816",
"0.66061807",
"0.65898937",
"0.6579249",
"0.6572609",
"0.65449744",
"0.6529392",
"0.65056103",
"0.6483118",
"0.64825475",
"0.6477906",
"0.64674985",
"0.6457467",
"0.6453294",
"0.64447385",
"0.64405173",
"0.6413071",
"0.63934284",
"0.63860404",
"0.6379162"
] |
0.7871493
|
0
|
Returns all moves self_color could make on the given board.
|
def find_possible_moves(self, board, self_color):
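        # Scan all 8 directions from every disc of self_color and collect the
        # empty squares returned by check_moves, skipping duplicates.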
possible_moves = []
delta = [(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1)]
for r in range(len(board)):
for c in range(len(board[r])):
if board[r][c] == self_color:
for i in range(0, 8):
coords = (r, c)
found_move = self.check_moves(board, self_color, coords, delta[i])
if found_move is not None and found_move not in possible_moves:
possible_moves.append(found_move)
return possible_moves
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_all_possible_moves(self):\r\n moves = []\r\n for i in range(8):\r\n for j in range(8):\r\n color = self.board[i][j][0]\r\n if (color == 'b' and not self.turn_white) or (color == 'w' and self.turn_white):\r\n p_type = self.board[i][j][1]\r\n if p_type == 'r':\r\n self.get_rook_moves(i, j, moves)\r\n elif p_type == 'k':\r\n self.get_king_moves(i, j, moves)\r\n elif p_type == 'q':\r\n self.get_queen_moves(i, j, moves)\r\n elif p_type == 'p':\r\n self.get_pawn_moves(i, j, moves)\r\n elif p_type == 'b':\r\n self.get_bishop_moves(i, j, moves)\r\n elif p_type == 'n':\r\n self.get_knight_moves(i, j, moves)\r\n return moves",
"def checkMoves(self,board):\n possibleMoves = []\n\n for c in xrange(0,8):\n for r in xrange(0,8):\n if board.isValidMove(self.tile,c,r):\n possibleMoves.append(c+r*8)\n\n return possibleMoves",
"def get_possibles_moves(board: numpy.ndarray) -> List[Move]:\n return [tuple(k) for k in numpy.argwhere(board == -1) if 0 != k[0] != board.shape[0] - 1 != k[1] != 0]",
"def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves",
"def get_possible_moves(self, board):\n possible_moves = []\n\n # search in each direction for possible squares to move to\n for direction in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n possible_moves.extend(\n self._get_possible_moves_in_dir(board, rank_incr=direction[0], file_incr=direction[1])\n )\n\n return possible_moves",
"def get_possible_moves(board):\n\tpossible_moves = []\n\n\tfor count, player in enumerate(board):\n\t\tif player is not server_player and player is not user_player:\n\t\t\tpossible_moves.append(count)\n\n\treturn possible_moves",
"def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves",
"def moves(self):\n\n moves = list()\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n move = (row, col)\n\n if self.board[row][col] == 9:\n moves.append(move)\n\n if self.board[row][col] == 1 or self.board[row][col] == 2:\n\n move = (row - 1, col)\n\n if self.board[row - 1][col] == 1 or self.board[row - 1][col] == 2:\n\n pass\n\n else:\n\n moves.append(move)\n\n return moves",
"def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves",
"def _get_possible_moves(board, lightcycle):\n result = []\n for diff in ((0, 1, PlayerActions.MOVE_DOWN), (1, 0, PlayerActions.MOVE_RIGHT), (0, -1, PlayerActions.MOVE_UP), (-1, 0, PlayerActions.MOVE_LEFT)):\n next_x = lightcycle['position'][0] + diff[0]\n next_y = lightcycle['position'][1] + diff[1]\n if 0 <= next_x < len(board) and 0 <= next_y < len(board[0]):\n if board[next_x][next_y] in (EMPTY, POWERUP):\n result += [diff]\n return result",
"def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result",
"def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list",
"def possible_moves(self): \n return [a + 1 for a, b in enumerate(self.board) if b == 0]",
"def get_available_moves(self, board):\n pass",
"def get_legal_moves(self, color):\n moves = set() # stores the legal moves.\n color = max(0, color)\n\n # Get all the squares with pieces of the given color.\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n newmoves = self.get_moves_for_square((x,y))\n moves.update(newmoves)\n return list(moves)",
"def get_moves_for_square(self, square):\n (x,y) = square\n\n # determine the color of the piece.\n color = self[x][y]\n\n # skip empty source squares.\n if color==0:\n return []\n\n # search all possible directions.\n moves = []\n for direction in self.__directions:\n move = self._discover_move(square, direction)\n if move:\n # print(square,move,direction)\n moves.append(move)\n\n # return the generated move list\n return moves",
"def get_possible_moves_white(item, board):\n obj_list = []\n\n # Get board to end-state\n for pos_tuple in item[1]:\n obj_list.append(board[pos_tuple[1][0]][pos_tuple[1][1]])\n move_on_own_board(pos_tuple[0], pos_tuple[1], board)\n\n new_positions = get_all_positions(board, True)\n for i in range(len(new_positions)):\n new_positions[i] = item[1] + [new_positions[i]]\n\n # Get board back to init-state\n for i in reversed(range(len(item[1]))):\n move_on_own_board(item[1][i][1], item[1][i][0], board)\n board[item[1][i][1][0]][item[1][i][1][1]] = obj_list[i]\n\n return new_positions",
"def get_all_moves(board, player):\n moves = []\n if not (player_has_won(board, player) or\n player_has_won(board, utils.get_opponent(player)) or\n (not is_valid_board(board))):\n for index in range(9):\n if board[index] == config.NO_PLAYER:\n moves += [index]\n return moves",
"def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves",
"def get_possible_moves(self, board: np.ndarray):\n board_size = board.shape[0]\n moves = []\n if abs(self.value) == 1:\n if self.start_row <= 2:\n directions = [np.array((1, -1)), np.array((1, 1))]\n else:\n directions = [np.array((-1, 1)), np.array((-1, -1))]\n else:\n directions = [np.array((-1, 1)), np.array((1, 1)), np.array((-1, -1)), np.array((1, -1))]\n for direction in directions:\n within_board = True\n i = 1\n while within_board:\n coord = self.coord + direction * i\n within_board = _check_if_position_on_board(coord, board_size)\n # break if first step is already out of board\n if not within_board:\n break\n value_board = board[coord[0], coord[1]]\n # break if there is a stone of them same player in the way\n if value_board < 0 and self.value < 0 or value_board > 0 and self.value > 0:\n break\n # if there is no stone, than add this to move list.\n if value_board == 0:\n moves += [{\"old_coord\": self.coord, \"new_coord\": coord, \"jumped_stones\": [], \"jumped_values\": 0,\n \"move_coords\": [coord]}]\n # if there is a stone of the enemy\n if (value_board < 0 < self.value) or (self.value < 0 < value_board):\n # check if it can be jumped\n coord_jump = coord + direction\n move_coords = [coord_jump.copy()]\n within_board_after_jump = _check_if_position_on_board(coord_jump, board_size)\n # break if place behind stone is out of border\n if not within_board_after_jump:\n break\n value_board_jump = board[coord_jump[0], coord_jump[1]]\n jumped_stones = []\n # break if there is no free place\n if value_board_jump != 0:\n break\n jumped_stones += [coord]\n moves_tmp = self.jump_chain(directions, board, coord_jump, value_board, jumped_stones, move_coords)\n if len(moves_tmp) > 0:\n moves += moves_tmp\n else:\n moves += [{\"old_coord\": self.coord, \"new_coord\": coord_jump, \"jumped_stones\": jumped_stones,\n \"jumped_values\": abs(value_board), \"move_coords\": [coord_jump]}]\n i += 1\n # break if normal stone, because they can only move one field\n if abs(self.value) == 1:\n break\n return moves",
"def get_available_moves(self, board):\n available_moves = []\n for fieldx in range(len(board)):\n column = []\n for fieldy in range(len(board)):\n legit_move = board[self.posy][self.posx].is_valid_move(board, fieldx, fieldy)\n column.append(legit_move)\n available_moves.append(column)\n return available_moves",
"def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n if not (self.field_row*self.color_value == 1 or self.field_row*self.color_value == -6):\n self.pot_moves = {(1*self.color_value, 0)}\n\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n\n for move in self.pot_capture_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves",
"def get_all_positions(board, white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list",
"def get_moves(self, board, player):\r\n moves = []\r\n for x in range(self.board_width):\r\n for y in range(self.board_width):\r\n if board[x][y] == 0:\r\n copy = board.copy()\r\n copy[x][y] = player\r\n moves.append(copy)\r\n return moves",
"def possible_moves(self, board):\n\n coordinate_list = []\n algebraic_from = JanggiGame.translate_to_algebraic(self._location)\n for i, col in enumerate(board):\n for j, row in enumerate(col):\n algebraic_to = JanggiGame.translate_to_algebraic([i,j])\n if self.validate_move(algebraic_from,algebraic_to,board) is True:\n coordinate_list.append([i,j])\n\n return coordinate_list",
"def get_available_moves(self, team_color):\n squares = self.squares_with_pieces_of_color(team_color)\n moves = []\n for square in squares:\n moves.extend(square.get_moves(self))\n return moves",
"def get_next_moves1(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\":\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves",
"def get_possible_moves_black(item, board):\n obj_list = []\n\n # Get board to end-state\n for pos_tuple in item[1]:\n obj_list.append(board[pos_tuple[1][0]][pos_tuple[1][1]])\n move_on_own_board(pos_tuple[0], pos_tuple[1], board)\n\n new_positions = get_all_positions(board, False)\n for i in range(len(new_positions)):\n new_positions[i] = item[1] + [new_positions[i]]\n\n # Get board back to init-state\n for i in reversed(range(len(item[1]))):\n move_on_own_board(item[1][i][1], item[1][i][0], board)\n board[item[1][i][1][0]][item[1][i][1][1]] = obj_list[i]\n\n return new_positions",
"def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves",
"def get_moves(self, board):\n self.available_moves = [move for move in board.legal_moves]"
] |
[
"0.7883645",
"0.767973",
"0.75464517",
"0.7480288",
"0.74703455",
"0.7433279",
"0.73575103",
"0.7273743",
"0.72572374",
"0.7243118",
"0.723341",
"0.71530664",
"0.7130875",
"0.71287894",
"0.7128543",
"0.7122341",
"0.7116005",
"0.7108686",
"0.70947456",
"0.7019704",
"0.7010493",
"0.69954205",
"0.69922435",
"0.6977541",
"0.6973667",
"0.6942037",
"0.6922102",
"0.6912996",
"0.68356663",
"0.6829062"
] |
0.84355676
|
0
|
Find discs that need to be flipped to update the board.
|
def find_flippable_disks(self, board, self_color, coords, delta):
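    # Walk outward from coords along delta, collecting opponent discs until a disc of
    # self_color closes the line; falls through (returns None) if the line never closes.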
found_opponent = False
flip_positions = []
for i in range(1, 8):
dr = coords[0] + i * delta[0]
        dc = coords[1] + i * delta[1]
if self.is_on_board(dr, dc):
if board[dr][dc] == self.EMPTY:
break
elif board[dr][dc] == self.get_opponent_color(self_color):
found_opponent = True
flip_positions.append((dr, dc))
elif board[dr][dc] == self_color:
if found_opponent:
return flip_positions
else:
break
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def flipper(pos, coul, state_board):\n\ttflips = []\n\tfor i in range(-1,2): # -1 to 1\n\t\tfor j in range(-1,2): #-1 to 1\n\t\t\tfor k in range(1,9): # 1 to 8\n\t\t\t\tif state_board[pos[0]+i*k][pos[1]+j*k] == 0 or state_board[pos[0]+i*k][pos[1]+j*k] == -1: # if the case is empty or out of bounds\n\t\t\t\t\tbreak;\n\t\t\t\telif state_board[pos[0]+i*k][pos[1]+j*k] == coul: # if it is the same color\n\t\t\t\t\tif k > 1: # if it is not directly next to pos\n\t\t\t\t\t\tfor h in range(1,k): # add all the pieces in between to tflips\n\t\t\t\t\t\t\tif not [pos[0]+i*h,pos[1]+j*h] in tflips: #get rid of duplicates\n\t\t\t\t\t\t\t\ttflips.append([pos[0]+i*h,pos[1]+j*h])\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak;\n\treturn tflips",
"def get_winning_discs(self):\r\n for i in range(6):\r\n for j in range(7):\r\n if self.board[i][j] == 'O':\r\n continue\r\n if self.is_horizontal_four(i, j):\r\n return [(i, x) for x in range(j, j + 4)]\r\n if self.is_vertical_four(i, j):\r\n return [(x, j) for x in range(i, i - 4, -1)]\r\n if self.is_neg_diagonal_four(i, j):\r\n return [(x, y) for x, y in zip(\r\n range(i, i + 4), range(j, j - 4, -1))]\r\n if self.is_pos_diagonal_four(i, j):\r\n return [(x, y)\r\n for x, y in zip(range(i, i + 4), range(j, j + 4))]",
"def _label_flips(self, i_row, i_col, direction):\n vert_move, hori_move = i_row, i_col #Initially start at the opposing cell\n candidates = []\n\n #Perhaps could have done if 0 > vert_move > num_rows and 0 > hori_move > num_cols instead!\n while ((self._board[vert_move][hori_move] != self._turn) and not #This can be True in one of two ways! \n self._is_dead_end(vert_move, hori_move, direction) and # think: \"W\" or \" \"\n self._board[vert_move][hori_move] != \" \"):\n candidates.append((vert_move, hori_move))\n if direction == \"n\":\n vert_move += 1\n elif direction == \"ne\":\n hori_move -= 1\n vert_move += 1\n elif direction == \"e\":\n hori_move -= 1\n elif direction == \"se\":\n hori_move -= 1\n vert_move -= 1\n elif direction == \"s\":\n vert_move -= 1\n elif direction == \"sw\":\n hori_move += 1\n vert_move -= 1\n elif direction == \"w\":\n hori_move += 1\n elif direction == \"nw\":\n hori_move += 1\n vert_move += 1\n #Watch out, index can go out of range after several iterations\n #of the loop body, not just once you enter the loop!!!\n\n ending_cell = self._board[vert_move][hori_move] \n if ending_cell == self._turn: #If the ending cell is same color, then flip can be done.\n return (True, candidates)\n else:\n return (False, [])",
"def _flip(self,update_stack,index):\n cell=game.get_cell(index)\n if cell.ifFlipped()==False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine()==False and cell.get_neighbor()>0:\n update_stack[str(index)]=cell.get_neighbor()\n return\n elif cell.isMine()==False and cell.get_neighbor()==0:\n update_stack[str(index)]=cell.get_neighbor()\n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n self._flip(update_stack,s[0]*self._col_num+s[1])",
"def hidden_round(self):\n self.change = False\n for row in range(self.board_size):\n hidden = self.find_singles(self.possibles[row])\n hidden = [[num, (row, pos)] for num, pos in hidden]\n if hidden:\n self.set_hidden(hidden)\n for col in range(self.board_size):\n hidden = self.find_singles([self.possibles[row][col] for row in range(self.board_size)])\n hidden = [[num, (pos, col)] for num, pos in hidden]\n if hidden:\n self.set_hidden(hidden)\n for index in range(self.board_size):\n squ = self.squares[index]\n hidden = self.find_singles([self.possibles[cell[0]][cell[1]] for cell in squ])\n hidden = [[num, squ[pos]] for num, pos in hidden]\n if hidden:\n self.set_hidden(hidden)",
"def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves",
"def make_flips(self, move, player, board, direction):\r\n curr = move + direction\r\n opp = self.opponent(player)\r\n while(board[curr]==opp):\r\n board[curr] = player\r\n curr += direction\r\n #return board\r",
"def _discover_move(self, origin, direction):\n x, y = origin\n color = self[x][y]\n flips = []\n\n for x, y in OthelloBoard._increment_move(origin, direction, self.n):\n if self[x][y] == 0:\n if flips:\n # print(\"Found\", x,y)\n return (x, y)\n else:\n return None\n elif self[x][y] == color:\n return None\n elif self[x][y] == -color:\n # print(\"Flip\",x,y)\n flips.append((x, y))",
"def _get_death_zone_changes(self) -> Tuple[List[Square], List[Square]]:\r\n\r\n eliminated_squares: List[Square] = []\r\n new_corners: List[Square] = []\r\n\r\n original_corners: Dict[str, Square] = self._get_corner_squares()\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._TOP_LEFT].pos,\r\n original_corners[Board._TOP_RIGHT].pos,\r\n offset = Pos2D(0, 1)))\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._TOP_RIGHT].pos,\r\n original_corners[Board._BOTTOM_RIGHT].pos,\r\n offset = Pos2D(1, 1)))\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._BOTTOM_LEFT].pos,\r\n original_corners[Board._BOTTOM_RIGHT].pos,\r\n offset = Pos2D(0, 1)))\r\n # TODO Consider that this means that top left will be in eliminated_squares TWICE due to the first argument\r\n # to _select_squares always being inclusive.\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._TOP_LEFT].pos,\r\n original_corners[Board._BOTTOM_LEFT].pos,\r\n offset=Pos2D(1, 0)))\r\n\r\n new_corners.append(\r\n self.squares[original_corners[Board._TOP_LEFT].pos\r\n + Pos2D(1, 1)])\r\n new_corners.append(\r\n self.squares[original_corners[Board._BOTTOM_LEFT].pos\r\n + Pos2D(1, -1)])\r\n new_corners.append(\r\n self.squares[original_corners[Board._BOTTOM_RIGHT].pos\r\n + Pos2D(-1, -1)])\r\n new_corners.append(\r\n self.squares[original_corners[Board._TOP_RIGHT].pos\r\n + Pos2D(-1, 1)])\r\n\r\n\r\n\r\n return (eliminated_squares, new_corners)",
"def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]",
"def test_flip_piece():\n board = Board(640, 640, 8)\n board.start_game()\n board.gm.flip_pieces = [(3, 3)]\n current_color = board.game_pieces[3][3].color\n board.flip_pieces()\n assert board.game_pieces[3][3].color != current_color\n \n board.gm.flip_pieces = [(3, 4)]\n current_color = board.game_pieces[3][4].color\n board.flip_pieces()\n assert board.game_pieces[3][4].color != current_color",
"def inneReverse(board):\r\n\theight = len(board)\r\n\twidth = len(board[0])\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif board[row][col] == 1:\r\n\t\t\t\tboard[row][col]=0\r\n\t\t\telif board[row][col] == 0:\r\n\t\t\t\tboard[row][col]=1\r\n\treturn board",
"def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves",
"def flankingDirections(self, col, row, playerColor):\n flankingDirections = []\n for direction in self.directions:\n try:\n adjacent = direction(self, col, row)\n if adjacent.color != playerColor and adjacent.color != \"empty\":\n while True:\n colNext = adjacent.col\n rowNext = adjacent.row\n adjacent = direction(self, colNext, rowNext)\n if adjacent.color == playerColor: # successfully flanked opposing piece\n flankingDirections.append(direction)\n break\n if adjacent.color == \"empty\":\n break\n else:\n continue\n except offBoardError:\n continue\n return flankingDirections",
"def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()",
"def computeSide(self):\n side = 0\n for c in 'abcdefgh':\n side += self.getPieceType(c,1)\n side += self.getPieceType(c,2)\n side -= self.getPieceType(c,7)\n side -= self.getPieceType(c,8) \n rospy.loginfo(\"Computed side value of: %d\" % side)\n if side > 0:\n self.side = self.WHITE # good to go\n else:\n self.side = self.BLACK \n # need to setup board \n temp_board = BoardState(self.side) \n for i in range(8):\n temp_board.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, self.getPiece(7-i, 7)) )\n temp_board.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, self.getPiece(7-i, 2)) )\n\n temp_board.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('h',8)) )\n temp_board.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('g',8)))\n temp_board.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('f',8)))\n temp_board.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, self.getPiece('e',8)))\n temp_board.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, self.getPiece('d',8)))\n temp_board.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('c',8)))\n temp_board.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('b',8)))\n temp_board.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('a',8)))\n\n temp_board.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('h',1)) )\n temp_board.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('g',1)) )\n temp_board.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('f',1)) )\n temp_board.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, self.getPiece('e',1)) )\n temp_board.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, self.getPiece('d',1)) )\n temp_board.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('c',1)) )\n temp_board.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('b',1)) )\n temp_board.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('a',1)) ) \n\n self.values = temp_board.values\n self.printBoard()\n\n self.last_move = \"go\"",
"def __update_col_facedown(self, col):\n all_is_facedown = True\n # Loop all card in column\n for card in self.solitaire[col]:\n # If no card is represented\n if card == 0:\n break\n # If at least 1 card is faceup we return from method\n if not card.is_facedown:\n all_is_facedown = False\n return\n # If all card is facedown, we can flip and reviel a new card\n # so we have one less card facing down in that column\n if all_is_facedown:\n if self.col_facedown[col] > 0:\n print(f\"Vend kort i kolonne {col}\")\n self.col_facedown[col] -= 1",
"def _get_flips(self, origin, direction, color):\n #initialize variables\n flips = [origin]\n\n for x, y in OthelloBoard._increment_move(origin, direction, self.n):\n #print(x,y)\n if self[x][y] == 0:\n return []\n if self[x][y] == -color:\n flips.append((x, y))\n elif self[x][y] == color and len(flips) > 0:\n #print(flips)\n return flips\n\n return []",
"def get_possibles_moves(board: numpy.ndarray) -> List[Move]:\n return [tuple(k) for k in numpy.argwhere(board == -1) if 0 != k[0] != board.shape[0] - 1 != k[1] != 0]",
"def inverted_board(self):\r\n invert_board = []\r\n for line_index in range(len(self.board) - 1, -1,\r\n -1): # For each number\r\n # (descending) from the max index line\r\n # of the matrix (len-1) to 0 (included)\r\n\r\n invert_board.append(self.board[line_index]) #\r\n return invert_board",
"def _flip_dirs(self, adj_opp_cells):\n lst = []\n for cell in adj_opp_cells:\n lst.append(self._label_flips(cell[0], cell[1], cell[2]))\n\n #print(\"FOR TESTING: lst: \", lst) #FOR TESTING\n lst2 = []\n for e in lst: #lst has elements of the form (boolean, list)\n if e[0] == True:\n lst2.append(e)\n\n if lst2 == []:\n return (False, lst2)\n else:\n lst3 = []\n for e in lst2:\n for t in e[1]:\n lst3.append(t)\n return (True, lst3)",
"def flip_cards(self):\n for card_ in self.cards:\n card_.flip()",
"def update_board(self, board, self_color, coords):\r\n delta = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]\r\n\r\n updated_board = [row[:] for row in board]\r\n updated_board[coords[0]][coords[1]] = self_color\r\n\r\n flip_positions = []\r\n for i in range(0, 8):\r\n flip_positions = self.find_flippable_disks(board, self_color, coords, delta[i])\r\n if flip_positions is not None:\r\n for flip_r, flip_c in flip_positions:\r\n updated_board[flip_r][flip_c] = self_color\r\n return updated_board",
"def _flip(self, i_row, i_col, flip_lst):\n self._board[i_row][i_col] = self._turn\n for cell in flip_lst:\n self._board[cell[0]][cell[1]] = self._turn",
"def flip_around(self, row: int, col: int) -> None:\n # we treat the 8 adjacent cases :\n neighbours = np.array([[-1, -1], [-1, 0], [-1, 1],\n [0, -1], [0, 1],\n [1, -1], [1, 0], [1, 1]], dtype=int)\n for dr, dc in neighbours:\n row_neighbour = row + dr\n col_neighbour = col + dc\n if not self.is_in_the_grid(row_neighbour, col_neighbour):\n continue\n self.flip(row_neighbour, col_neighbour)",
"def check_inverted_diagonal(self, has_player_got_4, board, pos, player_no):\n for i in range(1, 4):\n if (self.height // 80 > pos[1] - i >= 0) and pos[0] - i >= 0:\n if board[(pos[0] - i, pos[1] - i)] == player_no:\n has_player_got_4.add((pos[0] - i, pos[1] - i))\n print(\"Added top-left: \" + str((pos[0] - i, pos[1] - i)))\n else:\n break\n for i in range(1, 4):\n if (self.height // 80 > pos[1] + i >= 0) and pos[0] + i < self.width // 80:\n if board[(pos[0] + i, pos[1] + i)] == player_no:\n has_player_got_4.add((pos[0] + i, pos[1] + i))\n print(\"Added bottom-right: \" + str((pos[0] + i, pos[1] + i)))\n else:\n break",
"def legalMoves( self, row, col):\n moves = []\n if(row != 0 and self.board[row - 1][col] == 0):\n moves.append(0)\n if(col != self.n - 1 and self.board[row][col + 1] == 0):\n moves.append(2)\n if(row != self.n - 1 and self.board[row + 1][col] == 0):\n moves.append(4)\n if(col != 0 and self.board[row][col - 1] == 0):\n moves.append(6)\n \n if (row + col) % 2 == 0: # can follow the cross\n if (row != 0 and col != 0 and self.board[row - 1][col - 1] == 0):\n moves.append(7)\n if (row != 0 and col != self.n - 1 and self.board[row - 1][col + 1] == 0):\n moves.append(1)\n if (row != self.n - 1 and col != self.n - 1 and self.board[row + 1][col + 1] == 0):\n moves.append(3)\n if (row != self.n - 1 and col != 0 and self.board[row + 1][col - 1] == 0):\n moves.append(5)\n\n return moves",
"def fn(i, seen):\n if i == n: return ans.append([\"\".join(x) for x in board])\n for j in range(n): \n pos = {(\"col\", j), (\"diag\", i-j), (\"anti\", i+j)}\n if not pos & seen: \n board[i][j] = \"Q\"\n seen |= pos\n fn(i+1, seen)\n board[i][j] = \".\"\n seen -= pos",
"def getBoardCards(self):\n return list(map(lambda x:x&63, self.board_cards))",
"def find_disconnected_voxels(im, conn=None):\n if im.ndim == 2:\n if conn == 4:\n strel = disk(1)\n elif conn in [None, 8]:\n strel = square(3)\n elif im.ndim == 3:\n if conn == 6:\n strel = ball(1)\n elif conn in [None, 26]:\n strel = cube(3)\n labels, N = spim.label(input=im, structure=strel)\n holes = clear_border(labels=labels) > 0\n return holes"
] |
[
"0.6389612",
"0.6263736",
"0.61926514",
"0.56448823",
"0.5501057",
"0.549984",
"0.54695046",
"0.54583836",
"0.5436001",
"0.5432135",
"0.53606504",
"0.53226405",
"0.5284864",
"0.5280242",
"0.5267073",
"0.525425",
"0.5233514",
"0.5232512",
"0.52298003",
"0.5218444",
"0.51716125",
"0.51483005",
"0.51427263",
"0.5120893",
"0.51057184",
"0.5103367",
"0.50979996",
"0.50568366",
"0.505109",
"0.5041603"
] |
0.6420867
|
0
|
Returns a rough approximation of the number of stable disks self_color has in a particular corner.
|
def get_stable_disks(self, board, self_color, corner_coords):
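    # Step row by row away from the given corner, counting contiguous own-color discs
    # along each row and tightening the column bound to keep a roughly diagonal region.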
step_row = 1 if corner_coords[0] == 0 else -1
step_col = 1 if corner_coords[1] == 0 else -1
bound_row = abs(corner_coords[0] - 7)
bound_col = abs(corner_coords[1] - 7)
cur_row = corner_coords[0]
cur_col = corner_coords[1]
stable_disks = 0
while cur_row != bound_row:
cur_col = corner_coords[1]
while cur_col != bound_col:
if board[cur_row][cur_col] == self_color:
stable_disks += 1
else:
break
cur_col += step_col
# Move bound_col down so we get a diagonally shaped edge
# Whatever is out of new bound cannot be stable by definition
if (cur_col > 0 and step_col == -1) or (cur_col < 7 and step_col == 1):
bound_col = cur_col - 1
if bound_col < 0 or bound_col > 7:
break
cur_row += step_row
return stable_disks
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_disk_count(self, self_color, board):\r\n count = 0\r\n for r in range(8):\r\n for c in range(8):\r\n if board[r][c] == self_color:\r\n count += 1\r\n return count",
"def split_necessity(self):\n return max(self._color_var_rel) * self.n_pix\n # return reduce(int.__mul__, (l-u for u,l in self.bounds)) * self.n_pix",
"def calc_thickness(self):\n s = \"::: calculating z-varying thickness :::\"\n print_text(s, cls=self)\n #H = project(self.S - self.x[2], self.Q, annotate=False)\n H = self.vert_integrate(Constant(1.0), d='down')\n Hv = H.vector()\n Hv[Hv < 0] = 0.0\n print_min_max(H, 'H', cls=self)\n return H",
"def _cal_meaningful_corners(self):\n corners = np.where(self._free_of_clash)\n corners = np.array(corners, dtype=int)\n corners = corners.transpose()\n return corners",
"def critical_thickness(self):\n horizontal_tail_thickness = sorted(self.stabilizer_h.solid.faces, key=lambda f: f.cog.y)[-1].bbox.height\n vertical_tail_thickness = sorted(self.stabilizer_vright.solid.faces, key=lambda f: f.cog.z)[0].bbox.length\n if horizontal_tail_thickness >= vertical_tail_thickness:\n critical_thickness = horizontal_tail_thickness\n else:\n critical_thickness = vertical_tail_thickness\n return critical_thickness",
"def get_self_self(self):\n self.self_self = np.zeros(self.n_t)\n for i, tri in enumerate(self.tri_save):\n C_types = self.c_types[tri]\n C_self_self = (C_types-np.roll(C_types,1,axis=1))==0\n self.self_self[i] = np.sum(C_self_self)/C_self_self.size",
"def evaluate(self, board):\r\n\r\n self_moves = self.find_possible_moves(board, self.my_color)\r\n opponent_moves = self.find_possible_moves(board, self.opponent_color)\r\n\r\n mobility = 0 # Mobility captures Self's profit in amount of available moves\r\n disk_parity = 0 # Disk parity captures Self's profit in raw disk amount\r\n corners = 0 # Corners captures Self's profit in occupied corners\r\n corner_proximity = 0 # Corner proximity captures the risk of giving away a free corner\r\n stability = 0 # Stability captures Self's profit in unflippable disks\r\n\r\n # Calculating mobility heuristic\r\n self_immediate_mobility = len(self_moves)\r\n opponent_immediate_mobility = len(opponent_moves)\r\n\r\n if self_immediate_mobility + opponent_immediate_mobility != 0:\r\n mobility = 100 * (self_immediate_mobility - opponent_immediate_mobility) / (self_immediate_mobility + opponent_immediate_mobility)\r\n\r\n # Calculate disk parity heuristic\r\n self_disks = self.get_disk_count(self.my_color, board)\r\n opponent_disks = self.get_disk_count(self.opponent_color, board)\r\n\r\n disk_parity = 100 * (self_disks - opponent_disks) / (self_disks + opponent_disks)\r\n\r\n # Calculating corner heuristic\r\n corners_list = [(0,0), (0,7), (7,0), (7,7)]\r\n self_corners = 0\r\n opponent_corners = 0\r\n\r\n for corner in corners_list:\r\n if board[corner[0]][corner[1]] == self.my_color:\r\n self_corners += 1\r\n if board[corner[0]][corner[1]] == self.opponent_color:\r\n opponent_corners += 1\r\n\r\n if self_corners + opponent_corners != 0:\r\n corners = 100 * (self_corners - opponent_corners) / (self_corners + opponent_corners)\r\n\r\n # Calculating corner proximity heuristic\r\n corners_proximity_list = [(0, 1), (1, 0), (1, 1), (0, 6), (1, 6), (1, 7), (6, 0), (6, 1), (7, 1), (6, 6), (7, 6), (6, 7)]\r\n self_corner_proximity = 0\r\n opponent_corner_proximity = 0\r\n\r\n for cell in corners_proximity_list:\r\n if board[cell[0]][cell[1]] == self.my_color:\r\n self_corner_proximity += 1\r\n if board[cell[0]][cell[1]] == self.opponent_color:\r\n opponent_corner_proximity += 1\r\n\r\n if self_corner_proximity + opponent_corner_proximity != 0:\r\n corner_proximity = 100 * (self_corner_proximity - opponent_corner_proximity) / (self_corner_proximity + opponent_corner_proximity)\r\n\r\n # Calculating stability heuristic\r\n self_stability = self.get_stable_disks(board, self.my_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 7))\r\n\r\n opponent_stability = self.get_stable_disks(board, self.opponent_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 7))\r\n\r\n if self_stability + opponent_stability != 0:\r\n stability = 100 * (self_stability - opponent_stability) / (self_stability + opponent_stability)\r\n\r\n # Calculating the final value\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n # In early-game, focus on maximal mobility and stability. 
Avoid amassing too many disks.\r\n if disk_total < 15:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 30 * mobility + \\\r\n 30 * stability\r\n\r\n # In mid-game, focus on capturing corners and further building stability\r\n elif disk_total < 45:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 20 * mobility + \\\r\n 35 * stability\r\n\r\n # In late-game, focus on getting as many discs as possible\r\n else:\r\n heuristic_value = 30 * corners + \\\r\n 15 * mobility + \\\r\n 30 * stability + \\\r\n 35 * disk_parity\r\n\r\n return heuristic_value",
"def _undiscovered_blob_size(self, pos: Tuple[int, int],\r\n board: List[List[Tuple[int, int, int]]],\r\n visited: List[List[int]]) -> int:\r\n board_size = len(board)\r\n if pos[0] < 0 or pos[0] >= board_size \\\r\n or pos[1] < 0 or pos[1] >= board_size:\r\n return 0\r\n column = pos[0]\r\n row = pos[1]\r\n if not board[column][row] == self.colour:\r\n visited[column][row] = 0\r\n return 0\r\n score = 1\r\n visited[column][row] = 1\r\n # upper cell\r\n if row - 1 >= 0:\r\n if visited[column][row - 1] == -1:\r\n score += self._undiscovered_blob_size((column, row - 1),\r\n board, visited)\r\n # lower cell\r\n if row + 1 <= board_size - 1:\r\n if visited[column][row + 1] == -1:\r\n score += self._undiscovered_blob_size((column, row + 1),\r\n board, visited)\r\n # left cell\r\n if column - 1 >= 0:\r\n if visited[column - 1][row] == -1:\r\n score += self._undiscovered_blob_size((column - 1, row),\r\n board, visited)\r\n if column + 1 <= board_size - 1:\r\n if visited[column + 1][row] == -1:\r\n score += self._undiscovered_blob_size((column + 1, row),\r\n board, visited)\r\n return score",
"def value(self):\n black, white = 0, 0\n for sq in Othello.squares():\n piece = self.__board[sq]\n if piece == BLACK: black += 1\n elif piece == WHITE: white += 1\n if black == white:\n return 0.5\n elif black > white:\n return 1\n else:\n return 0",
"def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0",
"def current_knc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(knc_cov_dict[layer.name])\n total = total + np.size(knc_cov_dict[layer.name])\n return covered / float(total)",
"def corner_score(self, size: int,\n board: List[List[Tuple[int, int, int]]]) -> int:\n score = 0\n bound = size - 1\n corners = [(0, 0), (0, bound), (bound, 0), (bound, bound)]\n for corner in corners:\n if board[corner[0]][corner[1]] == self.colour:\n score += 1\n return score",
"def _undiscovered_blob_size(self, pos: Tuple[int, int],\n board: List[List[Tuple[int, int, int]]],\n visited: List[List[int]]) -> int:\n x_0 = pos[0]\n y_0 = pos[1]\n if x_0 > len(board) - 1 or y_0 > len(board) - 1:\n return 0\n if visited[x_0][y_0] != -1:\n return 0\n if board[x_0][y_0] != self.colour:\n visited[x_0][y_0] = 0\n return 0\n else:\n visited[x_0][y_0] = 1\n size0 = self._undiscovered_blob_size((x_0, y_0 + 1), board, visited)\n size1 = self._undiscovered_blob_size((x_0, y_0 - 1), board, visited)\n size2 = self._undiscovered_blob_size((x_0 + 1, y_0), board, visited)\n size3 = self._undiscovered_blob_size((x_0 - 1, y_0), board, visited)\n return size0 + size1 + size2 + size3 + 1",
"def _undiscovered_blob_size(self, pos: Tuple[int, int],\r\n board: List[List[Tuple[int, int, int]]],\r\n visited: List[List[int]]) -> int:\r\n count = len(board)\r\n\r\n if pos[0] >= count or pos[1] >= count:\r\n return 0\r\n\r\n if visited[pos[0]][pos[1]] == 1 or visited[pos[0]][pos[1]] == 0:\r\n return 0\r\n\r\n if board[pos[0]][pos[1]] is not self.colour:\r\n visited[pos[0]][pos[1]] = 0\r\n return 0\r\n\r\n visited[pos[0]][pos[1]] = 1\r\n\r\n top = self._undiscovered_blob_size((pos[0], pos[1] - 1),\\\r\n board, visited)\r\n\r\n bottom = self._undiscovered_blob_size((pos[0], pos[1] + 1),\\\r\n board, visited)\r\n\r\n right = self._undiscovered_blob_size((pos[0] + 1, pos[1]),\\\r\n board, visited)\r\n\r\n left = self._undiscovered_blob_size((pos[0] - 1, pos[1]),\\\r\n board, visited)\r\n\r\n total = 1 + top + bottom + right + left\r\n return total",
"def _estimateDepth(self, size, neighbourRadius):\n neighbourRadius *= 1.5\n for i in xrange(100):\n j = 2**i\n spacings = [c/j for c in size]\n maxSpace = max(spacings)\n if maxSpace < neighbourRadius:\n return i+1",
"def _undiscovered_blob_size(self, pos: Tuple[int, int],\r\n board: List[List[Tuple[int, int, int]]],\r\n visited: List[List[int]]) -> int:\r\n col = pos[1]\r\n row = pos[0]\r\n blob_size = 0\r\n\r\n if col >= len(board) or col < 0 or row >= len(board[0]) or row < 0:\r\n return 0\r\n else:\r\n if board[col][row] == self.colour and visited[col][row] == -1:\r\n blob_size += 1\r\n visited[col][row] = 1\r\n blob_size += self._undiscovered_blob_size((row, col+1),\r\n board, visited)\r\n blob_size += self._undiscovered_blob_size((row, col - 1),\r\n board, visited)\r\n blob_size += self._undiscovered_blob_size((row + 1, col),\r\n board, visited)\r\n blob_size += self._undiscovered_blob_size((row - 1, col),\r\n board, visited)\r\n elif board[col][row] != self.colour:\r\n if visited[col][row] == -1:\r\n visited[col][row] = 0\r\n return blob_size",
"def get_freecyls(self):\n lastcyl = 0\n for p in self.parts:\n if p[2] > lastcyl:\n lastcyl = p[2]\n return self.drvcyls - lastcyl - 1 # cylinder numbers start at 0",
"def GetHiCorner(self):\n ...",
"def corners(self, board):\n # Calculating already captured corners\n computer_corners = 0\n computer_corners = computer_corners + 1 if board[0][0] == self.computer_num else computer_corners\n computer_corners = computer_corners + 1 if board[0][\n self.board_size - 1] == self.computer_num else computer_corners\n computer_corners = computer_corners + 1 if board[self.board_size - 1][\n 0] == self.computer_num else computer_corners\n computer_corners = computer_corners + 1 if board[self.board_size - 1][\n self.board_size - 1] == self.computer_num else computer_corners\n\n opponent_corners = 0\n opponent_corners = opponent_corners + 1 if board[0][0] == self.opponent_num else opponent_corners\n opponent_corners = opponent_corners + 1 if board[0][\n self.board_size - 1] == self.opponent_num else opponent_corners\n opponent_corners = opponent_corners + 1 if board[self.board_size - 1][\n 0] == self.opponent_num else opponent_corners\n opponent_corners = opponent_corners + 1 if board[self.board_size - 1][\n self.board_size - 1] == self.opponent_num else opponent_corners\n\n # Calculating potential corners\n valid_moves_computer = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n computer_potential_corner = 0\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[0][\n 0] == 1 else computer_potential_corner\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[0][\n self.board_size - 1] == 1 else computer_potential_corner\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[self.board_size - 1][\n 0] == 1 else computer_potential_corner\n computer_potential_corner = computer_potential_corner + 1 if valid_moves_computer[self.board_size - 1][\n self.board_size - 1] == 1 else computer_potential_corner\n\n valid_moves_opponent = self.game.find_valid_moves(self.opponent_color, board, self.board_size)\n opponent_potential_corner = 0\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[0][\n 0] == 1 else opponent_potential_corner\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[0][\n self.board_size - 1] == 1 else opponent_potential_corner\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[self.board_size - 1][\n 0] == 1 else opponent_potential_corner\n opponent_potential_corner = opponent_potential_corner + 1 if valid_moves_opponent[self.board_size - 1][\n self.board_size - 1] == 1 else opponent_potential_corner\n\n # Calculating potential corners for both players\n valid_moves = valid_moves_opponent + valid_moves_computer\n common_potential_corner = 0\n common_potential_corner = common_potential_corner + 1 if valid_moves[0][\n 0] == 2 else common_potential_corner\n common_potential_corner = common_potential_corner + 1 if valid_moves[0][\n self.board_size - 1] == 1 else common_potential_corner\n common_potential_corner = common_potential_corner + 1 if valid_moves[self.board_size - 1][\n 0] == 2 else common_potential_corner\n common_potential_corner = common_potential_corner + 1 if valid_moves[self.board_size - 1][\n self.board_size - 1] == 2 else common_potential_corner\n computer_potential_corner -= common_potential_corner\n opponent_potential_corner -= common_potential_corner\n\n numerator = computer_corners + computer_potential_corner - common_potential_corner - opponent_corners - opponent_potential_corner\n denominator = computer_corners + computer_potential_corner + common_potential_corner + 
opponent_corners \\\n + opponent_potential_corner\n if denominator == 0:\n return 0\n return 100 * numerator / denominator",
"def __len__(self):\n return self.info.NumRings()",
"def getClearWaterDepth(inp):\n\ty90 = getY90(inp)\n\tinp = sorted(inp, key = lambda x: x[0])\n\ts = 0\n\tif inp[0][0] > 0:\n\t\ts += (1-inp[0][1]/2.0) * inp[0][0]\n\tfor i in xrange(1,len(inp)):\n\t\tprev = inp[i-1]\n\t\tcur = inp[i]\n\t\tif cur[0] > y90:\n\t\t\ttop = 0.9\n\t\t\tbase = prev[1]\n\t\t\theight = y90 - prev[0]\n\t\t\ts += (1 - (top + base)/2) * height\n\t\t\tbreak\n\t\tbase = prev[1]\n\t\ttop = cur[1]\n\t\theight = cur[0] - prev[0]\n\t\ts += (1 - (top + base)/2) * height\n\t\t\n\treturn s",
"def num_cells_down(self):\n if self.dim == 0:\n return None\n if hasattr(self, '__num_cells_down__'):\n return self.__num_cells_down__\n if self.lower_index is None:\n return 0\n raise ValueError('Cannot infer the number of cells in the cochain below.')",
"def visual_len(self) -> int:\n return visual_len(self)",
"def __len__(self):\r\n return int(np.ceil(len(self.pathways) / float(self.batch_size)))",
"def on_the_edge_without_neighbors(self, board, color):\n disks_on_the_edge_without_neighbors = 0\n disks_on_the_edge = self.get_on_edge(board, color)\n for disk_on_edge in disks_on_the_edge:\n if not self.get_opposite_neighbors_on_edge(board, disk_on_edge):\n disks_on_the_edge_without_neighbors += 1\n return disks_on_the_edge_without_neighbors",
"def bottom_left_tile_value(self):\n\t\treturn self.expected_cols * (self.expected_rows - 1) + 1",
"def penalize_corners_heuristic(game, player):\n if game.is_loser(player):\n return float('-inf')\n\n if game.is_winner(player):\n return float('inf')\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n # Penalize player for moving to corner positions\n corner_weight = 2\n if is_curr_location_corner(game, game.get_player_location(player)):\n own_moves -= corner_weight\n\n return float(own_moves - opp_moves)",
"def get_num_explored(self):\n return self.__num_explored",
"def balanceFactor(self):\n leftHeight = self.left.height if self.left != None else 0\n rightHeight = self.right.height if self.right != None else 0\n return rightHeight - leftHeight",
"def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)"
] |
[
"0.68481916",
"0.5906089",
"0.5816553",
"0.5672052",
"0.5643364",
"0.5552818",
"0.55456066",
"0.55377346",
"0.55131733",
"0.5477361",
"0.5455857",
"0.54394853",
"0.5436614",
"0.54305196",
"0.5416301",
"0.53964823",
"0.5382843",
"0.5376668",
"0.5370833",
"0.5350355",
"0.530487",
"0.5282394",
"0.5279146",
"0.5274775",
"0.5272129",
"0.5267037",
"0.5264655",
"0.52608883",
"0.52512985",
"0.52473736"
] |
0.72993684
|
0
|
Prints the information about control keys to the command line.
|
def print_controls(self):
print("TAKEOFF: {}".format(self._control_keys(COMMAND_TAKEOFF)))
print("LAND: {}".format(self._control_keys(COMMAND_LAND)))
print("EMERGENCY: {}".format(self._control_keys(COMMAND_EMERGENCY)))
print("HOVER: {}".format(self._control_keys(COMMAND_HOVER)))
print()
print("LEFT: {}".format(self._control_keys(COMMAND_FLY_LEFT)))
print("RIGHT: {}".format(self._control_keys(COMMAND_FLY_RIGHT)))
print("FORWARD: {}".format(self._control_keys(COMMAND_FLY_FORWARD)))
print("BACKWARD: {}".format(self._control_keys(COMMAND_FLY_BACKWARD)))
print()
print("UP: {}".format(self._control_keys(COMMAND_FLY_UP)))
print("DOWN: {}".format(self._control_keys(COMMAND_FLY_DOWN)))
print()
print("ROT. CLOCKWISE: {}".format(self._control_keys(COMMAND_ROTATE_CLOCKWISE)))
print("ROT. C.CLOCKWISE: {}".format(self._control_keys(COMMAND_ROTATE_COUNTERCLOCKWISE)))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_controls(self):\n\n print('\\nControls: ')\n print(' ENTER - begin playback of MPO pattern')\n print(' p - go to previous sample')\n print(' n - go to next sample')\n print(' SPACE - Mark index of desired sub-trajectory')\n print(' q - quit program and write results to training file')",
"def print_interact_help():\n print(\"Commands:\")\n print(\"\\tj - up\")\n print(\"\\tk - down\")\n print(\"\\t<Space> - switch Bought to BoughtX\")\n print(\"\\t<Enter> - send Enter to Quicken\")\n print(\"\\t<Escape> - quit\")",
"def print_command(self,command):\n print \"Command (%X): \" % command\n if (command & COMMAND_ENABLE) > 0:\n print \"\\tENABLE\"\n if (command & COMMAND_ENABLE_INTERRUPT) > 0:\n print \"\\tENABLE INTERRUPT\"",
"def show_commands(self):\n print(\n ''\n '\\n\\t' + bc.OKBLUE + 'COMMANDS:' + bc.ENDC +\n '\\n\\t' + '---------' +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'run', 'Run the script')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'runcom', 'Run program with specific arguments <runcom [ARGS]>')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'info', 'Information')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'help', 'Help')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'so', 'Show options')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'sa', 'Show module info')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'set', 'Set options, <set [PARAMETER] [VALUE]>')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'invoke', 'Invoke module')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'exit', 'Exit')) +\n '\\n'\n )",
"def print_menu_Tasks():\r\n print(\"\"\"\r\n Menu of Options\r\n 1) Add a new keyboard\r\n 2) Save Keyboards to File\r\n 3) Show current keyboard list\r\n 4) Exit Program\r\n \"\"\")",
"def _control_keys(self, command):\n return [key for key, comm in self.KEY_TO_COMMAND.items() if comm == command]",
"def drawMenu(self):\n try:\n for key in self.order_of_keys:\n print(\"\\r[key {:8}] : {}\".format(key, self.keybindings[key][self.KEY_DESCRIPTION]))\n except KeyError:\n print(\"Error: Keys found GoPiGo3WithKeyboard.order_of_keys don't match with those in GoPiGo3WithKeyboard.keybindings.\")",
"def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))",
"def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')",
"def ctrl_v():\n from pynput.keyboard import Key, Controller\n kb = Controller()\n kb.press(Key.ctrl)\n kb.press('v')\n kb.release('v')\n kb.release(Key.ctrl)",
"def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')",
"def show_key_options(json_dict, backtrack):\n print(\"Keys available:\")\n for key in json_dict:\n print(key, end=\" \"*5)\n key = input(\"\\nEnter next key: \")\n step_into(json_dict, key, backtrack)",
"def at_ctrl(seq, num):\n at(\"CTRL\", seq, [num, 0])",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))",
"def display_options(self):\n print()\n options = list(self.get_commands().values())\n options.sort(key=lambda op: int(op.name))\n\n for option in options:\n print(f'{\"%3d\" % int(option.name)}. {option.description}')",
"def printMenu():\n print (\"Calculator menu:\")\n print (\" + for adding a rational number\")\n print (\" c to clear the calculator\")\n print (\" u to undo the last operation\")\n print (\" x to close the calculator\")",
"def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n self._print_custom()\n\n # Display the options alphabetically.\n option_names = list(self.__options.keys())\n option_names.sort()\n for option in option_names:\n desc, command = self.__options[option]\n print(\"\\t%s: %s\" % (option, desc))",
"def help_opt(self):\n print(OPTIONS)",
"def instructions():\n print(\n \"\"\"\n TURNING ON TELEVISION\n\n Use your keypad to interact with the television:\n 1. Enter a number to change change\n 2. Enter \"up\" or \"down\" to adjust volume\n 3. Enter \"off\" to turn on television\n\n \"\"\")",
"def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()",
"def print_menu():\r\n clear()\r\n print(\"Ratatouille Server\")\r\n print(\"---------------------------\")\r\n print(\"\")\r\n\r\n for (index, func) in MENU.items():\r\n print(\"%d - %s\" % (index, func.__name__))\r\n\r\n return raw_input(\"Choose an option: \").lstrip()",
"def on_press_show_key(key):\n print(f\"{key} pressed\")",
"def print_main_menu():\n print(\"\\nWelcome to the Zendesk Ticket Viewing System!\\nInstructions:\")\n print(\"~ Enter '1' to view all tickets\")\n print(\"~ Enter '2' to view a certain ticket\")\n print(\"~ Enter '3' to view these options again\")\n print(\"To exit the ticketing system enter 'quit'\")"
] |
[
"0.6690821",
"0.6412796",
"0.63261616",
"0.6192623",
"0.60906035",
"0.6070361",
"0.603995",
"0.5988708",
"0.595326",
"0.59040827",
"0.584231",
"0.5718754",
"0.56762505",
"0.56488466",
"0.56488466",
"0.56488466",
"0.56488466",
"0.5638558",
"0.5638558",
"0.5638558",
"0.5638558",
"0.5608107",
"0.558772",
"0.5581933",
"0.5572964",
"0.55627465",
"0.5558221",
"0.5537525",
"0.54913205",
"0.5475284"
] |
0.6862665
|
0
|
Returns a list of control keys for the given command.
|
def _control_keys(self, command):
return [key for key, comm in self.KEY_TO_COMMAND.items() if comm == command]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_commands(self, ctx):\n commands = self._iter_commands()\n return commands.keys()",
"def get_control_ids(self) -> List[str]:\n return self._control_dict.keys()",
"def list_commands(self, context):\n\t\treturn self.commands.keys()",
"def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()",
"def read_keys(self) -> list[KeyPress]:",
"def getCommandList(self):\n return self.commands.keys()",
"def get_command_names(self):\n return list(self.commands.keys())",
"def list_commands(self, ctx): # noqa\n return self.commands.keys()",
"def get_commands(self):\n\t\treturn list(self.command_handlers.keys())",
"def get_commands(self):\r\n return list(filter(None, self._commands.keys()))",
"def keys(self, pattern=\"*\"):\n return self._command(b'KEYS', pattern, handler=list_of_keys)",
"def do_list_commands(self):\n result = \"\\n\".join(self.commands.keys())\n return result, True",
"def hkeys(self, key):\n return self._command(b'HKEYS', key, handler=list_of_keys)",
"def control_name_list(self):\n return list(self._controls.keys())",
"def get_commands(self):\n return list(self.commands.values())",
"def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]",
"def terminal_commands(self):\n return OrderedDict([\n ('query_commands', (['hi', 'how', 'hello'], self._query_commands)),\n ('control_stop', (['stop'], self._control_stop)),\n ('control_pause', (['pause'], self._control_pause)),\n ('control_play', (['start', 'play'], self._control_play)),\n ('query_info', (['who', 'what'], self._query_info)),\n ('control_forward', (['skip', 'next'], self._control_skip)),\n\n ])",
"def commands(self) -> typing.List[str]:\n return self._values.get(\"commands\")",
"def get_keys(self):\n return [(['up', 'down', 'pg.up', 'pg.down'],\n 'navigate through the fields.'),\n (['esc'], 'backtrack to the previous pane or exit.'),\n (['F1', '?'], 'open this pane help.')]",
"def keys(self):\n return self._modes.keys()",
"def list_commands(self, ctx):\n commands = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n commands.append(filename[4:-3])\n commands.sort()\n return commands",
"def get_key_list(self) -> list:\n return self.key_functs.keys()",
"def get_commands(self):\r\n return self._commands",
"def get_keys(self):\r\n k_list = []\r\n try:\r\n for k in self.buttons:\r\n if self.buttons[k] != 0:\r\n k_list.append(k)\r\n return k_list\r\n except KeyError:\r\n pass\r\n return k_list",
"def list_commands(self, ctx):\n return self.daemon.list_actions()",
"def commands(self, *ignored):\n return [command.rsplit(\"_\").pop() for command in dir(self) if command.startswith(\"command_\")]",
"def extract_commands(self):\n # import pdb; pdb.set_trace()\n left_i = 0\n right_i = 1\n commands = {}\n cmd = self.cmd\n\n if not cmd:\n return\n while left_i < len(cmd):\n sub_cmd = cmd[left_i:right_i]\n if sub_cmd in self.action_list:\n arg_len, arguments = self.extract_command_arguments(right_i)\n commands[sub_cmd] = arguments\n left_i = right_i + arg_len\n right_i = left_i + 1\n else:\n left_i, right_i = self.update_i(left_i, right_i)\n return commands",
"def get_commands(self):\n return self._commands",
"def getCommands(self):\r\n return [z for x, y, z in self._log if x == 'command']",
"def binary_commands(self):\n return OrderedDict([\n ('control_union', (['or', 'and'], self._control_union)),\n ])"
] |
[
"0.6953709",
"0.6734895",
"0.6625436",
"0.6571796",
"0.6570962",
"0.6561314",
"0.65246344",
"0.65141964",
"0.6504601",
"0.6437386",
"0.6390944",
"0.62402505",
"0.6233641",
"0.6228321",
"0.6145984",
"0.61125696",
"0.6094259",
"0.6051538",
"0.5979562",
"0.5978968",
"0.5935132",
"0.5889179",
"0.58828247",
"0.5881513",
"0.585176",
"0.5814328",
"0.5814078",
"0.5802994",
"0.58021736",
"0.57818574"
] |
0.89397156
|
0
|
Reads raw corpus from `RAW_CORPUS_PATH`.
|
def get_raw_corpus():
with open(RAW_CORPUS_PATH, 'r') as f:
return f.read().splitlines()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_raw_text(self, raw_path: str = None):\n\n if raw_path.rsplit(\".\")[-1] == \"json\":\n self.import_from_json(raw_path)\n return\n\n if raw_path is not None:\n self.raw_path = raw_path\n\n if self.raw_path is None:\n raise Exception(\"Found no file to read\")\n\n file = open(raw_path, \"r\")\n raw = file.read()\n file.close()\n\n self.sentences += get_sentences(raw, self.cM.use_spacy)\n\n self.loaded(False)",
"def read_corpus_from_file(input_file): \n \n print ('reading corpus')\n file = open(input_file, 'r')\n corpus = file.read()\n return corpus",
"def readCorpus(file_path):\r\n if '.json' in file_path:\r\n return pd.read_json(file_path, lines=True)\r\n else:\r\n return pd.read_csv(file_path)",
"def read_single_raw_cloud(self, raw_cloud_path):\n raise NotImplementedError",
"def get_corpus():\n corpus_raw = []\n files = os.listdir()\n\n for name in files:\n if \".txt\" in name:\n try:\n file = open(name, \"rt\", encoding='utf8')\n data_org = file.read()\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .txt file. Please ensure that the text is UTF-8 encoded.\")\n elif \".docx\" in name:\n try:\n data_org = docx2txt.process(name)\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .docx file. Please ensure that the text is UTF-8 encoded.\")\n else:\n print(\"ERROR: Cannot print non .txt or .docx files. Please verify the input folder's contents.\")\n\n return corpus_raw",
"def read_raw(file_path):\n file = open(file_path, 'rb')\n content = file.read()\n file.close()\n return content",
"def read_corpus(category=\"crude\"):\n files = reuters.fileids(category)\n return [[START_TOKEN] + [w.lower() for w in list(reuters.words(f))] + [END_TOKEN] for f in files]",
"def fetch_corpous_from_file(filepath):\n f = open(filepath, 'r')\n corpus_text = f.read() \n corpus_sentence_list = corpus_text.lower().split('.')\n corpus_list_sent_processed = [remove_special_chars(item) for item in corpus_sentence_list if len(item)>1] \n return corpus_list_sent_processed",
"def read_corpus(dir):\n corpus = {}\n file_names = glob.glob(f\"{dir}/*\")\n for file_name in file_names:\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = \" \".join(open(file_name, \"rt\").readlines())\n text = text.replace(\"\\n \\n\", \" \")\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\" \", \" \")\n corpus[os.path.splitext(name)[0]] = text\n return corpus",
"def read_raw_from_file(fname):\n with open(fname) as fh:\n content = fh.read()\n return parse_raw_string(content)",
"def load_corpus():\n # Define directory structure\n parent_path = os.getcwd() + '/'\n corpus_path = parent_path + 'corpus_data/'\n corpus_name = corpus_path + 'train_corpus_vocab.pickle'\n # Load corpus vocabulary\n with open(corpus_name, 'rb') as handle:\n train_vocab = pickle.load(handle)\n return(corpus_path, train_vocab)",
"def load_raw_text():\n if not os.path.exists( os.path.join( DATA_HOME, RAW_TEXT_FILE ) ) or \\\n not os.path.exists( os.path.join( DATA_HOME, LABELS_FILE ) ):\n print( 'no prior files found. staring from scratch' )\n rev, rat = parse_json( os.path.join( DATA_HOME, JSON_FILE ) )\n y = np.array( rat )\n print( 'saving data to files' )\n pickle.dump( rev , open( os.path.join( DATA_HOME, RAW_TEXT_FILE ), 'wb' ) )\n pickle.dump( y , open( os.path.join( DATA_HOME, LABELS_FILE ), 'wb' ) )\n else:\n print( 'found raw text and labes. loading...' )\n rev = pickle.load( open( os.path.join( DATA_HOME, RAW_TEXT_FILE ), 'rb' ) )\n y = pickle.load( open( os.path.join( DATA_HOME, LABELS_FILE ), 'rb' ) )\n print( 'done' )\n \n return rev, y",
"def read_corpus(corpus_path):\n idx = 0\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, tag_, pos_, ner_ = [], [], [], []\n\n for line in lines:\n idx += 1\n if line.find(\"DOC-ID\") < 0 and line != '\\n':\n try:\n [char, label, pos_tag, ner_tag] = line.strip().split()\n sent_.append(char)\n tag_.append(label)\n pos_.append(pos_tag)\n ner_.append(ner_tag)\n except:\n print(line)\n else:\n # print(line)\n if idx > 1:\n data.append((sent_, tag_, pos_, ner_))\n sent_, tag_, pos_, ner_ = [], [], [], []\n\n return data",
"def read_file(path):\n # Mystery arguments:\n strictness = False\n # Read the string:\n return _iterate_bibtexsource(_bibtex.open_file(path, strictness))",
"def load_input(self, path):\n f = codecs.open(path, 'r', 'utf-8')\n raw_text = f.read()\n return raw_text",
"def load_corpus(fn):\n return corpora.svmlightcorpus.SvmLightCorpus(fn)",
"def test_read_raw_supported(fname):\n read_raw(fname)\n read_raw(fname, verbose=False)\n raw = read_raw(fname, preload=True)\n assert \"data loaded\" in str(raw)",
"def load_conll(path, exclude=False, file_encoding='utf-8'):\n corpus = []\n\n with open(path) as f:\n sent = []\n for line in f:\n es = line.rstrip().split()\n if len(es) > 1:\n word = es[0].decode(file_encoding).lower()\n# word = RE_NUM.sub(u'0', word)\n tag = es[1].decode(file_encoding)\n syn = es[2].decode(file_encoding)\n ne = es[3].decode(file_encoding) # you can ingore 1-3 for n2n SRL task, but we parse here just in case\n prd = es[4].decode(file_encoding)#Target\n prop = []\n\n if len(es) > 5:\n prop = es[5:]\n sent.append((word, tag, syn, ne, prd, prop))\n else:\n if exclude and (len(sent[0][5]) == 0 or len(sent) < 2):\n pass\n else:\n corpus.append(sent)\n sent = []\n\n if sent:\n corpus.append(sent)\n\n return corpus",
"def load_raw_data(path: str) -> pd.DataFrame:\n data = []\n with open(path) as file:\n for line in file:\n data.append(line)\n data_df = pd.DataFrame(data, columns = {'tweet'})\n return data_df",
"def read_file(self,fname):\n try:\n self.raw=spiketrain.read_file(fname)\n except Exception:\n self.raw=None\n raise",
"def open_file_path(self, file_path):\n\n\t\ttext = []\n\t\twith open(file_path, \"r\") as f:\n\t\t\tfor line in f:\n\t\t\t\ttext.append(line)\n\n\t\tself.raw = text",
"def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data",
"def load_data(path):\n input_file = os.path.join(path)\n with open(input_file, 'r', encoding='utf-8') as f:\n return f.read()",
"def read_corr_binned(lens_run, rand_run, bin_scheme):\n\n fname=get_corr_binned_file(lens_run, rand_run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)",
"def read_corpus(filename, word_index, max_l, pad=2, clean_string=False,\n textField=3):\n corpus = []\n with open(filename) as f:\n for line in f:\n fields = line.strip().split(\"\\t\")\n text = fields[textField]\n if clean_string:\n text_clean = clean_str(text)\n else:\n text_clean = text.lower()\n sent = get_idx_from_sent(text_clean, word_index, max_l, pad)\n corpus.append(sent)\n return np.array(corpus, dtype=\"int32\")",
"def read(path):",
"def read_raw_files(path):\n dfs = []\n try:\n for file in os.listdir(path):\n dfs.append(\n spark.read.format('com.github.saurfang.sas.spark') \\\n .load(file)\n )\n except Exception as e:\n logger.error('Failed to read raw SAS files...')\n logger.error(e)\n raise\n return concat_df(*ds)",
"def read_txt(path):\n \n with open(path, \"r\") as f:\n return f.read().splitlines()",
"def test_load_without_csv(self):\n\n corpus = Corpus(common.TEST_CORPUS_PATH)\n assert len(corpus) == 99\n assert isinstance(corpus.documents, list)\n assert corpus.name is None",
"def get_corpus_text(nr_files=199):\n fileids = nltk.corpus.treebank_raw.fileids()[:nr_files]\n corpus_text = nltk.corpus.treebank_raw.raw(fileids)\n corpus_text = corpus_text.replace(\".START\", \"\")\n return corpus_text"
] |
[
"0.6848885",
"0.6403135",
"0.63154155",
"0.610387",
"0.608671",
"0.5862478",
"0.5852928",
"0.562985",
"0.56262004",
"0.5601899",
"0.5561709",
"0.5552782",
"0.55422187",
"0.5523861",
"0.5523692",
"0.5389133",
"0.53259766",
"0.53013366",
"0.52998686",
"0.5299813",
"0.5295166",
"0.52789265",
"0.52494204",
"0.5244017",
"0.52212244",
"0.52159834",
"0.5190735",
"0.5178457",
"0.5161031",
"0.5152809"
] |
0.8086149
|
0
|
Reads raw datetimes from `RAW_DATETIMES_PATH`.
|
def get_raw_datetimes():
raw_datetimes = []
with open(RAW_DATETIMES_PATH, 'r') as f:
for x in f.read().splitlines():
try:
raw_datetimes.append(datetime.datetime(year=int(x[1:5]), month=int(x[6:8]), day=int(x[9:11])))
except ValueError:
raw_datetimes.append('NA')
return raw_datetimes
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_times(self, slices=None):\n times = netCDF4.num2date(\n datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n )\n return numpy.ma.array([times])",
"def get_raw_data():\n raw_corpus = get_raw_corpus()\n raw_datetimes = get_raw_datetimes()\n raw_data = []\n for i, raw_datetime in enumerate(raw_datetimes):\n raw_data.append([raw_datetime, raw_corpus[i]])\n return raw_data",
"def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt",
"def load_timestamps(data_path):\n timestamp_file = os.path.join(data_path, 'oxts', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps",
"def get_datetimes(file_name):\n csv_file = open(file_name, 'rb')\n file_content = csv.reader(csv_file)\n\n # ignore header\n file_content.next()\n\n datetimes = []\n\n for row in file_content:\n datetimes.append(row[0])\n\n csv_file.close()\n\n return datetimes",
"def read(self):\n with open(self) as f:\n return Timestamp.load(f)",
"def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT",
"def read_datetime_set(filename, seq_len):\n\tdate_set = []\n\twith open(os.path.join(info_path, \"squence_len_{}\".format(seq_len), filename), 'r') as f:\n\t\tfor line in f:\n\t\t\tnew_line = line.rstrip('\\n').split('\\t')\n\t\t\tdate_set.append([int(new_line[0]), int(new_line[1])])\n\treturn np.array(date_set)",
"def load_timestamps(timestamps_data_path):\n timestamp_file = os.path.join(\n timestamps_data_path, 'data.csv')\n\n timestamps = []\n with codecs.open(timestamp_file, 'r', 'utf-8') as f:\n for line in islice(f, 1, None):\n t = float(\"{:.9f}\".format(float(line.split(',')[0]) / 1e9))\n timestamps.append(t) \n\n # Subselect the chosen range of frames, if any\n return timestamps",
"def _load_time_series(self, path: str) -> np.ndarray:\n items = []\n previous = None\n for item in sorted(pathlib.Path(path).glob(\"*.nc\")):\n with xr.open_dataset(item) as ds:\n current = ds.ocean_time.values[0].astype(\"datetime64[M]\")\n if (previous is not None\n and (current - previous != np.timedelta64(1, \"M\"))):\n raise ValueError(\"Time series not continuous\")\n items.append((current, str(item)))\n previous = current\n length = max(len(item[1]) for item in items)\n return np.array(\n items,\n dtype={\n \"names\": (\"date\", \"path\"),\n \"formats\": (\"datetime64[M]\", f\"U{length}\"),\n },\n )",
"def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes",
"def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content",
"def read_timestamps(self, tasks):\n from reframe.core.deferrable import evaluate\n\n self.begin_stamps = []\n self.end_stamps = []\n for t in tasks:\n with open(evaluate(t.check.stdout), 'r') as f:\n self.begin_stamps.append(float(f.readline().strip()))\n self.end_stamps.append(float(f.readline().strip()))\n\n self.begin_stamps.sort()\n self.end_stamps.sort()",
"def load_timestamps(ts_file):\n ts = []\n with open(ts_file, 'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] != \"#\":\n ts.append(line)\n\n return ts",
"def load_data(file_path, drop_na=True, utc_time=False):\n df = pd.read_csv(file_path, parse_dates=True, keep_date_col=True, index_col=0, infer_datetime_format=True, error_bad_lines=False)\n if drop_na == True: \n df = df.dropna()\n if utc_time == True:\n df.index = pd.to_datetime(df.index, utc=True, unit='s')\n return df",
"def read_parse_raw_data(path):\n file_list = TopologyHelper.get_file_list(path)\n print(\"Reading \" + str(len(file_list)) + \" files from \" + path)\n topology_info = []\n file_name = []\n for file in file_list:\n try:\n r = TopologyHelper.parse_file(file)\n tmp = (r[0])['Topology']\n topology_info.append(tmp)\n t = r[1]\n file_name.append(t)\n except:\n continue\n print(\"Parsing completed\")\n return file_name, topology_info",
"def get_data_from_csv_full_path(filepath, datatypes, date_column_list):\n\n dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)\n\n return dataframe",
"def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather",
"def data_input(path, complete=False, nrows=10000):\n\n if complete:\n df = pd.read_csv(path)\n\n else:\n df = pd.read_csv(path, nrows=nrows)\n df[\"date_time\"] = pd.to_datetime(\n df[\"date_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n\n #Maybe we could get rid of the exact timestamp if not useful\n #-> .apply(lambda x: x.date())\n return df",
"def read_datetime(self):\n with GPIOTimingContentManager(self.gpio, start=self._start_tx, end=self._end_tx):\n self._write_byte(self.REG_BURST_READ)\n\n regs = list()\n for _ in range(self.REG_SIZE):\n regs.append(self._read_byte())\n\n # Decode bytes to datetime\n return datetime.datetime.strptime(\" \".join([\"{:x}\".format(x) for x in regs]), self.DT_STR_FMT)",
"def _load_data(filename):\n\n def str2date(s):\n \"\"\"Converts a string to a datetime\"\"\"\n return datetime.strptime(s.decode(), \"%Y-%m-%d %H:%M:%S\")\n\n # Load the data\n return np.recfromcsv(filename, converters={0: str2date}, comments=\"#\")",
"def load_rsvp_responses_file(filename):\n with open(filename, 'r') as fp:\n reader = csv.DictReader(fp)\n times = [int(float(row['time'])) for row in reader]\n return times",
"def read_raw(file_path):\n file = open(file_path, 'rb')\n content = file.read()\n file.close()\n return content",
"def load_timestamps(self):\n print('Loading timestamps for sequence ' + self.sequence + '...')\n\n timestamp_file = os.path.join(self.sequence_path, 'times.txt')\n\n # Read and parse the timestamps\n self.timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n t = dt.timedelta(seconds=float(line))\n self.timestamps.append(t)\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n self.timestamps = [self.timestamps[i] for i in self.frame_range]\n\n print('Found ' + str(len(self.timestamps)) + ' timestamps...')\n\n print('done.')",
"def load_raw(fname):\n # Read all the data from the file\n ctd = []\n with open(fname) as ctdfile:\n \n for line in ctdfile:\n \n if (line.find('*') < 0) and (line.find('#') < 0):\n \n # This line contains data; parse the line\n entries = line.strip().split()\n # Convert data to float64\n entries = [np.float64(entries[i]) \n for i in range(len(entries))]\n # Append to list\n ctd.append(entries)\n \n # Return the raw data as an numpy array\n return np.array(ctd)",
"def extract_datetime(fpath):\n try:\n handle = open(fpath, 'rb')\n if hexlify(handle.read(2)) != hexlify(u'MZ'):\n handle.close()\n return\n except:\n return\n\n try:\n handle.seek(60, 0)\n offset = handle.read(4)\n offset = hexlify(offset[::-1])\n\n if offset == '':\n handle.close()\n return\n\n offset = int(offset, 16)\n handle.seek(offset+8, 0)\n dword = handle.read(4)\n handle.close()\n\n t = unpack(\">L\", dword[::-1])[0]\n except:\n return\n return datetime.datetime.fromtimestamp(t)",
"def _strptime(cls, raw: typing.Optional[str]) -> typing.Optional[datetime.datetime]:\n if not raw:\n return None\n return datetime.datetime.strptime(raw, cls._TSFMT)",
"def load_timestamps_img(data_path):\n timestamp_file = os.path.join(data_path, 'image_00', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps",
"def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True",
"def load_times(file_name):\n data = np.loadtxt(file_name)\n data = data[data[:, 0].argsort()]\n times = data[:, 0]\n values = data[:, 1]\n\n # Remove the mean amplitude and shift time origin\n times -= times[0]\n values -= np.mean(values)\n\n return times, values"
] |
[
"0.6244591",
"0.61131406",
"0.60976774",
"0.58365625",
"0.58308834",
"0.5646238",
"0.5640784",
"0.56179684",
"0.54649955",
"0.5452194",
"0.53788567",
"0.5321501",
"0.52975816",
"0.5276463",
"0.52416366",
"0.52309686",
"0.5192991",
"0.5172008",
"0.5170778",
"0.51664054",
"0.51615304",
"0.5139738",
"0.5099307",
"0.5098938",
"0.5071041",
"0.50680536",
"0.50653726",
"0.505852",
"0.50566447",
"0.50464547"
] |
0.774482
|
0
|
Transforms `raw_corpus` and `raw_datetimes` into a list of lists where each inner list is a timestamped article.
|
def get_raw_data():
raw_corpus = get_raw_corpus()
raw_datetimes = get_raw_datetimes()
raw_data = []
for i, raw_datetime in enumerate(raw_datetimes):
raw_data.append([raw_datetime, raw_corpus[i]])
return raw_data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def solr_transformed_dates(solr_client: Solr, parsed_dates: typing.List):\n return [solr_client._from_python(date) for date in parsed_dates] # pylint: disable=protected-access",
"def to_twodim_list(self):\n if self._timestampFormat is None:\n return self._timeseriesData\n\n datalist = []\n append = datalist.append\n convert = TimeSeries.convert_epoch_to_timestamp\n for entry in self._timeseriesData:\n append([convert(entry[0], self._timestampFormat), entry[1:]])\n\n return datalist",
"def to_twodim_list(self):\n tsformat = self._timestampFormat\n\n if tsformat is None:\n return self._timeseriesData\n\n datalist = []\n append = datalist.append\n convert = TimeSeries.convert_epoch_to_timestamp\n for entry in self._timeseriesData:\n append([convert(entry[0], tsformat), entry[1]])\n\n return datalist",
"def date_parser(dates):\n final_date = []\n for date in dates:\n final_date = final_date + [date[0:10]]\n return final_date",
"def date_to_list(data_index):\n results = []\n for row in data:\n results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))\n return results",
"def timeStamps(dataset):\n \n timestamps = []\n \n for index, row in enumerate(dataset):\n try:\n timeObj = datetime.datetime.strptime(timeStampFix(row), '%y:%j:%H:%M:%S')\n except ValueError:\n print('Failed to create datetime object for ' + timeStampFix(row))\n timestamps.append(timeObj)\n \n return timestamps",
"def get_all_dates(all_articles):\n all_dates = []\n for title in all_articles:\n date_as_string = all_articles[title]['date']\n date_as_datetime = datetime.datetime.strptime(date_as_string, \"%Y-%m-%dT%H:%M:%SZ\")\n all_dates.append(date_as_datetime)\n return all_dates",
"def protobuf_timestamps_to_dates(protobuf_timestamps):\n date_list = []\n \n for protobuf_timestamp in protobuf_timestamps:\n _timestamp = Timestamp()\n _timestamp.FromJsonString(value = protobuf_timestamp)\n _date = _timestamp.ToDatetime().date()\n date_list.append(_date)\n \n return date_list",
"def transform(self, docs):\n return [doc for doc in docs]",
"def test_list_of_datetimes(self):\n plot_index = pd.date_range(start=\"2000-1-1\", freq=\"D\", periods=10000)\n freq = 'M'\n\n dates = pd.Series(1, index=plot_index).resample(freq).index\n tl = formatter.TimestampLocator(plot_index, xticks=dates)\n test = tl._process(3, 900)\n\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n correct = tl._process(3, 900)\n tm.assert_almost_equal(test, correct)\n\n freq = 'MS'\n dates = pd.Series(1, index=plot_index).resample(freq).index\n tl = formatter.TimestampLocator(plot_index, xticks=dates)\n test = tl._process(3, 900)\n\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n correct = tl._process(3, 900)\n tm.assert_almost_equal(test, correct)\n\n # straight list of dates\n freq = 'MS'\n dates = pd.Series(1, index=plot_index).resample(freq).index\n dates = list(dates)\n tl = formatter.TimestampLocator(plot_index, xticks=dates)\n test = tl._process(3, 900)\n\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n correct = tl._process(3, 900)\n tm.assert_almost_equal(test, correct)",
"def date_parser(dates): \n new_dates = []\n for i in range(len(dates)):\n a = dates[i][:10] \n new_dates.append(a)\n return new_dates",
"def normalize_dates(dates):\n n_dates = []\n for date, pos in dates:\n for r in date_re:\n m = r.match(date)\n if m:\n d = m.groupdict()\n month = month_to_int(d['month'])\n day = d['day']\n year = d['year']\n if month and day:\n n_dates += [('{}-{:02}-{:02}'.\n format(year, int(month), int(day)),)]\n else:\n n_dates += [('{}'.format(year),)]\n\n break\n\n n_dates.sort(key=lambda d: len(d), reverse=True)\n return n_dates",
"def _parse_dates(self, articles, date_type, locations=[]):\n # Don't parse if parse_dates is False\n if self.parse_dates is False:\n return articles\n \n # Create parsed_articles list\n parsed_articles = []\n\n # For every article parse date_string into datetime.datetime\n for article in articles:\n parsed_article = article\n for location in locations:\n parsed_article[location] = self._parse_date(parsed_article[location], date_type)\n parsed_articles.append(article)\n\n return parsed_articles",
"def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]",
"def datetime_to_list(date):\n return [date.year, date.month, date.day,\n date.hour, date.minute, date.second]",
"def get_timestamps( self, raster_pos=None ):\n if raster_pos is None:\n headers = self.time_specific_headers\n else:\n headers = self.get_raster_pos_headers( raster_pos )\n \n return [to_epoch( from_Tformat( h['DATE_OBS'] ) ) for h in headers]",
"def tstamps_for_daterange(self, start_date, end_date):\n img_offsets = np.array([timedelta(hours=0)])\n\n timestamps = []\n diff = end_date - start_date\n for i in range(diff.days + 1):\n daily_dates = start_date + timedelta(days=i) + img_offsets\n timestamps.extend(daily_dates.tolist())\n\n return timestamps",
"def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list",
"def convert_all_timestamps(results: List[ResponseObject]) -> List[ResponseObject]:\n results = [convert_generic_timestamps(result) for result in results]\n results = [convert_observation_timestamps(result) for result in results]\n return results",
"def tstamps_for_daterange(self, start_date, end_date):\n\n img_offsets = np.array([timedelta(hours=h) for h in self.h_steps])\n\n timestamps = []\n diff = end_date - start_date\n for i in range(diff.days + 1):\n daily_dates = start_date + timedelta(days=i) + img_offsets\n timestamps.extend(daily_dates.tolist())\n\n return timestamps",
"def tstamps_for_daterange(self, start_date, end_date):\n img_offsets = np.array([timedelta(hours=h) for h in self.h_steps])\n\n timestamps = []\n diff = end_date - start_date\n for i in range(diff.days + 1):\n daily_dates = start_date + timedelta(days=i) + img_offsets\n timestamps.extend(daily_dates.tolist())\n\n return timestamps",
"def extract_years(timestamps):\n return np.asarray([dt.year for dt in timestamps.astype(datetime)])",
"def get_dates(raw_table) -> \"list of dates\":\n dates = []\n found_first = False\n for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):\n if dstr:\n if len(dstr.split(\"/\")) == 3:\n d = datetime.datetime.strptime(dstr, '%m/%d/%Y')\n elif len(dstr.split(\"-\")) == 3:\n d = datetime.datetime.strptime(dstr, '%Y-%m-%d')\n else:\n # Not necessarily an error, could just be a non-date cell\n logging.debug(\"unknown date-format: {}\".format(dstr))\n continue\n dates.append(d)\n if not found_first:\n found_first = True\n logging.debug(\"Found first date: '{}' at i: {}\".format(d.isoformat(), i))\n elif found_first:\n logging.debug(\"Last date: {}\".format(d))\n break\n return dates",
"def clean_data(X):\n X_cleaned = []\n for i in X:\n X_cleaned.append([i[0], i[1], i[2].timestamp(), i[3], i[4].timestamp(), i[5]])\n return X_cleaned",
"def date_parser(dates):\n # extract the date only from dates: Olwethu\n date_list = []\n for i in dates:\n i = i.split(' ')\n # append each date to a new list: Olwethu\n date_list.append(i[0])\n \n return date_list",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:\n if any(feat_type.is_timeless() for feat_type, _, _ in self.features if feat_type.is_array()):\n return []\n\n timestamps = get_available_timestamps(\n bbox=bbox,\n time_interval=time_interval,\n data_collection=self.data_collection,\n maxcc=self.maxcc,\n config=self.config,\n )\n\n return self.timestamp_filter(timestamps, self.time_difference)",
"def transform_array(obj):\n # Check for astype failures (putative Numpy < 1.7)\n dt2001 = np.datetime64('2001')\n legacy_datetime64 = (dt2001.astype('int64') ==\n dt2001.astype('datetime64[ms]').astype('int64'))\n ## not quite correct, truncates to ms..\n if obj.dtype.kind == 'M':\n if legacy_datetime64:\n if obj.dtype == np.dtype('datetime64[ns]'):\n return (obj.astype('int64') / 10**6.0).tolist()\n else:\n return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()\n elif obj.dtype.kind in ('u', 'i', 'f'):\n return transform_numerical_array(obj)\n return obj.tolist()",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:",
"def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list",
"def convert_corpus_to_list(text_corpus):\n\n text_corpus = text_corpus.values.tolist()\n return text_corpus"
] |
[
"0.683058",
"0.6323778",
"0.6128271",
"0.60786724",
"0.60340846",
"0.602659",
"0.59372884",
"0.5706374",
"0.5697947",
"0.5654273",
"0.56450975",
"0.5622019",
"0.56163025",
"0.55910176",
"0.55910176",
"0.5584497",
"0.55833334",
"0.5565312",
"0.55243546",
"0.55132765",
"0.5501394",
"0.54682654",
"0.5459732",
"0.54572356",
"0.5451516",
"0.540185",
"0.5396009",
"0.53840554",
"0.53574294",
"0.53515327"
] |
0.72453225
|
0
|
Converts `article_sents` into a single string.
|
def get_article_str(article_sents):
article_str = ""
for nlp_sent in article_sents:
article_str += (' ' + nlp_sent.text + ' ')
return article_str
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_article_as_string(article,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n article_string = ''\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word, preprocess_type)\n if article_string == '':\n article_string = preprocessed_word\n else:\n article_string += (' ' + preprocessed_word)\n return article_string",
"def _get_full_entity(entity: spacy.tokens.Token) -> str:\n entity_string = SpacyEventExtractor._get_chunk(entity)\n\n word = entity\n while True:\n prep, word = SpacyEventExtractor._get_prep_with_word(word)\n if word is None:\n break\n entity_string += \" \" + prep\n return entity_string",
"def get_article_text(self, article: BeautifulSoup):\n # Removes unwanted elements in article, like ads\n for elm in article.find_all(self.parsing_template.ignore_content_tag):\n elm.decompose()\n\n return self.get_text(article, self.parsing_template.content)",
"def get_article_text(article):\n soup = BeautifulSoup(article.content)\n return soup.get_text()",
"def _entity_as_text(self):\n return str(self.value)",
"def article_text(self, article):\n\n if isinstance(article, str):\n self.cmd = \"select old_text from page \" \\\n \"inner join revision on rev_page = page_id \" \\\n \"inner join text on old_id = rev_text_id \" \\\n \"where page_title = '%s';\" % article\n\n else: # Assume it's an id\n self.cmd = \"select old_text from revision \" \\\n \"inner join text on old_id = rev_text_id \" \\\n \"where rev_page = '%d';\" % article\n\n return self._query_single()",
"def to_string(student_list):\n student_info = \"\"\n for student in student_list:\n student_info += f\"{str(student)}\\n\"\n return student_info",
"def l2s(l):\n return ''.join(l)",
"def get_itr_str(iterable) -> str:\n\treturn str([str(i) for i in iterable])",
"def to_sentences(self, sents_id: List[List[int]]) -> List[List[str]]:\n sents = []\n for s_id in sents_id:\n s = []\n for w_id in s_id:\n s.append(self.word_from_id(w_id))\n sents.append(s)\n return sents",
"def transform_article(article):\n tokens = clean_article(article)\n lemmatizer = WordNetLemmatizer()\n lemmatized = [lemmatizer.lemmatize(token) for token in tokens]\n transformed_article = \" \".join(lemmatized)\n return transformed_article",
"def rsset2str(self, elements):\n if len(elements) == 0:\n return \"0\"\n s = \"{\"\n for c in elements:\n s += \" \" + self._reaction_system.get_entity_name(c)\n s += \" }\"\n return s",
"def to_str(self):\n return '\\n'.join(str(entry) for entry in self)",
"def authors_string(self):\n all_authors = Author.objects.filter(book=self).order_by('last_name')\n return u', '.join([unicode(a) for a in all_authors])",
"def str(x):\n return str(x)[:200]",
"def to_string(self) -> str:\n return f\"<Document, #sentences: {len(self.sentences)}>\"",
"def summarize(article, lang, num_sentences):\r\n summary = tldr_matrix(article, lang)[:num_sentences]\r\n return \"\\n\".join([_[1] for _ in sorted(summary, key=lambda x: x[0])])",
"def entries_to_str(entries):\r\n db = BibDatabase()\r\n db.entries = entries\r\n return bibtexparser.dumps(db)",
"def ainvs_to_string(ainvs):\n return ainvs if type(ainvs)==type('') else \";\".join([NFelt(ai) for ai in ainvs])",
"def get_string_of_transactions(self):\n s = \"\"\n for transaction in self.transactions:\n s += transaction.to_string()\n return s",
"def _convert_to_string(srs):\n return srs.map(lambda x: str(x))",
"def sentence_join(self, sentences):\n return \" \".join(sentences)",
"def abstract2sents(abstract):\n\tcur = 0\n\tsents = []\n\twhile True:\n\t\ttry:\n\t\t\tstart_p = abstract.index(SENTENCE_START, cur)\n\t\t\tend_p = abstract.index(SENTENCE_END, start_p + 1)\n\t\t\tcur = end_p + len(SENTENCE_END)\n\t\t\tsents.append(abstract[start_p + len(SENTENCE_START):end_p].strip())\n\t\texcept ValueError as e: # no more sentences\n\t\t\treturn sents",
"def tag_sents(self, sents):\n # WORK HERE!!",
"def entity_extract(self, eid):\n fname = os.path.join(\n self.data_dir_base, \"entities\", self.code, \"extracts\", f\"{eid}.txt\"\n )\n if os.path.exists(fname):\n with open(fname) as f:\n return \"\".join(f.readlines())\n return \"\"",
"def __str__(self) -> str:\n return \"\\n\".join(str(x) for x in self.content)",
"def random_sentence():\n article = random_article()\n random_sentence = random.choice(article.sentence_list)\n\n return {'sentence' : unicode(random_sentence),\n 'title' : unicode(article.title),\n 'revision_id' : article.revision_id }",
"def tostring(self, transformer=None):\n\t\tresult = ''\n\t\tif not transformer:\n\t\t\ttransformer = str\n\t\tfor start in range(0, len(self.data), self.dims[0]):\n\t\t\tfor c in self.data[start:start+self.dims[0]]:\n\t\t\t\tresult += transformer(c)\n\t\t\tresult += '\\n'\n\t\treturn result",
"def get_article(article_id):\n article = db_session.query(Article)\\\n .filter_by(id=article_id).one()\n return jsonify(article.serialize)",
"def obj_to_string(rental):\n string = rental.id + ';' + rental.movie_id + ';' + rental.client_id + ';' \\\n + str(rental.rented_date) + ';' + str(rental.due_date) + ';' + str(rental.returned_date) + '\\n'\n return string"
] |
[
"0.67486",
"0.612108",
"0.5690145",
"0.56773126",
"0.5410131",
"0.5308889",
"0.520001",
"0.51668775",
"0.5153415",
"0.5148386",
"0.5125402",
"0.50992966",
"0.5094515",
"0.5066397",
"0.50654906",
"0.50575686",
"0.5049806",
"0.5031486",
"0.5018384",
"0.49969393",
"0.49943385",
"0.49935094",
"0.49894983",
"0.4937179",
"0.4932812",
"0.49324948",
"0.49256903",
"0.49112216",
"0.4904658",
"0.48895052"
] |
0.8428992
|
0
|
Show details of the topology, devices, and more. Show [topology, device]
|
def do_show(self, args):
args = args.split(" ")
if args[0] == '':
print("Incorrect command.")
return
elif args[0] == 'device':
if len(args) < 2:
if len(self.topology.devices) == 0:
print("No device in this topology.")
return
for index, device in enumerate(self.topology.devices):
print("[{}] {}".format(index, device.infomation_text()))
return
device_ip = args[1]
device = self.topology.get_device_by_ip(device_ip)
if device is None:
print("Not found device IP {}".format(device_ip))
return
if len(args) < 3:
# Todo show device info
print(device.infomation_text())
return
if args[2] == 'route':
routes = device.get_routes()
self.print_pretty_routes(routes, device.get_interfaces())
return
if 'interface'.startswith(args[2]):
interfaces = device.get_interfaces()
self.print_interfaces(interfaces)
return
elif args[0] == 'flow':
# print(len(self.topology.get_flows()))
for flow in self.topology.get_flows():
print(flow)
return
elif args[0] == 'route':
return self.show_route(args[1:])
elif args[0] == 'graph':
G = self.topology.create_graph()
# edge_labels = nx.get_edge_attributes(G,'state')
nx.draw_networkx_edge_labels(G, pos=nx.spring_layout(G))
# nx.draw_networkx_edge_labels(G, pos, labels = edge_labels)
plt.rcParams["figure.figsize"] = [30, 30]
nx.draw_circular(G, with_labels=True)
filename = "imgs/topo-{}.png".format(time.time())
plt.savefig(filename)
plt.show(block=False)
elif args[0] == 'topology':
self.topology.print_matrix()
elif args[0] == 'version':
print("SDN Handmade: 0.0.1")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_device_information_long(self):\n\n for device in self._devices:\n print(\"\")\n if device['Device Type'].startswith(\"enclosu\"):\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n else:\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('Linux Device Name'):\n print(\"{0:>32}: {1}\".format(\"Linux Device Name\", device['Linux Device Name']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('Drive Capacity'):\n print(\"{0:>32}: {1}\".format(\"Drive Capacity\", device['Drive Capacity']))\n if device.get('Block Length'):\n print(\"{0:>32}: {1}\".format(\"Block Length\", device['Block Length']))\n if device.get('Power On Hours'):\n print(\"{0:>32}: {1}\".format(\"Power On Hours\", device['Power On Hours']))\n if device.get('Current Temperature'):\n print(\"{0:>32}: {1}\".format(\"Current Temperature\", device['Current Temperature']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n if device.get('Enclosure Device'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Device\", device['Enclosure Device']))\n if device.get('Enclosure Slot'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Slot\", device['Enclosure Slot']))\n if device.get('Slot Description'):\n print(\"{0:>32}: {1}\".format(\"Slot Desciption\", device['Slot Description']))\n\n if len(self._devices):\n print(\"\")",
"def show_device_information(self):\n\n if self._json_format:\n print(json.dumps(self._devices, indent=4, separators=(',', ': ')))\n return\n\n if self._long_format:\n self.show_device_information_long()\n elif self._include_enclosures and self._number_enclosures:\n self.show_device_information_enclosures()\n else:\n self.show_device_information_only()",
"def print_device_info(device):\n assert(isinstance(device, Device))\n print(\" Device Name : %s\" % device.name)\n print(\" OS Type : %s\" % device.os_type)\n print(\" IP Address : %s\" % device.ip_addr)\n print(\" Interfaces : %s\" % \", \".join(device.iflist))",
"def showtopologies():\n middleware.protocolObj.showTopologies()",
"def print_device_info(device_info):\n assert(isinstance(device_info, tuple))\n assert(len(device_info) == 4)\n print(\" Device Name : %s\" % device_info[0])\n print(\" OS Type : %s\" % device_info[1])\n print(\" IP Address : %s\" % device_info[2])\n print(\" Interfaces : %s\" % device_info[3])",
"def print_devices(detail_level=0):\n if detail_level < 0:\n raise ValueError('Negative detail level: {!r}'.format(detail_level))\n if detail_level < 1:\n for platform_idx, platform in enumerate(ocl.get_platforms()):\n print('Platform [{}]: {} ({})'.format(platform_idx, platform.name,\n platform.version))\n for device_idx, device in enumerate(platform.get_devices()):\n print(' Device [{}]: {}'.format(device_idx, device.name))\n print() # Additional line as seperator for readability\n return\n\n\n # Specialised formatting functions for specific pieces of information.\n # Device type macros (used for ocl.device_info.TYPE):\n def device_type(info):\n \"\"\"Translating the bit map into human readable categories.\"\"\"\n options = {(1 << 0): 'CL_DEVICE_TYPE_DEFAULT',\n (1 << 1): 'CL_DEVICE_TYPE_CPU',\n (1 << 2): 'CL_DEVICE_TYPE_GPU',\n (1 << 3): 'CL_DEVICE_TYPE_ACCELERATOR',\n (1 << 4): 'CL_DEVICE_TYPE_CUSTOM'}\n return options.get(info, 'Undefined Device Type')\n\n\n def fp_config_formatting(info):\n \"\"\"Translating the bit map into human readable categories.\"\"\"\n # From: OpenCL/AMDAPPSDK-3.0/include/CL/cl.h\n options = [((1 << 0), 'CL_FP_DENORM'),\n ((1 << 1), 'CL_FP_INF_NAN'),\n ((1 << 2), 'CL_FP_ROUND_TO_NEAREST'),\n ((1 << 3), 'CL_FP_ROUND_TO_ZERO'),\n ((1 << 4), 'CL_FP_ROUND_TO_INF'),\n ((1 << 5), 'CL_FP_FMA'),\n ((1 << 6), 'CL_FP_SOFT_FLOAT'),\n ((1 << 7), 'CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT')]\n # The initial line shows the bitmap, following lines\n # explicitly show the meaning and availability.\n option_breakdown = [bin(info)]\n for bitfield, option in options:\n is_available = bool(bitfield & info)\n option_breakdown.append('{}={}'.format(option, is_available))\n return ('\\n\\t'+' '*(device_maxwidth+3)).join(option_breakdown)\n\n\n def platform_extension_formatting(info):\n \"\"\"Splitting the extensions and displaying each on aligned lines.\"\"\"\n return ('\\n'+' '*(platform_maxwidth+3)).join(info.split())\n\n\n def device_extension_formatting(info):\n \"\"\"Splitting the extensions and displaying each on aligned lines.\"\"\"\n return ('\\n\\t'+' '*(device_maxwidth+3)).join(info.split())\n\n # The following two option collections are lists of tuples with 2 or 3\n # components. The first is the detail level at which it should be\n # displayed. The second is the name of the parameter. The third is\n # optional and should, if available, be a formatting function. 
The\n # default is to use `str()`.\n\n # Complete set of possible parameters for ocl.platform_info:\n platform_info_options = [\n (1, 'NAME'),\n (4, 'PROFILE'),\n (4, 'VENDOR'),\n (1, 'VERSION'),\n (2, 'EXTENSIONS', platform_extension_formatting)]\n\n # Complete set of possible parameters for ocl.device_info:\n device_info_options = [\n (3, 'ADDRESS_BITS'),\n (5, 'ATTRIBUTE_ASYNC_ENGINE_COUNT_NV'),\n (1, 'AVAILABLE', bool),\n (5, 'AVAILABLE_ASYNC_QUEUES_AMD'),\n (5, 'BOARD_NAME_AMD'),\n (3, 'BUILT_IN_KERNELS'),\n (1, 'COMPILER_AVAILABLE', bool),\n (5, 'COMPUTE_CAPABILITY_MAJOR_NV'),\n (5, 'COMPUTE_CAPABILITY_MINOR_NV'),\n (5, 'CORE_TEMPERATURE_ALTERA'),\n (3, 'DOUBLE_FP_CONFIG', fp_config_formatting),\n (1, 'DRIVER_VERSION'),\n (4, 'ENDIAN_LITTLE'),\n (4, 'ERROR_CORRECTION_SUPPORT', bool),\n (4, 'EXECUTION_CAPABILITIES', bool),\n (3, 'EXTENSIONS', device_extension_formatting),\n (5, 'EXT_MEM_PADDING_IN_BYTES_QCOM'),\n (5, 'GFXIP_MAJOR_AMD'),\n (5, 'GFXIP_MINOR_AMD'),\n (5, 'GLOBAL_FREE_MEMORY_AMD'),\n (2, 'GLOBAL_MEM_CACHELINE_SIZE', memory_size),\n (2, 'GLOBAL_MEM_CACHE_SIZE', memory_size),\n (2, 'GLOBAL_MEM_CACHE_TYPE'),\n (5, 'GLOBAL_MEM_CHANNELS_AMD'),\n (5, 'GLOBAL_MEM_CHANNEL_BANKS_AMD'),\n (5, 'GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD'),\n (2, 'GLOBAL_MEM_SIZE', memory_size),\n (3, 'GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE'),\n (5, 'GPU_OVERLAP_NV'),\n (3, 'HALF_FP_CONFIG', fp_config_formatting),\n (3, 'HOST_UNIFIED_MEMORY', bool),\n (3, 'IMAGE2D_MAX_HEIGHT'),\n (3, 'IMAGE2D_MAX_WIDTH'),\n (3, 'IMAGE3D_MAX_DEPTH'),\n (3, 'IMAGE3D_MAX_HEIGHT'),\n (3, 'IMAGE3D_MAX_WIDTH'),\n (3, 'IMAGE_MAX_ARRAY_SIZE'),\n (3, 'IMAGE_MAX_BUFFER_SIZE', memory_size),\n (3, 'IMAGE_SUPPORT', bool),\n (5, 'INTEGRATED_MEMORY_NV'),\n (2, 'KERNEL_EXEC_TIMEOUT_NV'),\n (1, 'LINKER_AVAILABLE', bool),\n (5, 'LOCAL_MEM_BANKS_AMD'),\n (2, 'LOCAL_MEM_SIZE', memory_size),\n (5, 'LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD'),\n (2, 'LOCAL_MEM_TYPE'),\n (5, 'MAX_ATOMIC_COUNTERS_EXT'),\n (2, 'MAX_CLOCK_FREQUENCY'),\n (2, 'MAX_COMPUTE_UNITS'),\n (2, 'MAX_CONSTANT_ARGS'),\n (2, 'MAX_CONSTANT_BUFFER_SIZE', memory_size),\n (2, 'MAX_GLOBAL_VARIABLE_SIZE'),\n (2, 'MAX_MEM_ALLOC_SIZE', memory_size),\n (4, 'MAX_ON_DEVICE_EVENTS'),\n (4, 'MAX_ON_DEVICE_QUEUES'),\n (4, 'MAX_PARAMETER_SIZE'),\n (4, 'MAX_PIPE_ARGS'),\n (4, 'MAX_READ_IMAGE_ARGS'),\n (4, 'MAX_READ_WRITE_IMAGE_ARGS'),\n (4, 'MAX_SAMPLERS'),\n (2, 'MAX_WORK_GROUP_SIZE'),\n (2, 'MAX_WORK_ITEM_DIMENSIONS'),\n (2, 'MAX_WORK_ITEM_SIZES'),\n (3, 'MAX_WRITE_IMAGE_ARGS'),\n (4, 'MEM_BASE_ADDR_ALIGN'),\n (5, 'ME_VERSION_INTEL'),\n (4, 'MIN_DATA_TYPE_ALIGN_SIZE'),\n (1, 'NAME'),\n (4, 'NATIVE_VECTOR_WIDTH_CHAR'),\n (4, 'NATIVE_VECTOR_WIDTH_DOUBLE'),\n (4, 'NATIVE_VECTOR_WIDTH_FLOAT'),\n (4, 'NATIVE_VECTOR_WIDTH_HALF'),\n (4, 'NATIVE_VECTOR_WIDTH_INT'),\n (4, 'NATIVE_VECTOR_WIDTH_LONG'),\n (4, 'NATIVE_VECTOR_WIDTH_SHORT'),\n (5, 'NUM_SIMULTANEOUS_INTEROPS_INTEL'),\n (1, 'OPENCL_C_VERSION'),\n (5, 'PAGE_SIZE_QCOM'),\n #(5, 'PARENT_DEVICE'), # Somehow, this crashes Python.\n (5, 'PARTITION_AFFINITY_DOMAIN'),\n (5, 'PARTITION_MAX_SUB_DEVICES'),\n (5, 'PARTITION_PROPERTIES'),\n (5, 'PARTITION_TYPE'),\n (5, 'PCI_BUS_ID_NV'),\n (5, 'PCI_SLOT_ID_NV'),\n (5, 'PIPE_MAX_ACTIVE_RESERVATIONS'),\n (5, 'PIPE_MAX_PACKET_SIZE'),\n (4, 'PLATFORM'),\n (4, 'PREFERRED_GLOBAL_ATOMIC_ALIGNMENT'),\n (4, 'PREFERRED_INTEROP_USER_SYNC'),\n (4, 'PREFERRED_LOCAL_ATOMIC_ALIGNMENT'),\n (4, 'PREFERRED_PLATFORM_ATOMIC_ALIGNMENT'),\n (4, 'PREFERRED_VECTOR_WIDTH_CHAR'),\n (4, 'PREFERRED_VECTOR_WIDTH_DOUBLE'),\n (4, 
'PREFERRED_VECTOR_WIDTH_FLOAT'),\n (4, 'PREFERRED_VECTOR_WIDTH_HALF'),\n (4, 'PREFERRED_VECTOR_WIDTH_INT'),\n (4, 'PREFERRED_VECTOR_WIDTH_LONG'),\n (4, 'PREFERRED_VECTOR_WIDTH_SHORT'),\n (4, 'PRINTF_BUFFER_SIZE'),\n (4, 'PROFILE'),\n (5, 'PROFILING_TIMER_OFFSET_AMD'),\n (3, 'PROFILING_TIMER_RESOLUTION'),\n (4, 'QUEUE_ON_DEVICE_MAX_SIZE'),\n (4, 'QUEUE_ON_DEVICE_PREFERRED_SIZE'),\n (4, 'QUEUE_ON_DEVICE_PROPERTIES'),\n (4, 'QUEUE_ON_HOST_PROPERTIES'),\n (4, 'QUEUE_PROPERTIES'),\n (4, 'REFERENCE_COUNT'),\n (5, 'REGISTERS_PER_BLOCK_NV'),\n (5, 'SIMD_INSTRUCTION_WIDTH_AMD'),\n (5, 'SIMD_PER_COMPUTE_UNIT_AMD'),\n (5, 'SIMD_WIDTH_AMD'),\n (5, 'SIMULTANEOUS_INTEROPS_INTEL'),\n (3, 'SINGLE_FP_CONFIG', fp_config_formatting),\n (5, 'SPIR_VERSIONS'),\n (5, 'SVM_CAPABILITIES'),\n (5, 'THREAD_TRACE_SUPPORTED_AMD'),\n (5, 'TOPOLOGY_AMD'),\n (1, 'TYPE', device_type),\n (1, 'VENDOR'),\n (5, 'VENDOR_ID'),\n (1, 'VERSION'),\n (5, 'WARP_SIZE_NV'),\n (5, 'WAVEFRONT_WIDTH_AMD')]\n\n # Options which should be displayed are selected by their assigned level.\n selector = lambda x: (x[0] <= detail_level)\n platform_options = list(filter(selector, platform_info_options))\n device_options = list(filter(selector, device_info_options))\n\n # Some formatting preperations\n template = '{1:<{0}} : {2}'\n global platform_maxwidth\n platform_maxwidth = max(map(len, (t[1] for t in platform_options)))\n global device_maxwidth\n device_maxwidth = max(map(len, (t[1] for t in device_options)))\n\n for platform_idx, platform in enumerate(ocl.get_platforms()):\n print(' Platform {}:'.format(platform_idx))\n for tup in platform_options:\n # Unpacking the option tuple. If there is no specified\n # formatting function at index 2, assume `str`.\n name = tup[1]\n formatting = str if len(tup) < 3 else tup[2]\n # Attempt to retrieve the information from the device,\n # and assume none is available if the retrieval fails.\n try:\n info = platform.get_info(getattr(ocl.platform_info, name))\n except:\n info = 'Parameter not available.'\n formatting = str\n print(template.format(platform_maxwidth, name, formatting(info)))\n for device_idx, device in enumerate(platform.get_devices()):\n print('\\t Device {}.{}:'.format(platform_idx, device_idx))\n for tup in device_options:\n # Unpacking the option tuple. If there is no specified\n # formatting function at index 2, assume `str`.\n name = tup[1]\n formatting = str if len(tup) < 3 else tup[2]\n # Attempt to retrieve the information from the device,\n # and assume none is available if the retrieval fails.\n try:\n info = device.get_info(getattr(ocl.device_info, name))\n except:\n info = 'Parameter not available.'\n formatting = str\n print('\\t'+template.format(device_maxwidth, name,\n formatting(info)))\n print()\n return",
"def show_device_information_only(self):\n\n # TODO: Optimize formatting later!\n if self._sas_addresses:\n if self._report_header:\n print(\"\\n\")\n print(\" Linux SCSI Firmware Drive Block Curr\")\n print(\" Device Device Vendor Product Revision Capacity Length Temp Serial Number SAS Address\")\n print(\"---------- ---------- -------- ---------------- -------- ------------ ------ ---- -------------- ------------------\")\n\n for device in self._devices:\n if not device['Device Type'].startswith(\"disk\"):\n continue\n print('{dsf:<10} {sdsf:<10} {vid:<8} {pid:<16} {fw:<8} {capacity:>12} {blocklen:>4} {temp:<4} {serial:<14} {sas:<18}'\n .format(dsf=device['Linux Device Name'],\n sdsf=device['SCSI Device Name'],\n vid=device['Vendor Identification'],\n pid=device['Product Identification'],\n fw=device['Firmware Version'],\n capacity=device['Drive Capacity'],\n blocklen=device['Block Length'],\n temp=device['Current Temperature'],\n serial=device['Serial Number'],\n sas=device['SAS Address']))\n else:\n if self._report_header:\n print(\"\\n\")\n print(\" Linux SCSI Firmware Drive Block Curr\")\n print(\" Device Device Vendor Product Revision Capacity Length Temp Serial Number\")\n print(\"---------- ---------- -------- ---------------- -------- ------------ ------ ---- -------------\")\n\n for device in self._devices:\n if not device['Device Type'].startswith(\"disk\"):\n continue\n print('{dsf:<10} {sdsf:<10} {vid:<8} {pid:<16} {fw:<8} {capacity:>12} {blocklen:>4} {temp:<4} {serial:<14}'\n .format(dsf=device['Linux Device Name'],\n sdsf=device['SCSI Device Name'],\n vid=device['Vendor Identification'],\n pid=device['Product Identification'],\n fw=device['Firmware Version'],\n capacity=device['Drive Capacity'],\n blocklen=device['Block Length'],\n temp=device['Current Temperature'],\n serial=device['Serial Number']))\n\n if self._report_header:\n print(\"\\n\")",
"def showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])",
"def print_device_info(nodemap):\n print(\"\\n*** DEVICE INFORMATION ***\\n\")\n\n try:\n result = True\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode(\"DeviceInformation\"))\n\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\n features = node_device_information.GetFeatures()\n for feature in features:\n node_feature = PySpin.CValuePtr(feature)\n print(\"%s: %s\" % (node_feature.GetName(),\n node_feature.ToString() if PySpin.IsReadable(node_feature) else \"Node not readable\"))\n\n else:\n print(\"Device control information not available.\")\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n return False\n\n return result",
"def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )",
"def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")",
"def show_info(self):\n txt = \"Brand: %s\\nModel: %s\\nHostname: %s\\n\"%(self.brand, self.model, self.hostname)\n return txt",
"def show_info(self):\n print 'Querying the station for the configuration...'\n config = self.station.getConfig()\n for key in sorted(config):\n print '%s: %s' % (key, config[key])",
"def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"NAT\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} NAT\",\n }\n return info",
"def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info",
"def get_details(self):\n print(self.name)\n print(10 * \"-\" + \"\\n\")\n print(self.description)\n for direction in self.linked_rooms:\n room = self.linked_rooms[direction]\n print(\"The \" + room.get_name() + \" is \" + direction)\n print(\"\\n\")",
"def __repr__(self):\n\t\t\tnum = cuda.Device.count()\n\t\t\tstring = \"\"\n\t\t\tstring += (\"%d device(s) found:\\n\"%num)\n\t\t\tfor i in range(num):\n\t\t\t\tstring += ( \" %d) %s (Id: %d)\\n\"%((i+1),cuda.Device(i).name(),i))\n\t\t\t\tstring += (\" Memory: %.2f GB\\n\"%(cuda.Device(i).total_memory()/1e9))\n\t\t\treturn string",
"def device_info(self):\n return {\n \"name\": self._alias,\n \"model\": self._model,\n \"manufacturer\": \"TP-Link\",\n \"connections\": {(dr.CONNECTION_NETWORK_MAC, self._mac)},\n \"sw_version\": self._sysinfo[\"sw_ver\"],\n }",
"def show(self,mode=0,level=0,ident=''):\n if self.locked : l='L'\n else : l=' '\n tmp= '%sd%-3d %s %-6s %-30s Vendor: %-10s %-10s Size: %10s' % \\\n (ident,self.idx,l,self.name,self.guid.strip()[-29:],self.vendor,self.model,printsz(self.size))\n if level>0:\n tmp+='\\n'+ident+' Paths:'\n for p in self.paths.values() : tmp+='\\n'+p.show(mode,level-1,ident+' ')\n tmp+='\\n'+ident+' Partitions:'\n for p in self.partitions.values() : tmp+='\\n'+p.show(mode,level-1,ident+' ')\n tmp+='\\n'\n return tmp",
"def print_device_info(nodemap):\r\n\r\n #print('*** DEVICE INFORMATION ***\\n')\r\n\r\n try:\r\n result = True\r\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))\r\n\r\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\r\n features = node_device_information.GetFeatures()\r\n #for feature in features:\r\n #node_feature = PySpin.CValuePtr(feature)\r\n #print('%s: %s' % (node_feature.GetName(),\r\n #node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))\r\n\r\n else:\r\n print('Device control information not available.')\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error: %s' % ex)\r\n return False\r\n\r\n return result",
"def showStateOnOs(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n rc = a.sys.net.lnx.device.IpLink.showDevice(self._log, deviceName) \n if a.sys.net.lnx.common.Command.isReturnOk(rc):\n print rc[1] # stdout\n else:\n print rc[2] # stderr",
"def print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list):\n \n print('\\nDetailed info pertaining to: ' + desired_host)\n print('Total Nodes: {0}'.format(str(len(node_list)))) \n print('Total Cores : {0}'.format(total_cores) + PRINT_INDENT + 'Used Cores: {0}'.format(used_cores)\n + PRINT_INDENT + 'Free Cores: {0}'.format(str(total_cores - used_cores - disabled_cores)) \n + PRINT_INDENT + 'Disabled Cores: {0}'.format(disabled_cores))\n print('\\nThe following is a list of each node within {0}:\\n'.format(desired_host))\n print('Node name'.ljust(int(TERMWIDTH/2)) + 'Used Cores/Total Cores')\n for node in node_list:\n cores = str(node.get_used()) + '/' + str(node.get_total())\n if node.get_disabled_switch():\n disabled = 'Unavailable'\n else:\n disabled = ''\n print((PRINT_INDENT + node.get_name()).ljust(int(TERMWIDTH/2)) + PRINT_INDENT + (str(cores).rjust(5,' ') \\\n + PRINT_INDENT + disabled))\n return",
"def device_info(self) -> Dict[str, Any]:\n agreement = self.toon.agreement\n model = agreement.display_hardware_version.rpartition('/')[0]\n sw_version = agreement.display_software_version.rpartition('/')[-1]\n return {\n 'identifiers': {\n (DOMAIN, agreement.id),\n },\n 'name': 'Toon Display',\n 'manufacturer': 'Eneco',\n 'model': model,\n 'sw_version': sw_version,\n }",
"def show_info(self, handle=sys.stdout):\n pt = PrettyTable(['EntryInfo', 'Value'])\n pt.align = 'r'\n pt.align['EntryInfo'] = 'l'\n pt.align['Value'] = 'l'\n pt.float_format = '8.5'\n\n # Gather all device information, do not show private\n # information that begins with an underscore\n show_info = self.post()\n public_keys = sorted([key for key in show_info.keys()\n if not key.startswith('_')])\n for key in public_keys:\n pt.add_row([key, show_info[key]])\n\n print(pt, file=handle)",
"def info_view():\n output = {\n 'info': 'GET /api/v1',\n 'devices': 'GET /api/v1/devices',\n 'get device': 'GET /api/v1/devices/<device>',\n 'edit device': 'PUT /api/v1/devices/<device>',\n 'color schemes': 'GET /api/v1/schemes',\n 'get color scheme': 'GET /api/v1/schemes/<scheme>',\n 'edit color scheme': 'PUT /api/v1/schemes/<scheme>',\n 'delete color scheme': 'DELETE /api/v1/schemes/<scheme>'\n }\n return jsonify(output)",
"def print_core_info(device):\n\n print(\"Found a {}!\".format(device.board_name()))\n print(\" Board ID: {}\".format(device.board_id()))\n print(\" Firmware version: {}\".format(device.firmware_version()))\n print(\" Part ID: {}\".format(device.part_id()))\n print(\" Serial number: {}\".format(device.serial_number()))\n\n # If this board has any version warnings to display, dipslay them.\n warnings = device.version_warnings()\n if warnings:\n wrapped_warnings = textwrap.wrap(warnings)\n wrapped_warnings = \"\\n\".join([\" {}\".format(line) for line in wrapped_warnings])\n print(\"\\n !!! WARNING !!!\\n{}\\n\".format(wrapped_warnings))",
"def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s",
"def print_info(self):\n\n n_metabolites = len(self.metabolites)\n n_reactions = len(self.reactions)\n n_constraints = len(self.constraints)\n n_variables = len(self.variables)\n\n info = pd.DataFrame(columns=['value'])\n info.loc['name'] = self.name\n info.loc['description'] = self.description\n info.loc['num constraints'] = n_constraints\n info.loc['num variables'] = n_variables\n info.loc['num metabolites'] = n_metabolites\n info.loc['num reactions'] = n_reactions\n info.index.name = 'key'\n\n print(info)",
"def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def show_dev_info(dev):\n if dev is None:\n raise ValueError('- Invalid device')\n show_dev_descriptor(dev)\n return 0"
] |
[
"0.7024573",
"0.6975774",
"0.6967318",
"0.6879825",
"0.6828446",
"0.656477",
"0.6419597",
"0.63903284",
"0.63215077",
"0.6316704",
"0.631509",
"0.6278415",
"0.62708306",
"0.62081003",
"0.6189719",
"0.61110306",
"0.6107462",
"0.60931426",
"0.6081408",
"0.60599726",
"0.6034592",
"0.59920603",
"0.5988746",
"0.5987517",
"0.5956012",
"0.5954549",
"0.59496963",
"0.59128463",
"0.5911005",
"0.5910765"
] |
0.75043815
|
0
|
Enter to device mode
|
def do_device(self, args):
self.device_command.cmdloop("Enter to device mode")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])",
"def mode(self, channel, target, command=\"\"):\n time.sleep(1)\n self.s.send(\"MODE %s %s%s\\n\" % (channel, target, (command and (\" \" + command))))\n logger.log(\"MODE %s %s%s\" % (channel, target, (command and (\" \" + command)))).LogSend()",
"def turnOnSdkMode(self):\n \n command = b\"\\x90\\x01\\x01\"\n #print(\"turnOnSdkMode run, command: \")\n #print(command)\n \n self.sendCommand(command)",
"def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)",
"def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)",
"def on(config: dict):\n switch_device(config, config[\"inching\"], \"on\")",
"def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()",
"def force_switch_on(self):\n self.turn_on_modem()",
"def change_device(self):\n if self.state.ser:\n UsbHost.close_port(self.state.ser)\n device = self.CBDevices.currentText()\n if device:\n comport = self.devices[int(device)]\n self.state.ser = UsbHost.open_port(comport)\n if not self.state.ser:\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n answer: str = self.UsbHost.send_command(self.state.ser, \"ping\", device)\n if answer in wrong_answers:\n error_message(\"Выбранный девайс не отвечает\")\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n self.state.device_id = int(device)\n self.state.comport = comport\n self.create_message()\n self.set_controls_state(True)\n self.BtnL1.click()\n self.BtnAttenuate.click()\n self.SpinDACValue.setValue(35000)\n self.BtnSetDACValue.click()\n self.set_sw(\"0 1\")",
"def startMode(self):\n raise NotImplementedError('startMode() should be implemented')",
"def _open_device(self):\r\n self._lib = windll.LoadLibrary(\"lib\\\\ps2000a.dll\")\r\n c_handle = c_int16()\r\n with self._driver_lock:\r\n m = self._lib.ps2000aOpenUnit(byref(c_handle),None)\r\n if m == 286:\r\n m = self._lib.ps2000aChangePowerSource(c_handle,\r\n c_int32(m))\r\n check_result(m)\r\n self._handle = c_handle\r\n\r\n return True",
"def open_device_dialog(self):\n res, device = DeviceDialog.get_device(self.indexer)\n if res and device:\n self.serial = device.serial\n if self.serial:\n caps_str = None\n self.open_device(self.serial, caps_str)",
"def mode(self, target, *data):\n self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)",
"def onClick(self):\n self.app.setActiveMode(\"start\")",
"def mark_playfield_active_from_device_action(self):\n self._playfield_switch_hit()",
"def enter_immediately_gesture_engine(self):\n self.write_flag_data([True], APDS_9960.GESTURE_CONFIG_4_REG_ADDRESS, 0)",
"def OnModeOn(self, event):\n\n\t\t#~ raw_code = event.GetRawKeyCode()\n\t\tmodifiers = event.GetModifiers()\n\n\t\t#~ if raw_code == 75 and modifiers==3:\n\t\t\t#~ self.Close()\n\t\tif modifiers==2:\n\t\t\tself.mode = \"ctrl\"\n\t\tprint \"ctrl down....\\n\"",
"def set_drive_mode(mode):",
"def startapp(self, command):\n e = self.emu\n e.alt(\"F2\")\n e.shortwait()\n e.clickat(self.screen.center)\n e.shortwait()\n e.type(command + \"\\n\")\n e.longwait()",
"def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got",
"def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")",
"def turn_on_modem(self):\n if not self.is_power_on():\n self._logger.debug(\"Switching modem on...\")\n self.set_pin()\n # give modem some time to login\n time.sleep(10)\n else:\n self._logger.debug(\"Modem is already powered on...\")",
"def mannequin_mode(self, event=None):\n if not self._mannequin_mode:\n self.set_action_status_message('mannequin_mode', 'requested')\n subprocess.Popen(['rosrun', 'pr2_controller_manager', \n 'pr2_controller_manager', 'stop', 'GPSPR2Plugin'], stdout=DEVNULL)\n self._mm_process = subprocess.Popen(['roslaunch',\n 'pr2_mannequin_mode', 'pr2_mannequin_mode.launch'], stdout=DEVNULL)\n self._mannequin_mode = True\n self.set_action_status_message('mannequin_mode', 'completed',\n message='mannequin mode toggled on')\n else:\n self.set_action_status_message('mannequin_mode', 'requested')\n self._mm_process.send_signal(signal.SIGINT)\n subprocess.Popen(['rosrun', 'pr2_controller_manager',\n 'pr2_controller_manager', 'start', 'GPSPR2Plugin'], stdout=DEVNULL)\n self._mannequin_mode = False\n self.set_action_status_message('mannequin_mode', 'completed',\n message='mannequin mode toggled off')",
"def set_monitor_mode(controller_name):\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"airmon-ng\", \"check\", \"kill\"])\n subprocess.run([\"iw\", wifi_name, \"set\", \"monitor\", \"none\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])",
"def ControlDevice(self, p_device_obj, p_bridge_obj, p_control):\n pass",
"def set_mode(self, mode, port):\n port = int(port)\n self._validate_port(\"set_mode\", port)\n self._validate_mode(mode)\n logger.debug(\"{} setting power mode to {} for usb port {}\".format(\n self._device_name, mode, port))\n self._shell_fn(self._command_dict[\"SET_MODE\"].format(mode, port))",
"def startMode(self):\n return True, None",
"def enter_data_mode(self):\n if self._sermode == SerialModem.Mode.DATA:\n return True\n \n response = self.run_at_command(\"ATO\\r\")\n \n if response is not None:\n if response[0:2] == \"OK\":\n self._sermode = SerialModem.Mode.DATA;\n return True;\n \n return False;",
"def switch_state():\n\tDmg.OpenWindow()",
"def enter_sleep_mode(self):\n self.execute(SdpI2cCmdEnterSleepMode())"
] |
[
"0.6871464",
"0.6374481",
"0.6312063",
"0.6263293",
"0.62074643",
"0.6196528",
"0.61516935",
"0.60767496",
"0.6055326",
"0.6039394",
"0.6032635",
"0.60067725",
"0.59976095",
"0.599694",
"0.58823013",
"0.5876625",
"0.5849049",
"0.58332497",
"0.5821363",
"0.57796013",
"0.5777636",
"0.57234716",
"0.5700517",
"0.56995344",
"0.56978273",
"0.5696763",
"0.5686542",
"0.56710285",
"0.5669753",
"0.56651884"
] |
0.81906086
|
0
|
Enter to config mode
|
def do_config(self, args):
self.config_command.cmdloop("Enter to config mode")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GoToConfig(self):\n self.write_ack(MID.GoToConfig)",
"def state_choose_enter(cfg, app, win):",
"def state_chosen_enter(cfg, app, win):",
"def state_print_enter(cfg, app, win):",
"def state_processing_enter(cfg, app, win):",
"def config_mode(self, config_command=\"config\", pattern=\">config\"):\n return super().config_mode(config_command=config_command, pattern=pattern)",
"def state_preview_enter(cfg, app, win):",
"def config_mode(self, config_command='sudo su'):\n return self.enable(cmd=config_command)",
"def state_choose_do(cfg, app, win, events):",
"def state_chosen_do(cfg, app, win, events):",
"def state_capture_enter(cfg, app, win):",
"def on_pre_enter(self):\n Logger.info('Application: Changed to the Settings screen.')",
"def activate(obj, config):\n # activates pf9-express config file\n click.echo(\"Activating config %s\" % config)\n dir_path = obj['pf9_exp_conf_dir']\n\n if os.path.exists(dir_path + 'express.conf'):\n with open(dir_path + 'express.conf', 'r') as current:\n lines = current.readlines()\n current.close()\n for line in lines:\n if 'config_name|' in line:\n line = line.strip()\n name = line.replace('config_name|','')\n\n filename = name + '.conf'\n shutil.move(dir_path + 'express.conf', dir_path + filename)\n\n files = [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]\n\n for f in files:\n if f == (config + '.conf'):\n shutil.move(dir_path + f, dir_path + 'express.conf')\n\n click.echo('Config %s is now active' % config)",
"def state_finish_enter(cfg, app, win):",
"def exit_config_mode(self):\n return \"\"",
"def state_wait_enter(cfg, app, win):",
"def config():",
"def config():",
"def open_configuration(self,event):\n configDevFrame = Single_deviceconf(parent=self, ID=996)\n configDevFrame.Centre()\n configDevFrame.Show()\n configDevFrame.ShowModal()\n configDevFrame.Destroy()",
"def launch_configtool():\r\n from PyQt4 import QtGui\r\n from freeseer.frontend.configtool.configtool import ConfigToolApp\r\n\r\n profile = settings.profile_manager.get()\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig,\r\n storage_args=['Global'], read_only=False)\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n main = ConfigToolApp(profile, config)\r\n main.show()\r\n sys.exit(app.exec_())",
"def is_config_mode(self):\n\n return self._connection.get_prompt().strip().startswith('(')",
"def on_pickConfigButton(self, cfg_t, button):\n\n ctxt = \"\"\n cfg_iter = self.cbox[cfg_t].get_active_iter()\n\n # cfg_iter points to user choice? ...\n if cfg_iter != None:\n model = self.cbox[cfg_t].get_model()\n cfgname = model[cfg_iter][0]\n\n # ... or to an Entry?\n else:\n cfgname = self.cbox[cfg_t].get_child().get_text()\n\n if cfgname == \"\":\n ctxt += \"C'mon now! Ya gotta type in a name.\\n\"\n self.consoleBuffer.insert_at_cursor(ctxt)\n return\n\n # are we making a new config?\n if cfgname not in self.cfgmgr.get_cfg_list(cfg_t):\n ctxt += \"Making new {0} '{1}' ...\\n\".format(cfg_t, cfgname)\n\n if not self.cfgmgr.make_config( cfg_t, cfgname ):\n ctxt += self.cfgmgr.errstr()\n self.consoleBuffer.insert_at_cursor(ctxt)\n return\n\n self.sm[cfg_t].set_configf_by_name(cfgname)\n self._load_combobox(cfg_t=cfg_t)\n\n ctxt += \"Activating {0} '{1}' ... \".format(cfg_t, cfgname)\n\n # update ConfigManager and cfg state\n if self.cfgmgr.set_current_cfg_by_name(cfg_t, cfgname):\n ctxt += \"{0} '{1}' activated.\\n\".format(cfg_t, cfgname)\n ctxt += \"==> INFO: {}\\n\".format( self.sm[cfg_t].get('info') )\n else:\n ctxt += \"\\n==> ERROR: could not activate {0} '{1}'\\n\".format(cfg_t, cfgname)\n\n self.consoleBuffer.insert_at_cursor(ctxt)\n\n self._refresh_ui( block='profile', act_l=[cfg_t,] )",
"def printConfig():\n # Why not log instead? Are we asking user to confirm settings?\n pass # until implemented",
"def onConfigureMessage(self, config):\n self.setState(\"starting\")",
"def config(interactive=False):\n cfg = ConfigManager()\n if interactive:\n cfg.setup_config_interactive()\n print(cfg)",
"def config_select(self, first_run=False):\n self.clear_strip()\n strings = ['IMAGE', 'TIME', 'LOOP', 'BRIGHTNESS']\n funcs = [self.config_image, self.config_time, self.config_loop,\n self.config_brightness]\n group = self.make_ui_group(True, strings[self.config_mode])\n board.DISPLAY.brightness = 1 # Screen on\n prev_mode = self.config_mode\n reload_image = first_run\n\n while True:\n action_left, action_right = (self.button_left.action(),\n self.button_right.action())\n if action_left is RichButton.HOLD:\n # Call one of the configuration sub-menu functions.\n # These all return two booleans. One indicates whether\n # the setting change requires reloading the image,\n # other indicates if it was a R button hold, in which\n # case this should return to paint mode.\n reload, paint = funcs[self.config_mode]()\n # Image reload is not immediate, it can wait until\n # returning to paint.\n reload_image |= reload\n if paint:\n break # Exit loop, resume paint\n else:\n board.DISPLAY.show(group) # Put config UI back up\n elif action_right is RichButton.HOLD:\n break\n elif action_left is RichButton.TAP:\n self.config_mode = (self.config_mode - 1) % len(strings)\n elif action_right is RichButton.TAP:\n self.config_mode = (self.config_mode + 1) % len(strings)\n\n if self.config_mode is not prev_mode:\n # Create/destroy mode descriptions as needed\n group.pop()\n group.append(centered_label(strings[self.config_mode],\n 40, 3))\n prev_mode = self.config_mode\n\n # Before exiting to paint mode, check if new image needs loaded\n if reload_image:\n self.load_image()",
"def configure_switch(self, config):\n raise NotImplementedError",
"def start():\n import OnlineEnv as Online\n Online.end_config(False)\n #Online.end_config(True)",
"def state_failsafe_enter(cfg, app, win):",
"def confirm(self):\n\t\t# TODO: write the current control scheme to config.ini\n\t\tdefault_controls = self.default_controls()\n\t\tconfig = ConfigParser.RawConfigParser()\n\t\tconfig.add_section('controls')\n\t\tconfig.add_section('default_controls')\n\t\tfor i in xrange(len(CONTROLS_OPTIONS) - 2): \n\t\t\tconfig.set('controls', CONTROLS_OPTIONS[i], self.control_map[i])\n\t\t\tconfig.set('default_controls', CONTROLS_OPTIONS[i], default_controls[i] )\n\t\twith open('config.ini', 'wb') as configfile: config.write(configfile)\n\t\tself.player.current_level.screen_manager.switch_to_options_screen(self.player)"
] |
[
"0.72448474",
"0.7243817",
"0.7200898",
"0.6668514",
"0.6579885",
"0.6569821",
"0.6567963",
"0.6484108",
"0.64485",
"0.64465827",
"0.6421999",
"0.6413546",
"0.6402942",
"0.63392574",
"0.6282004",
"0.62720376",
"0.62643874",
"0.62643874",
"0.6227191",
"0.6213349",
"0.6186388",
"0.6150721",
"0.61367923",
"0.61236817",
"0.6118406",
"0.60920113",
"0.60881996",
"0.6063285",
"0.60378164",
"0.5997182"
] |
0.81245357
|
0
|
Builds the enemy board. Arguments boardDimensions dimensions of the (square) enemy board. shipsAfloat the size and counts of the initial enemy fleet.
|
def __init__(self, boardDimensions, shipsAfloat):
self.enemyBoard = [[BoardState.OPEN for j in range(boardDimensions)] for i in range(boardDimensions)]
self.boardDimensions = boardDimensions
self.shipsAfloat = shipsAfloat
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}",
"def __init__(self, board_size=MAX_BOARD_SIZE, cell_size=MAX_CELL_SIZE, dead_color=DEAD, alive_color=ALIVE):\n self._board_size = board_size\n self._cell_size = cell_size\n self.dead_color = dead_color\n self.alive_color = alive_color\n\n self.board = []\n self.mode = 0",
"def create_fleet(si_settings, screen, ship, aliens, images):\r\n # Create an alien and find the number of aliens in a row.\r\n # Spacing between each alien is equal to one alien width.\r\n alien = Alien(si_settings, screen, images[0], si_settings.alien_points[0])\r\n print(str(alien))\r\n number_aliens_x = get_number_aliens_x(si_settings, alien.rect.width)\r\n print(\"num aliens: \" + str(number_aliens_x))\r\n number_rows = get_number_rows(si_settings, ship.rect.height, alien.rect.height)\r\n print(\"num rows: \" + str(number_rows))\r\n\r\n # Create the fleet of aliens.\r\n for row_number in range(number_rows):\r\n for alien_number in range(number_aliens_x):\r\n create_alien(si_settings, screen, aliens, alien_number, row_number, images[0], si_settings.alien_points[0])\r\n\r\n alien = Alien(si_settings, screen, images[1], si_settings.alien_points[1])\r\n print(str(alien))\r\n number_aliens_x = get_number_aliens_x(si_settings, alien.rect.width)\r\n print(\"num aliens: \" + str(number_aliens_x))\r\n number_rows = get_number_rows(si_settings, ship.rect.height, alien.rect.height)\r\n print(\"num rows: \" + str(number_rows))\r\n\r\n # Create the fleet of aliens.\r\n for row_number in range(number_rows):\r\n for alien_number in range(number_aliens_x):\r\n create_alien(si_settings, screen, aliens, alien_number, row_number + 2, images[1], si_settings.alien_points[1])\r\n\r\n alien = Alien(si_settings, screen, images[2], si_settings.alien_points[2])\r\n print(str(alien))\r\n number_aliens_x = get_number_aliens_x(si_settings, alien.rect.width)\r\n print(\"num aliens: \" + str(number_aliens_x))\r\n number_rows = get_number_rows(si_settings, ship.rect.height, alien.rect.height)\r\n print(\"num rows: \" + str(number_rows))\r\n\r\n # Create the fleet of aliens.\r\n for row_number in range(number_rows):\r\n for alien_number in range(number_aliens_x):\r\n create_alien(si_settings, screen, aliens, alien_number, row_number + 4, images[2], si_settings.alien_points[2])",
"def _create_fleet(self):\n # Make an alien and find the amount of aliens in a row\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n available_space_x = self.settings.screen_width-(2 * alien_width)\n number_aliens_x = available_space_x // (2* alien_width)\n\n # Determine the amount of alien rows\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * alien_height) - ship_height)\n number_rows = available_space_y // (2 * alien_height)\n\n # Create full fleet\n for row_number in range (number_rows):\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number, row_number)",
"def advance_board(self):\n board = self.board\n rules = self.energy_rules\n h, w = board.shape\n beta = 1.0 / max(1e-20, self.temperature)\n if len(rules[0]) - 1 == 4:\n neighborhood = np.array([[0,1,0],[1,0,1],[0,1,0]])\n elif len(rules[0]) - 1 == 6:\n neighborhood = np.array([[0,1,1],[1,0,1],[1,1,0]])\n elif len(rules[0]) - 1 == 8:\n neighborhood = np.array([[1,1,1],[1,0,1],[1,1,1]])\n else:\n raise RuntimeError(\"async rules must have length 5, 7, or 9\")\n rng = get_rng()\n for _ in range(int(board.size * self.cells_per_update)):\n x = rng.choice(w)\n y = rng.choice(h)\n if board[y, x] & CellTypes.frozen:\n continue\n neighbors = board.view(wrapping_array)[y-1:y+2, x-1:x+2] * neighborhood\n alive_neighbors = np.sum(neighbors & CellTypes.alive > 0)\n spawn_neighbors = np.sum(neighbors & CellTypes.spawning > 0)\n frozen = np.sum(neighbors & CellTypes.freezing) > 0\n if frozen:\n continue\n if board[y, x] & CellTypes.alive:\n H = rules[0][alive_neighbors]\n else:\n H = rules[1][alive_neighbors]\n\n P = 0.5 + 0.5*np.tanh(H * beta)\n P = 1 - (1-P)*(1-self.spawn_prob)**spawn_neighbors\n board[y, x] = CellTypes.life if coinflip(P) else CellTypes.empty",
"def _create_fleet(self):\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n available_space_y = self.settings.screen_height - (2 * alien_height)\n number_aliens_y = available_space_y // (2 * alien_height)\n sideways_ship_width = self.sideways_ship.rect.width\n available_space_x = (\n self.settings.screen_width - (3 * alien_width) -\n sideways_ship_width\n )\n number_columns = available_space_x // (2 * alien_width) + 1\n for column_number in (range(1, number_columns)):\n for alien_number in range(number_aliens_y):\n self._create_alien(alien_number, column_number)",
"def _create_fleet(self):\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n\n # leaving a width of an UFO as space on left/right side of screen\n available_space_x = self.settings.screen_width - (2 * alien_width)\n\n # calculating the spacing between UFOs\n number_aliens_x = available_space_x // (2 * alien_width)\n\n # calculating the vertical space by taking screen height and subtracting\n # UFO row height from top + our own ships height from bottom\n # then further subtract 2x UFO height\n ship_height = self.ship.rect.height\n available_space_y = self.settings.screen_height - (2 * alien_height) - ship_height\n\n # we also want some space under the last row of aliens\n # so to calculate the number of rows we divide the screen space left by 2 * UFO height\n number_rows = available_space_y // (2 * alien_height)\n\n # creating a fleet of UFOs\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number, row_number)",
"def _create_fleet(self):\n alien = Alien(self) # used for calculations, NOT part of fleet\n # <Alien sprite(in 0 groups)>\n\n # Get dimensions for ship & alien\n ship_height = self.ship.rect.height\n alien_width, alien_height = alien.rect.size # (60, 58)\n \n # find available space for aliens to fit on screen\n available_space_y = (self.settings.screen_height - (3 * alien_height) - ship_height)\n available_space_x = self.settings.screen_width - ( 2 * alien_width )\n # available_space_y = (800) - (3 * 58) - 48 = 578\n # available_space_x = 1200 - (2 * 60) = 1080\n\n # determine total number of aliens per row & total number of rows \n number_aliens_x = available_space_x // ( 2 * alien_width )\n number_rows = available_space_y // ( 2 * alien_height )\n # number_aliens_x = 1080 // (2 * 60) = 9\n # number_rows = 578 // (2 * 58) = 4\n\n # Create rows of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n # Fill row with aliens\n self._create_alien(alien_number, row_number )\n\n # rect = <rect(x, y, width, height)> <rect(180, 58, 60, 58)>",
"def create_fleet(ai_settings, screen, ship, aliens):\n\t#Create an alien and find the number of aliens in a row\n\t#Spacing between each alien is equal to one alien width\n\talien = Alien(ai_settings, screen)\n\tnumber_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n\tnumber_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)\n\t\n\t#Create the fleet of aliens\n\tfor row_number in range(number_rows):\n\t\tfor alien_number in range(number_aliens_x):\n\t\t\tcreate_alien(ai_settings, screen, aliens, alien_number, row_number)",
"def create_fleet(si_settings,screen,ship,aliens):\n #creates an alien and finds the number in a row\n #each alien has has space equal to one alien next to it\n alien = Alien(si_settings,screen)\n number_aliens_x = get_number_aliens_x(si_settings,alien.rect.width)\n number_rows = get_number_rows(si_settings,ship.rect.height,alien.rect.height)\n\n\n #Creates fleet of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n create_alien(si_settings,screen,aliens,alien_number,row_number)",
"def create_fleet(ai_settings, screen, ship, aliens):\n # Create an alien and find the nuber of aliens in a row.\n # Spacing between each alien is equal to one alien width.\n alien = Alien(ai_settings, screen)\n number_aliens_x = get_number_alliens_x(ai_settings, alien.rect.width)\n number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)\n\n # Create fleet of aliens.\n for row_number in range(number_rows):\n # Create row of aliens.\n for alien_number in range(number_aliens_x):\n # Create an alien and place it in the row.\n create_alien(ai_settings, screen, aliens, alien_number, row_number)",
"def __init__(self, frame, players, shipList, boardsize, opponent, number):\n\n self.myframe = frame\n self.players = players\n self.shipList = shipList\n self.boardsize = boardsize\n self.isComputer = opponent # 0 human, 1 computer\n self.playerNumber = number\n self.shipID = 101 # Every ship is identified by an id\n self.exitstatus = 0\n\n # List representing every attacked location on the board\n # 0 = not attacked yet, 1 = attacked\n self.hit = [0 for i in range(pow(self.boardsize, 2))]\n\n # Keeps track of ships alongwith their original sizes\n # As ships are hit, their size in tracker will be decremented\n # Size of zero represents sunk ship\n # Example: {101:5, 102:0, 103:7, 104:1, 105:0}\n # So, ships with id \"102\" and \"105\" have been sunk\n self.tracker = {}\n\n # Create canvas\n self.canvas = Canvas(\\\n self.myframe, background='white', highlightthickness=0,\\\n width=pow(self.boardsize, 2) + 140,\\\n height=pow(self.boardsize, 2) + 160)\n self.canvas.pack(fill=BOTH, expand=TRUE)\n\n # Draw board on canvas\n self.squares = []\n for y in xrange(self.boardsize):\n for x in xrange(self.boardsize):\n self.squares.append(self.canvas.create_rectangle(\\\n x * 20 + 20, y * 20 + 40, x * 20 + 40, y * 20 + 60, \\\n fill='#0055ff', width=2))\n self.canvas.addtag_withtag('square', self.squares[-1])\n\n self.placeShips()",
"def __init__(self, board = np.random.randint(2, size=(5, 5), dtype = np.uint8)): \n # Check for valid filetype for board\n if not isinstance(board, np.ndarray):\n raise NotImplementedError(\"Board must be an numpy.array.\")\n # Check for valid board size.\n if any(x < 2 for x in board.shape):\n raise NotImplementedError(\"Board state invalid! Must be at least 2x2.\")\n # Check for valid board cell entries.\n if not np.all(np.isin(board, [0,1])):\n raise NotImplementedError(\"Board state invalid! Must be filled with 0s and 1s.\")\n\n # Define lookup table for cell evaluation\n self.lookup = np.asarray([[0,0,0,1,0,0,0,0,0],[0,0,1,1,0,0,0,0,0]])\n\n # Define expanded board for easier border cell calculations\n expanded_shape = tuple(d+2 for d in board.shape)\n board_slice = (slice(1, -1),) * 2\n self.expanded_board = np.zeros(expanded_shape,dtype = np.uint8)\n self.expanded_board[board_slice] = board\n self.board = self.expanded_board[board_slice]",
"def __init__(self, board=None, workers=None):\n if board:\n self._board = []\n for row in range(self.BOARD_SIZE):\n self._board.append([])\n for col in range(self.BOARD_SIZE):\n try:\n height = board[row][col]\n except IndexError:\n height = 0\n self._board[row].append(Building(height))\n else:\n self._board = [[Building() for col in range(self.BOARD_SIZE)]\n for row in range(self.BOARD_SIZE)]\n\n if workers:\n self._workers = workers\n else:\n self._workers = {}",
"def __init__(self, boardDimensions, shipsAfloat): \r\n ShotSelector.__init__(self, boardDimensions, shipsAfloat)\r\n self.remainingCoordinates = [Coordinates(i, j) for i in range(self.boardDimensions) for j in range(self.boardDimensions)]\r\n random.shuffle(self.remainingCoordinates)",
"def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return",
"def __init__(self, board_size, ships):\r\n self.__board_size = board_size\r\n self.__ships = ships\r\n self.__bombs_dict = dict()",
"def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)",
"def __init__(self, boardDimensions, shipsAfloat):\r\n ShotSelector.__init__(self, boardDimensions, shipsAfloat)\r\n self.shipsToSink = []",
"def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.MultiDiscrete([self.board_size, self.board_size])",
"def create_board(self, size):\n x = np.arange(0, size[0])\n y = np.arange(0, size[1])\n board = np.meshgrid(x, y)\n return board",
"def _create_fleet(self):\n\t\t#Make an alien\n\t\talien = Alien (self)\n\t\talien_width, alien_height = alien.rect.size\n\t\tship_height = self.ship.rect.height\n\t\t\n\t\tavailable_space = self.settings.screen_width - (2*alien_width)\n\t\tavailable_space_y = self.settings.screen_height - (3*alien_height) - ship_height\n\t\tnumber_aliens_x = available_space // (2*alien_width)\n\t\tnumber_rows = available_space_y // (2*alien_height)\n\t\t\n\n\t\t#create first row of aliens\n\t\tfor row_number in range (number_rows):\n\t\t\tfor alien_number in range(number_aliens_x):\n\t\t\t\tself._create_alien(alien_number, alien_width, alien_height, row_number)",
"def create_fleet(ai_settings, screen, ship, aliens):\n #creates and finds no of aliens in a row\n #spacing between aliens is an alien width\n alien = Alien(ai_settings, screen)\n number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)\n\n #create first row of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n #create an alien and place it in row\n create_alien(ai_settings, screen, aliens, alien_number, row_number)",
"def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.num_actions = 0\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.Discrete(self.board_size*self.board_size)\n self.valid_actions = np.ones((self.board_size * self.board_size), dtype=np.bool)",
"def __init__(self, params):\n position = recordtype(\"position\", [\"x\", \"y\", \"kind\"])\n coordinate = recordtype(\"coordinate\", [\"x\", \"y\"])\n\n self.width = params[\"m\"]\n self.height = params[\"n\"]\n self.count = 0\n self.pieces = params[\"pieces\"]\n\n self.board = []\n for _ in self.pieces:\n self.board.append(position(0, 0, 0))\n\n self.board_index = 0\n self.last_xy = []\n self.last_index = [0] * number_of_types\n\n for _ in range(number_of_types):\n coord_list = []\n for _ in range(len(self.pieces) + 1):\n coord_list.append(coordinate(0, 0))\n self.last_xy.append(coord_list)\n\n self.attacked_cols = [0] * self.width\n self.attacked_rows = [0] * self.height\n self.attacked_diag_l = [0] * (self.width + self.height)\n self.attacked_diag_r = [0] * (self.width + self.height)\n self.attacked_cells = [0] * ((self.width+4) * (self.height+4))\n\n self.king_rules = [\n coordinate(-1, 0), coordinate(1, 0), coordinate(0, -1), coordinate(0, 1),\n coordinate(-1, -1), coordinate(1, 1), coordinate(1, -1), coordinate(-1, 1)\n ]\n\n self.knight_rules = [\n coordinate(-2, -1), coordinate(-2, 1), coordinate(2, -1), coordinate(2, 1),\n coordinate(-1, -2), coordinate(-1, 2), coordinate(1, -2), coordinate(1, 2)\n ]",
"def __init__(self):\n self._board_area = [[\" \" for i in range(20)] for j in range(20)]\n\n # Starting setup for board includes these coordinates black, and their mirror white\n black_start = [(1, 2), (2, 2), (2, 1), (2, 3), (3, 2), (4, 1), (4, 3), (5, 2), (6, 1), (6, 3), (7, 1),\n (7, 2), (7, 3), (8, 1), (8, 2), (8, 3), (9, 1), (9, 2), (9, 3), (10, 1), (10, 2), (10, 3),\n (11, 1), (11, 3), (12, 1), (12, 2), (12, 3), (13, 1), (13, 3), (14, 2), (15, 1), (15, 3),\n (16, 2), (17, 1), (17, 2), (17, 3), (18, 2), (2, 6), (5, 6), (8, 6), (11, 6),\n (14, 6), (17, 6)]\n\n # Border points set for clearing out stones that move beyond the border\n self._border = set((0, i) for i in range(20)) | set((19, i) for i in range(20))\n self._border = self._border | set((i, 0) for i in range(20)) | set((i, 19) for i in range(20))\n\n # Fill black and white stones\n for coord in black_start:\n self._board_area[coord[0]][coord[1]] = \"B\"\n self._board_area[coord[0]][-coord[1] - 1] = \"W\"\n\n # Alphabetic indexing of board for alpha-numeric movement inputs\n self._locmap = dict(zip(\"abcdefghijklmnopqrst\", range(20)))",
"def spawn_enemies(self, number: int = None) -> None:\n\n # Make the enemies into rows of 6\n for j in range(2):\n self.enemies.add([EnemyShip(self.sensitivity, self.screen_width // 4 + i * self.screen_width // 10,\n self.screen_height // 2 - EnemyShip.sprites[0].get_height() * j,\n self.wave_random(), self.screen_width, self.screen_height,\n self.get_random_direction(), self.mob_bullet, self.debug) for i in range(6)])",
"def __init__(self, board_dim= DEFAULT_DIM):\r\n self.width = board_dim\r\n self.height = board_dim\r\n\r\n self.grid = np.array([[' '] * self.width for i in range(self.height)])\r\n self.num_checkers = 0 # keeps track of how many checkers have been added\r\n\r\n self.available_moves = [(row, col) for row in range(self.height) for col in range(self.width)]\r\n\r\n # Specify the winning condition based on the board's dimension\r\n if (self.width < 5):\r\n self.win_condition = self.width\r\n else:\r\n self.win_condition = 5",
"def ZoneBuilder():\n\n # Part 1: Zone Dimensions\n matrix, xaxis, yaxis, zaxis = dimensions()\n\n # Part 2: Assigning Room Existance.\n matrix = existance(matrix, xaxis, yaxis, zaxis)\n \n # Part 3: Creating room walls.\n \n # First, generate walls adjacent to void spaces.\n matrix = enclose_rooms(matrix, xaxis, yaxis, zaxis)\n \n matrix = select_walls(matrix, xaxis, yaxis, zaxis)",
"def __init__(self, board_dimensions: tuple, initial_position: list = None) -> None:\n assert len(board_dimensions) == 2, \"board dimensions must be 2 digit array\"\n assert all(\n [dim >= 0 for dim in board_dimensions]\n ), \"dimensions must be positive\"\n self.board_dimensions = board_dimensions\n if initial_position:\n assert type(initial_position) == list, \"Position must be length 2 list\"\n assert (\n len(initial_position) == 2\n ), \"Position must be a list of length 2 containing x and y coordinates where top left of the board is [0,0]\"\n assert (\n 0 <= initial_position[0] < self.board_dimensions[0]\n ), \"Invalid initial x position\"\n assert (\n 0 <= initial_position[1] < self.board_dimensions[1]\n ), \"invalid initial y position\"\n self.position = initial_position.copy()\n else:\n self.position = [\n np.random.randint(0, board_dimensions[0] - 1),\n np.random.randint(0, board_dimensions[1] - 1),\n ]"
] |
[
"0.5798147",
"0.5782923",
"0.56396955",
"0.5630348",
"0.5585761",
"0.55785453",
"0.5550585",
"0.5549059",
"0.55364156",
"0.5524999",
"0.5519438",
"0.5479709",
"0.5463151",
"0.54622513",
"0.54564345",
"0.5421945",
"0.5414699",
"0.5409982",
"0.5408475",
"0.5398837",
"0.5394371",
"0.53856707",
"0.53845227",
"0.5382947",
"0.5348993",
"0.533787",
"0.5316364",
"0.5314427",
"0.5259075",
"0.52463806"
] |
0.7309646
|
0
|
For debugging purposes, prints the remaining ships afloat.
|
def printShipsAfloat(self):
logging.debug("ships afloat")
sb = []
for size in self.shipsAfloat:
number = self.shipsAfloat[size]
sb.append(str(size))
sb.append(":")
sb.append(str(number))
sb.append(" ")
logging.debug("".join(sb))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def printShipsToSink(self):\r\n sb = []\r\n for sinkingShip in self.shipsToSink:\r\n shot = self.mapToShot(sinkingShip.bullseye)\r\n sb.append(str(shot))\r\n sb.append(\":\")\r\n sb.append(str(sinkingShip.size))\r\n sb.append(\" \")\r\n logging.debug(\"\".join(sb))",
"def print_inventory(self):\n print(\"Backpack:\")\n # Loop for each item in the players inventory\n for item in self.inventory:\n print('* ' + str(item))\n # Assigns the best weapon\n best_weapon = self.most_powerful_weapon()\n # print statement telling the best weapon in inventory\n print(\"Your best weapon is your {}\".format(best_weapon))",
"def print_summary_fuel_reactor(fs):\n print(\"\\nResults:\")\n print(\"==========================================\")\n print(\"---Moving Bed Fuel Reactor---\") \n \n print(\"\\nInlet gas: \", \n \"\\nCO2: \", value(fs.MB_fuel.F[0,'CO2']), \"mol/s\",\n \"\\nH20: \", value(fs.MB_fuel.F[0,'H2O']), \"mol/s\",\n \"\\nCH4: \", value(fs.MB_fuel.F[0,'CH4']), \"mol/s\",\n \"\\nCO2: \", value(fs.MB_fuel.Gas_M[0,'CO2']), \"kg/s\",\n \"\\nH20: \", value(fs.MB_fuel.Gas_M[0,'H2O']), \"kg/s\",\n \"\\nCH4: \", value(fs.MB_fuel.Gas_M[0,'CH4']), \"kg/s\")\n print(\"\\nOutlet gas: \", \n \"\\nCO2: \", value(fs.MB_fuel.F[1,'CO2']), \"mol/s\",\n \"\\nH20: \", value(fs.MB_fuel.F[1,'H2O']), \"mol/s\", \n \"\\nCH4: \", value(fs.MB_fuel.F[1,'CH4']), \"mol/s\",\n \"\\nCO2: \", value(fs.MB_fuel.Gas_M[1,'CO2']), \"kg/s\",\n \"\\nH20: \", value(fs.MB_fuel.Gas_M[1,'H2O']), \"kg/s\", \n \"\\nCH4: \", value(fs.MB_fuel.Gas_M[1,'CH4']), \"kg/s\")\n print(\"\\nInlet solids: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_F[1,'Fe2O3']), \"mol/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_F[1,'Fe3O4']), \"mol/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_F[1,'Al2O3']), \"mol/s\",\n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_M[1,'Fe2O3']), \"kg/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_M[1,'Fe3O4']), \"kg/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_M[1,'Al2O3']), \"kg/s\")\n print(\"\\nOutlet solids: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_F[0,'Fe2O3']), \"mol/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_F[0,'Fe3O4']), \"mol/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_F[0,'Al2O3']), \"mol/s\",\n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_M[0,'Fe2O3']), \"kg/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_M[0,'Fe3O4']), \"kg/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_M[0,'Al2O3']), \"kg/s\") \n \n print(\"\\nGas inlet velocity: \", value(fs.MB_fuel.vg[0]), \"m/s\")\n print(\"Gas outlet velocity: \", value(fs.MB_fuel.vg[1]), \"m/s\")\n print(\"Solids velocity: \", value(fs.MB_fuel.vs), \"m/s\") \n \n print(\"\\nHeat of reaction @ z=0: \", \n value(fs.MB_fuel.DH_rxn_s[0]), \"J/(mol reaction)\")\n print(\"Heat of reaction @ z=1: \", \n value(fs.MB_fuel.DH_rxn_s[1]), \"J/(mol reaction)\")\n \n print(\"\\nCH4 conversion: \", value(fs.MB_fuel.X_gas)*100, \" %\")\n print(\"Fe2O3 conversion: \", value(fs.MB_fuel.X_OC)*100, \" %\")\n \n print('\\nPressure @inlet: ', value(fs.MB_fuel.P[0]))\n print('Pressure @outlet: ', value(fs.MB_fuel.Gas_Out_P))\n \n print(\"\\nReactor bed height:\", value(fs.MB_fuel.L), \" m\")\n print(\"Reactor bed diameter:\", value(fs.MB_fuel.Dr), \" m\")\n# print(\"Refractory wall thickness\", value(fs.MB.refractory_th), \" m\")\n \n print(\"\\nInlet gas flow:\", value(fs.MB_fuel.Gas_In_F), \" mol/s\")\n print(\"Outlet gas flow:\", value(fs.MB_fuel.Ftotal[1]), \" mol/s\")\n print(\"Inlet solids flow:\", value(fs.MB_fuel.Solid_In_M), \" kg/s\")\n print(\"Outlet solids flow:\", value(fs.MB_fuel.Solid_Out_M), \" kg/s\")\n print(\"Inlet solids temperature:\", value(fs.MB_fuel.Solid_In_Ts), \" K\")\n print(\"Outlet solids temperature:\", value(fs.MB_fuel.Solid_Out_Ts), \" K\")\n \n print(\"Inlet gas temperature:\", value(fs.MB_fuel.Tg[0]), \" K\")\n print(\"Outlet gas temperature:\", value(fs.MB_fuel.Tg[1]), \" K\") \n \n print(\"\\nInlet solid mass fractions: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.x[1,'Fe2O3']),\n \"\\nFe3O4: \", value(fs.MB_fuel.x[1,'Fe3O4']), \n \"\\nAl2O3: \", value(fs.MB_fuel.x[1,'Al2O3']))\n print(\"Outlet solid mass fractions: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.x[0,'Fe2O3']),\n \"\\nFe3O4: 
\", value(fs.MB_fuel.x[0,'Fe3O4']), \n \"\\nAl2O3: \", value(fs.MB_fuel.x[0,'Al2O3']))",
"def affichage(self):\r\n print(self.ship)\r\n for a in range(self.dim+2):\r\n for b in range(self.dim+2):\r\n print(self.plat[b][a],end=\" \")\r\n print(\"\")",
"def debugprint(self, cur_pos): \n print(\"cur_pos = \", cur_pos)\n print(\"Distance map:\")\n print(self.distance_map)\n print(\"Frontier:\")\n print(sorted(self.frontier.items(), key=lambda x:x[1] ))\n print(\"Footprint:\")\n print(self.footprint)\n print(\"--------------\")",
"def display_piles():\n global piles\n global num_piles\n y = 0\n for x in piles:\n print('pile '+str(y)+' = '+str(x))\n y+=1;",
"def display(self):\n print (\"+\" + \"-\"*self.size + \"+\")\n for i in range(self.size):\n terrain_strs = [Terrain.display_string(self.array[j, i]) for j in range(self.size)]\n print(\"|\" + \"\".join(terrain_strs) + \"|\")\n print (\"+\" + \"-\"*self.size + \"+\")",
"def main():\n BATTLEFIELD_CONF = {\n 'field_height': 10,\n 'field_width': 10,\n }\n\n FLEET_PROPERTIES = {\n 'submarine': {'length': 1, 'direction': None, },\n 'destroyer': {'length': 2, 'direction': 'horizontal', },\n 'cruiser': {'length': 3, 'direction': 'horizontal', },\n 'carrier': {'length': 4, 'direction': 'vertical', },\n }\n\n battle_field = FieldDesigner()\n\n battle_field.design_field(\n height=BATTLEFIELD_CONF['field_height'],\n width=BATTLEFIELD_CONF['field_width'],\n )\n\n submarine = BattleShip(\n ship_length=FLEET_PROPERTIES['submarine']['length'],\n direction=FLEET_PROPERTIES['submarine']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='submarine',\n )\n\n cruiser = BattleShip(\n ship_length=FLEET_PROPERTIES['cruiser']['length'],\n direction=FLEET_PROPERTIES['cruiser']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='cruiser',\n )\n\n destroyer = BattleShip(\n ship_length=FLEET_PROPERTIES['destroyer']['length'],\n direction=FLEET_PROPERTIES['destroyer']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='destroyer',\n )\n\n carrier = BattleShip(\n ship_length=FLEET_PROPERTIES['carrier']['length'],\n direction=FLEET_PROPERTIES['carrier']['direction'],\n field=battle_field,\n max_field_grid=BATTLEFIELD_CONF['field_width'],\n type='carrier',\n )\n\n submarine01 = submarine.place_ship()\n cruiser01 = cruiser.place_ship()\n destroyer01 = destroyer.place_ship()\n carrier01 = carrier.place_ship()\n\n fleet_inventory = Fleet(\n [\n submarine01,\n cruiser01,\n destroyer01,\n carrier01,\n ]\n )\n\n print(fleet_inventory)\n print(battle_field)",
"def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)",
"def display(self):\n for i in range(0, len(self.__drawn)):\n if self.__drawn[i]:\n print(str(i+1) + \". You drew a short straw!\")\n else:\n print(str(i+1) + \". You're okay.\")",
"def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))",
"def display_ship(self):\r\n self.screen.blit(self.ship, self.rect)",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def print(self):\n print(' 0 1 2 3 4 5 6 7')\n for j in range(7, -1, -1):\n row = '%s:' % j\n for i in range(8):\n row += self._rubrics[i][j].to_str_with_color\n print(row)\n print(' 0 1 2 3 4 5 6 7')\n\n for key in self._removed_pieces[PieceColor.BLACK].keys():\n print('removed black piece: %s -> %s %s %s' % (\n key, self._removed_pieces[PieceColor.BLACK][key].position.to_str(),\n self._removed_pieces[PieceColor.BLACK][key].piece_type,\n self._removed_pieces[PieceColor.BLACK][key].name)\n )\n\n for key in self._removed_pieces[PieceColor.WHITE].keys():\n print('removed white piece: %s -> %s %s %s' % (\n key, self._removed_pieces[PieceColor.WHITE][key].position.to_str(),\n self._removed_pieces[PieceColor.WHITE][key].piece_type,\n self._removed_pieces[PieceColor.WHITE][key].name)\n )",
"def show_fixedblock(self):\n fb = self.station.get_raw_fixed_block(unbuffered=True)\n for i, ptr in enumerate(range(len(fb))):\n print('%02x' % fb[ptr], end=' ')\n if (i+1) % 16 == 0:\n print()",
"def print_moves(self):\n print self._current_moves\n self._current_moves = \"\"",
"def generate_fish_report(self):\n if len(self.fish) == 0:\n print('No fish in here, come back later')\n\n for species, count in self.fish.items():\n print(f'{species}: {count}')",
"def print_inventory(self):\n\t\tfor item, amount in self.inventoryDictionary.items():\n\t\t\tprint (\"Item: \" + item.name + \" Quantity: \" + str(amount))\n\t\t\tprint (item.description + \"\\n\")\n\n\t\tprint(\"Currently equipped: \")\n\t\tprint(\"Main Hand: \" + self.equippedMainHand.name)\n\t\tprint(\"Armor: \" + self.equippedArmor.name)",
"def print_usage(self):\r\n print 'Total Usage: %f compute seconds' % self.box_usage\r\n cost = self.box_usage * 0.14\r\n print 'Approximate Cost: $%f' % cost",
"def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]",
"def print(self):\n print(\"-----\", self.name, \"-----\")\n print(\"Enable index\", self.enable_index)\n print(\"End index:\", self.stop_index)\n print(\"Measurement length (from the very beginning to the end index):\", self.time_vec[self.stop_index])\n print()\n print(\"Start temperature (hot):\", self.temp_hot_start)\n print(\"Start temperature (cold):\", self.temp_cold_start)\n print(\"Start temperature (mean):\", self.temp_start)\n print(\"End temperature (hot):\", self.temp_hot[self.stop_index])\n print(\"End temperature (cold)\", self.temp_cold[self.stop_index])\n print()\n print(\"Max temperature\", self.temp_max)\n print(\"Min temperature\", self.temp_min)\n print()\n print(\"Heat pump\")\n print(\"Energy input:\", self.work_inp)\n print(\"Q_hot\", self.qhot_pump)\n print(\"Q_cold\", self.qcold_pump)\n print(\"Q_cold + W\", self.qcold_pump + self.work_inp)\n print(\"E_lost\", self.qcold_pump + self.work_inp - self.qhot_pump)\n print(\"Coefficient of performance COP_hot\", self.qhot_pump / self.work_inp)\n print(\"Coefficient of performance COP_cold\", self.qcold_pump / self.work_inp)\n print(\"Ideal COP_hot with the setup\", self.qhot_pump/(self.qhot_pump-self.qcold_pump))\n print(\"Ideal COP_cold with the setup\", self.qcold_pump / (self.qhot_pump - self.qcold_pump))\n print(\"Ideal Carnot COP_hot\", (self.temp_max+273.15)/(self.temp_max-self.temp_min))\n print(\"Ideal Carnot COP_cold\", (self.temp_min+273.15)/(self.temp_max-self.temp_min))\n print(\"Efficiency fraction out of ideal Carnot cooler\", (self.qcold_pump / self.work_inp)/((self.temp_min+273.15)/(self.temp_max-self.temp_min)))\n\n if self.not_air:\n print(\"Heat transfer through insulator, hot side\", self.heat_loss_pump_hot)\n print(\"Heat transfer through insulator, cold side\", self.heat_loss_pump_cold)\n print(\"Estimated Q_hot with resistor\", self.qhot_resistor)\n else:\n print(\"Estimated Q_hot with resistor (=energy input)\", self.qhot_resistor)\n #\n # I think it should be defined for Q_hot too. Yep, TODO that\n # Also calculate heatloss due to conduction TODO remove these comments when ready\n # Todo implement resistive heater calculations\n print()\n print(\"Heat engine\")\n print(\"Energy generated:\", self.work_gen)\n print(\"Q_hot\", self.qhot_engine)\n print(\"Q_cold\", self.qcold_engine)\n print(\"Q_hot - Q_cold\", self.qhot_engine - self.qcold_engine)\n print(\"E_lost\", -self.qcold_engine - self.work_gen + self.qhot_engine)\n print(\"\\\"Heat transfer efficiency\\\" (%)\", self.work_gen / (self.qhot_engine - self.qcold_engine) * 100)\n print(\"Efficiency e\", self.work_gen / self.qhot_engine)\n print(\"Ideal efficiency with the setup\", 1 - (self.qcold_engine / self.qhot_engine))\n print(\"Ideal Carnot efficiency\", (self.temp_max-self.temp_min)/(self.temp_max+273.15))\n if self.not_air:\n print(\"Heat transfer through insulator, hot side\", self.heat_loss_gen_hot)\n print(\"Heat transfer through insulator, cold side\", self.heat_loss_gen_cold)\n print()\n print(\"Total efficiency of cycle\", self.work_gen/self.work_inp)\n # About the efficiency of peltier elements (#telok@IRCnet, 2016-07-27)\n # 19:10 < AgenttiX> Oletteko kokeilleet TECin ohjaamista Arduinolla? Toimisiko tämä kytkentä? http://garagelab.com/profiles/blogs/how-to-use-a-peltier-with-arduino\n # --\n # 20:21 <@hrst> Ei toimi. 
Peltieriä ei voi ohjata PWM:llä.\n # 20:22 <@hrst> Hyötysuhde on PWM:llä paska, mikä on ongelma koska se on muutenkin liian paska, ja sen lisäksi se hajoaa mekaaniseen värähtelyyn ennemmin tai myöhemmin.\n print(\"-----\\n\")",
"def print_usage(self):\n print('Total Usage: %f compute seconds' % self.box_usage)\n cost = self.box_usage * 0.14\n print('Approximate Cost: $%f' % cost)",
"def show_trunk(height=2):\n for k in range(height):\n print(\"|\".center(GROUND_WIDTH))",
"def spillAlle(self):\r\n for sang in self._sanger:\r\n print(f\"Nå spilles følgende: {sang}\")",
"def print(self) -> str:\n if self.is_unoccupied():\n return \"\"\n return str(\"%s-%s\" % (self.piece.color.name, self.piece.name.name))",
"def print_gpx_part_info(gpx_part, gpx_file, indentation=' '):\n length_2d = gpx_part.length_2d()\n length_3d = gpx_part.length_3d()\n print('%sLength 2D: %s' % (indentation, length_2d / 1000.))\n print('%sLength 3D: %s' % (indentation, length_3d / 1000.))\n\n moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx_part.get_moving_data()\n print('%sMoving time: %s' % (indentation, format_time(moving_time)))\n print('%sStopped time: %s' % (indentation, format_time(stopped_time)))\n #print('%sStopped distance: %sm' % stopped_distance)\n print('%sMax speed: %sm/s = %skm/h' % (indentation, max_speed, max_speed * 60. ** 2 / 1000. if max_speed else 0))\n\n uphill, downhill = gpx_part.get_uphill_downhill()\n print('%sTotal uphill: %sm' % (indentation, uphill))\n print('%sTotal downhill: %sm' % (indentation, downhill))\n\n start_time, end_time = gpx_part.get_time_bounds()\n print('%sStarted: %s' % (indentation, start_time))\n print('%sEnded: %s' % (indentation, end_time))\n\n points_no = len(list(gpx_part.walk(only_points=True)))\n print('%sPoints: %s' % (indentation, points_no))\n\n distances = []\n previous_point = None\n for point in gpx_part.walk(only_points=True):\n if previous_point:\n distance = point.distance_2d(previous_point)\n distances.append(distance)\n previous_point = point\n print('%sAvg distance between points: %sm' % (indentation, sum(distances) / len(list(gpx.walk()))))\n\n x, y1, y2 = histogram(gpx, max_speed)\n plot_gpx_histogram(x,y1,y2, gpx_file) \n\n print('')",
"def showp():\n def show1(i):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.offsetAz' % (i+1) ,qmax_)\n e=SAC.queryDouble('carma.Ovro%d.Drive.Point.offsetEl' % (i+1) ,qmax_)\n return (a,e)\n print ' ant dAz dEl'\n for i in range(6):\n (a,e) = show1(i)\n print ' 00%d %7.3f %7.3f' % (i+1,a,e)",
"def show( self):\n def symbol( i):\n return i<0 and (i==-2 and ' ' or '0') or chr(ord('a') + i)\n \n X, Y = np.max( self.board.positions, 0)\n # -2 to indicate outside board.\n display = np.zeros( (X+1,Y+1), dtype=int) - 2 \n for x, y in self.board.positions:\n display[x, y] = -1 # -1 to indicate unoccupied\n for p, i in self.occupation.items():\n x, y = self.board.positions[p]\n display[x, y] = i\n for x in xrange(X+1):\n s = ''.join( [ symbol( display[x, y]) for y in xrange(Y+1) ])\n print s",
"def display_ships(game: game_code.BattleshipGame, player_1: bool) -> None:\r\n for cell_number in range(0, 8):\r\n for cell_letter in range(0, 8):\r\n piece = game.get_board()[cell_number][cell_letter]\r\n if piece is not None:\r\n cell = index_to_algebraic((cell_number, cell_letter))\r\n display_ship(player_1, cell, piece.kind)",
"def printMoves(lift, improved_algorithm):\n # improved algorithm is drawn to the left of original\n if improved_algorithm == True:\n xcoord_buffer = 500\n else:\n xcoord_buffer = 0\n # update text with final stats\n w.create_text(100 + xcoord_buffer,10,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"Total Moves: \"+str(lift.total_moves))\n w.create_text(125 + xcoord_buffer,40,fill=\"darkblue\",font=\"Times 20 italic bold\",\n text=\"All passengers delivered.\")\n w.update()"
] |
[
"0.6759293",
"0.61066717",
"0.6097267",
"0.60054314",
"0.59553313",
"0.58824235",
"0.5686818",
"0.5643296",
"0.56205136",
"0.56184435",
"0.5578437",
"0.5568185",
"0.5563322",
"0.55528456",
"0.5536101",
"0.5521141",
"0.55188745",
"0.5515266",
"0.5513358",
"0.54816514",
"0.54807436",
"0.5463348",
"0.54557395",
"0.54434323",
"0.54338026",
"0.5424427",
"0.5416321",
"0.5411065",
"0.54028744",
"0.5398557"
] |
0.7785846
|
0
|
Updates internal state given the results of a shot. Arguments: shot, a shot of the form LetterNumber; hit, True if the shot was a hit; sunk, the size of the sunk ship, if the shot sunk it.
|
def shotResult(self, shot, hit, sunk):
logging.debug("shot result: %s, hit: %d, sunk: %d" % (shot, hit, sunk))
coordinates = self.mapToCoordinates(shot)
# If a ship was sunk, remove it from the fleet.
if sunk:
sunk = str(sunk)
assert(self.shipsAfloat[sunk] > 0)
self.shipsAfloat[sunk] -= 1
# Remove any counts that went to 0.
if self.shipsAfloat[sunk] == 0:
del(self.shipsAfloat[sunk])
self.enemyBoard[coordinates.x][coordinates.y] = BoardState.BULLSEYE
else:
if hit:
self.enemyBoard[coordinates.x][coordinates.y] = BoardState.HIT
else:
self.enemyBoard[coordinates.x][coordinates.y] = BoardState.MISS
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ship_hit(si_settings,screen,stats,sb,ship,aliens,bullets):\n if stats.ships_left > 0:\n # Decrement ships_left.\n stats.ships_left -= 1\n #update Scoreboard\n sb.prep_ships()\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n #empties aliens and bullets\n aliens.empty()\n bullets.empty()\n #makes new aliens and centers ship\n create_fleet(si_settings,screen,ship,aliens)\n ship.center_ship()\n #stop\n sleep(0.5)",
"def shotResult(self, shot, hit, sunk):\r\n ShotSelector.shotResult(self, shot, hit, sunk)\r\n coordinates = self.mapToCoordinates(shot)\r\n if sunk:\r\n self.shipsToSink.append(SinkingShip(coordinates, sunk))\r\n self.sinkShips()\r\n self.printShipsAfloat()\r\n self.printShipsToSink()",
"def ship_hit(si_settings, screen, stats, sb, ship, aliens, bullets, alienBullets, images):\r\n if stats.ships_left > 0:\r\n # Decrement ships_left.\r\n stats.ships_left -= 1\r\n\r\n # Animate the ship explosion\r\n ship_explosion(si_settings, screen, ship)\r\n\r\n # Update scoreboard.\r\n sb.prep_ships()\r\n\r\n # Empty the list of aliens and bullets.\r\n aliens.empty()\r\n bullets.empty()\r\n alienBullets.empty()\r\n\r\n # Create a new fleet and center the ship.\r\n create_fleet(si_settings, screen, ship, aliens, images)\r\n ship.center_ship()\r\n\r\n # Pause.\r\n sleep(0.5)\r\n else:\r\n stats.game_active = False\r\n pygame.mouse.set_visible(True)",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n if stats.ships_left > 0:\n #decrement the value of ships_left\n stats.ships_left -= 1\n #update scoreboard\n sb.prep_ships()\n #when hit remove bullets and aliens from screen\n aliens.empty()\n bullets.empty()\n #create a new fleet with ship at centre\n create_fleet(ai_settings, screen,ship, aliens)\n ship.center_ship()\n #pause for a sec to collect defeat\n sleep(1.0)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n if stats.ships_left > 0:\n # Decrement ships left.\n stats.ships_left -= 1\n\n # Update scoreboard.\n sb.prep_ships()\n\n # Empty the list of aliens and bullets.\n aliens.empty()\n bullets.empty()\n\n # Create new fleet.\n create_fleet(ai_settings, screen, ship, aliens)\n\n # Center the ship.\n ship.center_ship()\n\n # Pause for a while.\n sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n\tif stats.ships_left > 0:\n\t\t#Decrement ships_left\n\t\tstats.ships_left -= 1\n\t\t\n\t\t#Update scoreboard\n\t\tsb.prep_ships()\n\t\t\n\t\t#Empty the list of aliens and bullets\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\t\n\t\t#Create a new fleet and center the ship\n\t\tcreate_fleet(ai_settings, screen, ship, aliens)\n\t\tship.center_ship()\n\t\t\n\t\t#Pause\n\t\tsleep(0.5)\n\n\telse:\n\t\tstats.game_active = False \n\t\tpygame.mouse.set_visible(True)",
"def process_shot(self):\n if self.has_active_ship():\n self.mark = constants.HIT_SHIP_MARK\n self.hit_count += 1\n if self.hit_count == self.ship.power:\n self.mark = constants.DEAD_SHIP_MARK\n return constants.KILL\n else:\n return constants.HIT\n elif not self.occupied or self.mark == constants.MISS_HIT_MARK:\n self.mark = constants.MISS_HIT_MARK\n return constants.MISS",
"def bomb(self, index):\n\n coords = self.canvas.coords(self.squares[index])\n x, y = coords[0] + 10, coords[1] + 10\n tag = self.hit[index]\n\n # Count moves for player (used for scoring)\n if self.players.winCondition == 1:\n self.players.moves[self.playerNumber][0] += 1\n\n # Hit\n if tag != 0:\n self.tracker[tag] -= 1\n\n # Count moves for player (used in scoring)\n if self.players.winCondition == 1:\n self.players.moves[self.playerNumber].append(\\\n self.players.moves[self.playerNumber][0])\n self.players.moves[self.playerNumber][0] = 0\n\n # Ship was sunk\n if self.tracker[tag] == 0:\n text = []\n tagname = 'tag%s' % tag\n\n # Bonus points equal to the size of ship\n # awarded for sinking entire ship\n if self.players.winCondition == 0:\n self.players.score[self.playerNumber] += \\\n self.counter_copy[tag]\n\n # Show bombed location with black & orange flashing bar\n for i in range(5):\n text.append(self.canvas.create_text(\\\n x, y, text='O', fill='red'))\n self.canvas.addtag_withtag('text', text[-1])\n self.canvas.tag_raise(tagname, 'square')\n for i in range(3): # Flashing bar\n self.canvas.itemconfig(tagname, {'fill': 'black'})\n self.canvas.update()\n self.myframe.after(100)\n self.canvas.itemconfig(tagname, {'fill': 'orange'})\n self.canvas.update()\n self.myframe.after(100)\n\n self.hit[index] = 5\n self.players.message[not self.playerNumber] = \\\n '%s,\\nYour ship of size %s was sunk by enemy' % \\\n (self.players.usernames[not self.playerNumber], \\\n self.counter_copy[tag])\n self.players.endOfTurn(self.tracker)\n return\n\n # Hit, but not sunk. Player gets only 1 point\n if self.players.winCondition == 0:\n self.players.score[self.playerNumber] += 1\n\n # Show hit location with flashing black & red circle\n text = []\n for i in range(3):\n del text[:]\n for i in range(5): # flash black circle\n text.append(self.canvas.create_text(\\\n x, y, text='O', fill='black'))\n self.canvas.addtag_withtag('text', text[-1])\n self.canvas.update()\n self.myframe.after(100)\n del text[:]\n for i in range(5): # flash red circle\n text.append(self.canvas.create_text(\\\n x, y, text='O', fill='red'))\n self.canvas.addtag_withtag('text', text[-1])\n self.canvas.update()\n self.myframe.after(100)\n\n # Complete miss. Draw 'X'\n else:\n for i in range(5):\n text = self.canvas.create_text(x, y, text='X', fill='yellow')\n self.canvas.addtag_withtag('text', text)\n self.canvas.update()\n self.myframe.after(250)\n self.hit[index] = 5\n self.players.endOfTurn(self.tracker)",
"def ship_hit(ai_settings, stats, screen, ship, aliens, bullets):\n\tif stats.ship_left > 1:\n\t\tstats.ship_left -= 1\n\t\n\t\t# Empty aliens and bullets\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\n\t\t# Restore the initial screen\n\t\tcreate_fleet(ai_settings, screen, aliens)\n\t\tship.center_ship()\n\t\n\t\t# Pause\n\t\tsleep(1.0)\n\telse:\n\t\tstats.game_active = False",
"def ship_size(data, cell):\n if type(data) != dict:\n print('Wrong argument data')\n return None\n if type(cell) != tuple:\n print(\"Second argument must be a tuple\")\n return None\n if type(cell[0]) != str:\n print(\"First element of the second argument must be a str - A..J\")\n return None\n if type(cell[1]) != int:\n print(\"Second element of the second argument must be a number - 1..10\")\n return None\n if not has_ship(data, cell):\n return 0\n x = ord(cell[0].upper()) - 64\n y = cell[1]\n if x < 1 or x > 10:\n print('Wrong coordinate. Must be from A to J.')\n return None\n if y < 1 or y > 10:\n print('Wrong coordinate. Must be from 1 to 10.')\n return None\n size = 1\n coords = {(x, y)}\n if data[(x+1, y)] == 'damaged' or data[(x+1, y)] or data[(x-1, y)] == 'damaged' or data[(x-1, y)]:\n start = x\n while(data[(start-1, y)] == 'damaged' or data[(start-1, y)]):\n coords = coords | {(start-1, y)}\n size += 1\n start -= 1\n if start < 2:\n break\n start = x\n while(data[(start+1, y)] == 'damaged' or data[(start+1, y)]):\n coords = coords | {(start + 1, y)}\n size += 1\n start += 1\n if start > 10:\n break\n elif data[(x, y+1)] == 'damaged' or data[(x, y+1)] or data[(x, y-1)] == 'damaged' or data[(x, y-1)]:\n start = y\n while(data[(x, start-1)] == 'damaged' or data[(x, start-1)]):\n coords = coords | {(x, start - 1)}\n size += 1\n start -= 1\n if start < 1:\n break\n start = y\n while(data[(x, start+1)] == 'damaged' or data[(x, start+1)]):\n coords = coords | {(x, start + 1)}\n size += 1\n start += 1\n if start > 10:\n break\n return (size, coords)",
"async def play_shotgun(game_state) -> None:\n big_inside, lesser_counter = count_zombies(game_state)\n if big_inside and lesser_counter == 0:\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter <= 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN)\n elif lesser_counter > 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n message = 'What survivors should do [0/1]?\\n[0]: kill big zombie\\n' \\\n f'[1]: kill up to two lesser zombies ({lesser_counter} inside)\\n>'\n action = await get_action(game_state, message, ['0', '1'])\n if action == '0':\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter == 1:\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)",
"def play_a_shot(self, req):\n game = models.BattleShip.getByUrlKey(req.url_key)\n return game.shoot(str(req.player), (str(req.y) + str(req.x - 1)))",
"def calculate_shot(self, player_ships: list):\n board = Board(self.__size)\n prob_board = Board(self.__size)\n\n for move in self.__moves:\n x, y = move[1], move[2]\n board.shoot(x, y)\n prob_board.board[x][y] = -1000\n\n if move[0] == ShotResult.HIT:\n if any(ship.sunk and [anything, x, y] in ship.pieces for ship in player_ships):\n # part of a sunken ship; no need to increase neighbours probability\n continue\n\n for (i, j) in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n try: # easier to ask for forgiveness that permission :d\n if (ShotResult.HIT, x - i, y - j) in self.__moves: # opposite neighbour\n prob_board.board[x + i][y + j] += self.offset\n prob_board.board[x + i][y + j] += self.offset\n except IndexError:\n pass\n\n final_x, final_y = 0, 0\n max_prob = -1\n for s in player_ships:\n if not s.sunk: # the only time we use unsunken ships; we use just their size\n for i in range(self.__size):\n for j in range(self.__size):\n for o in range(0, 2): # for every (x, y, orientation) possible\n try:\n board.check(Ship(s.type, o, i, j))\n for offset in range(s.size):\n x, y = i - offset * o, j + offset * (not o)\n prob_board.board[x][y] += 1 # increase the probability of each piece\n except IllegalMove:\n pass\n\n for i in range(self.__size):\n for j in range(self.__size):\n if prob_board.board[i][j] > max_prob:\n final_x, final_y = i, j\n max_prob = prob_board.board[i][j]\n elif prob_board.board[i][j] == max_prob:\n if randint(0, 10) < 5: # random aspect to the ai, harder to predict\n final_x, final_y = i, j\n return final_x, final_y",
"def run_shoe_rack_manipulation(self, debug=False, push_in_distance=0.00):\n\n print(\"\\n\\n--- Running Shoe Manipulation-------\\n\\n\")\n\n # self.taskRunner.callOnMain(self._poser_visualizer.visualize_result)\n\n if not self.check_category_goal_estimation_succeeded():\n return False\n\n # check that we really are doing mug\n category_manipulation_type = self.state.cache['category_manipulation_goal']['type']\n assert category_manipulation_type == CategoryManipulationType.SHOE_ON_RACK\n\n\n speed = self.graspingParams['speed']['fast']\n self.moveHome(speed=speed)\n\n result = self.state.cache['category_manipulation_goal']['result']\n T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)\n T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)\n\n grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)\n grasp_data.gripper.params[\"hand_inner_diameter\"] = result.gripper_width\n grasp_data.gripper.params[\"hand_inner_diameter\"] = 0.07\n self.state.grasp_data = grasp_data\n\n # rotate the grasp to align with nominal\n params = self.getParamsForCurrentLocation()\n grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])\n grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)\n\n\n\n\n def vis_function():\n vis.updateFrame(T_W_fingertip_vtk, \"gripper fingertip frame\", scale=0.15, parent=self._vis_container)\n\n vis.updateFrame(grasp_data.grasp_frame, \"grasp frame\", scale=0.15, parent=self._vis_container)\n\n self.visualize_grasp(grasp_data)\n\n self.taskRunner.callOnMain(vis_function)\n\n # execute the grasp\n force_threshold_magnitude = 30\n object_in_gripper = self.execute_grasp(grasp_data, close_gripper=True, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude, push_in_distance=0.04, ee_speed_m_s=0.1)\n\n\n if not object_in_gripper:\n print(\"grasp failed, returning\")\n return False\n\n print \"object_in_gripper:\", object_in_gripper\n\n T_goal_obs = self.state.cache['category_manipulation_goal'][\"T_goal_obs\"]\n T_W_G = self.state.cache['gripper_frame_at_grasp']\n\n\n\n pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position\n pickup_speed = self.graspingParams['speed']['pickup']\n\n if not object_in_gripper:\n # open the gripper and back away\n self.gripperDriver.send_open_gripper_set_distance_from_current()\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n return False\n\n # pickup the object\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n\n # move home\n self.moveHome()\n\n # move to approach pose\n speed = self.graspingParams['speed']['fast']\n q_approach = np.array(self._stored_poses_director[\"left_table\"][\"shoe_approach\"])\n self.robotService.moveToJointPosition(q_approach, maxJointDegreesPerSecond=speed)\n\n\n # compute some poses\n T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix\n T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)\n object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)\n object_manip.compute_transforms()\n T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose\n\n T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)\n T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)\n object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)\n object_manip_approach.compute_transforms()\n T_W_Gn_approach_vtk = 
object_manip_approach.T_W_Gn\n\n # move this down by push_in_distance\n\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_approach_vtk)\n T_W_Gn_approach_vtk = transformUtils.transformFromPose(pos, quat)\n\n\n # now convert these to ee poses for running IK\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_vtk)\n pos[2] -= push_in_distance\n T_W_Gn_vtk = transformUtils.transformFromPose(pos, quat)\n T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)\n T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)\n\n T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)\n T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)\n\n\n # place the object\n force_threshold_magnitude = 50 # shoes are heavy\n q_nom = np.array(self._stored_poses_director[\"Grasping\"][\"above_table_pre_grasp\"])\n q_nom = np.array(self._stored_poses_director[\"left_table\"][\"above_table_pre_grasp\"])\n code =self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude)\n\n print(\"\\n\\n--- Finished Shoe Manipulation-------\\n\\n\")\n\n self._shoe_manipulation_counter += 1\n\n return code",
"def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk",
"def _ship_hit (self):\n\n\t\tself.stats.ship_left -=1\n\n\t\t\"\"\"get rid of remaining bullets and ships\"\"\"\n\t\tself.aliens.empty()\n\t\tself.bullets.empty()\n\n\t\t#Create a new fleet\n\n\t\tself._create_fleet()\n\t\tself.ship.center_ship()\n\n\t\t#pause\n\t\tsleep (0.5)",
"def shot(self):\n\n SHOTS_ROWS = (\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\")\n SHOTS_COLS = (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\")\n\n shot_msg = sys.stdin.readline()[:-1]\n if len(shot_msg) and shot_msg[0] in SHOTS_ROWS and shot_msg[1:] in SHOTS_COLS:\n self.last_shot = shot_msg\n self.client.send((\"S\" + shot_msg).encode(\"utf-8\")) # Send shot\n sys.stdout.flush()\n clear()",
"def hit(self):\n assert not self.damaged\n self.damaged = True\n self.game_piece.hit()",
"def actions(self, ship: SpaceShip, input_data: Dict[str, Tuple]) -> None:\n # ship.turn_rate = 180.0\n ship.thrust = ship.thrust_range[1]\n ship.shoot()",
"def updateShotInfo(*args):\n shot = cmds.textScrollList(widgets[\"shotListTSL\"], q=True, si=True)[0]\n\n #clear all text fields\n clearFields()\n\n pi.currentShotFolder = cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot))\n pi.currentVariant = \"\" \n######---------reset the pi variables for the shot stuff\n\n lists = [\"anmVariationsTSL\", \"lgtVariationsTSL\", \"fxVariationsTSL\"]\n types = [\"anm\", \"lgt\", \"fx\"]\n\n #loop through types of files in shot - anm, lgt, fx\n for x in range(3):\n shotTypeFolder = \"{0}/{1}\".format(pi.currentShotFolder, types[x])\n #clear the list\n cmds.textScrollList(widgets[lists[x]], e=True, ra=True)\n cmds.image(widgets[\"shotInfoPic\"], e=True, image = \"{0}/defaultAssetImage.jpg\".format(pi.images))\n vars = cFuncs.getShotVariantList(shotTypeFolder)\n if vars:\n for var in vars:\n cmds.textScrollList(widgets[lists[x]], e=True, a=var, sc=partial(updateVariantInfo, var, shotTypeFolder))",
"def Shuriken(self):\n\t\tprint(self.name.title() + \" is now shotting.\")",
"def ship_hit(ai_settings, stats, screen, ship, boss, bullets,boss_bullets):\n if stats.ships_left > 1:\t\n # Decrement ships_left\n stats.ships_left -= 1\n # Empty the list of bullets\n bullets.empty()\n boss_bullets.empty()\n #center the ship.\n ship.center_ship()\n # Pause.\n #sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def update_shanten(self):\n\n player_hand = self.concealed_part\n self.shanten_std = count_shanten_std(player_hand)\n if self.is_concealed:\n self.shanten_7 = count_shanten_seven_pairs(player_hand)\n self.shanten_13 = count_shanten_13_orphans(player_hand)\n else:\n self.shanten_7 = None\n self.shanten_13 = None",
"def update_screen(ai_settings, screen, ship):",
"def _ship_hit(self):\n\n if self.stats.ships_left > 0:\n #Decrement ships\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n #Get rid of remaining aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Recenter the ship\n self.ship.center_ship\n\n # pause\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)",
"def ship_bullet_hits(ai, var, screen, ship, enemies, shots, blasters, shockers, items, hub):\r\n\tfor shot in shots.copy():\r\n\t\tif pygame.sprite.spritecollideany(shot, enemies):\r\n\t\t\tfor enemy in enemies.copy():\r\n\t\t\t\tif pygame.sprite.spritecollideany(enemy, shots):\r\n\t\t\t\t\tenemy.hits += 1\r\n\t\t\t\t\tif enemy.hits >= enemy.hp:\r\n\t\t\t\t\t\tif not enemy.dead:\r\n\t\t\t\t\t\t\titem_chance(ai, var, screen, enemy, items, hub)\r\n\t\t\t\t\t\t\tenemy.dead = 1\r\n\t\t\t\t\t\t\tenemy.dead_time = pygame.time.get_ticks()\r\n\t\t\t\t\t\t\tenemy_death(ai, var, screen, enemy, shockers)\r\n\t\t\tshot.remove(shots)\r\n\t\t\t\r\n\tfor blast in blasters.copy():\r\n\t\tif pygame.sprite.spritecollideany(blast, enemies):\r\n\t\t\tfor enemy in enemies.copy():\r\n\t\t\t\tif pygame.sprite.spritecollideany(enemy, blasters):\r\n\t\t\t\t\tenemy.hits += 10\r\n\t\t\t\t\tif enemy.hits >= enemy.hp:\r\n\t\t\t\t\t\tif not enemy.boomed:\r\n\t\t\t\t\t\t\tenemy.boomed = 1\r\n\t\t\t\t\t\t\tenemy.t_boomed = pygame.time.get_ticks()\r\n\t\t\t\t\t\t\titem_chance(ai, var, screen, enemy, items, hub)\r\n\t\t\t\t\t\t\tenemy.dead = 1\r\n\t\t\t\t\t\t\tenemy.dead_time = pygame.time.get_ticks()\r\n\t\t\t\t\t\t\tenemy_death(ai, var, screen, enemy, shockers)",
"def HellFire_ShotGuns(self):\n\t\tprint(self.name.title() + \" is now shotting.\")",
"def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Remove remianing aliens & bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Create new fleet and ship at start location\n self._create_fleet()\n self.ship.center_ship()\n\n # pause\n sleep(0.5)\n else:\n self.stats.game_active = False \n pygame.mouse.set_visible(True)",
"def update(self):\n head = (\n (self.snake[0][0] + self.xv * self.ac),\n (self.snake[0][1] + self.yv * self.ac),\n )\n self.snake.insert(0, head)\n\n if head == self.apple:\n self.score += 10\n self.apple = None\n while not self.apple:\n xy = random.choice(MAP)\n self.apple = xy if xy not in self.snake else None\n else:\n self.snake.pop()\n\n if (\n head[0] < 0\n or head[0] >= WIN_W\n or head[1] < BLOCK_H\n or head[1] >= WIN_H\n ):\n self.alive = False\n\n if head in self.snake[1:]:\n self.alive = False\n\n if self.fps % 100 == 0:\n self.fps += 5",
"def shots_per_game(self, shots_per_game):\n\n self._shots_per_game = shots_per_game"
] |
[
"0.61077094",
"0.6027895",
"0.59807974",
"0.5970617",
"0.5844651",
"0.58269143",
"0.57184863",
"0.56714153",
"0.5588444",
"0.5556509",
"0.5482947",
"0.54752976",
"0.5434543",
"0.53959507",
"0.5395703",
"0.53421646",
"0.53262115",
"0.5318554",
"0.531314",
"0.5285579",
"0.5282902",
"0.52814746",
"0.5269872",
"0.52693075",
"0.5254263",
"0.5155386",
"0.51518774",
"0.514918",
"0.5135525",
"0.5126396"
] |
0.682624
|
0
|
Maps a shot to x and y coordinates.
|
def mapToCoordinates(self, shot):
toks = shot.split("-")
return Coordinates(ord(toks[0]) - ord("A"), int(toks[1]) - 1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mapToShot(self, coordinates):\r\n return chr(coordinates.x + ord(\"A\")) + \"-\" + str(coordinates.y + 1)",
"def map_loc_to_pixel((x, y), xc = 17.25, yc = 630, run = 17.25):\n xp, yp = xc + x*run, yc - y*run\n return (xp, yp)",
"def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]",
"def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)",
"def get_tile_location(self):\n if self.rect.x == 0:\n tile_x = 0\n elif self.rect.x % 32 == 0:\n tile_x = (self.rect.x / 32)\n else:\n tile_x = 0\n\n if self.rect.y == 0:\n tile_y = 0\n elif self.rect.y % 32 == 0:\n tile_y = (self.rect.y / 32)\n else:\n tile_y = 0\n\n return [tile_x, tile_y]",
"def get_shot_location():\n global LAST_SHOT\n available = [(x, y) for x in range(10) for y in range(10) if MY_SHOTS[x][y] is None]\n coords = random.choice(available)\n LAST_SHOT = coords\n return json.dumps(coords)",
"def get_location(self):\r\n return self.__x, self.__y",
"def tile_at(self, zoom, position):\n x, y = self.project_pixels(position, zoom)\n return (zoom, int(x/self.tileSize), int(y/self.tileSize))",
"def coordinates(self):",
"def to_pygame(point):\n return int(point.x), int(-point.y+500)",
"def pixel2coords(self, x, y):\n xoff, a, b, yoff, d, e = self.geotransform()\n\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return (xp, yp)",
"def set_position(self, x_position, y_position):\n \n # Checks if the tile position is within the x boundaries of the maze\n if x_position >= 0 and x_position <= 18: \n \n # The new self.rect.x is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 14 as a slight\n # offset to center the image more appropriately on the tile\n self.rect.x = (x_position*50) + 14\n \n # Checks if the tile position is within the y boundaries of the maze \n if y_position >= 0 and y_position <= 18:\n \n # The new self.rect.y is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.y = (y_position*50) + 3\n\n # Returns the updated self.rect.x and self.rect.y to the caller\n return self.rect.x, self.rect.y",
"def set_position(self, x_position, y_position): \n \n # Checks if the tile position is within the x boundaries of the maze\n if x_position >= 0 and x_position <= 18: \n \n # The new self.rect.x is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.x = (x_position*50) + 3\n \n # Checks if the tile position is within the y boundaries of the maze \n if y_position >= 0 and y_position <= 18:\n \n # The new self.rect.y is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.y = (y_position*50)\n \n # Returns the updated self.rect.x and self.rect.y to the caller\n return self.rect.x, self.rect.y",
"def pixel_to_position(self, pixel):\n x, y = pixel\n return y // 60, x // 60",
"def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def get(self):\n return (self.x,self.y);",
"def xy(event):\n return map(int, event.get_coords())",
"def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y",
"def __init__(self, data, (x,y)):\n\t\tGameImage.__init__(self, data)\n\t\tself.coords = (x,y)",
"def locate(x, y):\n position(x * 6, y)",
"def compute_coordinates(self):\n self._x, self._y = self.board.index_to_coordinates(self.index)",
"def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y",
"def coords2D(self):\n return (self.x, self.y)",
"def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y",
"def position(self):\n return self.x, self.y",
"def coord (i, j):\r\n return j, i",
"def shoot(self, point: Point) -> Tuple[bool, bool, ShipType]:\n\n # Shot off board\n if not self.point_in_board(point):\n raise InvalidShotException(f'{point} is not on the board')\n\n # Point has already been shot\n elif self.point_is_shot(point):\n raise InvalidShotException(f'{point} has already been shot')\n\n else:\n self.shot_locations.add(point)\n is_hit = True if point in self.all_ship_locations else False\n is_sunk = False\n ship_sunk = None\n\n if is_hit:\n # find out which one of the ships was shot\n for k, v in self.individual_ship_locations.items():\n # if v was the ship that was shot\n if point in v:\n # remove the point from v\n v.remove(point)\n if len(v) == 0:\n is_sunk = True\n ship_sunk = k\n\n return is_hit, is_sunk, ship_sunk",
"def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)",
"def get(self):\n return self.x, self.y"
] |
[
"0.7004169",
"0.6585423",
"0.6171817",
"0.6050263",
"0.6022578",
"0.59397626",
"0.59272003",
"0.59251267",
"0.5873501",
"0.58347553",
"0.5832571",
"0.58019704",
"0.57846975",
"0.5765359",
"0.5759559",
"0.57559067",
"0.5753961",
"0.57537895",
"0.5746078",
"0.57412815",
"0.57404935",
"0.5739591",
"0.57274425",
"0.5722359",
"0.5698932",
"0.5695952",
"0.5682817",
"0.568251",
"0.56574553",
"0.5656612"
] |
0.76549584
|
0
|
Maps x and y coordinates to a shot.
|
def mapToShot(self, coordinates):
return chr(coordinates.x + ord("A")) + "-" + str(coordinates.y + 1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mapToCoordinates(self, shot):\r\n toks = shot.split(\"-\")\r\n return Coordinates(ord(toks[0]) - ord(\"A\"), int(toks[1]) - 1)",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def map_loc_to_pixel((x, y), xc = 17.25, yc = 630, run = 17.25):\n xp, yp = xc + x*run, yc - y*run\n return (xp, yp)",
"def shoot(self):\n shots = Shooting(self.rect.centerx, self.rect.bottom)\n # Adding the shots to sprite lists created\n all_sprites_list.add(shots)\n shooting_list.add(shots)",
"def to_world(self, x, y, **kwargs):",
"def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y",
"async def fire(self,\n start_x: int,\n start_y: int,\n x_speed: Optional[Union[float, int]] = 0,\n y_speed: Optional[Union[float, int]] = -1) -> NoReturn:\n\n x, y = start_x, start_y\n self._canvas.addstr(round(y), round(x), '*')\n await sleep(0)\n\n self._canvas.addstr(round(y), round(x), 'O')\n await sleep(0)\n self._canvas.addstr(round(y), round(x), ' ')\n\n x += x_speed\n y += y_speed\n\n symbol = '-' if x_speed else '|'\n\n max_y, max_x = get_canvas_size(self._canvas)\n curses.beep()\n fire_shot_object = MapObject(Frame(symbol), x, y)\n while 1 < y < max_y and 1 < x < max_x:\n self._canvas.addstr(round(y), round(x), symbol)\n await sleep(0)\n self._canvas.addstr(round(y), round(x), ' ')\n fire_shot_object.change_coordinates(x + x_speed, y + y_speed)\n for obj_id, obj in self._dynamic_objects.items():\n if obj_id.startswith('rubbish') and obj & fire_shot_object:\n draw_frame(self._canvas, obj.x, obj.y, obj.frame,\n negative=True)\n self._dynamic_objects.pop(obj_id)\n await self.explode(obj.x, obj.y)\n return\n\n y += y_speed\n x += x_speed",
"def shoot(self, point: Point) -> Tuple[bool, bool, ShipType]:\n\n # Shot off board\n if not self.point_in_board(point):\n raise InvalidShotException(f'{point} is not on the board')\n\n # Point has already been shot\n elif self.point_is_shot(point):\n raise InvalidShotException(f'{point} has already been shot')\n\n else:\n self.shot_locations.add(point)\n is_hit = True if point in self.all_ship_locations else False\n is_sunk = False\n ship_sunk = None\n\n if is_hit:\n # find out which one of the ships was shot\n for k, v in self.individual_ship_locations.items():\n # if v was the ship that was shot\n if point in v:\n # remove the point from v\n v.remove(point)\n if len(v) == 0:\n is_sunk = True\n ship_sunk = k\n\n return is_hit, is_sunk, ship_sunk",
"def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)",
"def __init__(self, data, (x,y)):\n\t\tGameImage.__init__(self, data)\n\t\tself.coords = (x,y)",
"def get_shot_location():\n global LAST_SHOT\n available = [(x, y) for x in range(10) for y in range(10) if MY_SHOTS[x][y] is None]\n coords = random.choice(available)\n LAST_SHOT = coords\n return json.dumps(coords)",
"def shoot(self, pos_to_shoot):\n return [SHOOT, pos_to_shoot]",
"def ai_shoot(self, gk, goal_x):\n\n angles = {\n 1: { # For team 1\n 'SHOOT_E': math.pi/4,\n 'SHOOT_D': 0,\n 'SHOOT_C': -math.pi/4,\n },\n 2: { # For team 2\n 'SHOOT_Q': math.pi*3/4,\n 'SHOOT_A': math.pi,\n 'SHOOT_Z': -math.pi*5/4,\n },\n }\n\n self_pos = P(self.pos.x, H-self.pos.y)\n gk_pos = P(gk.pos.x, H-gk.pos.y)\n\n possible_shots = []\n for k, v in angles[self.team_id].items():\n line = [ # Equation of line as A*x +B*y + C = 0\n math.sin(v), # x coeff\n -math.cos(v), # y coeff\n self_pos.y*math.cos(v) - self_pos.x*math.sin(v), # constant\n ]\n intersection_pt = -(line[2] + line[0]*goal_x)/line[1]\n if GOAL_POS[0]*H < intersection_pt < GOAL_POS[1]*H:\n possible_shots.append((-self.dist_to_line(line, gk_pos), k))\n\n if possible_shots:\n shot = sorted(possible_shots)[0][1]\n else:\n shot = 'NOTHING'\n\n return shot",
"def set_position(self, x_position, y_position):\n \n # Checks if the tile position is within the x boundaries of the maze\n if x_position >= 0 and x_position <= 18: \n \n # The new self.rect.x is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 14 as a slight\n # offset to center the image more appropriately on the tile\n self.rect.x = (x_position*50) + 14\n \n # Checks if the tile position is within the y boundaries of the maze \n if y_position >= 0 and y_position <= 18:\n \n # The new self.rect.y is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.y = (y_position*50) + 3\n\n # Returns the updated self.rect.x and self.rect.y to the caller\n return self.rect.x, self.rect.y",
"def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y",
"def draw_point(x, y):\n map_image = Image.open('map.png')\n map_image.putpixel((x, y), (0, 255, 0))\n map_image.save('map.png')\n map_image.show('map.png')",
"def to_pygame(point):\n return int(point.x), int(-point.y+500)",
"def set_position(self, x_position, y_position): \n \n # Checks if the tile position is within the x boundaries of the maze\n if x_position >= 0 and x_position <= 18: \n \n # The new self.rect.x is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.x = (x_position*50) + 3\n \n # Checks if the tile position is within the y boundaries of the maze \n if y_position >= 0 and y_position <= 18:\n \n # The new self.rect.y is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.y = (y_position*50)\n \n # Returns the updated self.rect.x and self.rect.y to the caller\n return self.rect.x, self.rect.y",
"def __init__(self, x, y, width, height):\n self.x1 = x\n self.y1 = y\n self.x2 = x + width\n self.y2 = y + height",
"def monster(x, y, i):\n screen.blit(monsterImg[i], (x, y))",
"def shoot_ray(self, entry_x, entry_y):\r\n\r\n # check to make sure entry_x and entry_y are valid\r\n if (entry_x in [0, 9] or entry_y in [0, 9]) and \\\r\n self._board.get_board_item(entry_x, entry_y) != \"o\":\r\n\r\n exit_tup = self._board.find_exit(entry_x, entry_y)\r\n # returned 0 if hit\r\n if exit_tup == 0:\r\n # decrement entry only if not visited\r\n marker = self.get_hit_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n self._stats.dec_player_score(points)\r\n return \"Hit\"\r\n elif exit_tup == 1:\r\n # decrement entry only if not visited\r\n marker = self.get_reflect_marker()\r\n circle_tuple = self.calculate_entry_exit(entry_y, entry_x)\r\n marker.update_center(circle_tuple)\r\n points = self._player.add_entry_exit((entry_x, entry_y), marker,\r\n (entry_x, entry_y))\r\n\r\n self._stats.dec_player_score(points)\r\n\r\n return \"reflect\"\r\n else:\r\n # decrement both entry and exit if not already visited\r\n marker = self.get_color_marker()\r\n exit_x, exit_y = exit_tup\r\n circle_entry = self.calculate_entry_exit(entry_y, entry_x)\r\n circle_exit = self.calculate_entry_exit(exit_y, exit_x)\r\n marker.update_center(circle_entry, circle_exit)\r\n points = self._player.add_entry_exit((entry_x, entry_y),\r\n marker, exit_tup)\r\n\r\n self._stats.dec_player_score(points)\r\n return exit_tup\r\n else:\r\n # returns false if the shoot_ray point is invalid\r\n return \"Bad shot\"",
"def shot(self, coord: Coordinate):\n if coord not in self._coords:\n raise InvalidCoordinateException(f\"given coordinate is invalid\")\n self._damaged_cells.add(coord)",
"def set_2d_location(self, x, y):\r\n self.unif[42:44] = [x, y]",
"def _place_objs(self, (screen_width, screen_height)):\n for x_pos in xrange(0, screen_width, self.itter_width):\n self.objects.put(Grass((x_pos, 0), self.width, self.height))",
"def shoot(uid, secret, enemy_uid, x, y):\n try:\n SERVER.validate_player(uid, secret)\n result = SERVER.shoot(uid, enemy_uid, x, y)\n except RoboBattleshipException as e:\n # if battle is over - archive it\n if e.code == 304:\n SERVER.archive_battle(uid, enemy_uid)\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to shoot at player '%s' at [%s,%s]\",\n enemy_uid, x, y)\n return JsonResponse.error(101)\n\n return JsonResponse.success({'result': result})",
"def tile_at(self, zoom, position):\n x, y = self.project_pixels(position, zoom)\n return (zoom, int(x/self.tileSize), int(y/self.tileSize))",
"def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)",
"def process_shot(self, coordinate):\n row_num = ord(coordinate[:1]) - ord('A')\n column_num = int(coordinate[1:]) - 1\n target_cell = self.grid[row_num][column_num]\n result = target_cell.process_shot()\n if result == constants.KILL:\n self.destroyed_ships += 1\n return result",
"def play_a_shot(self, req):\n game = models.BattleShip.getByUrlKey(req.url_key)\n return game.shoot(str(req.player), (str(req.y) + str(req.x - 1)))",
"def set_item(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'X'"
] |
[
"0.68749666",
"0.6218412",
"0.6204216",
"0.6002892",
"0.59659797",
"0.59609425",
"0.593582",
"0.59223795",
"0.5911508",
"0.5891407",
"0.58793396",
"0.57502717",
"0.5737297",
"0.5729335",
"0.57282853",
"0.57242876",
"0.5707542",
"0.57012147",
"0.5698053",
"0.56627136",
"0.56547576",
"0.56465644",
"0.56026775",
"0.55975944",
"0.5597157",
"0.5593311",
"0.55800897",
"0.55774254",
"0.55697286",
"0.55631113"
] |
0.7167854
|
0
|
For debugging purposes, prints the ships that are sinking.
|
def printShipsToSink(self):
sb = []
for sinkingShip in self.shipsToSink:
shot = self.mapToShot(sinkingShip.bullseye)
sb.append(str(shot))
sb.append(":")
sb.append(str(sinkingShip.size))
sb.append(" ")
logging.debug("".join(sb))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sinkShips(self):\r\n while True:\r\n stillSinkingShips = False\r\n for i in range(len(self.shipsToSink) - 1, -1, -1):\r\n sunkShip, shipCoordinates = self.positionAndSinkShip(self.shipsToSink[i])\r\n if sunkShip:\r\n stillSinkingShips = True\r\n for coordinates in shipCoordinates:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.SUNK\r\n del(self.shipsToSink[i])\r\n if not stillSinkingShips:\r\n break",
"def printSlipSystems(self):\n # TODO: this should be moved to static method of the SlipSystem class\n for i, (ssGroup, colour) in enumerate(zip(self.slipSystems,\n self.slipTraceColours)):\n print('Plane {0}: {1}\\tColour: {2}'.format(\n i, ssGroup[0].slipPlaneLabel, colour\n ))\n for j, ss in enumerate(ssGroup):\n print(' Direction {0}: {1}'.format(j, ss.slipDirLabel))",
"def print_out():\n pass",
"def show_board(self):\n\n for s in self.board[1:-1]:\n print(''.join(x.symbol for x in s[1:-1]))",
"def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)",
"def main():\n dump(inventory(), fp=stdout, indent=4)",
"def shotResult(self, shot, hit, sunk):\r\n ShotSelector.shotResult(self, shot, hit, sunk)\r\n coordinates = self.mapToCoordinates(shot)\r\n if sunk:\r\n self.shipsToSink.append(SinkingShip(coordinates, sunk))\r\n self.sinkShips()\r\n self.printShipsAfloat()\r\n self.printShipsToSink()",
"def display_piles():\n global piles\n global num_piles\n y = 0\n for x in piles:\n print('pile '+str(y)+' = '+str(x))\n y+=1;",
"def show_current(self):\n for packet in self.station.genLoopPackets():\n print(packet)\n break",
"def print_boards(self, board):\n \n attacks_matrix, ships_matrix = board.get_matrixes(self.name)\n print(\"Attacks:\")\n self._print_board(attacks_matrix)\n\n print(\"Ocean:\")\n self._print_board(ships_matrix)",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))",
"def print_inventory(self):\n print(\"Backpack:\")\n # Loop for each item in the players inventory\n for item in self.inventory:\n print('* ' + str(item))\n # Assigns the best weapon\n best_weapon = self.most_powerful_weapon()\n # print statement telling the best weapon in inventory\n print(\"Your best weapon is your {}\".format(best_weapon))",
"def print(self):\n\n for domino in self.hand:\n print(domino)",
"def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))",
"def print_hand(self):\n\n for card in self.hand:\n card.printCard()",
"def prints(self):\r\n\r\n for i in range(len(self.heap_array)):\r\n print(self.heap_array[i])",
"def positionAndSinkShip(self, sinkingShip):\r\n directions = [Direction.North, Direction.South, Direction.East, Direction.West]\r\n sunkShip = False\r\n shipCoordinates = None\r\n for direction in directions:\r\n tSunkShip, tShipCoordinates = self.sinkShip(sinkingShip.bullsEye, sinkingShip.size, direction)\r\n if tSunkShip:\r\n if sunkShip:\r\n return False, None\r\n else:\r\n sunkShip = tSunkShip\r\n shipCoordinates = tShipCoordinates\r\n return sunkShip, shipCoordinates",
"def print_moves(self):\n print self._current_moves\n self._current_moves = \"\"",
"def dump(self, prefix = \" - \"):\n\t\tlines = [\n\t\t\t\"\"\n\t\t\t\"Host \",\n\t\t\t\"Light engine: state %d, flags 0x%x\" %\n\t\t\t\t(self.le_state, self.le_flags),\n\t\t\t\"Playback: state %d, flags 0x%x\" %\n\t\t\t\t(self.playback_state, self.playback_flags),\n\t\t\t\"Buffer: %d points\" %\n\t\t\t\t(self.fullness, ),\n\t\t\t\"Playback: %d kpps, %d points played\" %\n\t\t\t\t(self.point_rate, self.point_count),\n\t\t\t\"Source: %d, flags 0x%x\" %\n\t\t\t\t(self.source, self.source_flags)\n\t\t]\n\t\t'''\n\t\tif debug == 2:\n\t\t\tfor l in lines:\n\t\t\t\tprint prefix + l\n\t\t'''",
"def test_sink(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(2, ship.check_hit(self.hit))\n self.assertEqual(0, len(ship.location))",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"",
"def play(self):\r\n\r\n gh.report_legend()\r\n\r\n for ship in self.__ships:\r\n ship.set_orientation()\r\n\r\n ship_coords = [ship.coordinates() for ship in self.__ships]\r\n ship_coords = [i for lst in ship_coords for i in lst]\r\n\r\n print(gh.board_to_string(self.__board_size, [], {}, [], ship_coords))\r\n\r\n while self.__ships:\r\n self.__play_one_round()\r\n\r\n gh.report_gameover()",
"def sinkShip(self, bullsEye, size, direction):\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(bullsEye.x + direction.x, bullsEye.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(bullsEye)\r\n return sunkShip, shipCoordinates",
"def print_inventory(self):\r\n for item in self._inventory:\r\n print(item, '\\n')",
"def debug_to_console(self):\n vert = None\n horiz = None\n if self.grid.apple_is_up():\n vert = \"Up \"\n elif self.grid.apple_is_down():\n vert = \"Down\"\n else:\n vert = \"None\"\n if self.grid.apple_is_left():\n horiz = \"Left \"\n elif self.grid.apple_is_right():\n horiz = \"Right\"\n else:\n horiz = \"None \"\n print(\n \"Apple is: (\", vert, \",\", horiz,\n \")\\tProximity: \",\n str(round(self.grid.proximity_to_apple(), 2)), \"\\t[x, y]:\",\n self.grid.snake.head(),\n \" \\tUp: (\", str(round(self.grid.safe_cells_up(), 2)),\n \",\", str(round(self.grid.safe_cells_up_global(), 2)), \")\"\n \" \\tDown: (\", str(round(self.grid.safe_cells_down(), 2)),\n \",\", str(round(self.grid.safe_cells_down_global(), 2)), \")\"\n \" \\tLeft: (\", str(round(self.grid.safe_cells_left(), 2)),\n \",\", str(round(self.grid.safe_cells_left_global(), 2)), \")\"\n \" \\tRight: (\", str(round(self.grid.safe_cells_right(), 2)),\n \",\", str(round(self.grid.safe_cells_right_global(), 2)), \")\"\n )",
"def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')",
"def debug_test_planned_games(planned_games: [Game]):\n print(\"debug_test_game_class() START\")\n for pg in planned_games:\n print(pg.name + \" - \" + pg.genre + \" - setups: \" + str(pg.setup_amount))\n print(\"debug_test_game_class() END\")",
"def show_hand(self):\n for card in self.hand:\n print(card)",
"def print(self):\n # it would be nice just to add one point instead of printing all again from scratch\n stones_player_0 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == -1]\n stones_player_1 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == 1]\n plt.plot([0, self.size-1, 0, self.size-1], [0, 0, self.size-1, self.size-1], marker='x', ls='')\n plt.plot(*zip(*stones_player_0), marker='o', color='r', ls='')\n plt.plot(*zip(*stones_player_1), marker='o', color='b', ls='')\n\n plt.draw()\n plt.show(block=False)"
] |
[
"0.6868434",
"0.60091615",
"0.5819216",
"0.57831025",
"0.57612616",
"0.5760116",
"0.5730516",
"0.5694207",
"0.5685576",
"0.5679152",
"0.56756943",
"0.5630776",
"0.5624161",
"0.56204444",
"0.5612648",
"0.56079024",
"0.5590641",
"0.5578884",
"0.5547372",
"0.55457723",
"0.5534771",
"0.553473",
"0.5533447",
"0.5522042",
"0.55083156",
"0.5506241",
"0.55020565",
"0.5490658",
"0.5483758",
"0.5470093"
] |
0.8498444
|
0
|
Weights the board by placing all remaining ships in all possible positions. The more ways a ship can be placed over a particular set of coordinates, the higher the weight. Positions that overlay previous hits are given extra weight.
|
def weightBoard(self):
directions = (Direction.East, Direction.South)
for size, count in self.shipsAfloat.items():
size = int(size)
for i in range(self.boardDimensions):
for j in range(self.boardDimensions):
for direction in directions:
coordinates = Coordinates(i, j)
self.positionAndWeightShip(coordinates, size, count, direction)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def postion_fleet(self, ships_positions, board):\n for cell in ships_positions:\n row = ord(cell[:1]) - ord('A')\n col = int(cell[1:]) - 1\n for i in range(row, row + self.total_rows_req):\n for j in range(col, col + self.total_column_req):\n self.position_ship(\n board.grid[i][j],\n constants.FLEET_Q_CLASS,\n constants.Q_CLASS_HIT_POWER\n )",
"def postion_fleet(self, ships_positions, board):\n for cell in ships_positions:\n row = ord(cell[:1]) - ord('A')\n col = int(cell[1:]) - 1\n for i in range(row, row + self.total_rows_req):\n for j in range(col, col + self.total_column_req):\n self.position_ship(\n board.grid[i][j],\n constants.FLEET_P_CLASS,\n constants.P_CLASS_HIT_POWER\n )",
"def placeShips(self):\n\n self.ships = [] # Canvas co-ordinates for the ships\n self.shipText = [] # Text to be displayed besides each ship\n self.failedAttempts = []\n self.names = {2: 'BOAT', 3: 'SUB', 4: 'CRUISER', 5: 'CARRIER'}\n\n items = self.shipList.items()\n for k, v in items:\n for i in range(v): # for every ship v of size k\n attempts = 20\n success = False\n while not success and attempts > 0:\n success = True\n n = random.randrange(0, len(self.hit))\n shipRotation = random.randrange(0, 2)\n attempts -= 1\n\n # Check if ship fits horizontally\n if shipRotation != 0:\n for j in range(n, n + k):\n if (j >= len(self.hit)) or (j % self.boardsize \\\n < n % self.boardsize) or (self.hit[j] != 0):\n success = False\n break\n # Check if ship fits vertically\n else:\n for j in range(n, n + k * self.boardsize, \\\n self.boardsize):\n if (j >= len(self.hit)) or (self.hit[j] != 0):\n success = False\n break\n\n # Keep track of ships that failed to be placed\n if attempts == 0:\n self.failedAttempts.append(k)\n continue\n\n # Ships of custom sizes above 5 are named \"BATTLESHIP\"\n name = 'BATTLESHIP'\n if k in self.names:\n name = self.names[k]\n\n x = n % self.boardsize * 20 + 20\n y = (n / self.boardsize) * 20 + 40\n\n # Place ship horizontally\n if shipRotation != 0:\n for i in range(n, n + k):\n self.hit[i] = self.shipID\n self.ships.append(self.canvas.create_rectangle(\\\n x, y + 5, x + k * 20, y + 15, fill='orange', width=1))\n self.shipText.append(self.canvas.create_text(\\\n x + 20, y, text=name, font='Courier 6', fill='yellow'))\n\n # Place ship vertically\n else:\n for i in range(n, n + k * self.boardsize, self.boardsize):\n self.hit[i] = self.shipID\n self.ships.append(self.canvas.create_rectangle(\\\n x + 5, y, x + 15, y + k * 20, fill='orange', width=1))\n cname = \"\"\n for ch in name:\n cname += ch + '\\n'\n self.shipText.append(self.canvas.create_text(\\\n x, y + 20, text=cname, font='Courier 6', fill='yellow'))\n\n # Tag every placed ship with \"tagXXX\" where XXX is shipID\n # Will be used to identify which ship was bombed\n self.canvas.addtag_withtag('tag%s' % \\\n self.shipID, self.ships[-1])\n self.canvas.addtag_withtag('ship', self.ships[-1])\n self.tracker[self.shipID] = k\n self.shipID += 1\n\n # Announce any failures in placing ships\n # Game will exit after user is notified of this failure\n if self.failedAttempts:\n mssg = \"Oops, we failed to fit the \" \\\n \"following ships on this board:\\n\\n\"\n failCount = Counter(self.failedAttempts)\n for m, n in failCount.items():\n mssg += '%s ships of size %s\\n' % (n, m)\n showDialogBox(mssg + \"\\nUnfortunately, we \" \\\n \"cannot proceed with the game!\")\n showDialogBox(\"Goodbye!\")\n self.exitstatus = 1\n return\n\n # 'tracker' will be modified throughout the game, so keep a copy\n self.counter_copy = self.tracker.copy()\n self.players.tracker = self.tracker\n\n for i in self.ships:\n self.canvas.addtag_withtag('ship', i)\n for i in self.shipText:\n self.canvas.addtag_withtag('text', i)\n for i in range(self.shipID - 100):\n self.ships.append(None)\n self.shipText.append(None)\n\n if self.isComputer == 1:\n self.canvas.tag_lower('ship')\n self.canvas.tag_lower('text')\n self.canvas.tag_bind('square', '<Button-1>', self.fire)\n else:\n self.clickDone = Button(self.myframe, text='Done',\\\n command=self.clickDone)\n self.clickDone.place(x=1, y=1)",
"def place_ships(self):\n # An array that holds the ships to then be looped through\n length_of_ships = [6, 4, 3, 2]\n\n for ship_length in length_of_ships:\n while True:\n if self.user == 'computer':\n orientation = random.choice(['H', 'V'])\n row = random.randint(0, 9)\n column = random.randint(0, 9)\n if self.check_ship_fits(\n ship_length, row, column, orientation\n ):\n if self.collision_check(\n self.board, row, column, orientation, ship_length\n ) is False:\n if orientation == 'H':\n for i in range(column, column + ship_length):\n self.board[row][i] = SHIP\n else:\n for i in range(row, row + ship_length):\n self.board[i][column] = SHIP\n break\n else:\n if self.user == 'player':\n self.ship_prompt(ship_length)\n orientation, column, row = self.ship_input()\n if self.check_ship_fits(\n ship_length, row, column, orientation\n ):\n if self.collision_check(\n self.board,\n row,\n column,\n orientation,\n ship_length\n ) is False:\n if orientation == 'H':\n for i in range(\n column, column + ship_length\n ):\n self.board[row][i] = SHIP\n else:\n for i in range(row, row + ship_length):\n self.board[i][column] = SHIP\n print(' ')\n self.print_board()\n break",
"def calculate_shot(self, player_ships: list):\n board = Board(self.__size)\n prob_board = Board(self.__size)\n\n for move in self.__moves:\n x, y = move[1], move[2]\n board.shoot(x, y)\n prob_board.board[x][y] = -1000\n\n if move[0] == ShotResult.HIT:\n if any(ship.sunk and [anything, x, y] in ship.pieces for ship in player_ships):\n # part of a sunken ship; no need to increase neighbours probability\n continue\n\n for (i, j) in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n try: # easier to ask for forgiveness that permission :d\n if (ShotResult.HIT, x - i, y - j) in self.__moves: # opposite neighbour\n prob_board.board[x + i][y + j] += self.offset\n prob_board.board[x + i][y + j] += self.offset\n except IndexError:\n pass\n\n final_x, final_y = 0, 0\n max_prob = -1\n for s in player_ships:\n if not s.sunk: # the only time we use unsunken ships; we use just their size\n for i in range(self.__size):\n for j in range(self.__size):\n for o in range(0, 2): # for every (x, y, orientation) possible\n try:\n board.check(Ship(s.type, o, i, j))\n for offset in range(s.size):\n x, y = i - offset * o, j + offset * (not o)\n prob_board.board[x][y] += 1 # increase the probability of each piece\n except IllegalMove:\n pass\n\n for i in range(self.__size):\n for j in range(self.__size):\n if prob_board.board[i][j] > max_prob:\n final_x, final_y = i, j\n max_prob = prob_board.board[i][j]\n elif prob_board.board[i][j] == max_prob:\n if randint(0, 10) < 5: # random aspect to the ai, harder to predict\n final_x, final_y = i, j\n return final_x, final_y",
"def weightShipSearch(self, coordinates, size, weight, direction, hitWeight):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, hitWeight \r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, 0\r\n if self.enemyBoard[coordinates.x][coordinates.y] < BoardState.HIT:\r\n # This search is all for naught since we can't possibly have a ship at this position.\r\n return False, 0\r\n if self.enemyBoard[coordinates.x][coordinates.y] == BoardState.HIT:\r\n # Weigh searches with hits already in them over searches without them. This is to \r\n # direct the shot selection toward coordinates with hits already near them.\r\n hitWeight += 10\r\n # Move to the next set of coordinates on the board.\r\n result, hitWeight = self.weightShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y),\r\n size - 1, weight, direction, hitWeight)\r\n if result:\r\n # A entire ship can fit, weight the coordinate appropriately.\r\n if self.enemyBoard[coordinates.x][coordinates.y] >= BoardState.OPEN:\r\n self.enemyBoard[coordinates.x][coordinates.y] += (weight + hitWeight)\r\n return result, hitWeight",
"def add_ships(ship_screen):\n\n direction_choice = random.choice([\"H\", \"V\"]) # chooses the direction of the big ship\n x = random.randint(0, 4)\n y = random.randint(0, 4)\n ship_screen[x][y] = \"X\"\n if direction_choice == \"V\": # after selection of the first coordinate, chooses a direction\n # and then according to x or y coordinates puts the other 2 pieces of the ship\n if x == 0:\n ship_screen[x+1][y], ship_screen[x+2][y] = \"X\", \"X\"\n elif x == 4:\n ship_screen[x-1][y], ship_screen[x-2][y] = \"X\", \"X\"\n else:\n ship_screen[x+1][y], ship_screen[x-1][y] = \"X\", \"X\"\n elif direction_choice == \"H\":\n if y == 0:\n ship_screen[x][y+1], ship_screen[x][y+2] = \"X\", \"X\"\n elif y == 4:\n ship_screen[x][y-1], ship_screen[x][y-2] = \"X\", \"X\"\n else:\n ship_screen[x][y-1], ship_screen[x][y+1] = \"X\", \"X\"\n main_ship_coordinates = [[a, b] for a in range(5) for b in range(5) if ship_screen[a][b] == \"X\"]\n # returns ship coordinates\n banned_coordinates = [] # codes of between 29-34 finds the neighbour coordinates of big ship\n for d in main_ship_coordinates:\n neighbour_coordinates = [[d[0], d[1]+1], [d[0]+1, d[1]], [d[0]-1, d[1]], [d[0], d[1]-1],\n [d[0]+1, d[1]+1], [d[0]-1, d[1]-1], [d[0]+1, d[1]-1], [d[0]-1, d[1]+1]]\n for e in neighbour_coordinates:\n if e[0] in range(5) and e[1] in range(5) and e not in banned_coordinates:\n banned_coordinates.append(e)\n while True:\n i = random.randint(0, 4)\n j = random.randint(0, 4)\n if [i, j] in banned_coordinates:\n continue\n else:\n ship_screen[i][j] = \"O\"\n break\n while True:\n possible_coordinates = [[i+1, j], [i-1, j], [i, j-1], [i, j+1]]\n # selects second piece randomly from possible 4\n second_piece = random.choice(possible_coordinates)\n if second_piece[0] in range(5) and second_piece[1] in range(5) and second_piece not in banned_coordinates:\n ship_screen[second_piece[0]][second_piece[1]] = \"O\"\n break\n else:\n continue\n return ship_screen",
"def _move_all_ships(game_data):\n # make the moves of the ships\n for player in game_data['ships']:\n # deal only with the ships which are not abandonned\n if player != 0:\n for ship_name in game_data['ships'][player]:\n # move the ship\n _move_ship(player, ship_name, game_data)",
"def update_potential_moves(self):\n\n board = self.get_board()\n\n for row_index, row in enumerate(board):\n\n for column_index, column in enumerate(row):\n\n if column is not None:\n \n position = self.reverse_position(column_index, row_index)\n game_piece_object = self.get_game_piece_object_at_position(position)\n game_piece_object.set_potential_moves(self.generate_moves(position))",
"def island_loss_of_weight(self):\n for y in self.island_map:\n for cell in y:\n cell.loss_of_weight()",
"def prep_ships(self):\n\t\t\tself.ships = Group()\n\t\t\tfor ship_number in range(self.stats.ships_left):\n\t\t\t\tship = Ship(self.ai_settings, self.screen)\n\t\t\t\tship.rect.x = 10 + ship_number * ship.rect.width\n\t\t\t\tship.rect.y = 10\n\t\t\t\tself.ships.add(ship)",
"def set_ship(self):\n lst_ship = Ship().random_ship()\n small_ships = lst_ship[0]\n mid_ship = lst_ship[1]\n l_ship = lst_ship[2]\n xl_ship = lst_ship[3]\n for i in range(10):\n self.board.append(['0']*10)\n for i in range(10):\n self.board.append(['0']*10)\n for i in small_ships:\n self.board[i[0]][i[1]] = '1'\n for i in mid_ship:\n self.board[i[0]][i[1]] = '1'\n for i in l_ship:\n self.board[i[0]][i[1]] = '1'\n for i in xl_ship:\n self.board[i[0]][i[1]] = '1'\n return (self.board)",
"def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)",
"def mine_all(self):\n\n # Query databse\n query_string = \"SELECT * from planets_in_range;\"\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Check planets in range\n for ship in results:\n self.mine(str(ship[0]), str(ship[1]))",
"def prep_ships(self):\n self.ships = Group()\n for ship_id in range(self.game.ships_remaining):\n ship = Ship(self.game)\n ship.rect.x = 10 + ship_id * ship.rect.width\n ship.rect.y = 10\n self.ships.add(ship)",
"def prep_ships(self):\n self.ships = Group()\n for ship_number in range(self.stats.ships_left):\n ship = Ship(self.ai_settings, self.screen)\n ship.rect.x = 10 + ship_number * ship.rect.width\n ship.rect.y = 10\n self.ships.add(ship)",
"def prep_ships(self):\r\n\t\tself.ships=Group()\r\n\t\tfor ship_number in range(self.stats.ships_left):\r\n\t\t\tship=Ship(self.ai_settings, self.screen)\r\n\t\t\tship.transform()\r\n\t\t\tship.rect.x=10+ship_number*ship.rect.width\r\n\t\t\tship.rect.y=10\r\n\t\t\tself.ships.add(ship)",
"def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass",
"def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]",
"def bfs(game, game_coords):\n # *** main queue to record steps and corresponding costs ***\n queue_moves = [[game.player.row, game.player.col]]\n cost_moves = [0]\n\n # record cost and illegal moves\n cost = 1\n declined_moves = []\n\n # record the moves in the previous turn(iteration)\n last_steps = [[game.player.row, game.player.col]]\n\n # ***** Step 1: Marking game board using cost *****\n while True:\n\n # struggled in a location, loss\n if not last_steps:\n return 0, 0, 0\n\n # collect all potential moves: left, down, right, up, teleport(if possible)\n potential_steps = []\n for step in last_steps:\n potential_steps.append(left(step))\n potential_steps.append(down(step))\n potential_steps.append(right(step))\n potential_steps.append(up(step))\n\n if search_coords(game_coords, step) in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n potential_steps.append(step)\n\n current_steps = []\n for step in potential_steps:\n if step in declined_moves:\n continue\n elif step in queue_moves:\n # the step existed in main queue, replace it if cost is lower, otherwise skip\n if cost >= cost_moves[queue_moves.index(step)]:\n if step != queue_moves[-1]:\n continue\n\n # check if move is legal\n will_move = step\n item = search_coords(game_coords, will_move)\n\n if item == '*' or item == -1:\n declined_moves.append(will_move)\n continue\n\n elif item == 'W':\n game.player.num_water_buckets += 1\n\n for i in range(len(game_coords['W'])):\n # water picked up, set current display from 'W' to ' ' in game_coords\n if game_coords['W'][i] == will_move:\n game_coords['W'].pop(i)\n game_coords[' '].append(will_move)\n break\n\n elif item == 'F':\n if game.player.num_water_buckets < 1:\n # cannot put out fire, refuse this move :(\n declined_moves.append(will_move)\n continue\n\n game.player.num_water_buckets -= 1\n elif item in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n for coords in game_coords[item]:\n if coords != will_move:\n will_move = coords\n break\n\n current_steps.append(will_move)\n\n # append to main queue\n queue_moves.append(will_move)\n cost_moves.append(cost)\n\n cost += 1\n\n # reach end point\n if game_coords['Y'][0] in current_steps:\n break\n\n # last_steps <- current_steps\n last_steps = []\n last_steps.extend(current_steps)\n\n cost -= 1\n\n # ***** Step 2: recall through main queue to generate a path *****\n # *** Queue: last in first out ***\n recall_moves = queue_moves[::-1]\n recall_cost = cost_moves[::-1]\n cursor = recall_moves[0]\n\n # generated path\n route = []\n\n # 'action to cmd' translator\n action_map = {(1, 0): 'w', (-1, 0): 's', (0, 1): 'a', (0, -1): 'd'}\n\n for i in range(len(recall_moves)):\n if recall_cost[i] == cost - 1:\n x, y = coords_sub(recall_moves[i], cursor)\n\n # simple move: left, down, right, up\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport move\n elif teleport_pair(cursor, game_coords) != -1:\n pair = teleport_pair(cursor, game_coords)\n x, y = coords_sub(recall_moves[i], pair)\n\n # teleport after simple move\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport after no move ('e')\n elif abs(x) + abs(y) == 0:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, 'e')\n\n # convert list of paths to string\n trace = ''\n for action in route:\n trace += action + ', '\n\n return 1, cost_moves[-1], trace",
"def _add_mines(self):\n num = 0\n while num < self._n:\n x = random.randint(0, self._dim - 1)\n y = random.randint(0, self._dim - 1)\n if self._board[x][y] != -1:\n self._board[x][y] = -1\n neighbors = self._get_neighbors((x, y))\n for neighbor in neighbors:\n if self._board[neighbor[0]][neighbor[1]] != -1:\n self._board[neighbor[0]][neighbor[1]] += 1\n num += 1",
"def updateWinners(self,bestships,generation):\n self.winnerSurface = []\n self.winnerPos = []\n self.winnerColour = []\n self.winnerSurface.append(myfont.render(\"GEN \"+str(self.gen-1)+ \" BEST\",False, (250,250,250)))\n for i, shp in enumerate(bestships):\n tempSurf = myfont.render(str(i) + \": \" + str(int(shp.score)) \n +\" \"+shp.getName(), False, shp.colour)\n self.winnerSurface.append(tempSurf)\n self.winnerPos.append(shp.getIntPos())\n self.winnerColour.append(shp.colour)",
"def random_ship(self):\n l4 = 4\n l3 = 3\n l2 = 2\n l1 = 1\n for i in range(l4):\n row = random.randint(0,9)\n col = random.randint(0,9)\n lst = [row, col]\n self.small_ships.append(lst)\n for i in range(l3):\n row = random.randint(0,8)\n col = random.randint(0,8)\n lst = [row, col]\n lst_1 = [row, col+1]\n if lst in self.small_ships or lst in self.mid_ship:\n if lst_1 in self.small_ships or lst_1 in self.mid_ship:\n l3+=1\n else:\n self.mid_ship.append(lst)\n self.mid_ship.append(lst_1)\n for i in range(l2):\n row = random.randint(0,7)\n col = random.randint(0,7)\n lst = [row, col]\n lst_1 = [row, col+1]\n lst_2 = [row, col+2]\n if lst in self.small_ships or lst in self.mid_ship or lst in self.l_ship:\n if lst_1 in self.small_ships or lst_1 in self.mid_ship or lst_1 in self.l_ship:\n if lst_2 in self.small_ships or lst_2 in self.mid_ship or lst_2 in self.l_ship:\n l2+=1\n else:\n self.l_ship.append(lst)\n self.l_ship.append(lst_1)\n self.l_ship.append(lst_2)\n for i in range(l1):\n row = random.randint(0,6)\n col = random.randint(0,6)\n lst = [row, col]\n lst_1 = [row, col+1]\n lst_2 = [row, col+2]\n lst_3 = [row, col+3]\n if lst in self.small_ships or lst in self.mid_ship or lst in self.l_ship or lst in self.xl_ship:\n if lst_1 in self.small_ships or lst_1 in self.mid_ship or lst_1 in self.l_ship or lst in self.xl_ship:\n if lst_2 in self.small_ships or lst_2 in self.mid_ship or lst_2 in self.l_ship or lst in self.xl_ship:\n if lst_3 in self.small_ships or lst_3 in self.mid_ship or lst_3 in self.l_ship or lst in self.xl_ship:\n l1+=1\n else:\n self.xl_ship.append(lst)\n self.xl_ship.append(lst_1)\n self.xl_ship.append(lst_2)\n self.xl_ship.append(lst_3)\n lst_ship = [self.small_ships, self.mid_ship, self.l_ship, self.xl_ship]\n return lst_ship",
"def setRepairCost(self):\n # first take into account the ship hull which is based on internal structure points\n ratio = 1.0 - (self.currentISP/self.myShipHull.maxISP)\n CR = int(self.myShipHull.costCR*ratio)\n AL = int(self.myShipHull.costAL*ratio)\n EC = int(self.myShipHull.costEC*ratio)\n IA = int(self.myShipHull.costIA*ratio)\n\n # compare to ship design, add costs of replacement\n for position, myQuad in self.quads.iteritems():\n designQuad = self.myDesign.quads[position]\n weaponsInQuad = []\n # look for missing components\n for componentID in designQuad.components.keys():\n if componentID not in myQuad.components:\n missingComponent = designQuad.components[componentID]\n if missingComponent.weaponID == '':\n # regular component\n CR += missingComponent.myComponentData.costCR\n AL += missingComponent.myComponentData.costAL\n EC += missingComponent.myComponentData.costEC\n IA += missingComponent.myComponentData.costIA\n elif missingComponent.weaponID not in weaponsInQuad:\n # component part of weapon, weapon must be replaced\n weaponsInQuad.append(missingComponent.weaponID)\n\n # go through weapons that were damaged in this quadrant\n for weaponID in weaponsInQuad:\n damagedWeapon = designQuad.weapons[weaponID]\n CR += damagedWeapon.myWeaponData.costCR\n AL += damagedWeapon.myWeaponData.costAL\n EC += damagedWeapon.myWeaponData.costEC\n IA += damagedWeapon.myWeaponData.costIA\n\n self.repairCost = [CR,AL,EC,IA]",
"def test_places_all_ships(self):\n player = TestPlayer()\n self.ai.place_ships(player)\n sorted_ships = sorted(player.placed_ships)\n self.assertEqual([2, 3, 3, 4, 5], sorted_ships)",
"def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(QShipCell(position))\n self._update_battle_position(self._cells)",
"def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(PShipCell(position))\n self._update_battle_position(self._cells)",
"def mineSweeper():\n clear()\n size_x = ask_numbers(\"Syötä kentän leveys ruuduissa\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\")\n size_y = ask_numbers(\"Syötä kentän korkeus ruuduissa\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\")\n mineQnt = ask_numbers(\"Syötä miinojen lukumäärä\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\") \n while mineQnt > (size_x * size_y - 1):\n print(\"Syötä miinojen lukumäärä siten, että ne mahtuvat kentälle(Max {} kpl)\". format(size_x * size_y - 1))\n mineQnt = ask_numbers(\"Syötä miinojen lukumäärä\\n->\", \"Syötä luku joka on suurempi kuin 0.\\n->\") \n print(\"aloitetaan peli kentällä, jonka koko on {} x {}, \\nja laudalta löytyy piilotettuna {} miinaa\".format(size_x, size_y, mineQnt))\n duration, result, moves = drawingService.main(size_x, size_y, mineQnt)\n return duration, size_x, size_y, result, moves",
"def update(match):\r\n \r\n \r\n coordinates= match.board\r\n \r\n rows=len(match.board)\r\n column=len(match.board[0])\r\n for x in range(rows):\r\n for y in range(column):\r\n cell_up = match.board[wrapx(x)][wrapy(y+1)]\r\n cell_down = match.board[wrapx(x)][wrapy(y-1)]\r\n cell_right = match.board[wrapx(x+1)][wrapy(y)]\r\n cell_left = match.board[wrapx(x-1)][wrapy(y)]\r\n cell_diagupright = match.board[wrapx(x+1)][wrapy(y+1)]\r\n cell_diagupleft = match.board[wrapx(x-1)][wrapy(y+1)]\r\n cell_diagdownright = match.board[wrapx(x+1)][wrapy(y-1)] \r\n cell_diagdownleft = match.board[wrapx(x-1)][wrapy(y-1)]\r\n \r\n listofneightbours = [cell_up, cell_down, cell_right, cell_left, cell_diagupright, cell_diagupleft,\r\n cell_diagdownright, cell_diagdownleft]\r\n aliveneighbours = listofneighbours.count(1)\r\n \r\n if aliveneighbours < 2:\r\n x = 0\r\n elif aliveneighbours == 2:\r\n x = 1\r\n elif aliveneighbours == 3:\r\n x = 1\r\n else:\r\n x = 0",
"def moves(self, teleport=False):\n recv = [(self._size[y][x] - self._used[y][x], x, y)\n for x in range(self.xsize) for y in range(self.ysize)]\n recv.sort(reverse=True)\n send = [(self._used[y][x], x, y)\n for x in range(self.xsize) for y in range(self.ysize)\n if self._used[y][x] > 0]\n send.sort()\n # print(\"recv: {}...\".format(str(recv[:5])))\n # print(\"send: {}...\".format(str(send[:5])))\n moves = []\n for avail, x1, y1 in recv:\n for used, x0, y0 in send:\n if avail < used:\n break\n if teleport or (x0 == x1 and abs(y0 - y1) == 1) or (\n y0 == y1 and abs(x0 - x1) == 1):\n self.apply((x0, y0), (x1, y1))\n moves.append((self.score(), self.key(), self.save(), list(self.history)))\n self.undo()\n return moves"
] |
[
"0.7102829",
"0.7067783",
"0.65166754",
"0.65071094",
"0.64424324",
"0.6266532",
"0.6066007",
"0.5912358",
"0.572561",
"0.57133865",
"0.571108",
"0.56970054",
"0.5682629",
"0.5675079",
"0.56600696",
"0.56578845",
"0.5643913",
"0.56228065",
"0.5608582",
"0.5605877",
"0.5590811",
"0.5580842",
"0.5565258",
"0.5530934",
"0.5403084",
"0.53810376",
"0.53643095",
"0.5360212",
"0.5354128",
"0.53358126"
] |
0.8022364
|
0
|
Recursive function that positions a ship in a particular direction and applies the weights. Arguments: coordinates (the coordinate to check), size (the size of the ship), weight (the weight to apply if this coordinate can hold a ship), direction (the direction to move as the ship is being placed), hitWeight (the extra amount of weight to add if the ship overlays previously hit coordinates). Returns: result (True if the ship can be placed, False if it can't) and hitWeight (the extra hit weight to apply as the recursive stack unwinds).
|
def weightShipSearch(self, coordinates, size, weight, direction, hitWeight):
if size == 0:
# Successfully searched the required size.
return True, hitWeight
if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:
# Can't go off the board.
return False, 0
if self.enemyBoard[coordinates.x][coordinates.y] < BoardState.HIT:
# This search is all for naught since we can't possibly have a ship at this position.
return False, 0
if self.enemyBoard[coordinates.x][coordinates.y] == BoardState.HIT:
# Weigh searches with hits already in them over searches without them. This is to
# direct the shot selection toward coordinates with hits already near them.
hitWeight += 10
# Move to the next set of coordinates on the board.
result, hitWeight = self.weightShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y),
size - 1, weight, direction, hitWeight)
if result:
        # An entire ship can fit, weight the coordinate appropriately.
if self.enemyBoard[coordinates.x][coordinates.y] >= BoardState.OPEN:
self.enemyBoard[coordinates.x][coordinates.y] += (weight + hitWeight)
return result, hitWeight
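
For comparison, the same fit check and hit bonus can be written iteratively; this is only a sketch over a plain 2D list of cell states ('open', 'hit', 'miss', 'sunk'), with illustrative names rather than the BoardState constants used above.

def ship_fits(board, x, y, dx, dy, size, hit_bonus=10):
    n = len(board)
    bonus = 0
    for _ in range(size):
        if not (0 <= x < n and 0 <= y < n):
            return False, 0            # ran off the board
        if board[x][y] in ("miss", "sunk"):
            return False, 0            # a ship cannot occupy this cell
        if board[x][y] == "hit":
            bonus += hit_bonus         # favour lines running through known hits
        x, y = x + dx, y + dy
    return True, bonus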
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sinkShipSearch(self, coordinates, size, direction):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, []\r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, None\r\n if self.enemyBoard[coordinates.x][coordinates.y] != BoardState.HIT:\r\n # This search is all for naught since the ship can't possibly have sunk at this position.\r\n return False, None\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(coordinates)\r\n return sunkShip, shipCoordinates",
"def ship_size(coord, field):\n # r for rows, c for columns\n if not has_ship(coord, field):\n return 0\n r, c = int(coord[1]) - 1, ord(coord[0]) - 65\n\n # Looking in all directions\n left = check_row(field[r], r, c, -1, -1)\n right = check_row(field[r], r, c, 10, 1)\n\n # reversing for up and down, creating of vertical row\n column = list(field[x][c] for x in range(len(field)))\n down = check_row(column, c, r, 10, 1, reverse=True)\n up = check_row(column, c, r, 0, -1, reverse=True)\n\n directions = [[(left[0] + right[0] - 1), list(set(left[1] + right[1]))],\n [(up[0] + down[0] - 1), list(set(up[1] + down[1]))]]\n ship = max(directions)\n for point in ship[1]:\n if find_foreign(point, ship, field):\n return False\n # if left[0] + right[0] + up[0] + down[0] == ship[0] + 3:\n return ship",
"async def add_ship(self, pn, x, y, dx, dy, size):\n board = self.boards[pn]\n try:\n board.add_ship(x, y, dx, dy, size)\n return True\n except ValueError:\n return False",
"def ship_size(data, cell):\n if type(data) != dict:\n print('Wrong argument data')\n return None\n if type(cell) != tuple:\n print(\"Second argument must be a tuple\")\n return None\n if type(cell[0]) != str:\n print(\"First element of the second argument must be a str - A..J\")\n return None\n if type(cell[1]) != int:\n print(\"Second element of the second argument must be a number - 1..10\")\n return None\n if not has_ship(data, cell):\n return 0\n x = ord(cell[0].upper()) - 64\n y = cell[1]\n if x < 1 or x > 10:\n print('Wrong coordinate. Must be from A to J.')\n return None\n if y < 1 or y > 10:\n print('Wrong coordinate. Must be from 1 to 10.')\n return None\n size = 1\n coords = {(x, y)}\n if data[(x+1, y)] == 'damaged' or data[(x+1, y)] or data[(x-1, y)] == 'damaged' or data[(x-1, y)]:\n start = x\n while(data[(start-1, y)] == 'damaged' or data[(start-1, y)]):\n coords = coords | {(start-1, y)}\n size += 1\n start -= 1\n if start < 2:\n break\n start = x\n while(data[(start+1, y)] == 'damaged' or data[(start+1, y)]):\n coords = coords | {(start + 1, y)}\n size += 1\n start += 1\n if start > 10:\n break\n elif data[(x, y+1)] == 'damaged' or data[(x, y+1)] or data[(x, y-1)] == 'damaged' or data[(x, y-1)]:\n start = y\n while(data[(x, start-1)] == 'damaged' or data[(x, start-1)]):\n coords = coords | {(x, start - 1)}\n size += 1\n start -= 1\n if start < 1:\n break\n start = y\n while(data[(x, start+1)] == 'damaged' or data[(x, start+1)]):\n coords = coords | {(x, start + 1)}\n size += 1\n start += 1\n if start > 10:\n break\n return (size, coords)",
"def check_ship_fits(self, ship_length, row, column, orientation):\n if orientation == 'H':\n if column + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True\n else:\n if row + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True",
"def ship_size(coordinates, field):\r\n return len(ship_coordinates(coordinates, field))\\\r\n if has_ship(coordinates, field) else 0",
"def check_direction(self, board: np.array, location: tuple, direction: Direction) -> bool:\n\n new_location = np.array(location) + direction.array\n if not self.in_bounds(new_location):\n return False\n\n # Note that this is already a boolean (so no need for if statements)\n return board[tuple(new_location)] == Item.Passage.value",
"def has_ship(coordinates, field):\r\n if field[coordinates[0], coordinates[1]] == '*':\r\n return True\r\n return False",
"def has_ship(data, coords):\n if type(data) != dict:\n print('Wrong type of first argument (data)')\n return None\n if type(coords) != tuple:\n print('Wrong type of second argument (coords)')\n return None\n x = ord(coords[0].upper()) - 64\n y = coords[1]\n if x < 1 or x > 10:\n print('Wrong coordinate. Must be from A to J.')\n return None\n if y < 1 or y > 10:\n print('Wrong coordinate. Must be from 1 to 10.')\n return None\n if data[(x, y)] or data[(x, y)] == 'damaged':\n return True\n else:\n return False",
"def _place(player, size, length, number):\n\n if number == 0:\n return\n name = player.name\n ship = player.board.fleet.name_ship(length)\n print \"\\n%s, please place your %s. (Length: %s)\\n\" % (name, ship, length)\n\n player.board.display(True)\n\n coord = man_coord(size)\n x = coord[0]\n y = coord[1]\n direct = man_dir()\n\n if player.board.check(x, y, length, direct) is True:\n name = player.fleet.name_ship(length)\n player.fleet.add_ship(name, x, y, length, direct)\n return _place(player, size, length, number - 1)\n print \"\\nSorry, that ship won't fit, please try again.\"\n return _place(player, size, length, number)",
"def isValidCoord(coord, size):\n return coord[0] >= 0 and coord[0] < size and \\\n coord[1] >= 0 and coord[1] < size",
"def has_ship(field, coordinates):\n\n if field[coordinates] == '*':\n return True\n return False",
"def validate_move(coordinates: dict, character_dict: dict, user_input: str) -> bool:\n new_coordinate = get_new_coordinate(x_y_coordinate=character_dict, move_direction=user_input)\n return new_coordinate in coordinates",
"def is_valid(field):\n one = 0\n two = 0\n three = 0\n four = 0\n for i in range(1, 11):\n for j in range(1, 11):\n if ship_size(field, (i, j)) == 1:\n one += 1\n if ship_size(field, (i, j)) == 2:\n two += 1\n if ship_size(field, (i, j)) == 3:\n three += 1\n if ship_size(field, (i, j)) == 4:\n four += 1\n if ship_size(field, (i, j)) > 4:\n return False\n if one == 4 and two == 6 and three == 6 and four == 4:\n return True\n print(one, two, three, four)\n\n return False",
"def validate_ship(cls, ship_type, star_square, orientation):\n try:\n cls.validate_square(star_square)\n cls.validate_type(ship_type)\n cls.check_if_fit_in_grid(ship_type, star_square, orientation)\n except ValueError as e:\n raise ValueError('%s for %s at %s' %\n (str(e), cls.TYPE_NAMES[ship_type], star_square))",
"def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False",
"def __create_ship(self, size, text_cords, direction):\n \n y, x = text_to_cords(text_cords)\n new_ship = Ship(size, y, x, direction)\n self.ships.append(new_ship)\n self.board.add_ship(new_ship)",
"def check_bounds (position, size):\n \n for item in position:\n # checks whether item is out of bounds\n if item < 0 or item >= size:\n return False\n return True",
"def weightBoard(self):\r\n directions = (Direction.East, Direction.South)\r\n for size, count in self.shipsAfloat.items():\r\n size = int(size)\r\n for i in range(self.boardDimensions):\r\n for j in range(self.boardDimensions):\r\n for direction in directions:\r\n coordinates = Coordinates(i, j)\r\n self.positionAndWeightShip(coordinates, size, count, direction)",
"def test_fitness_rules(rule_names,\n weights,\n steps=10,\n pop_size=20,\n search_width=10,\n log_folder=None,\n save_folder=None,\n internal_shape=(64, 64),\n output_shape=(400, 400)):\n critic_threshold = 0.000\n veto_threshold = 0.000\n novelty_weight = -1\n memsize = 10000\n shape = internal_shape\n\n pset = create_pset()\n create_kwargs = {'pset': pset,\n 'toolbox': create_toolbox(pset),\n 'pop_size': pop_size,\n 'shape': shape}\n\n # Make the rules\n rules = _make_rules(rule_names, shape)\n rule_weights = weights\n _init_save_folder(save_folder)\n\n # Environment and simulation\n menv = create_environment(2)\n\n for _ in range(1):\n ret = aiomas.run(until=menv.spawn('agents:GPImageAgent',\n log_folder=log_folder,\n save_folder=save_folder,\n artifact_cls=GeneticImageArtifact,\n create_kwargs=create_kwargs,\n rules=rules,\n rule_weights=rule_weights,\n memsize=memsize,\n critic_threshold=critic_threshold,\n veto_threshold=veto_threshold,\n novelty_weight=novelty_weight,\n search_width=search_width,\n output_shape=output_shape))\n\n run_sim(menv, steps, log_folder, save_folder, pset, output_shape)",
"def ship_coordinates(coordinates, field):\r\n if has_ship(coordinates, field):\r\n ship = [coordinates]\r\n\r\n for i in range(1, 4):\r\n if (coordinates[0] + i) < 10 and\\\r\n has_ship((coordinates[0] + i, coordinates[1]), field):\r\n ship.append((coordinates[0] + i, coordinates[1]))\r\n else:\r\n break\r\n\r\n for i in range(1, 4):\r\n if (coordinates[0] - i) > 0 and\\\r\n has_ship((coordinates[0] - i, coordinates[1]), field):\r\n ship.append((coordinates[0] - i, coordinates[1]))\r\n else:\r\n break\r\n\r\n if len(ship) == 1:\r\n for i in range(1, 4):\r\n if (coordinates[1] + i) < 10 and\\\r\n has_ship((coordinates[0], coordinates[1] + i), field):\r\n ship.append((coordinates[0], coordinates[1] + i))\r\n else:\r\n break\r\n\r\n for i in range(1, 4):\r\n if (coordinates[1] - i) > 0 and\\\r\n has_ship((coordinates[0], coordinates[1] - i), field):\r\n ship.append((coordinates[0], coordinates[1] - i))\r\n else:\r\n break\r\n\r\n return ship",
"def place_ship(board,ship_length,bow,ship_direction):\n\n # verifies the input\n if abs(ship_direction[0])+abs(ship_direction[1])==1 and \\\n 0 <= bow[0] < len(board[0]) and 0 <= bow[1] < len(board) and \\\n -1 <= (bow[0] - ship_direction[0]*ship_length) <= len(board[0]) and \\\n -1 <= (bow[1] - ship_direction[1]*ship_length) <= len(board):\n\n index=ship_index(board) # find the next ship-index\n size=[ship_length]\n for part in range(ship_length): # try to place the ship\n if board[bow[1]-ship_direction[1]*part]\\\n [bow[0]-ship_direction[0]*part] == None:\n board[bow[1]-ship_direction[1]*part]\\\n [bow[0]-ship_direction[0]*part] = (index, part, size)\n else: # if another ship in the middle, delete the part of the ship\n # alredy placed and return None\n for del_part in range(part):\n board[bow[1]-ship_direction[1]*del_part]\\\n [bow[0]-ship_direction[0]*del_part] = None\n return\n return index",
"def __can_enter(self, position, traversed):\n row, col = position\n # Check index values\n if row < 0 or col < 0:\n return False\n if row >= self.__row_count or col >= self.__col_count:\n return False\n # Check if already traversed\n if traversed[row][col]:\n return False\n # Check if blocked\n if self.__grid[row][col].blocked:\n return False\n return True",
"def _sideways_ship_hit(self):\n if self.stats.sideways_ships_left > 0:\n self.stats.sideways_ships_left -= 1\n self.aliens.empty()\n self.bullets.empty()\n self._create_fleet()\n self.sideways_ship.center_sideways_ship()\n sleep(0.5)\n else:\n self.stats.game_active = False",
"def position_ship(self, cell, fleet, hit_power):\n if not cell.occupied:\n ship = Ship(fleet, cell, hit_power)\n cell.occupied = True\n cell.mark = constants.ACTIVE_SHIP_MARK\n cell.ship = ship\n self.shipList.append(ship)\n else:\n # raise ex.CannotPlaceFleetError()\n print(\"XXXXXX\")",
"def place_ship(self, ship, x, y, orientation):\n\t\tdx = (orientation == GameBoard.O_HORIZONTAL)\n\t\tdy = (orientation == GameBoard.O_VERTICAL)\n\t\t# Check if there's enough space first.\n\t\tfor i in range(ship.size):\n\t\t\ttile = self.get_our_tile(x + i * dx, y + i * dy)\n\t\t\tif not tile.is_free():\n\t\t\t\traise ValueError(\"You already have a ship there!\")\n\n\t\tself.dump()\n\t\t# Enlist the ship in the navy.\n\t\tship.place(x, y, orientation)\n\t\tself.ships.append(ship)\n\t\t# Mark the tiles occupied by the ship.\n\t\tfor i in range(ship.size):\n\t\t\tcx = x + i * dx\n\t\t\tcy = y + i * dy\n\n\t\t\t# Create a tile boundary around the ship.\n\t\t\ttile = bt.Tile(bt.Tile.T_OCCUPIED)\n\t\t\tif i == 0:\n\t\t\t\t#\n\t\t\t\t# :AAAAA\n\t\t\t\t#\n\t\t\t\t# :\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx, cy - dy, tile)\n\t\t\t\t# :\n\t\t\t\t# :AAAAA\n\t\t\t\t#\n\t\t\t\t# ::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx - dy, cy - dy - dx, tile)\n\t\t\t\t# :\n\t\t\t\t# :AAAAA\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx + dy, cy - dy + dx, tile)\n\t\t\telif i == ship.size - 1:\n\t\t\t\t# :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# :\n\t\t\t\tself.set_our_tile(cx + dx, cy + dy, tile)\n\t\t\t\t# : :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# ::\n\t\t\t\tself.set_our_tile(cx + dx - dy, cy + dy - dx, tile)\n\t\t\t\t# : :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# : :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# :::\n\t\t\t\tself.set_our_tile(cx + dx + dy, cy + dy + dx, tile)\n\t\t\t# :::::::\n\t\t\t# :AAAAA:\n\t\t\t# : :\n\t\t\t# :::\n\t\t\t# :E\n\t\t\t# :E\n\t\t\t# :::\n\t\t\tself.set_our_tile(cx - dy, cy - dx, tile)\n\t\t\t# :::::::\n\t\t\t# :AAAAA:\n\t\t\t# :::::::\n\t\t\t# :::\n\t\t\t# :E:\n\t\t\t# :E:\n\t\t\t# :::\n\t\t\tself.set_our_tile(cx + dy, cy + dx, tile)\n\n\t\t\t# Create the ship tile by tile.\n\t\t\tself.set_our_tile(cx, cy, ship.tile())\n\n\t\tself.dump()",
"def can_be_placed(grid, coords, word, vertical=False):\n\n # starting at coords, iterate in direction over the grid\n # - if the location is None, it's okay\n # - if there is a letter, make sure it's the same letter as the one we're\n # placing over it\n # - doesn't go over the edge of the grid (although we could just grow the\n # grid in this case)\n # - need to go back over the grid and make sure any new \"words\" exist in\n # some dictionary or are not generated at all\n #\n # might be useful to be able to transpose the matrix so we don't have to\n # figure out two ways to iterate over it. let's do horizontal first\n # (if we transpose the grid, we have to transpose the coordinates too)\n\n has_touched = is_empty(grid)\n\n if vertical:\n return can_be_placed(transpose(grid), (coords[1], coords[0]), word)\n else:\n for i in range(0, len(word)):\n\n x, y = coords[0] + i, coords[1]\n\n if y not in range(0, len(grid)) or x not in range(0, len(grid[0])):\n return False\n else:\n grid_contents = grid[y][x]\n current_letter = word[i]\n\n if grid_contents == None:\n pass\n elif grid_contents == current_letter:\n has_touched = True\n pass\n else:\n return False\n return has_touched",
"def on_ship(self, ship):\n # TODO: add ship to game\n # The game has a set combination of ships which is created when choosing the field size\n # (by battleships.ship_combination_creator()).\n # After that you need to create the player and add every ship from the combination (\n # without position) to his fleet. Done by add_ship(size) in the player class,\n # just cycle through the fleet_config of the Game class.\n # Then you need a button to determine the facing of the ship (north, west, east, south) and\n # something that shows which ship you are placing (either go through the array yourself\n # or by choosing the size per button).\n # Then the player needs to call position_ship(size, x, y, facing). If the ship cannot be\n # placed there (either because it conflicts with another ship or goes over the edge of\n # the board) the function will return a False, if it works it'll return True.\n # By calling check_ship_placement() from the Player class you can check whether all\n # ships are placed or not (returning True if all are placed, returning False if one or\n # more are still missing a position).\n # Apparently this is a commuication error on both sides:\n # This is how the ship placement works via the now built GUI:\n # New Game-> field size setting -> player name entry + ship placement,\n # via click, hoover and click (lenght of placed ship 1+hoover+1)\n # a list of coords creates the ship\n # {'n': [[(0, 0), (1, 0), (2, 0), (3, 0)]],\n # 'b': [[(1, 1), (2, 1), (3, 1), (4, 1)], [(2, 2), (2, 3), (2, 4)]]}\n\n if len(ship) < 3 or len(ship) > 6:\n # set length of ship to 3 to 6\n return False\n return True",
"def is_horizontal(coordinates, field):\r\n return has_ship((coordinates[0], coordinates[1] + 1), field) or \\\r\n has_ship((coordinates[0], coordinates[1] - 1), field)",
"def check_if_fit_in_grid(cls, ship_type, start_square, orientation):\n end_square = cls.get_end_square(ship_type, start_square, orientation)\n try:\n cls.validate_square(end_square)\n except ValueError:\n raise ValueError(\"Ship doesn't fit in sea grid\")"
] |
[
"0.5731691",
"0.5203796",
"0.48089176",
"0.47622883",
"0.47248405",
"0.46185344",
"0.44617125",
"0.43873248",
"0.4386601",
"0.4383292",
"0.4365558",
"0.43292725",
"0.43162784",
"0.43162185",
"0.4308138",
"0.43051094",
"0.4280897",
"0.4277226",
"0.42721885",
"0.42691907",
"0.42635524",
"0.42503732",
"0.423132",
"0.4219191",
"0.42124906",
"0.42120928",
"0.4211502",
"0.42054927",
"0.4201495",
"0.4198654"
] |
0.7550492
|
0
|
Puts all of the weighted coordinates in a priority queue and selects the coordinate with the most weight. Returns: bestCoordinates, the highest weighted coordinates.
|
def selectBestCoordinates(self):
coordinatesQueue = []
# It's highly likely that there are going to be a lot of coordinates with the same "most" weight. Rather
# than always choosing the leftmost coordinates, make a random choice by adding a random tie breaker to the
# priority.
    randomTieBreaker = [i for i in range(self.boardDimensions ** 2)]
random.shuffle(randomTieBreaker)
for i in range(self.boardDimensions):
for j in range(self.boardDimensions):
if self.enemyBoard[i][j] > BoardState.OPEN:
heapq.heappush(coordinatesQueue, (-self.enemyBoard[i][j], randomTieBreaker.pop(), Coordinates(i, j)))
bestCoordinates = heapq.heappop(coordinatesQueue)[-1]
self.enemyBoard[bestCoordinates.x][bestCoordinates.y] = BoardState.OPEN
while len(coordinatesQueue) > 0:
coordinates = heapq.heappop(coordinatesQueue)[-1]
# Reset the weights on all coordinates under consideration so they'll be ready for another round of
# weighting.
self.enemyBoard[coordinates.x][coordinates.y] = BoardState.OPEN
return bestCoordinates
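
The heapq-with-random-tie-breaker idiom used here can be exercised on its own; a minimal sketch over a plain weight matrix, assuming positive values mark candidate cells (best_cell is an illustrative name, not the author's API).

import heapq
import random

def best_cell(weights):
    n = len(weights)
    tie = list(range(n * n))
    random.shuffle(tie)                # random tie breaker between equal weights
    heap = []
    for i in range(n):
        for j in range(n):
            if weights[i][j] > 0:
                heapq.heappush(heap, (-weights[i][j], tie.pop(), (i, j)))
    return heapq.heappop(heap)[-1]     # most negative key == largest weight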
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_best_fitness(self):\n f = max(self.characters, key=operator.attrgetter('fitness'))\n self.best_fitness = round(f.fitness, 3)\n self.best_candidate = f",
"def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]",
"def best_bat(self):\n\n i = 0\n j = 0\n for i in range(self.NP):\n if self.Fitness[i] < self.Fitness[j]:\n j = i\n for i in range(self.D):\n self.best[i] = self.Sol[j][i]\n self.f_min = self.Fitness[j]",
"def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost",
"def _find_best_offer(\n self, all_offers: List[Tuple[str, Dict]]\n ) -> Tuple[List, float]:\n bests, best_gain = [], 0\n\n for partner, offers in all_offers:\n partial_asgt = self._neighbors_values.copy()\n current_partner = self._neighbor_var(partner)\n\n # Filter out the constraints linking those two variables to avoid\n # counting their cost twice.\n shared = find_dependent_relations(current_partner, self._constraints)\n concerned = [rel for rel in self._constraints if rel not in shared]\n\n for (val_p, my_offer_val), partner_local_gain in offers.items():\n partial_asgt.update({partner: val_p, self.variable.name: my_offer_val})\n\n # Then we evaluate the agent constraint's for the offer\n # and add the partner's local gain.\n cost = assignment_cost(partial_asgt, concerned)\n global_gain = self.current_cost - cost + partner_local_gain\n\n if (global_gain > best_gain and self._mode == \"min\") or (\n global_gain < best_gain and self._mode == \"max\"\n ):\n bests = [(val_p, my_offer_val, partner)]\n best_gain = global_gain\n elif global_gain == best_gain:\n bests.append((val_p, my_offer_val, partner))\n\n return bests, best_gain",
"def handle_input(self, coordinates, current_best = None):\n if self.disable:\n return current_best\n \n if self.is_coords_in_bounds(coordinates):\n if current_best is None or self.z <= current_best.z:\n current_best = self\n else:\n if self._currently_hovered:\n self.mouse_out()\n self._currently_hovered = False\n self.mouse_not_over()\n \n for child in self.children:\n current_best = child.handle_input(coordinates, current_best)\n\n return current_best",
"def _best_individual(self):\n return max(self._population, key=attrgetter(\"fitness\"))",
"def get_best_particle(self):\n index = self.weights.argmax()\n return self.particles[index, :]",
"def get_best_features(self) -> List[Tuple[str, float]]:\n return [(key, self.features[key]) for key in sorted(self.features, key=self.features.get, reverse=True)]",
"def find_best_solution_and_score(self):\r\n best_score = MAXSIZE\r\n best_solution = self.simulation.solutions[0]\r\n for solution in self.simulation.solutions:\r\n score = self.simulation.fitting_function.fit_score(solution)\r\n if score < best_score:\r\n best_score = score\r\n best_solution = solution\r\n return best_solution, best_score",
"def find_best(self, metrics_eval_func=None):\n eval_func = metrics_eval_func or self.metrics_eval_func\n if not self.grid_points:\n raise RuntimeError(\"GridSearchResults are empty, cannot find a best point\")\n best = self.grid_points[0]\n for point in self.grid_points:\n if eval_func(point.metrics, best.metrics):\n best = point\n return best",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))",
"def worst_atom(self, g_u, g_v, active_set):\n\n max_w = None\n max_m_w = None\n max_n_w = None\n max_score = -float('inf')\n\n for w in active_set:\n m_w, n_w = self.polytope.vertex(w)\n score_w = np.sum(g_u * m_w) + np.sum(g_v * n_w)\n\n if score_w > max_score:\n max_w = w\n max_m_w = m_w\n max_n_w = n_w\n max_score = score_w\n\n return max_w, max_m_w, max_n_w",
"def personal_best(scores):\n return max(scores)",
"def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]",
"def get_best(self) -> Chromosome:\n if not (self._best_chromosome is None): # if the best chromosome is unchanged since the last calculation\n return self._best_chromosome\n\n best = None\n best_fitness = None\n\n for chromosome in self._population:\n chromosome_fitness = chromosome.get_fitness()\n\n if best_fitness is None or self._is_fitter(chromosome_fitness, best_fitness):\n best = chromosome\n best_fitness = chromosome_fitness\n\n return best",
"def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy",
"def best_node(self):\n nodes = self._all_nodes()\n sorted_nodes, _ = self.scorer.sort(nodes)\n return sorted_nodes[0]",
"def getOptimalSolution(self):\n max_index = np.argmax(self.Ws)\n self.Wmax = self.Ws[max_index]\n self.Emax = self.subsets[max_index]\n return (self.Wmax, self.Emax)",
"def getlocalbestcoordinate(self):\n return self.localbest.coordinate",
"def get_worst_fitness(self):\n f = min(self.characters, key=operator.attrgetter('fitness'))\n self.worst_fitness = round(f.fitness, 3)",
"def best_params(self):\n return self.X[np.argmax(self.y.numpy())]",
"def pop_best_node(self, nodes):\n best_node_score = float('inf')\n best_node_idx = 0\n\n for idx in range(len(nodes)):\n if nodes[idx][1] < best_node_score:\n best_node_score = nodes[idx][1]\n best_node_idx = idx\n\n return nodes.pop(best_node_idx)",
"def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]",
"def GetBestSizeTuple(self):\n return self.GetBestSize().asTuple()",
"def get_optimal_move(self):\n # create the root state\n root = State(self.current_board, True, self.__machine_token, self.__human_token)\n # alpha-beta-pruning algorithm\n best_move = max_value_a_b(root, depth(root), -1000, 1000)\n # obtain the direct children.\n direct_children = get_direct_children(root, all_states_generated)\n # obtain the coordinates of the movement.\n for direct_child in direct_children:\n if direct_child.value == best_move:\n return get_coordinates(root, direct_child)",
"def get_best_position(self):\n # Todo: implement\n best_value_global = -inf\n position = None\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n position = particle.best_position\n best_value_global = particle.best_value\n return position",
"def best_p(zscore):\n for p, z in HANDY_Z_SCORE_CHEATSHEET:\n if zscore > z:\n break\n\n return (p, z)"
] |
[
"0.6071521",
"0.6039935",
"0.58864534",
"0.58477885",
"0.5797013",
"0.5676042",
"0.5628477",
"0.55944246",
"0.5588288",
"0.55880636",
"0.55558515",
"0.5525676",
"0.5525676",
"0.5525676",
"0.54964423",
"0.5489497",
"0.5471704",
"0.5466427",
"0.54595804",
"0.54568857",
"0.54528767",
"0.54480404",
"0.5439205",
"0.54254514",
"0.5416065",
"0.54017407",
"0.5395872",
"0.5390617",
"0.5382041",
"0.5374994"
] |
0.7931314
|
0
|
Attempts to sink as many sinking ships as possible given the shot result. Arguments: shot (a shot of the form LetterNumber), hit (True if the shot was a hit), sunk (the size of the sunk ship, if the shot sunk it).
|
def shotResult(self, shot, hit, sunk):
ShotSelector.shotResult(self, shot, hit, sunk)
coordinates = self.mapToCoordinates(shot)
if sunk:
self.shipsToSink.append(SinkingShip(coordinates, sunk))
self.sinkShips()
self.printShipsAfloat()
self.printShipsToSink()
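
A bare-bones sketch of the same bookkeeping, assuming shots of the form LetterNumber (e.g. 'B7') on a zero-indexed board; parse_shot and record_result are illustrative names, and the row/column convention here is an assumption rather than the class's mapToCoordinates.

def parse_shot(shot):
    col = ord(shot[0].upper()) - ord("A")
    row = int(shot[1:]) - 1
    return row, col

def record_result(ships_to_sink, shot, hit, sunk_size):
    coords = parse_shot(shot)
    if sunk_size:
        # Remember where the sinking shot landed and how long the ship was,
        # so a later pass can walk outward and mark the whole hull as sunk.
        ships_to_sink.append((coords, sunk_size))
    return coords, hit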
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def shotResult(self, shot, hit, sunk):\r\n logging.debug(\"shot result: %s, hit: %d, sunk: %d\" % (shot, hit, sunk))\r\n coordinates = self.mapToCoordinates(shot)\r\n # If a ship was sunk, remove it from the fleet.\r\n if sunk:\r\n sunk = str(sunk)\r\n assert(self.shipsAfloat[sunk] > 0)\r\n self.shipsAfloat[sunk] -= 1\r\n # Remove any counts that went to 0.\r\n if self.shipsAfloat[sunk] == 0:\r\n del(self.shipsAfloat[sunk])\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.BULLSEYE\r\n else:\r\n if hit:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.HIT\r\n else:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.MISS",
"def calculate_shot(self, player_ships: list):\n board = Board(self.__size)\n prob_board = Board(self.__size)\n\n for move in self.__moves:\n x, y = move[1], move[2]\n board.shoot(x, y)\n prob_board.board[x][y] = -1000\n\n if move[0] == ShotResult.HIT:\n if any(ship.sunk and [anything, x, y] in ship.pieces for ship in player_ships):\n # part of a sunken ship; no need to increase neighbours probability\n continue\n\n for (i, j) in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n try: # easier to ask for forgiveness that permission :d\n if (ShotResult.HIT, x - i, y - j) in self.__moves: # opposite neighbour\n prob_board.board[x + i][y + j] += self.offset\n prob_board.board[x + i][y + j] += self.offset\n except IndexError:\n pass\n\n final_x, final_y = 0, 0\n max_prob = -1\n for s in player_ships:\n if not s.sunk: # the only time we use unsunken ships; we use just their size\n for i in range(self.__size):\n for j in range(self.__size):\n for o in range(0, 2): # for every (x, y, orientation) possible\n try:\n board.check(Ship(s.type, o, i, j))\n for offset in range(s.size):\n x, y = i - offset * o, j + offset * (not o)\n prob_board.board[x][y] += 1 # increase the probability of each piece\n except IllegalMove:\n pass\n\n for i in range(self.__size):\n for j in range(self.__size):\n if prob_board.board[i][j] > max_prob:\n final_x, final_y = i, j\n max_prob = prob_board.board[i][j]\n elif prob_board.board[i][j] == max_prob:\n if randint(0, 10) < 5: # random aspect to the ai, harder to predict\n final_x, final_y = i, j\n return final_x, final_y",
"def sinkShips(self):\r\n while True:\r\n stillSinkingShips = False\r\n for i in range(len(self.shipsToSink) - 1, -1, -1):\r\n sunkShip, shipCoordinates = self.positionAndSinkShip(self.shipsToSink[i])\r\n if sunkShip:\r\n stillSinkingShips = True\r\n for coordinates in shipCoordinates:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.SUNK\r\n del(self.shipsToSink[i])\r\n if not stillSinkingShips:\r\n break",
"async def play_shotgun(game_state) -> None:\n big_inside, lesser_counter = count_zombies(game_state)\n if big_inside and lesser_counter == 0:\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter <= 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN)\n elif lesser_counter > 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n message = 'What survivors should do [0/1]?\\n[0]: kill big zombie\\n' \\\n f'[1]: kill up to two lesser zombies ({lesser_counter} inside)\\n>'\n action = await get_action(game_state, message, ['0', '1'])\n if action == '0':\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter == 1:\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)",
"def sinkShip(self, bullsEye, size, direction):\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(bullsEye.x + direction.x, bullsEye.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(bullsEye)\r\n return sunkShip, shipCoordinates",
"def test_with_shots_option(self):\n params, target = self._generate_params_target([1])\n sampler = Sampler()\n result = sampler.run(\n circuits=[self._pqc], parameter_values=params, shots=1024, seed=15\n ).result()\n self._compare_probs(result.quasi_dists, target)",
"def ship_bullet_hits(ai, var, screen, ship, enemies, shots, blasters, shockers, items, hub):\r\n\tfor shot in shots.copy():\r\n\t\tif pygame.sprite.spritecollideany(shot, enemies):\r\n\t\t\tfor enemy in enemies.copy():\r\n\t\t\t\tif pygame.sprite.spritecollideany(enemy, shots):\r\n\t\t\t\t\tenemy.hits += 1\r\n\t\t\t\t\tif enemy.hits >= enemy.hp:\r\n\t\t\t\t\t\tif not enemy.dead:\r\n\t\t\t\t\t\t\titem_chance(ai, var, screen, enemy, items, hub)\r\n\t\t\t\t\t\t\tenemy.dead = 1\r\n\t\t\t\t\t\t\tenemy.dead_time = pygame.time.get_ticks()\r\n\t\t\t\t\t\t\tenemy_death(ai, var, screen, enemy, shockers)\r\n\t\t\tshot.remove(shots)\r\n\t\t\t\r\n\tfor blast in blasters.copy():\r\n\t\tif pygame.sprite.spritecollideany(blast, enemies):\r\n\t\t\tfor enemy in enemies.copy():\r\n\t\t\t\tif pygame.sprite.spritecollideany(enemy, blasters):\r\n\t\t\t\t\tenemy.hits += 10\r\n\t\t\t\t\tif enemy.hits >= enemy.hp:\r\n\t\t\t\t\t\tif not enemy.boomed:\r\n\t\t\t\t\t\t\tenemy.boomed = 1\r\n\t\t\t\t\t\t\tenemy.t_boomed = pygame.time.get_ticks()\r\n\t\t\t\t\t\t\titem_chance(ai, var, screen, enemy, items, hub)\r\n\t\t\t\t\t\t\tenemy.dead = 1\r\n\t\t\t\t\t\t\tenemy.dead_time = pygame.time.get_ticks()\r\n\t\t\t\t\t\t\tenemy_death(ai, var, screen, enemy, shockers)",
"def shot(self):\n\n SHOTS_ROWS = (\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\")\n SHOTS_COLS = (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\")\n\n shot_msg = sys.stdin.readline()[:-1]\n if len(shot_msg) and shot_msg[0] in SHOTS_ROWS and shot_msg[1:] in SHOTS_COLS:\n self.last_shot = shot_msg\n self.client.send((\"S\" + shot_msg).encode(\"utf-8\")) # Send shot\n sys.stdout.flush()\n clear()",
"def bomb(self, index):\n\n coords = self.canvas.coords(self.squares[index])\n x, y = coords[0] + 10, coords[1] + 10\n tag = self.hit[index]\n\n # Count moves for player (used for scoring)\n if self.players.winCondition == 1:\n self.players.moves[self.playerNumber][0] += 1\n\n # Hit\n if tag != 0:\n self.tracker[tag] -= 1\n\n # Count moves for player (used in scoring)\n if self.players.winCondition == 1:\n self.players.moves[self.playerNumber].append(\\\n self.players.moves[self.playerNumber][0])\n self.players.moves[self.playerNumber][0] = 0\n\n # Ship was sunk\n if self.tracker[tag] == 0:\n text = []\n tagname = 'tag%s' % tag\n\n # Bonus points equal to the size of ship\n # awarded for sinking entire ship\n if self.players.winCondition == 0:\n self.players.score[self.playerNumber] += \\\n self.counter_copy[tag]\n\n # Show bombed location with black & orange flashing bar\n for i in range(5):\n text.append(self.canvas.create_text(\\\n x, y, text='O', fill='red'))\n self.canvas.addtag_withtag('text', text[-1])\n self.canvas.tag_raise(tagname, 'square')\n for i in range(3): # Flashing bar\n self.canvas.itemconfig(tagname, {'fill': 'black'})\n self.canvas.update()\n self.myframe.after(100)\n self.canvas.itemconfig(tagname, {'fill': 'orange'})\n self.canvas.update()\n self.myframe.after(100)\n\n self.hit[index] = 5\n self.players.message[not self.playerNumber] = \\\n '%s,\\nYour ship of size %s was sunk by enemy' % \\\n (self.players.usernames[not self.playerNumber], \\\n self.counter_copy[tag])\n self.players.endOfTurn(self.tracker)\n return\n\n # Hit, but not sunk. Player gets only 1 point\n if self.players.winCondition == 0:\n self.players.score[self.playerNumber] += 1\n\n # Show hit location with flashing black & red circle\n text = []\n for i in range(3):\n del text[:]\n for i in range(5): # flash black circle\n text.append(self.canvas.create_text(\\\n x, y, text='O', fill='black'))\n self.canvas.addtag_withtag('text', text[-1])\n self.canvas.update()\n self.myframe.after(100)\n del text[:]\n for i in range(5): # flash red circle\n text.append(self.canvas.create_text(\\\n x, y, text='O', fill='red'))\n self.canvas.addtag_withtag('text', text[-1])\n self.canvas.update()\n self.myframe.after(100)\n\n # Complete miss. Draw 'X'\n else:\n for i in range(5):\n text = self.canvas.create_text(x, y, text='X', fill='yellow')\n self.canvas.addtag_withtag('text', text)\n self.canvas.update()\n self.myframe.after(250)\n self.hit[index] = 5\n self.players.endOfTurn(self.tracker)",
"def sinkShipSearch(self, coordinates, size, direction):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, []\r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, None\r\n if self.enemyBoard[coordinates.x][coordinates.y] != BoardState.HIT:\r\n # This search is all for naught since the ship can't possibly have sunk at this position.\r\n return False, None\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(coordinates)\r\n return sunkShip, shipCoordinates",
"def ship_hit(si_settings,screen,stats,sb,ship,aliens,bullets):\n if stats.ships_left > 0:\n # Decrement ships_left.\n stats.ships_left -= 1\n #update Scoreboard\n sb.prep_ships()\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n #empties aliens and bullets\n aliens.empty()\n bullets.empty()\n #makes new aliens and centers ship\n create_fleet(si_settings,screen,ship,aliens)\n ship.center_ship()\n #stop\n sleep(0.5)",
"def ship_hit(si_settings, screen, stats, sb, ship, aliens, bullets, alienBullets, images):\r\n if stats.ships_left > 0:\r\n # Decrement ships_left.\r\n stats.ships_left -= 1\r\n\r\n # Animate the ship explosion\r\n ship_explosion(si_settings, screen, ship)\r\n\r\n # Update scoreboard.\r\n sb.prep_ships()\r\n\r\n # Empty the list of aliens and bullets.\r\n aliens.empty()\r\n bullets.empty()\r\n alienBullets.empty()\r\n\r\n # Create a new fleet and center the ship.\r\n create_fleet(si_settings, screen, ship, aliens, images)\r\n ship.center_ship()\r\n\r\n # Pause.\r\n sleep(0.5)\r\n else:\r\n stats.game_active = False\r\n pygame.mouse.set_visible(True)",
"def ship_size(data, cell):\n if type(data) != dict:\n print('Wrong argument data')\n return None\n if type(cell) != tuple:\n print(\"Second argument must be a tuple\")\n return None\n if type(cell[0]) != str:\n print(\"First element of the second argument must be a str - A..J\")\n return None\n if type(cell[1]) != int:\n print(\"Second element of the second argument must be a number - 1..10\")\n return None\n if not has_ship(data, cell):\n return 0\n x = ord(cell[0].upper()) - 64\n y = cell[1]\n if x < 1 or x > 10:\n print('Wrong coordinate. Must be from A to J.')\n return None\n if y < 1 or y > 10:\n print('Wrong coordinate. Must be from 1 to 10.')\n return None\n size = 1\n coords = {(x, y)}\n if data[(x+1, y)] == 'damaged' or data[(x+1, y)] or data[(x-1, y)] == 'damaged' or data[(x-1, y)]:\n start = x\n while(data[(start-1, y)] == 'damaged' or data[(start-1, y)]):\n coords = coords | {(start-1, y)}\n size += 1\n start -= 1\n if start < 2:\n break\n start = x\n while(data[(start+1, y)] == 'damaged' or data[(start+1, y)]):\n coords = coords | {(start + 1, y)}\n size += 1\n start += 1\n if start > 10:\n break\n elif data[(x, y+1)] == 'damaged' or data[(x, y+1)] or data[(x, y-1)] == 'damaged' or data[(x, y-1)]:\n start = y\n while(data[(x, start-1)] == 'damaged' or data[(x, start-1)]):\n coords = coords | {(x, start - 1)}\n size += 1\n start -= 1\n if start < 1:\n break\n start = y\n while(data[(x, start+1)] == 'damaged' or data[(x, start+1)]):\n coords = coords | {(x, start + 1)}\n size += 1\n start += 1\n if start > 10:\n break\n return (size, coords)",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n if stats.ships_left > 0:\n #decrement the value of ships_left\n stats.ships_left -= 1\n #update scoreboard\n sb.prep_ships()\n #when hit remove bullets and aliens from screen\n aliens.empty()\n bullets.empty()\n #create a new fleet with ship at centre\n create_fleet(ai_settings, screen,ship, aliens)\n ship.center_ship()\n #pause for a sec to collect defeat\n sleep(1.0)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def stitch(tiles, dest):\n pass",
"def play_a_shot(self, req):\n game = models.BattleShip.getByUrlKey(req.url_key)\n return game.shoot(str(req.player), (str(req.y) + str(req.x - 1)))",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n\tif stats.ships_left > 0:\n\t\t#Decrement ships_left\n\t\tstats.ships_left -= 1\n\t\t\n\t\t#Update scoreboard\n\t\tsb.prep_ships()\n\t\t\n\t\t#Empty the list of aliens and bullets\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\t\n\t\t#Create a new fleet and center the ship\n\t\tcreate_fleet(ai_settings, screen, ship, aliens)\n\t\tship.center_ship()\n\t\t\n\t\t#Pause\n\t\tsleep(0.5)\n\n\telse:\n\t\tstats.game_active = False \n\t\tpygame.mouse.set_visible(True)",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n if stats.ships_left > 0:\n # Decrement ships left.\n stats.ships_left -= 1\n\n # Update scoreboard.\n sb.prep_ships()\n\n # Empty the list of aliens and bullets.\n aliens.empty()\n bullets.empty()\n\n # Create new fleet.\n create_fleet(ai_settings, screen, ship, aliens)\n\n # Center the ship.\n ship.center_ship()\n\n # Pause for a while.\n sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def computer_fire(self):\n\n # Check tracker to see if previous attempt was a hit\n # If yes, continue to bomb rest of the ship first\n for shipID, size in self.tracker.items():\n if (size != 0) and (self.counter_copy[shipID] != size):\n for n in range(len(self.hit)):\n if self.hit[n] == shipID:\n self.bomb(n)\n return\n\n # Else, randomly fire on a new location\n n = random.randrange(0, len(self.hit))\n while self.hit[n] == 5:\n n = random.randrange(0, len(self.hit))\n self.bomb(n)",
"def shots_per_game(self, shots_per_game):\n\n self._shots_per_game = shots_per_game",
"def process_shot(self):\n if self.has_active_ship():\n self.mark = constants.HIT_SHIP_MARK\n self.hit_count += 1\n if self.hit_count == self.ship.power:\n self.mark = constants.DEAD_SHIP_MARK\n return constants.KILL\n else:\n return constants.HIT\n elif not self.occupied or self.mark == constants.MISS_HIT_MARK:\n self.mark = constants.MISS_HIT_MARK\n return constants.MISS",
"def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk",
"def group_shot_data_by_game_team(shots):\n grouped_shot_data = dict()\n\n # definining zones\n zones = ['slot', 'left', 'right', 'blue_line', 'neutral_zone', 'behind_goal']\n\n for shot in shots[:]:\n game_team_key = (shot['game_id'], shot['team'])\n if game_team_key not in grouped_shot_data:\n grouped_shot_data[game_team_key] = dict()\n for shot_zone_cat in SHOT_ZONE_CATEGORIES:\n grouped_shot_data[game_team_key][shot_zone_cat] = 0\n if 'distance' in shot_zone_cat:\n grouped_shot_data[game_team_key][shot_zone_cat] = list()\n # retrieving shot zone, e.g. *slot*, *left*\n zone = shot['shot_zone'].lower()\n # retrieving combined shot zone and outcome used as key below, e.g.\n # *slot_missed*, *left_blocked*, *blue_line_on_goal*\n zone_tgt_type = \"%s_%s\" % (zone, shot['target_type'])\n # retrieving combined shot zone and distance used as key below, e.g.\n # *right_distance*\n zone_distance = \"%s_distance\" % zone\n # adding shot incident to counter for shot zone\n grouped_shot_data[game_team_key][\"%s_shots\" % zone] += 1\n # adding shot incident to counter for shot zone/outcome\n grouped_shot_data[game_team_key][zone_tgt_type] += 1\n # adding distance of shot incident\n grouped_shot_data[game_team_key][zone_distance].append(\n shot['distance'])\n # in case of a goal, adding shot incident to couter for goals\n # from shot zone\n if shot['scored']:\n grouped_shot_data[game_team_key][\"%s_goals\" % zone] += 1\n\n # finally calculating percentages and mean distances for shot incidents\n # from each zone\n for key in grouped_shot_data:\n all_shots = 0\n all_on_goal = 0\n for zone in zones:\n # adding shots from current zone to number of shots from all zones\n all_shots += grouped_shot_data[key][\"%s_shots\" % zone]\n # adding shots on goal from current zone to number of shots on goal\n # from all zones\n all_on_goal += grouped_shot_data[key][\"%s_on_goal\" % zone]\n # calculating mean distance of shots from the current zone (if\n # applicable)\n if grouped_shot_data[key][\"%s_shots\" % zone]:\n grouped_shot_data[key][\"%s_distance\" % zone] = round(\n sum(grouped_shot_data[key][\"%s_distance\" % zone]) /\n grouped_shot_data[key][\"%s_shots\" % zone], 2\n )\n else:\n grouped_shot_data[key][\"%s_distance\" % zone] = 0\n\n # calculating percentage of shots and shots on goal for each shot zone\n for zone in zones:\n grouped_shot_data[key][\"%s_pctg\" % zone] = round((\n grouped_shot_data[key][\"%s_shots\" % zone] / all_shots\n ) * 100., 2)\n grouped_shot_data[key][\"%s_on_goal_pctg\" % zone] = round((\n grouped_shot_data[key][\"%s_on_goal\" % zone] / all_on_goal\n ) * 100., 2)\n\n for key in grouped_shot_data:\n game_id, team = key\n per_team_game_shots = list(filter(\n lambda d:\n d['game_id'] == game_id and\n d['team'] == team, shots))\n grouped_shot_data[key]['shots'] = len(per_team_game_shots)\n per_team_post_crossbar_shots = list(filter(lambda d: 'hit_post' in d and d['hit_post'], per_team_game_shots))\n grouped_shot_data[key]['hit_post'] = len(per_team_post_crossbar_shots)\n per_team_game_ev_shots = list(filter(\n lambda d: d['situation'] == 'EV', per_team_game_shots))\n grouped_shot_data[key]['shots_ev'] = len(per_team_game_ev_shots)\n per_team_game_5v5_shots = list(filter(\n lambda d: d['plr_situation'] == '5v5', per_team_game_shots))\n grouped_shot_data[key]['shots_5v5'] = len(per_team_game_5v5_shots)\n per_team_game_pp_shots = list(filter(\n lambda d: d['situation'] == 'PP', per_team_game_shots))\n grouped_shot_data[key]['shots_pp'] = len(per_team_game_pp_shots)\n per_team_game_sh_shots 
= list(filter(\n lambda d: d['situation'] == 'SH', per_team_game_shots))\n grouped_shot_data[key]['shots_sh'] = len(per_team_game_sh_shots)\n per_team_game_unblocked_shots = list(filter(\n lambda d: d['target_type'] in ['on_goal', 'missed'],\n per_team_game_shots))\n grouped_shot_data[key]['shots_unblocked'] = len(\n per_team_game_unblocked_shots)\n per_team_game_unblocked_ev_shots = list(filter(\n lambda d: d['situation'] == 'EV', per_team_game_unblocked_shots))\n grouped_shot_data[key]['shots_unblocked_ev'] = len(\n per_team_game_unblocked_ev_shots)\n per_team_game_unblocked_5v5_shots = list(filter(\n lambda d: d['plr_situation'] == '5v5',\n per_team_game_unblocked_shots))\n grouped_shot_data[key]['shots_unblocked_5v5'] = len(\n per_team_game_unblocked_5v5_shots)\n per_team_game_unblocked_pp_shots = list(filter(\n lambda d: d['situation'] == 'PP', per_team_game_unblocked_shots))\n grouped_shot_data[key]['shots_unblocked_pp'] = len(\n per_team_game_unblocked_pp_shots)\n per_team_game_unblocked_sh_shots = list(filter(\n lambda d: d['situation'] == 'SH', per_team_game_unblocked_shots))\n grouped_shot_data[key]['shots_unblocked_sh'] = len(\n per_team_game_unblocked_sh_shots)\n per_team_game_shots_on_goal = list(filter(\n lambda d: d['target_type'] == 'on_goal', per_team_game_shots))\n grouped_shot_data[key]['shots_on_goal'] = len(\n per_team_game_shots_on_goal)\n per_team_game_ev_shots_on_goal = list(filter(\n lambda d: d['situation'] == 'EV', per_team_game_shots_on_goal))\n grouped_shot_data[key]['shots_on_goal_ev'] = len(\n per_team_game_ev_shots_on_goal)\n per_team_game_5v5_shots_on_goal = list(filter(\n lambda d: d['plr_situation'] == '5v5',\n per_team_game_shots_on_goal))\n grouped_shot_data[key]['shots_on_goal_5v5'] = len(\n per_team_game_5v5_shots_on_goal)\n per_team_game_pp_shots_on_goal = list(filter(\n lambda d: d['situation'] == 'PP', per_team_game_shots_on_goal))\n grouped_shot_data[key]['shots_on_goal_pp'] = len(\n per_team_game_pp_shots_on_goal)\n per_team_game_sh_shots_on_goal = list(filter(\n lambda d: d['situation'] == 'SH', per_team_game_shots_on_goal))\n grouped_shot_data[key]['shots_on_goal_sh'] = len(\n per_team_game_sh_shots_on_goal)\n per_team_game_5v5_goals = list(filter(\n lambda d: d['scored'], per_team_game_5v5_shots_on_goal))\n grouped_shot_data[key]['goals_5v5'] = len(per_team_game_5v5_goals)\n return grouped_shot_data",
"def slap(self, irc, msg, args, channel, victim, number):\r\n MaxSlaps = self.registryValue(\"MaxSlaps\")\r\n #self.log.debug(\"slap: \"+channel)\r\n if not victim: # no victim given\r\n victim = msg.nick\r\n if not channel: # no channel given\r\n channel = msg.args[0]\r\n if irc.nick == channel: #private chat\r\n channel = msg.nick\r\n if not number:\r\n number = 1\r\n if number > MaxSlaps:\r\n if MaxSlaps != 0:\r\n number = MaxSlaps\r\n for i in range(number):\r\n text = string.replace(self._buildSlap(), \"$nick\", victim)\r\n irc.queueMsg(ircmsgs.action(channel, text))",
"def shatter(self):\n self.delete()\n if self.size==0:\n #if this rock is a small rock, then dont spawn any new rocks when its shattered\n return\n numberOfRocksLeftToSpawn=numberOfNewRocksToSpawnOnShatter\n while (numberOfRocksLeftToSpawn>0):\n Rock(canvas=self.canvasIGetDrawnOn,xPos=self.xPos,yPos=self.yPos,size=self.size-1)\n numberOfRocksLeftToSpawn-=1",
"def printShipsToSink(self):\r\n sb = []\r\n for sinkingShip in self.shipsToSink:\r\n shot = self.mapToShot(sinkingShip.bullseye)\r\n sb.append(str(shot))\r\n sb.append(\":\")\r\n sb.append(str(sinkingShip.size))\r\n sb.append(\" \")\r\n logging.debug(\"\".join(sb))",
"def check_shot(self, cords):\n \n y, x = text_to_cords(cords)\n output = \"MISS\"\n\n for ship in self.ships:\n if ship.is_my_cords(y, x):\n output = \"HIT\"\n break\n \n self.board.change_field(y, x, text_to_field_type(output))\n self.show_boards()\n print(\"Enemy shot: \", cords)\n\n # if all ships are destroyed, disconnect\n if self.board.are_all_ships_destoryed(self.ships):\n output = \"WIN\"\n print(\"You lose :(\")\n self.client.send(f\"C{output}\".encode(\"utf-8\"))\n self.client.close()\n sys.exit()\n\n self.client.send(f\"C{output}\".encode(\"utf-8\"))",
"def test_sink(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(2, ship.check_hit(self.hit))\n self.assertEqual(0, len(ship.location))",
"def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))",
"def actions(self, ship: SpaceShip, input_data: Dict[str, Tuple]) -> None:\n # ship.turn_rate = 180.0\n ship.thrust = ship.thrust_range[1]\n ship.shoot()"
] |
[
"0.6546094",
"0.6270627",
"0.6220413",
"0.5889326",
"0.5881789",
"0.5799883",
"0.57032895",
"0.5695585",
"0.5620391",
"0.5610055",
"0.5579357",
"0.5547138",
"0.5538802",
"0.5500968",
"0.54043406",
"0.53991574",
"0.5375928",
"0.53510904",
"0.5305802",
"0.5302349",
"0.52896166",
"0.5269307",
"0.52645546",
"0.52599317",
"0.5244274",
"0.5220515",
"0.51776123",
"0.5173075",
"0.5124788",
"0.51114446"
] |
0.69968224
|
0
|
Attempts to sink all sinking ships by positioning them in all possible positions. If there's not enough information to sink them, the board remains as-is. For every ship that's sunk, marks all of its coordinates as SUNK to prevent them from being used in subsequent shot selections.
|
def sinkShips(self):
while True:
stillSinkingShips = False
for i in range(len(self.shipsToSink) - 1, -1, -1):
sunkShip, shipCoordinates = self.positionAndSinkShip(self.shipsToSink[i])
if sunkShip:
stillSinkingShips = True
for coordinates in shipCoordinates:
self.enemyBoard[coordinates.x][coordinates.y] = BoardState.SUNK
del(self.shipsToSink[i])
if not stillSinkingShips:
break
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def positionAndSinkShip(self, sinkingShip):\r\n directions = [Direction.North, Direction.South, Direction.East, Direction.West]\r\n sunkShip = False\r\n shipCoordinates = None\r\n for direction in directions:\r\n tSunkShip, tShipCoordinates = self.sinkShip(sinkingShip.bullsEye, sinkingShip.size, direction)\r\n if tSunkShip:\r\n if sunkShip:\r\n return False, None\r\n else:\r\n sunkShip = tSunkShip\r\n shipCoordinates = tShipCoordinates\r\n return sunkShip, shipCoordinates",
"def sinkShip(self, bullsEye, size, direction):\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(bullsEye.x + direction.x, bullsEye.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(bullsEye)\r\n return sunkShip, shipCoordinates",
"def place_ships(self):\n # An array that holds the ships to then be looped through\n length_of_ships = [6, 4, 3, 2]\n\n for ship_length in length_of_ships:\n while True:\n if self.user == 'computer':\n orientation = random.choice(['H', 'V'])\n row = random.randint(0, 9)\n column = random.randint(0, 9)\n if self.check_ship_fits(\n ship_length, row, column, orientation\n ):\n if self.collision_check(\n self.board, row, column, orientation, ship_length\n ) is False:\n if orientation == 'H':\n for i in range(column, column + ship_length):\n self.board[row][i] = SHIP\n else:\n for i in range(row, row + ship_length):\n self.board[i][column] = SHIP\n break\n else:\n if self.user == 'player':\n self.ship_prompt(ship_length)\n orientation, column, row = self.ship_input()\n if self.check_ship_fits(\n ship_length, row, column, orientation\n ):\n if self.collision_check(\n self.board,\n row,\n column,\n orientation,\n ship_length\n ) is False:\n if orientation == 'H':\n for i in range(\n column, column + ship_length\n ):\n self.board[row][i] = SHIP\n else:\n for i in range(row, row + ship_length):\n self.board[i][column] = SHIP\n print(' ')\n self.print_board()\n break",
"def _move_all_ships(game_data):\n # make the moves of the ships\n for player in game_data['ships']:\n # deal only with the ships which are not abandonned\n if player != 0:\n for ship_name in game_data['ships'][player]:\n # move the ship\n _move_ship(player, ship_name, game_data)",
"def placeShips(self):\n\n self.ships = [] # Canvas co-ordinates for the ships\n self.shipText = [] # Text to be displayed besides each ship\n self.failedAttempts = []\n self.names = {2: 'BOAT', 3: 'SUB', 4: 'CRUISER', 5: 'CARRIER'}\n\n items = self.shipList.items()\n for k, v in items:\n for i in range(v): # for every ship v of size k\n attempts = 20\n success = False\n while not success and attempts > 0:\n success = True\n n = random.randrange(0, len(self.hit))\n shipRotation = random.randrange(0, 2)\n attempts -= 1\n\n # Check if ship fits horizontally\n if shipRotation != 0:\n for j in range(n, n + k):\n if (j >= len(self.hit)) or (j % self.boardsize \\\n < n % self.boardsize) or (self.hit[j] != 0):\n success = False\n break\n # Check if ship fits vertically\n else:\n for j in range(n, n + k * self.boardsize, \\\n self.boardsize):\n if (j >= len(self.hit)) or (self.hit[j] != 0):\n success = False\n break\n\n # Keep track of ships that failed to be placed\n if attempts == 0:\n self.failedAttempts.append(k)\n continue\n\n # Ships of custom sizes above 5 are named \"BATTLESHIP\"\n name = 'BATTLESHIP'\n if k in self.names:\n name = self.names[k]\n\n x = n % self.boardsize * 20 + 20\n y = (n / self.boardsize) * 20 + 40\n\n # Place ship horizontally\n if shipRotation != 0:\n for i in range(n, n + k):\n self.hit[i] = self.shipID\n self.ships.append(self.canvas.create_rectangle(\\\n x, y + 5, x + k * 20, y + 15, fill='orange', width=1))\n self.shipText.append(self.canvas.create_text(\\\n x + 20, y, text=name, font='Courier 6', fill='yellow'))\n\n # Place ship vertically\n else:\n for i in range(n, n + k * self.boardsize, self.boardsize):\n self.hit[i] = self.shipID\n self.ships.append(self.canvas.create_rectangle(\\\n x + 5, y, x + 15, y + k * 20, fill='orange', width=1))\n cname = \"\"\n for ch in name:\n cname += ch + '\\n'\n self.shipText.append(self.canvas.create_text(\\\n x, y + 20, text=cname, font='Courier 6', fill='yellow'))\n\n # Tag every placed ship with \"tagXXX\" where XXX is shipID\n # Will be used to identify which ship was bombed\n self.canvas.addtag_withtag('tag%s' % \\\n self.shipID, self.ships[-1])\n self.canvas.addtag_withtag('ship', self.ships[-1])\n self.tracker[self.shipID] = k\n self.shipID += 1\n\n # Announce any failures in placing ships\n # Game will exit after user is notified of this failure\n if self.failedAttempts:\n mssg = \"Oops, we failed to fit the \" \\\n \"following ships on this board:\\n\\n\"\n failCount = Counter(self.failedAttempts)\n for m, n in failCount.items():\n mssg += '%s ships of size %s\\n' % (n, m)\n showDialogBox(mssg + \"\\nUnfortunately, we \" \\\n \"cannot proceed with the game!\")\n showDialogBox(\"Goodbye!\")\n self.exitstatus = 1\n return\n\n # 'tracker' will be modified throughout the game, so keep a copy\n self.counter_copy = self.tracker.copy()\n self.players.tracker = self.tracker\n\n for i in self.ships:\n self.canvas.addtag_withtag('ship', i)\n for i in self.shipText:\n self.canvas.addtag_withtag('text', i)\n for i in range(self.shipID - 100):\n self.ships.append(None)\n self.shipText.append(None)\n\n if self.isComputer == 1:\n self.canvas.tag_lower('ship')\n self.canvas.tag_lower('text')\n self.canvas.tag_bind('square', '<Button-1>', self.fire)\n else:\n self.clickDone = Button(self.myframe, text='Done',\\\n command=self.clickDone)\n self.clickDone.place(x=1, y=1)",
"def calculate_shot(self, player_ships: list):\n board = Board(self.__size)\n prob_board = Board(self.__size)\n\n for move in self.__moves:\n x, y = move[1], move[2]\n board.shoot(x, y)\n prob_board.board[x][y] = -1000\n\n if move[0] == ShotResult.HIT:\n if any(ship.sunk and [anything, x, y] in ship.pieces for ship in player_ships):\n # part of a sunken ship; no need to increase neighbours probability\n continue\n\n for (i, j) in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n try: # easier to ask for forgiveness that permission :d\n if (ShotResult.HIT, x - i, y - j) in self.__moves: # opposite neighbour\n prob_board.board[x + i][y + j] += self.offset\n prob_board.board[x + i][y + j] += self.offset\n except IndexError:\n pass\n\n final_x, final_y = 0, 0\n max_prob = -1\n for s in player_ships:\n if not s.sunk: # the only time we use unsunken ships; we use just their size\n for i in range(self.__size):\n for j in range(self.__size):\n for o in range(0, 2): # for every (x, y, orientation) possible\n try:\n board.check(Ship(s.type, o, i, j))\n for offset in range(s.size):\n x, y = i - offset * o, j + offset * (not o)\n prob_board.board[x][y] += 1 # increase the probability of each piece\n except IllegalMove:\n pass\n\n for i in range(self.__size):\n for j in range(self.__size):\n if prob_board.board[i][j] > max_prob:\n final_x, final_y = i, j\n max_prob = prob_board.board[i][j]\n elif prob_board.board[i][j] == max_prob:\n if randint(0, 10) < 5: # random aspect to the ai, harder to predict\n final_x, final_y = i, j\n return final_x, final_y",
"def postion_fleet(self, ships_positions, board):\n for cell in ships_positions:\n row = ord(cell[:1]) - ord('A')\n col = int(cell[1:]) - 1\n for i in range(row, row + self.total_rows_req):\n for j in range(col, col + self.total_column_req):\n self.position_ship(\n board.grid[i][j],\n constants.FLEET_Q_CLASS,\n constants.Q_CLASS_HIT_POWER\n )",
"def printShipsToSink(self):\r\n sb = []\r\n for sinkingShip in self.shipsToSink:\r\n shot = self.mapToShot(sinkingShip.bullseye)\r\n sb.append(str(shot))\r\n sb.append(\":\")\r\n sb.append(str(sinkingShip.size))\r\n sb.append(\" \")\r\n logging.debug(\"\".join(sb))",
"def add_ships(ship_screen):\n\n direction_choice = random.choice([\"H\", \"V\"]) # chooses the direction of the big ship\n x = random.randint(0, 4)\n y = random.randint(0, 4)\n ship_screen[x][y] = \"X\"\n if direction_choice == \"V\": # after selection of the first coordinate, chooses a direction\n # and then according to x or y coordinates puts the other 2 pieces of the ship\n if x == 0:\n ship_screen[x+1][y], ship_screen[x+2][y] = \"X\", \"X\"\n elif x == 4:\n ship_screen[x-1][y], ship_screen[x-2][y] = \"X\", \"X\"\n else:\n ship_screen[x+1][y], ship_screen[x-1][y] = \"X\", \"X\"\n elif direction_choice == \"H\":\n if y == 0:\n ship_screen[x][y+1], ship_screen[x][y+2] = \"X\", \"X\"\n elif y == 4:\n ship_screen[x][y-1], ship_screen[x][y-2] = \"X\", \"X\"\n else:\n ship_screen[x][y-1], ship_screen[x][y+1] = \"X\", \"X\"\n main_ship_coordinates = [[a, b] for a in range(5) for b in range(5) if ship_screen[a][b] == \"X\"]\n # returns ship coordinates\n banned_coordinates = [] # codes of between 29-34 finds the neighbour coordinates of big ship\n for d in main_ship_coordinates:\n neighbour_coordinates = [[d[0], d[1]+1], [d[0]+1, d[1]], [d[0]-1, d[1]], [d[0], d[1]-1],\n [d[0]+1, d[1]+1], [d[0]-1, d[1]-1], [d[0]+1, d[1]-1], [d[0]-1, d[1]+1]]\n for e in neighbour_coordinates:\n if e[0] in range(5) and e[1] in range(5) and e not in banned_coordinates:\n banned_coordinates.append(e)\n while True:\n i = random.randint(0, 4)\n j = random.randint(0, 4)\n if [i, j] in banned_coordinates:\n continue\n else:\n ship_screen[i][j] = \"O\"\n break\n while True:\n possible_coordinates = [[i+1, j], [i-1, j], [i, j-1], [i, j+1]]\n # selects second piece randomly from possible 4\n second_piece = random.choice(possible_coordinates)\n if second_piece[0] in range(5) and second_piece[1] in range(5) and second_piece not in banned_coordinates:\n ship_screen[second_piece[0]][second_piece[1]] = \"O\"\n break\n else:\n continue\n return ship_screen",
"def postion_fleet(self, ships_positions, board):\n for cell in ships_positions:\n row = ord(cell[:1]) - ord('A')\n col = int(cell[1:]) - 1\n for i in range(row, row + self.total_rows_req):\n for j in range(col, col + self.total_column_req):\n self.position_ship(\n board.grid[i][j],\n constants.FLEET_P_CLASS,\n constants.P_CLASS_HIT_POWER\n )",
"def sinkShipSearch(self, coordinates, size, direction):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, []\r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, None\r\n if self.enemyBoard[coordinates.x][coordinates.y] != BoardState.HIT:\r\n # This search is all for naught since the ship can't possibly have sunk at this position.\r\n return False, None\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(coordinates)\r\n return sunkShip, shipCoordinates",
"def sandwich(self):\n if self.game.rules[\"capture\"] == \"custodial_capture\":\n self.remove_self()\n if self.game.rules[\"trapping\"]:\n for trapped_neighbor in [neighbor for neighbor in self.get_neighbors() if neighbor.trapped and self.position in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]:\n trapped_neighbor.untrap()\n self.trap()",
"def island_feeding(self):\n for y in self.island_map:\n for cell in y:\n cell.feeding()",
"def allplacement(self, test_pylos):\n placements = []\n player = test_pylos._state['visible']['turn']\n for layer in range(4):\n for row in range(4-layer):\n for column in range(4-layer):\n value = test_pylos.get(layer,row,column)\n if value is None:\n try:\n test_pylos.validPosition(layer, row, column)\n except game.InvalidMoveException:\n pass\n else:\n if test_pylos.createSquare((layer, row, column)) is True:\n if player == 0:\n removableballs = self.removableballs0(test_pylos)\n removableballs.apppend([layer, row, column])\n else:\n removableballs = self.removableballs1(test_pylos)\n removableballs.apppend([layer, row, column])\n for i in removableballs:\n if i[0] == layer - 1 and i[1] == row and i[2] == column:\n removableballs.remove(i)\n elif i[0] == layer - 1 and i[1] == row + 1 and i[2] == column:\n removableballs.remove(i)\n elif i[0] == layer - 1 and i[1] == row + 1 and i[2] == column + 1:\n removableballs.remove(i)\n elif i[0] == layer - 1 and i[1] == row and i[2] == column + 1:\n removableballs.remove(i)\n\n for i in removableballs:\n move = {\n 'move': 'place',\n 'to': [layer, row, column],\n 'remove': i\n }\n\n placements.append(move)\n else:\n move = {\n 'move': 'place',\n 'to': [layer, row, column]\n }\n placements.append(move)\n return placements",
"def test_sink(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(2, ship.check_hit(self.hit))\n self.assertEqual(0, len(ship.location))",
"def stitch(tiles, dest):\n pass",
"def move_all_animals(self):\n\n y_lim, x_lim = np.shape(self.map)\n for y in range(y_lim):\n for x in range(x_lim):\n loc = y, x\n self.map[loc].migration(self.get_neighbour((y, x)))",
"def moves(self, teleport=False):\n recv = [(self._size[y][x] - self._used[y][x], x, y)\n for x in range(self.xsize) for y in range(self.ysize)]\n recv.sort(reverse=True)\n send = [(self._used[y][x], x, y)\n for x in range(self.xsize) for y in range(self.ysize)\n if self._used[y][x] > 0]\n send.sort()\n # print(\"recv: {}...\".format(str(recv[:5])))\n # print(\"send: {}...\".format(str(send[:5])))\n moves = []\n for avail, x1, y1 in recv:\n for used, x0, y0 in send:\n if avail < used:\n break\n if teleport or (x0 == x1 and abs(y0 - y1) == 1) or (\n y0 == y1 and abs(x0 - x1) == 1):\n self.apply((x0, y0), (x1, y1))\n moves.append((self.score(), self.key(), self.save(), list(self.history)))\n self.undo()\n return moves",
"def island_migration(self):\n for y in self.island_map:\n for cell in y:\n cell.migration()\n\n for y in self.island_map:\n for cell in y:\n for animal in cell.population:\n animal.has_moved = False",
"def placeGuess(player, xPos, yPos):\n\t\n\t# Print hit ship message if other player's board\n\t# has a ship at that position\n\tif(player.otherPlayer.board[yPos][xPos] != '~'):\n\t\tplayer.guess[yPos][xPos] = '#'\n\t\tprint(\"You've hit a ship!\\n\")\n\n\t# Print miss message if no ship at that position\n\telse:\n\t\tplayer.guess[yPos][xPos] = 'm'\n\t\tprint(\"You missed!\\n\")\n\n\n\tfor i in range(0, 2):\n\n\t\t# Set enemy to be the other player\n\t\tenemy = player.otherPlayer\n\t\tship = enemy.ships[i]\n\n\t\t# If ship is already sunk, go to next iteration\n\t\tif(ship.sunk == 1):\n\t\t\tcontinue\n\n\t\tbad = 0\n\t\tsX = ship.startX\n\t\tsY = ship.startY\n\t\tori = ship.orientation\n\n\t\t# Check if all of ship in horizontal position is all hit\n\t\tif(ori == 1):\n\t\t\tfor y in range(sY, sY + ship.shipLen):\n\t\t\t\tif(player.guess[y][sX] != enemy.board[y][sX]):\n\t\t\t\t\tbad = 1\n\t\t\t\t\tbreak\n\n\t\t# Check if all of ship in vertical position is all hit\n\t\telse:\n\t\t\tfor x in range(sX, sX + ship.shipLen):\n\t\t\t\tif(player.guess[sY][x] != enemy.board[sY][x]):\n\t\t\t\t\tbad = 1\n\t\t\t\t\tbreak\n\n\t\t# If entire ship is hit, sink ship, print ship sunk message\n\t\tif(bad == 0):\n\t\t\tship.sunk = 1\n\t\t\tprint(\"You sank a \" + ship.name + \"\\n\")\n\t\t\tbreak",
"def test_spreading_players(self):\n params = [3, 4, 11, 20]\n w = gen.generate_map(height=50, width=80, params=params)\n coords = s.spread_across_the_map(w, 4)\n for c in coords:\n x = c[0]\n y = c[1]\n self.assertNotEqual(w[x][y], 0)\n self.assertNotEqual(w[x][y], 3) # uncomment the block to see an overview\n # w[x][y] = 4\n # image = img.get_map_overview(w)\n # image2 = img.get_resized_map_overview(image, 781, 521)\n # image2.show()",
"def __init__(self, boardDimensions, shipsAfloat):\r\n ShotSelector.__init__(self, boardDimensions, shipsAfloat)\r\n self.shipsToSink = []",
"def attack_all(self):\n\n # Query databse\n query_string = \"SELECT * from ships_in_range;\"\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Check ships in range\n for ship in results:\n self.attack(str(ship[0]), str(ship[1]))",
"def shotResult(self, shot, hit, sunk):\r\n ShotSelector.shotResult(self, shot, hit, sunk)\r\n coordinates = self.mapToCoordinates(shot)\r\n if sunk:\r\n self.shipsToSink.append(SinkingShip(coordinates, sunk))\r\n self.sinkShips()\r\n self.printShipsAfloat()\r\n self.printShipsToSink()",
"def test_places_all_ships(self):\n player = TestPlayer()\n self.ai.place_ships(player)\n sorted_ships = sorted(player.placed_ships)\n self.assertEqual([2, 3, 3, 4, 5], sorted_ships)",
"def place_targets():\n\n \n coords = []\n while len(coords)<self.N_targets:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y)\n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords +=[p]\n self.coordinates__targets = coords",
"def repairWall(self, game_state):\n first_row = [[0, 13], [1, 13],[2, 13],[3, 13],[4, 13],[5, 13],[6, 13],[7, 13],[8, 13],[9, 13],[10, 13],[11, 13],[12, 13],[13, 13],[15, 13],[16, 13],[17, 13],[18, 13],[19, 13],[20, 13],[21, 13],[22, 13],[23, 13],[24, 13],[25, 13],[26, 13],[27, 13]]\n destructor_loc1 = [[12,11], [16,11]]\n second_row = [[13, 12],[15, 12],[12, 12],[16, 12],[11, 12],[17, 12],[1, 12],[2, 12],[3, 12],[4, 12],[5, 12],[6, 12],[7, 12],[8, 12],[9, 12],[10, 12],[18, 12],[19, 12],[20, 12],[21, 12],[22, 12],[23, 12],[24, 12],[25, 12],[26, 12]]\n destructor_loc2 = [[8,11], [20,11]]\n encryptor_loc1 = [[13,11], [15,11]]\n destructor_loc3 = [[4,11], [24,11]]\n encryptor_row1 = [[13,10], [15,10]]\n destructor_row1 = [[12,10], [16,10]]\n encryptor_row2 = [[13,9], [15,9]]\n destructor_row2 = [[12,9], [16,9]]\n encryptor_row3 = [[13,8], [15,8]]\n destructor_row3 = [[12,8], [16,8]]\n\n for location in first_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in second_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_loc1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_loc3:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row2:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row3:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row3:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)",
"def prep_ships(self):\r\n\t\tself.ships=Group()\r\n\t\tfor ship_number in range(self.stats.ships_left):\r\n\t\t\tship=Ship(self.ai_settings, self.screen)\r\n\t\t\tship.transform()\r\n\t\t\tship.rect.x=10+ship_number*ship.rect.width\r\n\t\t\tship.rect.y=10\r\n\t\t\tself.ships.add(ship)",
"def moveShips(self, shipList, empireID, systemID):\n try:\n # init attributes that all ships should share\n shipOne = self.ships[shipList[0]]\n shipOwner = shipOne.empireID\n if shipOwner <> empireID:\n return 'Cannot move ship if it is not yours'\n fromSystem = self.systems[shipOne.fromSystem]\n toSystem = self.systems[systemID]\n newSystemGrid = anwp.func.funcs.getMapQuadrant(fromSystem.x, fromSystem.y,\n toSystem.x, toSystem.y)\n # check if group of ships share these attributes\n for shipID in shipList:\n myShip = self.ships[shipID]\n if myShip.fromSystem.id <> fromSystem.id:\n return 'Ship %s comes from system: %s' % (myShip.name, fromSystem.name)\n if myShip.empireID <> shipOwner:\n return 'Ship %s has different empire Owner: %s' % (myShip.name, myShip.empireID)\n \n \n valid = 0\n # ships have been validated to be going the same place, now validate movement\n for shipID in shipList:\n myShip = self.ships[shipID]\n # can ship be moved?\n if myShip.fromSystem <> myShip.toSystem and systemID <> myShip.fromSystem:\n return 'Ship %s has already moved' % myShip.name\n # check if ship is platform going out of own empire\n if (myShip.myShipHull.abr[1:] == 'WP' and \n (toSystem.myEmpireID <> empireID and anwp.func.globals.diplomacy[self.empires[empireID].diplomacy[toSystem.myEmpireID].diplomacyID]['alliance'] == 0)):\n return 'Platforms can only move between your systems, they cannot attack.'\n # check that ship is allowed to move to system\n if toSystem.myEmpireID <> empireID and anwp.func.globals.diplomacy[self.empires[empireID].diplomacy[toSystem.myEmpireID].diplomacyID]['move'] == 0:\n return 'Diplomacy will not allow your ships to move to this system'\n \n # is ship returning from move this round?\n if systemID == myShip.fromSystem:\t\t\n # is system adjacent?\n if myShip.toSystem in fromSystem.connectedSystems:\n valid = 1\n else:\n # refund warp point\n fromSystem = self.systems[myShip.toSystem]\n fromSystem.usedWGC -= 1\n toSystem.usedWGC -= 1\n valid = 1\n else:\n # is system adjacent?\n if systemID in fromSystem.connectedSystems:\n valid = 1\n else:\n # is warp available?\n if 1 <= (fromSystem.availWGC-fromSystem.usedWGC) and 1 <= (toSystem.availWGC-toSystem.usedWGC):\n # spend warp points\n fromSystem.usedWGC += 1\n toSystem.usedWGC += 1\n valid = 1\n else:\n return 'Not enough Warp Capacity Points to move %s' % myShip.name\n \n if valid == 1:\n # move ships\n for shipID in shipList:\n myShip = self.ships[shipID]\n myShip.moveToSystem(newSystemGrid, systemID)\n return 1\n else:\n return 'Not a valid Ship Movement Order'\n except:\n return 'galaxy->moveShips error'",
"def place_allowed_tower_sites():\n self.coordinates__tower_sites = []\n for tk in xrange(self.N_tower_kinds):\n #Each kind of tower will have the correct number of sites placed\n \n coords = []\n while len(coords)<self.N_tower_sites[tk]:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y) \n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords.append(p)\n self.coordinates__tower_sites.append(coords)"
] |
[
"0.70138216",
"0.6665017",
"0.6649957",
"0.6581857",
"0.6460488",
"0.6365548",
"0.61918086",
"0.61489993",
"0.6130212",
"0.61247045",
"0.6064208",
"0.5848623",
"0.5781333",
"0.577842",
"0.5750796",
"0.57419527",
"0.572682",
"0.5720159",
"0.5629075",
"0.5614789",
"0.56005985",
"0.55898875",
"0.55876315",
"0.557461",
"0.55698496",
"0.5563406",
"0.5557599",
"0.55544144",
"0.55362195",
"0.55251825"
] |
0.83000946
|
0
|
Positions a sinking ship in all possible positions and tries to sink it. Arguments sinkingShip The ship to position. Returns sunkShip True if the ship was sunk, False if not. shipCoordinates Only valid if the ship was sunk, the coordinates of the sunk ship.
|
def positionAndSinkShip(self, sinkingShip):
directions = [Direction.North, Direction.South, Direction.East, Direction.West]
sunkShip = False
shipCoordinates = None
for direction in directions:
tSunkShip, tShipCoordinates = self.sinkShip(sinkingShip.bullsEye, sinkingShip.size, direction)
if tSunkShip:
if sunkShip:
return False, None
else:
sunkShip = tSunkShip
shipCoordinates = tShipCoordinates
return sunkShip, shipCoordinates
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sinkShips(self):\r\n while True:\r\n stillSinkingShips = False\r\n for i in range(len(self.shipsToSink) - 1, -1, -1):\r\n sunkShip, shipCoordinates = self.positionAndSinkShip(self.shipsToSink[i])\r\n if sunkShip:\r\n stillSinkingShips = True\r\n for coordinates in shipCoordinates:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.SUNK\r\n del(self.shipsToSink[i])\r\n if not stillSinkingShips:\r\n break",
"def sinkShip(self, bullsEye, size, direction):\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(bullsEye.x + direction.x, bullsEye.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(bullsEye)\r\n return sunkShip, shipCoordinates",
"def is_ship_sunk(self, x, y):\n marker = self.markers[x][y]\n total_hits = self.ship_hits[marker]\n return total_hits == MarkerType.MAX_HITS[marker]",
"def on_ship(self, ship):\n # TODO: add ship to game\n # The game has a set combination of ships which is created when choosing the field size\n # (by battleships.ship_combination_creator()).\n # After that you need to create the player and add every ship from the combination (\n # without position) to his fleet. Done by add_ship(size) in the player class,\n # just cycle through the fleet_config of the Game class.\n # Then you need a button to determine the facing of the ship (north, west, east, south) and\n # something that shows which ship you are placing (either go through the array yourself\n # or by choosing the size per button).\n # Then the player needs to call position_ship(size, x, y, facing). If the ship cannot be\n # placed there (either because it conflicts with another ship or goes over the edge of\n # the board) the function will return a False, if it works it'll return True.\n # By calling check_ship_placement() from the Player class you can check whether all\n # ships are placed or not (returning True if all are placed, returning False if one or\n # more are still missing a position).\n # Apparently this is a commuication error on both sides:\n # This is how the ship placement works via the now built GUI:\n # New Game-> field size setting -> player name entry + ship placement,\n # via click, hoover and click (lenght of placed ship 1+hoover+1)\n # a list of coords creates the ship\n # {'n': [[(0, 0), (1, 0), (2, 0), (3, 0)]],\n # 'b': [[(1, 1), (2, 1), (3, 1), (4, 1)], [(2, 2), (2, 3), (2, 4)]]}\n\n if len(ship) < 3 or len(ship) > 6:\n # set length of ship to 3 to 6\n return False\n return True",
"def placeGuess(player, xPos, yPos):\n\t\n\t# Print hit ship message if other player's board\n\t# has a ship at that position\n\tif(player.otherPlayer.board[yPos][xPos] != '~'):\n\t\tplayer.guess[yPos][xPos] = '#'\n\t\tprint(\"You've hit a ship!\\n\")\n\n\t# Print miss message if no ship at that position\n\telse:\n\t\tplayer.guess[yPos][xPos] = 'm'\n\t\tprint(\"You missed!\\n\")\n\n\n\tfor i in range(0, 2):\n\n\t\t# Set enemy to be the other player\n\t\tenemy = player.otherPlayer\n\t\tship = enemy.ships[i]\n\n\t\t# If ship is already sunk, go to next iteration\n\t\tif(ship.sunk == 1):\n\t\t\tcontinue\n\n\t\tbad = 0\n\t\tsX = ship.startX\n\t\tsY = ship.startY\n\t\tori = ship.orientation\n\n\t\t# Check if all of ship in horizontal position is all hit\n\t\tif(ori == 1):\n\t\t\tfor y in range(sY, sY + ship.shipLen):\n\t\t\t\tif(player.guess[y][sX] != enemy.board[y][sX]):\n\t\t\t\t\tbad = 1\n\t\t\t\t\tbreak\n\n\t\t# Check if all of ship in vertical position is all hit\n\t\telse:\n\t\t\tfor x in range(sX, sX + ship.shipLen):\n\t\t\t\tif(player.guess[sY][x] != enemy.board[sY][x]):\n\t\t\t\t\tbad = 1\n\t\t\t\t\tbreak\n\n\t\t# If entire ship is hit, sink ship, print ship sunk message\n\t\tif(bad == 0):\n\t\t\tship.sunk = 1\n\t\t\tprint(\"You sank a \" + ship.name + \"\\n\")\n\t\t\tbreak",
"def draw_ship(self, image, ship, dims):\n # Get the center x, y and the size s\n x, y, s, r, m = dims\n\n #Load files\n if ship == 'cruiseship':\n im_path = 'ships/cruiseship_isolated.png'\n ma_path = 'ships/cruiseship_isolated_mask.png'\n elif ship == 'tanker':\n im_path = 'ships/tanker_isolated.png'\n ma_path = 'ships/tanker_isolated_mask.png'\n\n #Transforming ship\n ship = cv2.imread(im_path)\n ship_transformed = self.transform(ship.copy(), dims)\n ship_shape = np.shape(ship_transformed)\n s_x = int((ship_shape[0]+0.5)//2)\n s_y = int((ship_shape[1]+0.5)//2)\n ship_transformed = ship_transformed[0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[0],\n 0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[1],\n :]\n ship_transformed_th = self.threshold(ship_transformed)\n\n #Adding boat to image\n image_slice = image[x - s_x:x + s_x, y - s_y:y + s_y, :]\n image_slice -= 255*image_slice*ship_transformed_th\n image_slice += ship_transformed\n image[x - s_x:x + s_x, y - s_y:y + s_y, :] = image_slice\n\n return image",
"def ship_attack(attacker_ship, victim_ship):\n\n if not is_ship_alive(victim_ship):\n # save us some time, it should be the same dead ship.\n return victim_ship\n\n if attacker_ship.debuffs.get('active', {}).get('ECM', 0) != 0:\n # attacker is jammed can't attack or apply debuffs\n return victim_ship\n\n debuffs = grab_debuffs(attacker_ship.schema, victim_ship)\n\n if attacker_ship.schema.firepower <= 0:\n # damage doesn't need to be calculated, but debuffs do\n return Ship(\n victim_ship.schema,\n ShipAttributes(\n victim_ship.attributes.shield,\n victim_ship.attributes.armor,\n victim_ship.attributes.hull,\n ),\n debuffs,\n )\n\n damage = true_damage(attacker_ship.schema.firepower,\n attacker_ship.schema.weapon_size,\n victim_ship.schema.size,\n attacker_ship.debuffs,\n victim_ship.debuffs\n )\n\n shield = shield_bounce(victim_ship.attributes.shield,\n victim_ship.schema.shield, damage)\n if shield == victim_ship.attributes.shield:\n # it glanced off, don't need to worry about hull breaches when\n # the weapon didn't even hit\n return Ship(\n victim_ship.schema,\n ShipAttributes(\n victim_ship.attributes.shield,\n victim_ship.attributes.armor,\n victim_ship.attributes.hull,\n ),\n debuffs,\n )\n\n armor = victim_ship.attributes.armor + min(shield, 0)\n hull = hull_breach(victim_ship.attributes.hull,\n victim_ship.schema.hull, - min(armor, 0))\n return Ship(\n victim_ship.schema,\n ShipAttributes(max(0, shield), max(0, armor), max(0, hull)),\n debuffs,\n )",
"def add_ships(ship_screen):\n\n direction_choice = random.choice([\"H\", \"V\"]) # chooses the direction of the big ship\n x = random.randint(0, 4)\n y = random.randint(0, 4)\n ship_screen[x][y] = \"X\"\n if direction_choice == \"V\": # after selection of the first coordinate, chooses a direction\n # and then according to x or y coordinates puts the other 2 pieces of the ship\n if x == 0:\n ship_screen[x+1][y], ship_screen[x+2][y] = \"X\", \"X\"\n elif x == 4:\n ship_screen[x-1][y], ship_screen[x-2][y] = \"X\", \"X\"\n else:\n ship_screen[x+1][y], ship_screen[x-1][y] = \"X\", \"X\"\n elif direction_choice == \"H\":\n if y == 0:\n ship_screen[x][y+1], ship_screen[x][y+2] = \"X\", \"X\"\n elif y == 4:\n ship_screen[x][y-1], ship_screen[x][y-2] = \"X\", \"X\"\n else:\n ship_screen[x][y-1], ship_screen[x][y+1] = \"X\", \"X\"\n main_ship_coordinates = [[a, b] for a in range(5) for b in range(5) if ship_screen[a][b] == \"X\"]\n # returns ship coordinates\n banned_coordinates = [] # codes of between 29-34 finds the neighbour coordinates of big ship\n for d in main_ship_coordinates:\n neighbour_coordinates = [[d[0], d[1]+1], [d[0]+1, d[1]], [d[0]-1, d[1]], [d[0], d[1]-1],\n [d[0]+1, d[1]+1], [d[0]-1, d[1]-1], [d[0]+1, d[1]-1], [d[0]-1, d[1]+1]]\n for e in neighbour_coordinates:\n if e[0] in range(5) and e[1] in range(5) and e not in banned_coordinates:\n banned_coordinates.append(e)\n while True:\n i = random.randint(0, 4)\n j = random.randint(0, 4)\n if [i, j] in banned_coordinates:\n continue\n else:\n ship_screen[i][j] = \"O\"\n break\n while True:\n possible_coordinates = [[i+1, j], [i-1, j], [i, j-1], [i, j+1]]\n # selects second piece randomly from possible 4\n second_piece = random.choice(possible_coordinates)\n if second_piece[0] in range(5) and second_piece[1] in range(5) and second_piece not in banned_coordinates:\n ship_screen[second_piece[0]][second_piece[1]] = \"O\"\n break\n else:\n continue\n return ship_screen",
"def test_sink(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(2, ship.check_hit(self.hit))\n self.assertEqual(0, len(ship.location))",
"def move_ok(game, ship):\n cell_halite = game.game_map[ship.position].halite_amount\n\n if ship.is_full:\n return True\n\n # generally ignore low value cells. Note Mining_threshold may be dynamic\n if cell_halite < Mining_threshold:\n return True\n\n dropoffs = get_dropoff_positions(game)\n fuel_status = ship.halite_amount / SHIP_MAX_HALITE\n\n # the amount of halite we'll get if we refuel/mine\n # if ship in a dropoff/shipyard, set fuel to max to the ship departs\n refuel_amount = constants.MAX_HALITE if ship.position in dropoffs else cell_halite * SHIP_MINING_EFFICIENCY\n\n net_mine = (cell_halite * SHIP_MINING_EFFICIENCY) + (cell_halite * SHIP_MINING_EFFICIENCY) * -SHIP_FUEL_COST\n net_move = cell_halite * -SHIP_FUEL_COST + game.get_mining_rate(MINING_RATE_LOOKBACK) * SHIP_MINING_EFFICIENCY\n\n #logging.debug(\"fuel_status: {}\".format(fuel_status))\n #logging.debug(\"refuel_amount: {}\".format(refuel_amount))\n #logging.debug(\"net_mine: {}, net_move: {}\".format(net_mine, net_move))\n\n if ship.status == \"transiting\":\n #if refuel_amount > net_mining_yield and fuel_status < SHIP_REFUEL_THRESHOLD:\n # return True\n pass\n elif ship.status == \"exploring\":\n #if cell_halite < Mining_threshold:\n # return True\n pass\n elif ship.status == \"returning\":\n if net_move > net_mine or fuel_status > SHIP_REFUEL_THRESHOLD:\n return True\n else:\n raise RuntimeError(\"Unknown ship status: {}\".format(ship.status))\n\n return False",
"def place_ship(self, row, col, ship, aligment):\n self._validate_place_params(row, col, ship, aligment)\n\n if aligment == ShipPosition.HORIZONTAL:\n # Place ship in horizontal position, populating in right direction\n for i in range(self.get_col_index(col), ship.LENGH + self.get_col_index(col)):\n self.board[(row, self.get_col_value(i))] = ship\n elif aligment == ShipPosition.VERTICAL:\n # Place ship in vertical position, populating in down direction\n for i in range(self.get_row_index(row), ship.LENGH + self.get_row_index(row)):\n self.board[(self.get_row_value(i), col)] = ship",
"def place_ships(self):\n # An array that holds the ships to then be looped through\n length_of_ships = [6, 4, 3, 2]\n\n for ship_length in length_of_ships:\n while True:\n if self.user == 'computer':\n orientation = random.choice(['H', 'V'])\n row = random.randint(0, 9)\n column = random.randint(0, 9)\n if self.check_ship_fits(\n ship_length, row, column, orientation\n ):\n if self.collision_check(\n self.board, row, column, orientation, ship_length\n ) is False:\n if orientation == 'H':\n for i in range(column, column + ship_length):\n self.board[row][i] = SHIP\n else:\n for i in range(row, row + ship_length):\n self.board[i][column] = SHIP\n break\n else:\n if self.user == 'player':\n self.ship_prompt(ship_length)\n orientation, column, row = self.ship_input()\n if self.check_ship_fits(\n ship_length, row, column, orientation\n ):\n if self.collision_check(\n self.board,\n row,\n column,\n orientation,\n ship_length\n ) is False:\n if orientation == 'H':\n for i in range(\n column, column + ship_length\n ):\n self.board[row][i] = SHIP\n else:\n for i in range(row, row + ship_length):\n self.board[i][column] = SHIP\n print(' ')\n self.print_board()\n break",
"def get_dropoff_positions(game, ship = None):\n dropoffs = game.me.get_dropoffs()\n destinations = list(dropoffs) + [game.me.shipyard.position]\n\n if ship is None:\n return destinations\n\n minDistance = False\n movePosition = False\n\n for dest in destinations:\n distance = game.game_map.calculate_distance(ship.position, dest)\n if minDistance == False or distance < minDistance:\n minDistance = distance\n movePosition = dest\n\n return movePosition",
"def getShip(self):\n \"return self._ship\"\n if self._ship == None:\n return True\n return False",
"def calculate_shot(self, player_ships: list):\n board = Board(self.__size)\n prob_board = Board(self.__size)\n\n for move in self.__moves:\n x, y = move[1], move[2]\n board.shoot(x, y)\n prob_board.board[x][y] = -1000\n\n if move[0] == ShotResult.HIT:\n if any(ship.sunk and [anything, x, y] in ship.pieces for ship in player_ships):\n # part of a sunken ship; no need to increase neighbours probability\n continue\n\n for (i, j) in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n try: # easier to ask for forgiveness that permission :d\n if (ShotResult.HIT, x - i, y - j) in self.__moves: # opposite neighbour\n prob_board.board[x + i][y + j] += self.offset\n prob_board.board[x + i][y + j] += self.offset\n except IndexError:\n pass\n\n final_x, final_y = 0, 0\n max_prob = -1\n for s in player_ships:\n if not s.sunk: # the only time we use unsunken ships; we use just their size\n for i in range(self.__size):\n for j in range(self.__size):\n for o in range(0, 2): # for every (x, y, orientation) possible\n try:\n board.check(Ship(s.type, o, i, j))\n for offset in range(s.size):\n x, y = i - offset * o, j + offset * (not o)\n prob_board.board[x][y] += 1 # increase the probability of each piece\n except IllegalMove:\n pass\n\n for i in range(self.__size):\n for j in range(self.__size):\n if prob_board.board[i][j] > max_prob:\n final_x, final_y = i, j\n max_prob = prob_board.board[i][j]\n elif prob_board.board[i][j] == max_prob:\n if randint(0, 10) < 5: # random aspect to the ai, harder to predict\n final_x, final_y = i, j\n return final_x, final_y",
"def place_ship(self, ship, x, y, orientation):\n\t\tdx = (orientation == GameBoard.O_HORIZONTAL)\n\t\tdy = (orientation == GameBoard.O_VERTICAL)\n\t\t# Check if there's enough space first.\n\t\tfor i in range(ship.size):\n\t\t\ttile = self.get_our_tile(x + i * dx, y + i * dy)\n\t\t\tif not tile.is_free():\n\t\t\t\traise ValueError(\"You already have a ship there!\")\n\n\t\tself.dump()\n\t\t# Enlist the ship in the navy.\n\t\tship.place(x, y, orientation)\n\t\tself.ships.append(ship)\n\t\t# Mark the tiles occupied by the ship.\n\t\tfor i in range(ship.size):\n\t\t\tcx = x + i * dx\n\t\t\tcy = y + i * dy\n\n\t\t\t# Create a tile boundary around the ship.\n\t\t\ttile = bt.Tile(bt.Tile.T_OCCUPIED)\n\t\t\tif i == 0:\n\t\t\t\t#\n\t\t\t\t# :AAAAA\n\t\t\t\t#\n\t\t\t\t# :\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx, cy - dy, tile)\n\t\t\t\t# :\n\t\t\t\t# :AAAAA\n\t\t\t\t#\n\t\t\t\t# ::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx - dy, cy - dy - dx, tile)\n\t\t\t\t# :\n\t\t\t\t# :AAAAA\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx + dy, cy - dy + dx, tile)\n\t\t\telif i == ship.size - 1:\n\t\t\t\t# :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# :\n\t\t\t\tself.set_our_tile(cx + dx, cy + dy, tile)\n\t\t\t\t# : :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# ::\n\t\t\t\tself.set_our_tile(cx + dx - dy, cy + dy - dx, tile)\n\t\t\t\t# : :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# : :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# :::\n\t\t\t\tself.set_our_tile(cx + dx + dy, cy + dy + dx, tile)\n\t\t\t# :::::::\n\t\t\t# :AAAAA:\n\t\t\t# : :\n\t\t\t# :::\n\t\t\t# :E\n\t\t\t# :E\n\t\t\t# :::\n\t\t\tself.set_our_tile(cx - dy, cy - dx, tile)\n\t\t\t# :::::::\n\t\t\t# :AAAAA:\n\t\t\t# :::::::\n\t\t\t# :::\n\t\t\t# :E:\n\t\t\t# :E:\n\t\t\t# :::\n\t\t\tself.set_our_tile(cx + dy, cy + dx, tile)\n\n\t\t\t# Create the ship tile by tile.\n\t\t\tself.set_our_tile(cx, cy, ship.tile())\n\n\t\tself.dump()",
"async def ship(self, ctx):\r\n if ctx.invoked_subcommand:\r\n return\r\n defenders = ctx.message.mentions\r\n if defenders:\r\n for defender in defenders:\r\n captain = defender.name\r\n user_ship = Ship.find_ship(captain)\r\n if not user_ship:\r\n await ctx.send(\"{} does not yet have a ship.\".format(captain))\r\n else:\r\n em = discord.Embed(colour=0xAA0000)\r\n em.set_author(name=user_ship.ship_name, icon_url=defender.avatar_url)\r\n em.add_field(name='Ship Level: {}'.format(str(user_ship.level())),\r\n value=\"Win/Loss: {}/{}\".format(user_ship.win, user_ship.loss), inline=False)\r\n em.add_field(name=\"__Part__\", value=parts_print, inline=True)\r\n em.add_field(name=\"__Level__\", value=user_ship.info(), inline=True)\r\n em.set_footer(text=\"Their ship's coffers hold {} gold\".format(user_ship.gold),\r\n icon_url=\"https://cdn.discordapp.com/emojis/554730061463289857.gif\")\r\n em_msg = await ctx.send(embed=em)\r\n return\r\n\r\n captain = ctx.message.author.name\r\n user_ship = Ship.find_ship(captain)\r\n\r\n if not user_ship:\r\n user_ship = Ship(captain)\r\n user_ship.update(is_new=True)\r\n\r\n await ctx.send('Congratulations on the new ship, Captain {}! Welcome aboard!'\r\n '\\nCannons and Crew contribute to your attack,'\r\n ' while Armor and Sails contribute to defense\\nHere\\'s what she\\'s got:'.format(captain))\r\n\r\n em = discord.Embed(colour=0xDD0000)\r\n em.set_author(name=user_ship.ship_name,\r\n icon_url=ctx.message.author.avatar_url)\r\n em.add_field(name='Ship Level: {}'.format(str(user_ship.level())),\r\n value=\"Win/Loss: {}/{}\".format(user_ship.win, user_ship.loss), inline=False)\r\n #em.add_field(name='Ship Level', value=str(user_ship.level()), inline=False)\r\n em.add_field(name=\"__Part__\", value=parts_print, inline=True)\r\n em.add_field(name=\"__Level__\", value=user_ship.info(), inline=True)\r\n em.set_footer(text=\"Your ship's coffers hold {} gold\".format(user_ship.gold),\r\n icon_url=\"https://cdn.discordapp.com/emojis/554730061463289857.gif\")\r\n em_msg = await ctx.send(embed=em)",
"def sinkShipSearch(self, coordinates, size, direction):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, []\r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, None\r\n if self.enemyBoard[coordinates.x][coordinates.y] != BoardState.HIT:\r\n # This search is all for naught since the ship can't possibly have sunk at this position.\r\n return False, None\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(coordinates)\r\n return sunkShip, shipCoordinates",
"def move_safe(self, ship, positions):\n new_moves = []\n for position in positions:\n move = position - ship.position\n new_moves.append((move.x, move.y))\n\n for i, cell in enumerate(positions):\n if not self.game_map[cell].is_occupied:\n self.game_map[cell].mark_unsafe(ship)\n return new_moves[i]\n\n return Direction.Still",
"def moveShips(self, shipList, empireID, systemID):\n try:\n # init attributes that all ships should share\n shipOne = self.ships[shipList[0]]\n shipOwner = shipOne.empireID\n if shipOwner <> empireID:\n return 'Cannot move ship if it is not yours'\n fromSystem = self.systems[shipOne.fromSystem]\n toSystem = self.systems[systemID]\n newSystemGrid = anwp.func.funcs.getMapQuadrant(fromSystem.x, fromSystem.y,\n toSystem.x, toSystem.y)\n # check if group of ships share these attributes\n for shipID in shipList:\n myShip = self.ships[shipID]\n if myShip.fromSystem.id <> fromSystem.id:\n return 'Ship %s comes from system: %s' % (myShip.name, fromSystem.name)\n if myShip.empireID <> shipOwner:\n return 'Ship %s has different empire Owner: %s' % (myShip.name, myShip.empireID)\n \n \n valid = 0\n # ships have been validated to be going the same place, now validate movement\n for shipID in shipList:\n myShip = self.ships[shipID]\n # can ship be moved?\n if myShip.fromSystem <> myShip.toSystem and systemID <> myShip.fromSystem:\n return 'Ship %s has already moved' % myShip.name\n # check if ship is platform going out of own empire\n if (myShip.myShipHull.abr[1:] == 'WP' and \n (toSystem.myEmpireID <> empireID and anwp.func.globals.diplomacy[self.empires[empireID].diplomacy[toSystem.myEmpireID].diplomacyID]['alliance'] == 0)):\n return 'Platforms can only move between your systems, they cannot attack.'\n # check that ship is allowed to move to system\n if toSystem.myEmpireID <> empireID and anwp.func.globals.diplomacy[self.empires[empireID].diplomacy[toSystem.myEmpireID].diplomacyID]['move'] == 0:\n return 'Diplomacy will not allow your ships to move to this system'\n \n # is ship returning from move this round?\n if systemID == myShip.fromSystem:\t\t\n # is system adjacent?\n if myShip.toSystem in fromSystem.connectedSystems:\n valid = 1\n else:\n # refund warp point\n fromSystem = self.systems[myShip.toSystem]\n fromSystem.usedWGC -= 1\n toSystem.usedWGC -= 1\n valid = 1\n else:\n # is system adjacent?\n if systemID in fromSystem.connectedSystems:\n valid = 1\n else:\n # is warp available?\n if 1 <= (fromSystem.availWGC-fromSystem.usedWGC) and 1 <= (toSystem.availWGC-toSystem.usedWGC):\n # spend warp points\n fromSystem.usedWGC += 1\n toSystem.usedWGC += 1\n valid = 1\n else:\n return 'Not enough Warp Capacity Points to move %s' % myShip.name\n \n if valid == 1:\n # move ships\n for shipID in shipList:\n myShip = self.ships[shipID]\n myShip.moveToSystem(newSystemGrid, systemID)\n return 1\n else:\n return 'Not a valid Ship Movement Order'\n except:\n return 'galaxy->moveShips error'",
"def go_home(self, ship) -> Position:\n self.ship_states[ship.id].destination = self.closest_dropoff(ship)\n self.ship_states[ship.id].mode = Modes.DEPOSITING",
"def buy_ship(self, location, ship_type):\n payload = {'location': location, 'type': ship_type}\n r = requests.post(self.base_url + f'/users/{self.username}/ships', headers=self.auth_header, )\n return r.text",
"def _move_all_ships(game_data):\n # make the moves of the ships\n for player in game_data['ships']:\n # deal only with the ships which are not abandonned\n if player != 0:\n for ship_name in game_data['ships'][player]:\n # move the ship\n _move_ship(player, ship_name, game_data)",
"def place_ship(self, ship: Ship, location: Point, orientation: Orientation) -> None:\n\n ship_point_set = ship.get_points(location, orientation)\n ship_type = ship.ship_type\n\n if self.board.issuperset(\n ship.get_points(location, orientation)\n ) and ship_point_set.isdisjoint(self.all_ship_locations):\n self.all_ship_locations.update(ship_point_set)\n self.individual_ship_locations[ship_type] = set(ship_point_set)\n else:\n raise InvalidShipPlacementException(f'Placement of {ship} at {location} in orientation {orientation.value} is invalid')",
"def place_ship(board,ship_length,bow,ship_direction):\n\n # verifies the input\n if abs(ship_direction[0])+abs(ship_direction[1])==1 and \\\n 0 <= bow[0] < len(board[0]) and 0 <= bow[1] < len(board) and \\\n -1 <= (bow[0] - ship_direction[0]*ship_length) <= len(board[0]) and \\\n -1 <= (bow[1] - ship_direction[1]*ship_length) <= len(board):\n\n index=ship_index(board) # find the next ship-index\n size=[ship_length]\n for part in range(ship_length): # try to place the ship\n if board[bow[1]-ship_direction[1]*part]\\\n [bow[0]-ship_direction[0]*part] == None:\n board[bow[1]-ship_direction[1]*part]\\\n [bow[0]-ship_direction[0]*part] = (index, part, size)\n else: # if another ship in the middle, delete the part of the ship\n # alredy placed and return None\n for del_part in range(part):\n board[bow[1]-ship_direction[1]*del_part]\\\n [bow[0]-ship_direction[0]*del_part] = None\n return\n return index",
"def runShipBattle(self, myShipBattle):\n try:\n running = 1\n interval = anwp.func.globals.intervalValue\n game = self.app.game\n myShipBattle.setData(self.componentdata, self.shiphulldata, self.dronehulldata, self.weapondata)\n mode = anwp.war.shipsimulator.ShipSimulator(game, myShipBattle, False, self)\n \n while running:\n if mode.update(interval) == 0:\n running = 0\n mode = None\n return 'ShipBattle success'\n except:\n return 'galaxy->runShipBattle error: %s' % myShipBattle.systemName",
"def point_in_ship(ships, coor):\n for ship in ships:\n if coor in ship.coordinates or coor in ship.neighbor:\n return True\n return False",
"def ship_hit(si_settings, screen, stats, sb, ship, aliens, bullets, alienBullets, images):\r\n if stats.ships_left > 0:\r\n # Decrement ships_left.\r\n stats.ships_left -= 1\r\n\r\n # Animate the ship explosion\r\n ship_explosion(si_settings, screen, ship)\r\n\r\n # Update scoreboard.\r\n sb.prep_ships()\r\n\r\n # Empty the list of aliens and bullets.\r\n aliens.empty()\r\n bullets.empty()\r\n alienBullets.empty()\r\n\r\n # Create a new fleet and center the ship.\r\n create_fleet(si_settings, screen, ship, aliens, images)\r\n ship.center_ship()\r\n\r\n # Pause.\r\n sleep(0.5)\r\n else:\r\n stats.game_active = False\r\n pygame.mouse.set_visible(True)",
"def check_ship_fits(self, ship_length, row, column, orientation):\n if orientation == 'H':\n if column + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True\n else:\n if row + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True",
"def _choose_ship(self, ship_type, board):\n coordinate = self._get_coordinate()\n direction = self._get_ship_direction()\n \n try:\n board.set_ship(coordinate, direction, ship_type, self.name)\n except b_types.BattleshipError:\n print(\"Invalid Coordinate! Make sure the entire ship is in bounds and it doesn't overlap with another ship.\")\n self._choose_ship(ship_type, board)"
] |
[
"0.6950931",
"0.60631305",
"0.58785766",
"0.5825624",
"0.56573665",
"0.5617272",
"0.55613524",
"0.5537164",
"0.5525837",
"0.549535",
"0.5424462",
"0.53371406",
"0.5328491",
"0.53082603",
"0.53073376",
"0.53026956",
"0.52963483",
"0.5286528",
"0.52810824",
"0.5252677",
"0.5246915",
"0.52281064",
"0.5204364",
"0.51442903",
"0.5141819",
"0.5133382",
"0.50739515",
"0.50529665",
"0.5040077",
"0.50230944"
] |
0.7811943
|
0
|
Skips over the BULLSEYE before placing the sinking ship as the BULLSEYE will cause early search termination. Arguments bullsEye The coordinates of the shot that caused the ship to start sinking. size The size of the ship. direction The direction to move as the ship is being placed. Returns sunkShip True if the ship was sunk, False if not. shipCoordinates Only valid if the ship was sunk, the coordinates of the sunk ship.
|
def sinkShip(self, bullsEye, size, direction):
sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(bullsEye.x + direction.x, bullsEye.y + direction.y), size - 1, direction)
if sunkShip:
shipCoordinates.append(bullsEye)
return sunkShip, shipCoordinates
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sinkShipSearch(self, coordinates, size, direction):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, []\r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, None\r\n if self.enemyBoard[coordinates.x][coordinates.y] != BoardState.HIT:\r\n # This search is all for naught since the ship can't possibly have sunk at this position.\r\n return False, None\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(coordinates)\r\n return sunkShip, shipCoordinates",
"def bulldozer(pos, sizeX=20 , sizeY=20, sizeZ=40, putGrass=\"yes\"):\n # Make the place empty\n #mc.setBlocks(pos.x-30, pos.y-1, pos.z-10,\n # pos.x+40, pos.y+20, pos.z+40, air)\n \n mc.setBlocks(pos.x-sizeX, pos.y-1, pos.z-10,\n pos.x+sizeX, pos.y+sizeY, pos.z+sizeZ, air)\n \n if putGrass == \"yes\":\n # put grass on the ground\n mc.setBlocks(pos.x-30, pos.y-1, pos.z-10,\n pos.x+sizeX, pos.y-1, pos.z+sizeZ, grass)\n elif putGrass == \"no\":\n pass",
"def sinkShips(self):\r\n while True:\r\n stillSinkingShips = False\r\n for i in range(len(self.shipsToSink) - 1, -1, -1):\r\n sunkShip, shipCoordinates = self.positionAndSinkShip(self.shipsToSink[i])\r\n if sunkShip:\r\n stillSinkingShips = True\r\n for coordinates in shipCoordinates:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.SUNK\r\n del(self.shipsToSink[i])\r\n if not stillSinkingShips:\r\n break",
"def breaking_of_the_box(size = (10, 10), verbose = False):\n import numpy as np\n r, l, u, d = \"R\", \"L\", \"U\", \"D\" # initiating walkind directions\n np.random.seed(int(time.time()))\n \n # initiating field with walking directions\n field = np.random.randint(1, 5, size = (10, 10))\n field = np.where(field ==1, r, field)\n field = np.where(field =='2', l, field)\n field = np.where(field =='3', u, field)\n field = np.where(field =='4', d, field)\n\n i, j = 0, 0\n coordinates = []\n \n # iterating in a field\n while (i<field.shape[0] and i>-1) and (j<field.shape[1] and j>-1):\n prev_i,prev_j = i, j\n coordinates.append((i, j)) \n \n copy_field = field.copy()\n \n if field[i][j] == r:\n j+=1\n elif field[i][j] == l:\n j-=1\n elif field[i][j] == u:\n i-=1\n elif field[i][j] == d:\n i+=1\n copy_field[i][j] = \"X\"\n if verbose == True:\n print(copy_field, \"#\"*48, sep = \"\\n\") #printing step by step position of a player\n if (i, j) in coordinates:\n # in case of infitine loop break\n print(\"Player is stucked inside of a box\")\n break\n\n else:\n print(\"Player came out of the box\")\n print(\"Coordinates of a breaking point\", \"(\", prev_i, prev_j, \")\")",
"def add_ships(ship_screen):\n\n direction_choice = random.choice([\"H\", \"V\"]) # chooses the direction of the big ship\n x = random.randint(0, 4)\n y = random.randint(0, 4)\n ship_screen[x][y] = \"X\"\n if direction_choice == \"V\": # after selection of the first coordinate, chooses a direction\n # and then according to x or y coordinates puts the other 2 pieces of the ship\n if x == 0:\n ship_screen[x+1][y], ship_screen[x+2][y] = \"X\", \"X\"\n elif x == 4:\n ship_screen[x-1][y], ship_screen[x-2][y] = \"X\", \"X\"\n else:\n ship_screen[x+1][y], ship_screen[x-1][y] = \"X\", \"X\"\n elif direction_choice == \"H\":\n if y == 0:\n ship_screen[x][y+1], ship_screen[x][y+2] = \"X\", \"X\"\n elif y == 4:\n ship_screen[x][y-1], ship_screen[x][y-2] = \"X\", \"X\"\n else:\n ship_screen[x][y-1], ship_screen[x][y+1] = \"X\", \"X\"\n main_ship_coordinates = [[a, b] for a in range(5) for b in range(5) if ship_screen[a][b] == \"X\"]\n # returns ship coordinates\n banned_coordinates = [] # codes of between 29-34 finds the neighbour coordinates of big ship\n for d in main_ship_coordinates:\n neighbour_coordinates = [[d[0], d[1]+1], [d[0]+1, d[1]], [d[0]-1, d[1]], [d[0], d[1]-1],\n [d[0]+1, d[1]+1], [d[0]-1, d[1]-1], [d[0]+1, d[1]-1], [d[0]-1, d[1]+1]]\n for e in neighbour_coordinates:\n if e[0] in range(5) and e[1] in range(5) and e not in banned_coordinates:\n banned_coordinates.append(e)\n while True:\n i = random.randint(0, 4)\n j = random.randint(0, 4)\n if [i, j] in banned_coordinates:\n continue\n else:\n ship_screen[i][j] = \"O\"\n break\n while True:\n possible_coordinates = [[i+1, j], [i-1, j], [i, j-1], [i, j+1]]\n # selects second piece randomly from possible 4\n second_piece = random.choice(possible_coordinates)\n if second_piece[0] in range(5) and second_piece[1] in range(5) and second_piece not in banned_coordinates:\n ship_screen[second_piece[0]][second_piece[1]] = \"O\"\n break\n else:\n continue\n return ship_screen",
"def draw_ship(self, image, ship, dims):\n # Get the center x, y and the size s\n x, y, s, r, m = dims\n\n #Load files\n if ship == 'cruiseship':\n im_path = 'ships/cruiseship_isolated.png'\n ma_path = 'ships/cruiseship_isolated_mask.png'\n elif ship == 'tanker':\n im_path = 'ships/tanker_isolated.png'\n ma_path = 'ships/tanker_isolated_mask.png'\n\n #Transforming ship\n ship = cv2.imread(im_path)\n ship_transformed = self.transform(ship.copy(), dims)\n ship_shape = np.shape(ship_transformed)\n s_x = int((ship_shape[0]+0.5)//2)\n s_y = int((ship_shape[1]+0.5)//2)\n ship_transformed = ship_transformed[0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[0],\n 0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[1],\n :]\n ship_transformed_th = self.threshold(ship_transformed)\n\n #Adding boat to image\n image_slice = image[x - s_x:x + s_x, y - s_y:y + s_y, :]\n image_slice -= 255*image_slice*ship_transformed_th\n image_slice += ship_transformed\n image[x - s_x:x + s_x, y - s_y:y + s_y, :] = image_slice\n\n return image",
"def bamboo(screen, color, x, y, size):\n rect(screen, color, (x, y + size * 6, size, size * 14 / 3))\n rect(screen, color, (x, y, size, size * 16 / 3))\n polygon(screen, color, [(x + size * 2 / 3, y - size * 2 / 3), (x, y - size),\n (x + size / 3, y - size * 13 / 3), (x + size, y - size * 14 / 3)])\n polygon(screen, color, [(x + size, y - size * 14 / 3), (x + size * 2 / 3, y - size * 73 / 15),\n (x + size * 17 / 15, y - size * 29 / 3), (x + size * 22 / 15, y - size * 142 / 15)])\n stick(screen, color, [x - size * 13 / 3, y - size * 5 / 3], [x - size * 1 / 3, y + size / 3], 3, 1, -30)\n stick(screen, color, [x + size * 24 / 3, y - size * 25 / 3], [x + size * 4 / 3, y - size * 3], 5, 1, 30)\n stick(screen, color, [x - size * 13 / 3, y - size * 25 / 3], [x - size * 1 / 3, y - size * 5], 5, 1, -30)\n stick(screen, color, [x + size * 14 / 3, y - size * 5 / 3], [x + size * 4 / 3, y + size / 3], 3, 1, 30)",
"def positionAndSinkShip(self, sinkingShip):\r\n directions = [Direction.North, Direction.South, Direction.East, Direction.West]\r\n sunkShip = False\r\n shipCoordinates = None\r\n for direction in directions:\r\n tSunkShip, tShipCoordinates = self.sinkShip(sinkingShip.bullsEye, sinkingShip.size, direction)\r\n if tSunkShip:\r\n if sunkShip:\r\n return False, None\r\n else:\r\n sunkShip = tSunkShip\r\n shipCoordinates = tShipCoordinates\r\n return sunkShip, shipCoordinates",
"def sandwich(self):\n if self.game.rules[\"capture\"] == \"custodial_capture\":\n self.remove_self()\n if self.game.rules[\"trapping\"]:\n for trapped_neighbor in [neighbor for neighbor in self.get_neighbors() if neighbor.trapped and self.position in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]:\n trapped_neighbor.untrap()\n self.trap()",
"def run_shoe_rack_manipulation(self, debug=False, push_in_distance=0.00):\n\n print(\"\\n\\n--- Running Shoe Manipulation-------\\n\\n\")\n\n # self.taskRunner.callOnMain(self._poser_visualizer.visualize_result)\n\n if not self.check_category_goal_estimation_succeeded():\n return False\n\n # check that we really are doing mug\n category_manipulation_type = self.state.cache['category_manipulation_goal']['type']\n assert category_manipulation_type == CategoryManipulationType.SHOE_ON_RACK\n\n\n speed = self.graspingParams['speed']['fast']\n self.moveHome(speed=speed)\n\n result = self.state.cache['category_manipulation_goal']['result']\n T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)\n T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)\n\n grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)\n grasp_data.gripper.params[\"hand_inner_diameter\"] = result.gripper_width\n grasp_data.gripper.params[\"hand_inner_diameter\"] = 0.07\n self.state.grasp_data = grasp_data\n\n # rotate the grasp to align with nominal\n params = self.getParamsForCurrentLocation()\n grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])\n grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)\n\n\n\n\n def vis_function():\n vis.updateFrame(T_W_fingertip_vtk, \"gripper fingertip frame\", scale=0.15, parent=self._vis_container)\n\n vis.updateFrame(grasp_data.grasp_frame, \"grasp frame\", scale=0.15, parent=self._vis_container)\n\n self.visualize_grasp(grasp_data)\n\n self.taskRunner.callOnMain(vis_function)\n\n # execute the grasp\n force_threshold_magnitude = 30\n object_in_gripper = self.execute_grasp(grasp_data, close_gripper=True, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude, push_in_distance=0.04, ee_speed_m_s=0.1)\n\n\n if not object_in_gripper:\n print(\"grasp failed, returning\")\n return False\n\n print \"object_in_gripper:\", object_in_gripper\n\n T_goal_obs = self.state.cache['category_manipulation_goal'][\"T_goal_obs\"]\n T_W_G = self.state.cache['gripper_frame_at_grasp']\n\n\n\n pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position\n pickup_speed = self.graspingParams['speed']['pickup']\n\n if not object_in_gripper:\n # open the gripper and back away\n self.gripperDriver.send_open_gripper_set_distance_from_current()\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n return False\n\n # pickup the object\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n\n # move home\n self.moveHome()\n\n # move to approach pose\n speed = self.graspingParams['speed']['fast']\n q_approach = np.array(self._stored_poses_director[\"left_table\"][\"shoe_approach\"])\n self.robotService.moveToJointPosition(q_approach, maxJointDegreesPerSecond=speed)\n\n\n # compute some poses\n T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix\n T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)\n object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)\n object_manip.compute_transforms()\n T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose\n\n T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)\n T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)\n object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)\n object_manip_approach.compute_transforms()\n T_W_Gn_approach_vtk = 
object_manip_approach.T_W_Gn\n\n # move this down by push_in_distance\n\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_approach_vtk)\n T_W_Gn_approach_vtk = transformUtils.transformFromPose(pos, quat)\n\n\n # now convert these to ee poses for running IK\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_vtk)\n pos[2] -= push_in_distance\n T_W_Gn_vtk = transformUtils.transformFromPose(pos, quat)\n T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)\n T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)\n\n T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)\n T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)\n\n\n # place the object\n force_threshold_magnitude = 50 # shoes are heavy\n q_nom = np.array(self._stored_poses_director[\"Grasping\"][\"above_table_pre_grasp\"])\n q_nom = np.array(self._stored_poses_director[\"left_table\"][\"above_table_pre_grasp\"])\n code =self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude)\n\n print(\"\\n\\n--- Finished Shoe Manipulation-------\\n\\n\")\n\n self._shoe_manipulation_counter += 1\n\n return code",
"def shipCollided(self):\n self.shipHealth -= 1\n\n if self.shipHealth > 0:\n self.startInvincibilty()\n return True\n else:\n return False",
"def move():\n\tdirections = [\"up\", \"down\", \"left\", \"right\"]\n\tdata = bottle.request.json\n\t#print(\"MOVE:\", json.dumps(data))\n\t# THE BEST MOVE IS CALUCLATED USING A FLOODFILL. HIGHEST AREA WIN. ME SPEEL GOOD\n\tmove = \"\"\n\n\tupC = floodFill(getNextPosition(\"up\", data), data, arrayify(\"up\", data, not largestSnake(data)), 0)\n\tdownC = floodFill(getNextPosition(\"down\", data), data, arrayify(\"down\", data, not largestSnake(data)), 0)\n\trightC = floodFill(getNextPosition(\"right\", data), data, arrayify(\"right\", data, not largestSnake(data)), 0)\n\tleftC = floodFill(getNextPosition(\"left\", data), data, arrayify(\"left\", data, not largestSnake(data)), 0)\n\tmoveC = [upC, downC, rightC, leftC]\n\t#if moveC cannot find a viable move with ghostheads, it disables them so the snake doesn't kill itself\n\n\tmove = goto(moveC, findFood(data), data)\n\tprint(\"moveC before max = \" + str(moveC))\n\tif max(moveC) == 0:\n\t\tprint(\"ghosthead disabled\")\n\t\tupC = floodFill(getNextPosition(\"up\", data), data, arrayify(\"up\", data, False), 0)\n\t\tdownC = floodFill(getNextPosition(\"down\", data), data, arrayify(\"down\", data, False), 0)\n\t\trightC = floodFill(getNextPosition(\"right\", data), data, arrayify(\"right\", data, False), 0)\n\t\tleftC = floodFill(getNextPosition(\"left\", data), data, arrayify(\"left\", data, False), 0)\n\t\tmoveC = [upC, downC, rightC, leftC]\n\n\tprint(\"move after goto: \" + move)\n\tprint(\"movC after if max 0 block: \" + str(moveC))\n\tif move == \"\":\n\t\tgoodMoves = []\n\t\tif upC == max(moveC):\n\t\t\tgoodMoves.append(\"up\")\n\t\tif downC == max(moveC):\n\t\t\tgoodMoves.append(\"down\")\n\t\tif leftC == max(moveC):\n\t\t\tgoodMoves.append(\"left\")\n\t\tif rightC == max(moveC):\n\t\t\tgoodMoves.append(\"right\")\n\t\tmove = random.choice(goodMoves)\n\n\tprint(\"Turn: \" + str(data[\"turn\"]))\n\tprint(\"Move: \" + move)\n\n\tresponse = {\"move\": move, \"shout\": \"yeet\"}\n\treturn HTTPResponse(\n\t\tstatus=200,\n\t\theaders={\"Content-Type\": \"application/json\"},\n\t\tbody=json.dumps(response),\n\t)",
"def inSmallBlindPosition(self):\n return len(self.in_game) > 0 and ((self.dealer + 1) % len(self.in_game)) == self.position",
"def is_ship_sunk(self, x, y):\n marker = self.markers[x][y]\n total_hits = self.ship_hits[marker]\n return total_hits == MarkerType.MAX_HITS[marker]",
"def isDeboutHandCoded( sk, bOnlyTorso = False, bVerbose = False ):\n \n neck = sk.listPoints[Skeleton.getNeckIndex()]\n \n if bVerbose: print(\"neck: %s\" % str(neck))\n \n legsInfo = sk.getLegs()\n if bVerbose: print(\"legs: %s\" % str(legsInfo))\n \n bb = sk.getBB_Size()\n sto = sk.getStomach()\n \n\n \n rh,rk,ra = legsInfo[0] # hip, knee, ankle\n lh,lk,la = legsInfo[1]\n \n avgFeets = [ ra[0]+la[0],ra[1]+la[1],ra[2]+la[2] ]\n div2(avgFeets)\n \n lal = sk.getArms()\n if bVerbose: print(\"arms: %s\" % str(lal))\n rs,re,rw = lal[0] #shoulder, elbow, wrist\n ls,le,lw = lal[1]\n \n rThreshold = 0.2\n \n #~ # si les pieds sont plus bas que les hanches\n # a essayer: orientation cou/(estomac ou moyenne des hanches): vertical => debout; sinon couche\n # a essayer: quand les fesses sont sur le sol\n \n # NB: on n'arrivera jamais a voir que quelqu'un qui est assis ou couche' oriente' vers la camera est tombe'\n \n # si les mains ou a defaut les coudes sont plus hautes que les pieds ou a defaut les hanches\n bDeboutFromArmsLegsHeight = None\n \n if rw[2] > rThreshold:\n rHi = rw[:2]\n elif re[2] > rThreshold:\n rHi = re[:2]\n else:\n rHi = None\n\n if lw[2] > rThreshold:\n lHi = lw[:2]\n elif le[2] > rThreshold:\n lHi = le[:2]\n else:\n # check le neck\n if neck[2] > rThreshold:\n lHi = neck[:2]\n else:\n lHi = None\n \n if lHi != None or rHi != None:\n \n if lHi == None:\n hi = rHi\n elif rHi == None:\n hi = lHi\n else:\n hi = avg2(rHi,lHi)\n \n \n \n if ra[2] > rThreshold:\n rLo = ra[:2]\n elif rk[2] > rThreshold:\n rLo = rk[:2]\n else:\n rLo = None\n\n if la[2] > rThreshold:\n lLo = la[:2]\n elif lk[2] > rThreshold:\n lLo = lk[:2]\n else:\n lLo = None\n \n if lLo != None or rLo != None:\n\n if lLo == None:\n lo = rLo\n elif rLo == None:\n lo = lLo\n else:\n lo = avg2(rLo,lLo)\n \n if bVerbose: print(\"rLo:%s,lLo:%s\" % (rLo,lLo) )\n \n if bVerbose: print(\"hi:%s,lo:%s\" % (hi,lo) )\n \n #~ return hi[1]<lo[1] # add a margin ?\n \n bb = sk.getBB_Size()\n rMargin = bb[1]/4\n\n \n bDeboutFromArmsLegsHeight = hi[1]+rMargin<lo[1] # WRN: pixel Y are inverted (high pixel are smaller than lower)\n \n if bVerbose: print(\"rMargin:%5.2f, bDeboutFromArmsLegsHeight: %s\"% (rMargin,bDeboutFromArmsLegsHeight) )\n\n bDeboutFromTorsoAngle = None\n if (rh[2] > rThreshold or lh[2] > rThreshold) and neck[2] > rThreshold:\n if (rh[2] > rThreshold and lh[2] > rThreshold):\n avg_hip = avg2(rh,lh)\n elif rh[2] > rThreshold:\n avg_hip = rh\n else:\n avg_hip = lh\n dx = avg_hip[0]-neck[0]\n dy = avg_hip[1]-neck[1]\n if abs(dx) < 0.1:\n coef = dy*10\n else:\n coef = dy/dx\n bDeboutFromTorsoAngle = abs(coef) > 1. 
# 1: diagonal\n if bVerbose: print(\"coef: %5.1f (dy:%3.1f,dx:%3.1f), bDeboutFromTorsoAngle: %s\" % (coef,dy, dx, bDeboutFromTorsoAngle) )\n #~ else:\n #~ return None\n \n # fesses sur le sol\n bNotBumOnGround = None\n if rh[2] > rThreshold and lh[2] > rThreshold:\n avg_hip = avg2(rh,lh)\n elif rh[2] > rThreshold:\n avg_hip = rh\n elif lh[2] > rThreshold:\n avg_hip = lh\n else:\n avg_hip = None\n if avg_hip != None:\n # look for lower point in legs, but not hip:\n rLowest = -10000\n #~ for i in range(cv2_openpose.Skeleton.NBR_POINTS):\n for i in [cv2_openpose.Skeleton.RKNEE,cv2_openpose.Skeleton.LKNEE,cv2_openpose.Skeleton.RANKLE,cv2_openpose.Skeleton.LANKLE]:\n if i == cv2_openpose.Skeleton.RHIP or i == cv2_openpose.Skeleton.LHIP:\n continue\n if sk.listPoints[i][2] < rThreshold:\n continue\n if sk.listPoints[i][1] > rLowest:\n rLowest = sk.listPoints[i][1]\n if rLowest >= 0:\n #~ lenLimbs = sk.getLenLimbs()\n #~ if bVerbose: print(\"lenLimbs: %s\" % str(lenLimbs) )\n #~ rLenLegs = (lenLimbs[0][0] +lenLimbs[1][0]) / 2\n rLenLegs = sk.getAvgLenLeg()\n if rLenLegs != None:\n bNotBumOnGround = (avg_hip[1] + (rLenLegs*0.75)) < rLowest\n if bVerbose: print(\"avg hip: %5.1f, lowest: %5.1f, rLenLegs: %5.1f, bNotBum: %s\" % (avg_hip[1],rLowest,rLenLegs, bNotBumOnGround) )\n if legsInfo[0][2][2] < rThreshold and legsInfo[1][2][2] < rThreshold:\n if bVerbose: print(\"INF: no foot seen, reseting bNotBumOnGround\" )\n # on ne voit aucun pied, soit ils ne sont pas a l'ecran soit il sont derriere, dans le doute, on prefere dire None\n bNotBumOnGround = None\n #~ return bNotBumOnGround\n \n # on veut etre sur => si hesitation, ne se prononces pas\n if 0:\n if not bOnlyTorso:\n if bDeboutFromArmsLegsHeight != bDeboutFromTorsoAngle:\n return None\n\n \n #~ if bDeboutFromArmsLegsHeight:\n if bDeboutFromTorsoAngle == None and bNotBumOnGround:\n return 1\n \n if bDeboutFromTorsoAngle and bNotBumOnGround:\n return 1\n\n \n if bDeboutFromTorsoAngle == None and bNotBumOnGround == None and bDeboutFromArmsLegsHeight != None:\n return bDeboutFromArmsLegsHeight\n \n if bDeboutFromTorsoAngle != None and bNotBumOnGround == None:\n return bDeboutFromTorsoAngle\n \n return 0\n \n \n else:\n if bDeboutFromTorsoAngle == bNotBumOnGround and bDeboutFromTorsoAngle == bDeboutFromArmsLegsHeight:\n return bDeboutFromTorsoAngle\n if bNotBumOnGround != None:\n return bNotBumOnGround\n if bDeboutFromTorsoAngle != None:\n return bDeboutFromTorsoAngle \n if bDeboutFromArmsLegsHeight != None:\n return bDeboutFromArmsLegsHeight\n return None",
"def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False",
"def SBO_isSteadyStateExpression(*args):\n return _libsbml.SBO_isSteadyStateExpression(*args)",
"def isSteadyStateExpression(*args):\n return _libsbml.SBO_isSteadyStateExpression(*args)",
"def gimme_the_hole(required_hole_size):\n\tgood_object = spray(required_hole_size)\n\tmake_hole(required_hole_size, good_object)\n\treturn good_object",
"def drop_shape(self):\n if self.falling_shape:\n while not self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.lower_shape_by_one_row()\n self.falling_shape.raise_shape_by_one_row()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.end_game()\n else:\n self.settle_falilng_shape()\n return True",
"def move_inward_outward(self):\r\n\r\n if self.movement == \"inward_outward\" and self.flag_move:\r\n leftPos, topPos, rightPos, bottomPos = self.canvas.coords(self.ball)\r\n if self.size_flag:\r\n self.change_size(\"larger\")\r\n elif not self.size_flag:\r\n self.change_size(\"smaller\")\r\n # If the ball hits a wall, change inward to outward.\r\n if leftPos <= 0 or rightPos >= 400 or topPos <= 0 or bottomPos >= 400:\r\n self.size_flag = 0\r\n # If the ball size reaches 1, change outward to inward.\r\n elif self.size == 1:\r\n self.size_flag = 1\r\n self.canvas.after(50, self.move_inward_outward)",
"def get_bishop_moves(state, coord):\n # Movement Options\n # Diagonals\n # no piece = add move, enemy = add move & break loop, friendly = break loop\n\n bishop_moves = [] \n\n if state.active_color == cc.WHITE_ACTIVE:\n for vector in cc.BISHOP_VECTORS:\n bishop_moves.extend(get_direction_moves(state, cc.W_BISHOP, coord, vector))\n \n elif state.active_color == cc.BLACK_ACTIVE:\n for vector in cc.BISHOP_VECTORS:\n bishop_moves.extend(get_direction_moves(state, cc.B_BISHOP, coord, vector))\n else:\n raise Exception(\"GameState: Invalid Active Color\")\n return bishop_moves",
"def weightShipSearch(self, coordinates, size, weight, direction, hitWeight):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, hitWeight \r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, 0\r\n if self.enemyBoard[coordinates.x][coordinates.y] < BoardState.HIT:\r\n # This search is all for naught since we can't possibly have a ship at this position.\r\n return False, 0\r\n if self.enemyBoard[coordinates.x][coordinates.y] == BoardState.HIT:\r\n # Weigh searches with hits already in them over searches without them. This is to \r\n # direct the shot selection toward coordinates with hits already near them.\r\n hitWeight += 10\r\n # Move to the next set of coordinates on the board.\r\n result, hitWeight = self.weightShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y),\r\n size - 1, weight, direction, hitWeight)\r\n if result:\r\n # A entire ship can fit, weight the coordinate appropriately.\r\n if self.enemyBoard[coordinates.x][coordinates.y] >= BoardState.OPEN:\r\n self.enemyBoard[coordinates.x][coordinates.y] += (weight + hitWeight)\r\n return result, hitWeight",
"def _sideways_ship_hit(self):\n if self.stats.sideways_ships_left > 0:\n self.stats.sideways_ships_left -= 1\n self.aliens.empty()\n self.bullets.empty()\n self._create_fleet()\n self.sideways_ship.center_sideways_ship()\n sleep(0.5)\n else:\n self.stats.game_active = False",
"def bump_into_wall(\n state: State,\n action: Action,\n next_state: State, # pylint: disable=unused-argument\n) -> bool:\n attempted_next_position = updated_agent_position_if_unobstructed(\n state.agent.position, state.agent.orientation, action\n )\n\n return attempted_next_position in state.grid and (\n isinstance(state.grid[attempted_next_position], Wall)\n )",
"def check_aliens_bottom(si_settings,screen,stats,sb,ship,aliens,bullets):\n screen_rect= screen.get_rect()\n for alien in aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n ship_hit(si_settings,screen,stats,sb,ship,aliens,bullets)\n break",
"def move_ok(game, ship):\n cell_halite = game.game_map[ship.position].halite_amount\n\n if ship.is_full:\n return True\n\n # generally ignore low value cells. Note Mining_threshold may be dynamic\n if cell_halite < Mining_threshold:\n return True\n\n dropoffs = get_dropoff_positions(game)\n fuel_status = ship.halite_amount / SHIP_MAX_HALITE\n\n # the amount of halite we'll get if we refuel/mine\n # if ship in a dropoff/shipyard, set fuel to max to the ship departs\n refuel_amount = constants.MAX_HALITE if ship.position in dropoffs else cell_halite * SHIP_MINING_EFFICIENCY\n\n net_mine = (cell_halite * SHIP_MINING_EFFICIENCY) + (cell_halite * SHIP_MINING_EFFICIENCY) * -SHIP_FUEL_COST\n net_move = cell_halite * -SHIP_FUEL_COST + game.get_mining_rate(MINING_RATE_LOOKBACK) * SHIP_MINING_EFFICIENCY\n\n #logging.debug(\"fuel_status: {}\".format(fuel_status))\n #logging.debug(\"refuel_amount: {}\".format(refuel_amount))\n #logging.debug(\"net_mine: {}, net_move: {}\".format(net_mine, net_move))\n\n if ship.status == \"transiting\":\n #if refuel_amount > net_mining_yield and fuel_status < SHIP_REFUEL_THRESHOLD:\n # return True\n pass\n elif ship.status == \"exploring\":\n #if cell_halite < Mining_threshold:\n # return True\n pass\n elif ship.status == \"returning\":\n if net_move > net_mine or fuel_status > SHIP_REFUEL_THRESHOLD:\n return True\n else:\n raise RuntimeError(\"Unknown ship status: {}\".format(ship.status))\n\n return False",
"def on_ship(self, ship):\n # TODO: add ship to game\n # The game has a set combination of ships which is created when choosing the field size\n # (by battleships.ship_combination_creator()).\n # After that you need to create the player and add every ship from the combination (\n # without position) to his fleet. Done by add_ship(size) in the player class,\n # just cycle through the fleet_config of the Game class.\n # Then you need a button to determine the facing of the ship (north, west, east, south) and\n # something that shows which ship you are placing (either go through the array yourself\n # or by choosing the size per button).\n # Then the player needs to call position_ship(size, x, y, facing). If the ship cannot be\n # placed there (either because it conflicts with another ship or goes over the edge of\n # the board) the function will return a False, if it works it'll return True.\n # By calling check_ship_placement() from the Player class you can check whether all\n # ships are placed or not (returning True if all are placed, returning False if one or\n # more are still missing a position).\n # Apparently this is a commuication error on both sides:\n # This is how the ship placement works via the now built GUI:\n # New Game-> field size setting -> player name entry + ship placement,\n # via click, hoover and click (lenght of placed ship 1+hoover+1)\n # a list of coords creates the ship\n # {'n': [[(0, 0), (1, 0), (2, 0), (3, 0)]],\n # 'b': [[(1, 1), (2, 1), (3, 1), (4, 1)], [(2, 2), (2, 3), (2, 4)]]}\n\n if len(ship) < 3 or len(ship) > 6:\n # set length of ship to 3 to 6\n return False\n return True",
"def let_shape_fall(self):\n if self.falling_shape:\n self.falling_shape.lower_shape_by_one_row()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.raise_shape_by_one_row()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.end_game()\n else:\n self.settle_falilng_shape()\n return True",
"def check_boss_edges(ai_settings, boss):\n\tif boss.check_edges():\n\t\tai_settings.boss_direction *= -1"
] |
[
"0.58415544",
"0.52179354",
"0.51744556",
"0.49338993",
"0.4874807",
"0.48674476",
"0.4732885",
"0.46843466",
"0.45478213",
"0.45308974",
"0.45002207",
"0.4419198",
"0.44042423",
"0.4400801",
"0.43920544",
"0.43743762",
"0.43740964",
"0.4360961",
"0.4356543",
"0.4354255",
"0.43533662",
"0.43007264",
"0.42759725",
"0.42686984",
"0.42390525",
"0.41930208",
"0.41912904",
"0.4181161",
"0.41714826",
"0.4167891"
] |
0.7173418
|
0
|
Recursive function that positions a sinking ship in a particular direction to see if it can be sunk. Arguments bullsEye The coordinates of the shot that caused the ship to start sinking. size The size of the ship. direction The direction to move as the ship is being placed. Returns sunkShip True if the ship was sunk, False if not. shipCoordinates Only valid if the ship was sunk, the coordinates of the sunk ship.
|
def sinkShipSearch(self, coordinates, size, direction):
if size == 0:
# Successfully searched the required size.
return True, []
if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:
# Can't go off the board.
return False, None
if self.enemyBoard[coordinates.x][coordinates.y] != BoardState.HIT:
# This search is all for naught since the ship can't possibly have sunk at this position.
return False, None
sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y), size - 1, direction)
if sunkShip:
shipCoordinates.append(coordinates)
return sunkShip, shipCoordinates
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sinkShip(self, bullsEye, size, direction):\r\n sunkShip, shipCoordinates = self.sinkShipSearch(Coordinates(bullsEye.x + direction.x, bullsEye.y + direction.y), size - 1, direction)\r\n if sunkShip:\r\n shipCoordinates.append(bullsEye)\r\n return sunkShip, shipCoordinates",
"def bulldozer(pos, sizeX=20 , sizeY=20, sizeZ=40, putGrass=\"yes\"):\n # Make the place empty\n #mc.setBlocks(pos.x-30, pos.y-1, pos.z-10,\n # pos.x+40, pos.y+20, pos.z+40, air)\n \n mc.setBlocks(pos.x-sizeX, pos.y-1, pos.z-10,\n pos.x+sizeX, pos.y+sizeY, pos.z+sizeZ, air)\n \n if putGrass == \"yes\":\n # put grass on the ground\n mc.setBlocks(pos.x-30, pos.y-1, pos.z-10,\n pos.x+sizeX, pos.y-1, pos.z+sizeZ, grass)\n elif putGrass == \"no\":\n pass",
"def draw_ship(self, image, ship, dims):\n # Get the center x, y and the size s\n x, y, s, r, m = dims\n\n #Load files\n if ship == 'cruiseship':\n im_path = 'ships/cruiseship_isolated.png'\n ma_path = 'ships/cruiseship_isolated_mask.png'\n elif ship == 'tanker':\n im_path = 'ships/tanker_isolated.png'\n ma_path = 'ships/tanker_isolated_mask.png'\n\n #Transforming ship\n ship = cv2.imread(im_path)\n ship_transformed = self.transform(ship.copy(), dims)\n ship_shape = np.shape(ship_transformed)\n s_x = int((ship_shape[0]+0.5)//2)\n s_y = int((ship_shape[1]+0.5)//2)\n ship_transformed = ship_transformed[0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[0],\n 0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[1],\n :]\n ship_transformed_th = self.threshold(ship_transformed)\n\n #Adding boat to image\n image_slice = image[x - s_x:x + s_x, y - s_y:y + s_y, :]\n image_slice -= 255*image_slice*ship_transformed_th\n image_slice += ship_transformed\n image[x - s_x:x + s_x, y - s_y:y + s_y, :] = image_slice\n\n return image",
"def add_ships(ship_screen):\n\n direction_choice = random.choice([\"H\", \"V\"]) # chooses the direction of the big ship\n x = random.randint(0, 4)\n y = random.randint(0, 4)\n ship_screen[x][y] = \"X\"\n if direction_choice == \"V\": # after selection of the first coordinate, chooses a direction\n # and then according to x or y coordinates puts the other 2 pieces of the ship\n if x == 0:\n ship_screen[x+1][y], ship_screen[x+2][y] = \"X\", \"X\"\n elif x == 4:\n ship_screen[x-1][y], ship_screen[x-2][y] = \"X\", \"X\"\n else:\n ship_screen[x+1][y], ship_screen[x-1][y] = \"X\", \"X\"\n elif direction_choice == \"H\":\n if y == 0:\n ship_screen[x][y+1], ship_screen[x][y+2] = \"X\", \"X\"\n elif y == 4:\n ship_screen[x][y-1], ship_screen[x][y-2] = \"X\", \"X\"\n else:\n ship_screen[x][y-1], ship_screen[x][y+1] = \"X\", \"X\"\n main_ship_coordinates = [[a, b] for a in range(5) for b in range(5) if ship_screen[a][b] == \"X\"]\n # returns ship coordinates\n banned_coordinates = [] # codes of between 29-34 finds the neighbour coordinates of big ship\n for d in main_ship_coordinates:\n neighbour_coordinates = [[d[0], d[1]+1], [d[0]+1, d[1]], [d[0]-1, d[1]], [d[0], d[1]-1],\n [d[0]+1, d[1]+1], [d[0]-1, d[1]-1], [d[0]+1, d[1]-1], [d[0]-1, d[1]+1]]\n for e in neighbour_coordinates:\n if e[0] in range(5) and e[1] in range(5) and e not in banned_coordinates:\n banned_coordinates.append(e)\n while True:\n i = random.randint(0, 4)\n j = random.randint(0, 4)\n if [i, j] in banned_coordinates:\n continue\n else:\n ship_screen[i][j] = \"O\"\n break\n while True:\n possible_coordinates = [[i+1, j], [i-1, j], [i, j-1], [i, j+1]]\n # selects second piece randomly from possible 4\n second_piece = random.choice(possible_coordinates)\n if second_piece[0] in range(5) and second_piece[1] in range(5) and second_piece not in banned_coordinates:\n ship_screen[second_piece[0]][second_piece[1]] = \"O\"\n break\n else:\n continue\n return ship_screen",
"def positionAndSinkShip(self, sinkingShip):\r\n directions = [Direction.North, Direction.South, Direction.East, Direction.West]\r\n sunkShip = False\r\n shipCoordinates = None\r\n for direction in directions:\r\n tSunkShip, tShipCoordinates = self.sinkShip(sinkingShip.bullsEye, sinkingShip.size, direction)\r\n if tSunkShip:\r\n if sunkShip:\r\n return False, None\r\n else:\r\n sunkShip = tSunkShip\r\n shipCoordinates = tShipCoordinates\r\n return sunkShip, shipCoordinates",
"def sinkShips(self):\r\n while True:\r\n stillSinkingShips = False\r\n for i in range(len(self.shipsToSink) - 1, -1, -1):\r\n sunkShip, shipCoordinates = self.positionAndSinkShip(self.shipsToSink[i])\r\n if sunkShip:\r\n stillSinkingShips = True\r\n for coordinates in shipCoordinates:\r\n self.enemyBoard[coordinates.x][coordinates.y] = BoardState.SUNK\r\n del(self.shipsToSink[i])\r\n if not stillSinkingShips:\r\n break",
"def drawShip(self,screen,maze,frame,midpos = (450,800),zoom = 1,fancyShip = False, drawThrusters = True):\n bp = self.getIntPos()\n bp = getOffsetPos(bp,midpos)\n \n # Draw Inputs\n if not self.crashed:\n if self.inputType == 0:\n self.drawPointInputs(screen,maze,midpos=midpos)\n elif self.inputType == 1:\n self.drawVariableLOS(screen,frame,midpos=midpos)\n \n# if(fancyShip): pygame.draw.polygon(screen, self.parentcolour, \n# [[int(bp[0]+ 10 *np.cos(self.angle+3.14)), \n# int(bp[1]+ 10 *np.sin(self.angle+3.14))], \n# [int(bp[0]+ 10 *np.cos(self.angle+1)), \n# int(bp[1]+ 10 *np.sin(self.angle+1))], \n# [int(bp[0]), \n# int(bp[1])], \n# [int(bp[0]+ 10 *np.cos(self.angle-1)), \n# int(bp[1]+ 10 *np.sin(self.angle-1))]])\n # draw thrusters\n \n if not self.crashed:\n if(drawThrusters):\n pygame.draw.polygon(screen, (140,140,40),\n [[int(bp[0]+ self.accel*22 *np.cos(self.angle+3.14)), \n int(bp[1]+ self.accel*22 *np.sin(self.angle+3.14))],\n [int(bp[0]+ 7 *np.cos(self.angle + 2.64)), \n int(bp[1]+ 7 *np.sin(self.angle + 2.64))],\n [int(bp[0]+ 7 *np.cos(self.angle + 3.64)), \n int(bp[1]+ 7 *np.sin(self.angle + 3.64))]])\n \n \n pygame.draw.polygon(screen, (140,140,40),\n [[int(bp[0]+ self.dangle*60 *np.cos(self.angle-1.57) + 7*np.cos(self.angle)), \n int(bp[1]+ self.dangle*60 *np.sin(self.angle-1.57) + 7*np.sin(self.angle))],\n [int(bp[0]+ 5 *np.cos(self.angle)), \n int(bp[1]+ 5 *np.sin(self.angle))],\n [int(bp[0]+ 9 *np.cos(self.angle)), \n int(bp[1]+ 9 *np.sin(self.angle))]])\n \n # draw ship\n pygame.draw.polygon(screen, self.colour, \n [[int(bp[0]+ 10 *np.cos(self.angle-0.15)), \n int(bp[1]+ 10 *np.sin(self.angle-0.15))],\n [int(bp[0]+ 10 *np.cos(self.angle+0.15)), \n int(bp[1]+ 10 *np.sin(self.angle+0.15))],\n [int(bp[0]+ 10 *np.cos(self.angle + 2.64)), \n int(bp[1]+ 10 *np.sin(self.angle + 2.64))],\n [int(bp[0]+ 10 *np.cos(self.angle + 3.64)), \n int(bp[1]+ 10 *np.sin(self.angle + 3.64))]])\n # Draw the cockpit\n pygame.draw.circle(screen, (140,160,240), bp, 5,2)",
"def breaking_of_the_box(size = (10, 10), verbose = False):\n import numpy as np\n r, l, u, d = \"R\", \"L\", \"U\", \"D\" # initiating walkind directions\n np.random.seed(int(time.time()))\n \n # initiating field with walking directions\n field = np.random.randint(1, 5, size = (10, 10))\n field = np.where(field ==1, r, field)\n field = np.where(field =='2', l, field)\n field = np.where(field =='3', u, field)\n field = np.where(field =='4', d, field)\n\n i, j = 0, 0\n coordinates = []\n \n # iterating in a field\n while (i<field.shape[0] and i>-1) and (j<field.shape[1] and j>-1):\n prev_i,prev_j = i, j\n coordinates.append((i, j)) \n \n copy_field = field.copy()\n \n if field[i][j] == r:\n j+=1\n elif field[i][j] == l:\n j-=1\n elif field[i][j] == u:\n i-=1\n elif field[i][j] == d:\n i+=1\n copy_field[i][j] = \"X\"\n if verbose == True:\n print(copy_field, \"#\"*48, sep = \"\\n\") #printing step by step position of a player\n if (i, j) in coordinates:\n # in case of infitine loop break\n print(\"Player is stucked inside of a box\")\n break\n\n else:\n print(\"Player came out of the box\")\n print(\"Coordinates of a breaking point\", \"(\", prev_i, prev_j, \")\")",
"def ship_size(data, cell):\n if type(data) != dict:\n print('Wrong argument data')\n return None\n if type(cell) != tuple:\n print(\"Second argument must be a tuple\")\n return None\n if type(cell[0]) != str:\n print(\"First element of the second argument must be a str - A..J\")\n return None\n if type(cell[1]) != int:\n print(\"Second element of the second argument must be a number - 1..10\")\n return None\n if not has_ship(data, cell):\n return 0\n x = ord(cell[0].upper()) - 64\n y = cell[1]\n if x < 1 or x > 10:\n print('Wrong coordinate. Must be from A to J.')\n return None\n if y < 1 or y > 10:\n print('Wrong coordinate. Must be from 1 to 10.')\n return None\n size = 1\n coords = {(x, y)}\n if data[(x+1, y)] == 'damaged' or data[(x+1, y)] or data[(x-1, y)] == 'damaged' or data[(x-1, y)]:\n start = x\n while(data[(start-1, y)] == 'damaged' or data[(start-1, y)]):\n coords = coords | {(start-1, y)}\n size += 1\n start -= 1\n if start < 2:\n break\n start = x\n while(data[(start+1, y)] == 'damaged' or data[(start+1, y)]):\n coords = coords | {(start + 1, y)}\n size += 1\n start += 1\n if start > 10:\n break\n elif data[(x, y+1)] == 'damaged' or data[(x, y+1)] or data[(x, y-1)] == 'damaged' or data[(x, y-1)]:\n start = y\n while(data[(x, start-1)] == 'damaged' or data[(x, start-1)]):\n coords = coords | {(x, start - 1)}\n size += 1\n start -= 1\n if start < 1:\n break\n start = y\n while(data[(x, start+1)] == 'damaged' or data[(x, start+1)]):\n coords = coords | {(x, start + 1)}\n size += 1\n start += 1\n if start > 10:\n break\n return (size, coords)",
"def is_ship_sunk(self, x, y):\n marker = self.markers[x][y]\n total_hits = self.ship_hits[marker]\n return total_hits == MarkerType.MAX_HITS[marker]",
"def move_ok(game, ship):\n cell_halite = game.game_map[ship.position].halite_amount\n\n if ship.is_full:\n return True\n\n # generally ignore low value cells. Note Mining_threshold may be dynamic\n if cell_halite < Mining_threshold:\n return True\n\n dropoffs = get_dropoff_positions(game)\n fuel_status = ship.halite_amount / SHIP_MAX_HALITE\n\n # the amount of halite we'll get if we refuel/mine\n # if ship in a dropoff/shipyard, set fuel to max to the ship departs\n refuel_amount = constants.MAX_HALITE if ship.position in dropoffs else cell_halite * SHIP_MINING_EFFICIENCY\n\n net_mine = (cell_halite * SHIP_MINING_EFFICIENCY) + (cell_halite * SHIP_MINING_EFFICIENCY) * -SHIP_FUEL_COST\n net_move = cell_halite * -SHIP_FUEL_COST + game.get_mining_rate(MINING_RATE_LOOKBACK) * SHIP_MINING_EFFICIENCY\n\n #logging.debug(\"fuel_status: {}\".format(fuel_status))\n #logging.debug(\"refuel_amount: {}\".format(refuel_amount))\n #logging.debug(\"net_mine: {}, net_move: {}\".format(net_mine, net_move))\n\n if ship.status == \"transiting\":\n #if refuel_amount > net_mining_yield and fuel_status < SHIP_REFUEL_THRESHOLD:\n # return True\n pass\n elif ship.status == \"exploring\":\n #if cell_halite < Mining_threshold:\n # return True\n pass\n elif ship.status == \"returning\":\n if net_move > net_mine or fuel_status > SHIP_REFUEL_THRESHOLD:\n return True\n else:\n raise RuntimeError(\"Unknown ship status: {}\".format(ship.status))\n\n return False",
"def on_ship(self, ship):\n # TODO: add ship to game\n # The game has a set combination of ships which is created when choosing the field size\n # (by battleships.ship_combination_creator()).\n # After that you need to create the player and add every ship from the combination (\n # without position) to his fleet. Done by add_ship(size) in the player class,\n # just cycle through the fleet_config of the Game class.\n # Then you need a button to determine the facing of the ship (north, west, east, south) and\n # something that shows which ship you are placing (either go through the array yourself\n # or by choosing the size per button).\n # Then the player needs to call position_ship(size, x, y, facing). If the ship cannot be\n # placed there (either because it conflicts with another ship or goes over the edge of\n # the board) the function will return a False, if it works it'll return True.\n # By calling check_ship_placement() from the Player class you can check whether all\n # ships are placed or not (returning True if all are placed, returning False if one or\n # more are still missing a position).\n # Apparently this is a commuication error on both sides:\n # This is how the ship placement works via the now built GUI:\n # New Game-> field size setting -> player name entry + ship placement,\n # via click, hoover and click (lenght of placed ship 1+hoover+1)\n # a list of coords creates the ship\n # {'n': [[(0, 0), (1, 0), (2, 0), (3, 0)]],\n # 'b': [[(1, 1), (2, 1), (3, 1), (4, 1)], [(2, 2), (2, 3), (2, 4)]]}\n\n if len(ship) < 3 or len(ship) > 6:\n # set length of ship to 3 to 6\n return False\n return True",
"def bamboo(screen, color, x, y, size):\n rect(screen, color, (x, y + size * 6, size, size * 14 / 3))\n rect(screen, color, (x, y, size, size * 16 / 3))\n polygon(screen, color, [(x + size * 2 / 3, y - size * 2 / 3), (x, y - size),\n (x + size / 3, y - size * 13 / 3), (x + size, y - size * 14 / 3)])\n polygon(screen, color, [(x + size, y - size * 14 / 3), (x + size * 2 / 3, y - size * 73 / 15),\n (x + size * 17 / 15, y - size * 29 / 3), (x + size * 22 / 15, y - size * 142 / 15)])\n stick(screen, color, [x - size * 13 / 3, y - size * 5 / 3], [x - size * 1 / 3, y + size / 3], 3, 1, -30)\n stick(screen, color, [x + size * 24 / 3, y - size * 25 / 3], [x + size * 4 / 3, y - size * 3], 5, 1, 30)\n stick(screen, color, [x - size * 13 / 3, y - size * 25 / 3], [x - size * 1 / 3, y - size * 5], 5, 1, -30)\n stick(screen, color, [x + size * 14 / 3, y - size * 5 / 3], [x + size * 4 / 3, y + size / 3], 3, 1, 30)",
"def check_ship_fits(self, ship_length, row, column, orientation):\n if orientation == 'H':\n if column + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True\n else:\n if row + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True",
"def place_ship(self, ship, x, y, orientation):\n\t\tdx = (orientation == GameBoard.O_HORIZONTAL)\n\t\tdy = (orientation == GameBoard.O_VERTICAL)\n\t\t# Check if there's enough space first.\n\t\tfor i in range(ship.size):\n\t\t\ttile = self.get_our_tile(x + i * dx, y + i * dy)\n\t\t\tif not tile.is_free():\n\t\t\t\traise ValueError(\"You already have a ship there!\")\n\n\t\tself.dump()\n\t\t# Enlist the ship in the navy.\n\t\tship.place(x, y, orientation)\n\t\tself.ships.append(ship)\n\t\t# Mark the tiles occupied by the ship.\n\t\tfor i in range(ship.size):\n\t\t\tcx = x + i * dx\n\t\t\tcy = y + i * dy\n\n\t\t\t# Create a tile boundary around the ship.\n\t\t\ttile = bt.Tile(bt.Tile.T_OCCUPIED)\n\t\t\tif i == 0:\n\t\t\t\t#\n\t\t\t\t# :AAAAA\n\t\t\t\t#\n\t\t\t\t# :\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx, cy - dy, tile)\n\t\t\t\t# :\n\t\t\t\t# :AAAAA\n\t\t\t\t#\n\t\t\t\t# ::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx - dy, cy - dy - dx, tile)\n\t\t\t\t# :\n\t\t\t\t# :AAAAA\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t#\n\t\t\t\tself.set_our_tile(cx - dx + dy, cy - dy + dx, tile)\n\t\t\telif i == ship.size - 1:\n\t\t\t\t# :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# :\n\t\t\t\tself.set_our_tile(cx + dx, cy + dy, tile)\n\t\t\t\t# : :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# ::\n\t\t\t\tself.set_our_tile(cx + dx - dy, cy + dy - dx, tile)\n\t\t\t\t# : :\n\t\t\t\t# :AAAAA:\n\t\t\t\t# : :\n\t\t\t\t# :::\n\t\t\t\t# E\n\t\t\t\t# E\n\t\t\t\t# :::\n\t\t\t\tself.set_our_tile(cx + dx + dy, cy + dy + dx, tile)\n\t\t\t# :::::::\n\t\t\t# :AAAAA:\n\t\t\t# : :\n\t\t\t# :::\n\t\t\t# :E\n\t\t\t# :E\n\t\t\t# :::\n\t\t\tself.set_our_tile(cx - dy, cy - dx, tile)\n\t\t\t# :::::::\n\t\t\t# :AAAAA:\n\t\t\t# :::::::\n\t\t\t# :::\n\t\t\t# :E:\n\t\t\t# :E:\n\t\t\t# :::\n\t\t\tself.set_our_tile(cx + dy, cy + dx, tile)\n\n\t\t\t# Create the ship tile by tile.\n\t\t\tself.set_our_tile(cx, cy, ship.tile())\n\n\t\tself.dump()",
"def place_ship(board,ship_length,bow,ship_direction):\n\n # verifies the input\n if abs(ship_direction[0])+abs(ship_direction[1])==1 and \\\n 0 <= bow[0] < len(board[0]) and 0 <= bow[1] < len(board) and \\\n -1 <= (bow[0] - ship_direction[0]*ship_length) <= len(board[0]) and \\\n -1 <= (bow[1] - ship_direction[1]*ship_length) <= len(board):\n\n index=ship_index(board) # find the next ship-index\n size=[ship_length]\n for part in range(ship_length): # try to place the ship\n if board[bow[1]-ship_direction[1]*part]\\\n [bow[0]-ship_direction[0]*part] == None:\n board[bow[1]-ship_direction[1]*part]\\\n [bow[0]-ship_direction[0]*part] = (index, part, size)\n else: # if another ship in the middle, delete the part of the ship\n # alredy placed and return None\n for del_part in range(part):\n board[bow[1]-ship_direction[1]*del_part]\\\n [bow[0]-ship_direction[0]*del_part] = None\n return\n return index",
"async def add_ship(self, pn, x, y, dx, dy, size):\n board = self.boards[pn]\n try:\n board.add_ship(x, y, dx, dy, size)\n return True\n except ValueError:\n return False",
"def run_shoe_rack_manipulation(self, debug=False, push_in_distance=0.00):\n\n print(\"\\n\\n--- Running Shoe Manipulation-------\\n\\n\")\n\n # self.taskRunner.callOnMain(self._poser_visualizer.visualize_result)\n\n if not self.check_category_goal_estimation_succeeded():\n return False\n\n # check that we really are doing mug\n category_manipulation_type = self.state.cache['category_manipulation_goal']['type']\n assert category_manipulation_type == CategoryManipulationType.SHOE_ON_RACK\n\n\n speed = self.graspingParams['speed']['fast']\n self.moveHome(speed=speed)\n\n result = self.state.cache['category_manipulation_goal']['result']\n T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)\n T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)\n\n grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)\n grasp_data.gripper.params[\"hand_inner_diameter\"] = result.gripper_width\n grasp_data.gripper.params[\"hand_inner_diameter\"] = 0.07\n self.state.grasp_data = grasp_data\n\n # rotate the grasp to align with nominal\n params = self.getParamsForCurrentLocation()\n grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])\n grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)\n\n\n\n\n def vis_function():\n vis.updateFrame(T_W_fingertip_vtk, \"gripper fingertip frame\", scale=0.15, parent=self._vis_container)\n\n vis.updateFrame(grasp_data.grasp_frame, \"grasp frame\", scale=0.15, parent=self._vis_container)\n\n self.visualize_grasp(grasp_data)\n\n self.taskRunner.callOnMain(vis_function)\n\n # execute the grasp\n force_threshold_magnitude = 30\n object_in_gripper = self.execute_grasp(grasp_data, close_gripper=True, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude, push_in_distance=0.04, ee_speed_m_s=0.1)\n\n\n if not object_in_gripper:\n print(\"grasp failed, returning\")\n return False\n\n print \"object_in_gripper:\", object_in_gripper\n\n T_goal_obs = self.state.cache['category_manipulation_goal'][\"T_goal_obs\"]\n T_W_G = self.state.cache['gripper_frame_at_grasp']\n\n\n\n pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position\n pickup_speed = self.graspingParams['speed']['pickup']\n\n if not object_in_gripper:\n # open the gripper and back away\n self.gripperDriver.send_open_gripper_set_distance_from_current()\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n return False\n\n # pickup the object\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n pickup_speed)\n\n # move home\n self.moveHome()\n\n # move to approach pose\n speed = self.graspingParams['speed']['fast']\n q_approach = np.array(self._stored_poses_director[\"left_table\"][\"shoe_approach\"])\n self.robotService.moveToJointPosition(q_approach, maxJointDegreesPerSecond=speed)\n\n\n # compute some poses\n T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix\n T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)\n object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)\n object_manip.compute_transforms()\n T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose\n\n T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)\n T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)\n object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)\n object_manip_approach.compute_transforms()\n T_W_Gn_approach_vtk = 
object_manip_approach.T_W_Gn\n\n # move this down by push_in_distance\n\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_approach_vtk)\n T_W_Gn_approach_vtk = transformUtils.transformFromPose(pos, quat)\n\n\n # now convert these to ee poses for running IK\n pos, quat = transformUtils.poseFromTransform(T_W_Gn_vtk)\n pos[2] -= push_in_distance\n T_W_Gn_vtk = transformUtils.transformFromPose(pos, quat)\n T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)\n T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)\n\n T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)\n T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)\n\n\n # place the object\n force_threshold_magnitude = 50 # shoes are heavy\n q_nom = np.array(self._stored_poses_director[\"Grasping\"][\"above_table_pre_grasp\"])\n q_nom = np.array(self._stored_poses_director[\"left_table\"][\"above_table_pre_grasp\"])\n code =self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude)\n\n print(\"\\n\\n--- Finished Shoe Manipulation-------\\n\\n\")\n\n self._shoe_manipulation_counter += 1\n\n return code",
"def ggpl_spiral_staircase(dx,dy,dz):\n\tnstep = int(dy*2.7)+1\n\t\"\"\" steps parameters \"\"\"\n\triserHeight = (0.50*dy)/nstep\n\ttreadDept = (0.6300-riserHeight)/2.0\n\t\"\"\" number of steps and length of landing for each side \"\"\"\n\tlandingLengthY=dy-((nstep+1)*treadDept)\n\tif dx>dy:\n\t\tstepWidth = landingLengthY\n\telse:\n\t\tstepWidth = dx/2.5\n\t\tlandingLengthY = stepWidth\n\tnsteplatox = int(((dx-2*stepWidth)/treadDept)+0.5) \n\tlandingLengthX=stepWidth\n\tnsteplatoy = int(((dy-stepWidth-landingLengthY)/treadDept)+0.5)\n\t\"\"\" skeleton of the box that contains the stair \"\"\"\n\tbox = SKEL_1(CUBOID([dx,dy,dz]))\n\t\"\"\" total steps \"\"\"\n\ttotalSteps = int((dz/riserHeight))\n\t\"\"\" number and height of floor \"\"\"\n\tnfloor = int(round(dz/2)+1)\n\theightfloor = (nsteplatoy)*riserHeight\n\t\"\"\" first stair \"\"\"\n\tstair=make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY+treadDept,stepWidth,1)\n\tstair = T(2)([dy-((nsteplatoy+2)*treadDept)-landingLengthY]) (stair)\n\t\"\"\" variable that takes into account the number of steps made \"\"\"\n\trealizedStep = nsteplatoy\n\tr =4\n\n\t\"\"\" realization of the stairs \"\"\"\n\tfor j in range(int(nfloor)*2):\n\t\t\"\"\" condition for the realization of the final stair \"\"\"\n\t\tif (totalSteps-realizedStep<=nsteplatox) or (totalSteps-realizedStep<=nsteplatoy):\n\t\t\tif (totalSteps-realizedStep<=nsteplatox) and r%2==1:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dy-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\telse:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dx-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\t\t\"\"\" rotation and translation of the scale in the correct position \"\"\"\n\t\t\tif r==4:\n\t\t\t\tfinalStair=R([1,2])(3*PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==1:\n\t\t\t\tfinalStair = R([1,2])(PI)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==2:\n\t\t\t\tfinalStair = R([1,2])(PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==3:\n\t\t\t\tfinalStair = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\n\t\telse:\n\t\t\tif j%4== 0:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(3*PI/2)(stepsX)\n\t\t\t\tstepsX = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=1\n\t\t\tif j%4== 1:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,dy-nsteplatoy*treadDept-stepWidth,stepWidth,1)\n\t\t\t\tstepsY = R([1,2])(PI)(stepsY)\n\t\t\t\tstepsY = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=2\n\t\t\tif j%4== 2:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(PI/2)(stepsX)\n\t\t\t\tstepsX = 
T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=3\n\t\t\tif j%4== 3:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY,stepWidth,1)\n\t\t\t\tstepsY = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=4\n\t\"\"\"floor of the stair\"\"\"\n\tfloor = CUBOID([dx,dy,0.05])\n\tfloor = TEXTURE(\"texture/floorStair.jpg\")(floor)\n\n\treturn STRUCT([stair,floor,box])",
"def bump_into_wall(\n state: State,\n action: Action,\n next_state: State, # pylint: disable=unused-argument\n) -> bool:\n attempted_next_position = updated_agent_position_if_unobstructed(\n state.agent.position, state.agent.orientation, action\n )\n\n return attempted_next_position in state.grid and (\n isinstance(state.grid[attempted_next_position], Wall)\n )",
"def shipCollided(self):\n self.shipHealth -= 1\n\n if self.shipHealth > 0:\n self.startInvincibilty()\n return True\n else:\n return False",
"def gimme_the_hole(required_hole_size):\n\tgood_object = spray(required_hole_size)\n\tmake_hole(required_hole_size, good_object)\n\treturn good_object",
"def ship_hit(si_settings, screen, stats, sb, ship, aliens, bullets, alienBullets, images):\r\n if stats.ships_left > 0:\r\n # Decrement ships_left.\r\n stats.ships_left -= 1\r\n\r\n # Animate the ship explosion\r\n ship_explosion(si_settings, screen, ship)\r\n\r\n # Update scoreboard.\r\n sb.prep_ships()\r\n\r\n # Empty the list of aliens and bullets.\r\n aliens.empty()\r\n bullets.empty()\r\n alienBullets.empty()\r\n\r\n # Create a new fleet and center the ship.\r\n create_fleet(si_settings, screen, ship, aliens, images)\r\n ship.center_ship()\r\n\r\n # Pause.\r\n sleep(0.5)\r\n else:\r\n stats.game_active = False\r\n pygame.mouse.set_visible(True)",
"def draw_mask(self, image, ship, dims):\n # Get the center x, y and the size s\n x, y, s, r, m = dims\n\n #Load files\n if ship == 'cruiseship':\n ma_path = 'ships/cruiseship_isolated_mask.png'\n id = 1\n elif ship == 'tanker':\n ma_path = 'ships/tanker_isolated_mask.png'\n id = 2\n\n #Transforming mask\n mask = cv2.imread(ma_path)\n mask_transformed = self.transform(mask.copy(), dims)\n mask_shape = np.shape(mask_transformed)\n s_x = int((mask_shape[0]+0.5)//2)\n s_y = int((mask_shape[1]+0.5)//2)\n mask_transformed = mask_transformed[0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[0],\n 0:np.shape(image[x-s_x:x+s_x, y-s_y:y+s_y, :])[1],\n :]\n\n mask_transformed_th = self.threshold(mask_transformed)\n\n #Adding mask to image\n image[x-s_x:x+s_x, y-s_y:y+s_y, :] = id/255*mask_transformed_th\n\n return image",
"def spawn_ok(game):\n me = game.me\n shipyard_cell = game.game_map[me.shipyard]\n\n # % turns above mining rate to dropoff the halite, will typically be about 2?\n mining_over_head = 2\n ship_count = len(me.get_ships())\n\n #\n # absolute constraints (order can be important)\n #\n\n if ship_count >= MAX_SHIPS:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. MAX ships reached\".format())\n return False\n\n if me.halite_amount < constants.SHIP_COST:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. Insufficient halite\".format())\n return False\n\n #\n # conditional constraints\n #\n\n logging.debug(\"shipyard_cell.is_occupied: {}\".format(shipyard_cell.is_occupied))\n if shipyard_cell.is_occupied:\n logging.debug(\"shipyard_cell.ship.owner == me.id: {}\".format(shipyard_cell.ship.owner == me.id))\n\n # watch for collisions with owner only, note this will be 1 turn behind\n occupied_cells = []\n if shipyard_cell.is_occupied and shipyard_cell.ship.owner == me.id:\n occupied_cells.append(shipyard_cell.position)\n\n logging.debug(\"oc1: {}\".format(occupied_cells))\n\n # entry lane are N/S\n n_cell = shipyard_cell.position.directional_offset(Direction.North)\n s_cell = shipyard_cell.position.directional_offset(Direction.South)\n e_cell = shipyard_cell.position.directional_offset(Direction.East)\n w_cell = shipyard_cell.position.directional_offset(Direction.West)\n for pos in [n_cell, s_cell, e_cell, w_cell]:\n if game.game_map[pos].is_occupied:\n occupied_cells.append(pos)\n\n logging.debug(\"oc2: {}\".format(occupied_cells))\n\n # need to keep track of ships docking instead, a ship in an adjacent cell could be leaving\n if occupied_cells:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. Occupied cells: {}\".format(occupied_cells))\n return False\n\n return True",
"def weightShipSearch(self, coordinates, size, weight, direction, hitWeight):\r\n if size == 0:\r\n # Successfully searched the required size.\r\n return True, hitWeight \r\n if coordinates.x < 0 or coordinates.y < 0 or coordinates.x == self.boardDimensions or coordinates.y == self.boardDimensions:\r\n # Can't go off the board.\r\n return False, 0\r\n if self.enemyBoard[coordinates.x][coordinates.y] < BoardState.HIT:\r\n # This search is all for naught since we can't possibly have a ship at this position.\r\n return False, 0\r\n if self.enemyBoard[coordinates.x][coordinates.y] == BoardState.HIT:\r\n # Weigh searches with hits already in them over searches without them. This is to \r\n # direct the shot selection toward coordinates with hits already near them.\r\n hitWeight += 10\r\n # Move to the next set of coordinates on the board.\r\n result, hitWeight = self.weightShipSearch(Coordinates(coordinates.x + direction.x, coordinates.y + direction.y),\r\n size - 1, weight, direction, hitWeight)\r\n if result:\r\n # A entire ship can fit, weight the coordinate appropriately.\r\n if self.enemyBoard[coordinates.x][coordinates.y] >= BoardState.OPEN:\r\n self.enemyBoard[coordinates.x][coordinates.y] += (weight + hitWeight)\r\n return result, hitWeight",
"def ship_hit(si_settings,screen,stats,sb,ship,aliens,bullets):\n if stats.ships_left > 0:\n # Decrement ships_left.\n stats.ships_left -= 1\n #update Scoreboard\n sb.prep_ships()\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n #empties aliens and bullets\n aliens.empty()\n bullets.empty()\n #makes new aliens and centers ship\n create_fleet(si_settings,screen,ship,aliens)\n ship.center_ship()\n #stop\n sleep(0.5)",
"def check_fleet_edges(si_settings, aliens):\n for alien in aliens.sprites():\n if alien.check_edges():\n change_fleet_direction(si_settings,aliens)\n break",
"def check_fleet_edges(si_settings, aliens):\r\n for alien in aliens.sprites():\r\n if alien.check_edges():\r\n change_fleet_direction(si_settings, aliens)\r\n break",
"def move():\n\tdirections = [\"up\", \"down\", \"left\", \"right\"]\n\tdata = bottle.request.json\n\t#print(\"MOVE:\", json.dumps(data))\n\t# THE BEST MOVE IS CALUCLATED USING A FLOODFILL. HIGHEST AREA WIN. ME SPEEL GOOD\n\tmove = \"\"\n\n\tupC = floodFill(getNextPosition(\"up\", data), data, arrayify(\"up\", data, not largestSnake(data)), 0)\n\tdownC = floodFill(getNextPosition(\"down\", data), data, arrayify(\"down\", data, not largestSnake(data)), 0)\n\trightC = floodFill(getNextPosition(\"right\", data), data, arrayify(\"right\", data, not largestSnake(data)), 0)\n\tleftC = floodFill(getNextPosition(\"left\", data), data, arrayify(\"left\", data, not largestSnake(data)), 0)\n\tmoveC = [upC, downC, rightC, leftC]\n\t#if moveC cannot find a viable move with ghostheads, it disables them so the snake doesn't kill itself\n\n\tmove = goto(moveC, findFood(data), data)\n\tprint(\"moveC before max = \" + str(moveC))\n\tif max(moveC) == 0:\n\t\tprint(\"ghosthead disabled\")\n\t\tupC = floodFill(getNextPosition(\"up\", data), data, arrayify(\"up\", data, False), 0)\n\t\tdownC = floodFill(getNextPosition(\"down\", data), data, arrayify(\"down\", data, False), 0)\n\t\trightC = floodFill(getNextPosition(\"right\", data), data, arrayify(\"right\", data, False), 0)\n\t\tleftC = floodFill(getNextPosition(\"left\", data), data, arrayify(\"left\", data, False), 0)\n\t\tmoveC = [upC, downC, rightC, leftC]\n\n\tprint(\"move after goto: \" + move)\n\tprint(\"movC after if max 0 block: \" + str(moveC))\n\tif move == \"\":\n\t\tgoodMoves = []\n\t\tif upC == max(moveC):\n\t\t\tgoodMoves.append(\"up\")\n\t\tif downC == max(moveC):\n\t\t\tgoodMoves.append(\"down\")\n\t\tif leftC == max(moveC):\n\t\t\tgoodMoves.append(\"left\")\n\t\tif rightC == max(moveC):\n\t\t\tgoodMoves.append(\"right\")\n\t\tmove = random.choice(goodMoves)\n\n\tprint(\"Turn: \" + str(data[\"turn\"]))\n\tprint(\"Move: \" + move)\n\n\tresponse = {\"move\": move, \"shout\": \"yeet\"}\n\treturn HTTPResponse(\n\t\tstatus=200,\n\t\theaders={\"Content-Type\": \"application/json\"},\n\t\tbody=json.dumps(response),\n\t)"
] |
[
"0.6850111",
"0.54134226",
"0.5392717",
"0.5219759",
"0.5036896",
"0.50010026",
"0.4920517",
"0.4919272",
"0.47853866",
"0.47571567",
"0.46850467",
"0.46844095",
"0.46821985",
"0.4675576",
"0.4668884",
"0.4647125",
"0.46391284",
"0.46350923",
"0.45468506",
"0.45452628",
"0.45292756",
"0.44798708",
"0.44796002",
"0.44768032",
"0.4475532",
"0.4408934",
"0.438519",
"0.43775368",
"0.43750215",
"0.43682444"
] |
0.5980011
|
1
|
Purpose NOTE THAT THIS FUNCTION IS INTENDED TO BE USED FOR THE `apply()` METHOD OF A PANDAS DATAFRAME WITH THE AXIS PARAMETER SET TO `"columns"` or `1`. The purpose of this function is to take the information about an event and return the starting point of the event as a list of [x, y] pitch coordinates. Something that is important to note is that the convention that the creators of the original event tracking data set followed was to indicate field positions from the perspective of the team initiating the corresponding event. Thus, for any given match, the positions listed may not refer to the same physical location even if their numeric values are the same. We chose to keep this convention because what matters more is the position of the event in the context of the initiating team's attack.
|
def event_starting_point_extractor(row) -> list:
    to_return = None
    # First, define the variables that we will need for the rest of this
    # function.
    positions_list = literal_eval(row["positions"])
    assert isinstance(positions_list, list)
    assert 1 <= len(positions_list) <= 2
    # Next, extract the starting position and scale it from the raw 0-100
    # range to a 104 m x 68 m pitch.
    raw_starting_x = positions_list[0].get("x")
    raw_starting_y = positions_list[0].get("y")
    starting_x = (raw_starting_x/100)*104
    starting_y = (raw_starting_y/100)*68
    # Finally, validate and return the result.
    to_return = [starting_x, starting_y]
    return to_return
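# Usage sketch (illustrative only): the one-row DataFrame and its "positions"
# string below are hypothetical, not taken from the original event data set.
#
#     demo_df = pd.DataFrame(
#         {"positions": ["[{'x': 50, 'y': 50}, {'x': 75, 'y': 30}]"]})
#     starting_points = demo_df.apply(func=event_starting_point_extractor,
#                                     axis="columns")
#     # -> a Series whose single element is [52.0, 34.0] on a 104 m x 68 m pitch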
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def event_ending_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n starting_x = positions_list[0].get(\"x\")\n starting_y = positions_list[0].get(\"y\")\n\n try:\n ending_x = positions_list[1].get(\"x\")\n raw_ending_y = positions_list[1].get(\"y\")\n except IndexError:\n # If the event is one where there is no ending point to list (i.e.,\n # a foul).\n ending_x, raw_ending_y = starting_x, starting_y\n\n ending_y = (raw_ending_y/100)*69\n\n # Finally, validate and return the result.\n to_return = [ending_x, ending_y]\n\n return to_return",
"def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx",
"def getEventLocation():\n global currentToken\n global currentChunk\n global currentSentence\n if currentSentence is not None:\n if currentToken is not None and currentToken.isAdjToken(): #if not currentChunk\n position = currentToken.position\n logger.debug(\"Event position obtained from AdkToken: \"+str(position))\n else:\n position = currentChunk.position\n logger.debug(\"Event position obtained from Chunk: \"+str(position))\n return position\n else:\n debug.error(\"No position for current Event\")",
"def frame_index_to_pts(frame: int, start_pt: int, diff_per_frame: int) -> int:\r\n return start_pt + frame * diff_per_frame",
"def cluster_positions_extractor(\n cluster_events_df: pd.DataFrame) -> pd.DataFrame:\n to_return = None\n # First, validate the input data\n ipv.parameter_type_validator(expected_type=pd.DataFrame,\n parameter_var=cluster_events_df)\n normed = cluster_events_df.reset_index(drop=True)\n\n # Next, run the above two functions to get starting and ending positions\n starting_positions_series = normed.apply(\n func=event_starting_point_extractor, \n axis=\"columns\"\n )\n starting_positions_df = pd.DataFrame(\n data=starting_positions_series.tolist(),\n index=normed.index,\n columns=[\"starting_x\", \"starting_y\"]\n )\n\n ending_positions_series = normed.swifter.apply(\n func=event_ending_point_extractor,\n axis=\"columns\"\n )\n ending_positions_df = pd.DataFrame(\n data=ending_positions_series.tolist(),\n index=normed.index,\n columns=[\"ending_x\", \"ending_y\"]\n )\n\n # Create the new DataFrame that we will be returning.\n positions_df = pd.concat(\n objs=[normed.drop(columns=\"positions\"),\n starting_positions_df,\n ending_positions_df],\n axis=\"columns\",\n ignore_index=True\n )\n positions_df.rename(columns={0 : \"seq_id\",\n 1 : \"id\",\n 2 : \"matchId\",\n 3 : \"teamId\",\n 4 : \"starting_x\",\n 5 : \"starting_y\",\n 6 : \"ending_x\",\n 7 : \"ending_y\"},\n inplace=True)\n\n # Finally, validate and return the result\n ipv.parameter_type_validator(expected_type=pd.DataFrame,\n parameter_var=positions_df)\n to_return = positions_df\n\n return to_return",
"def getIndexPoint(event=None, plane=None, epsilon=2):\n\n if event is None:\n return None\n if plane is None:\n return None\n if len(plane) == 0:\n return None\n\n xt = np.asarray([i[1] for i in plane])\n yt = np.asarray([i[0] for i in plane])\n d = np.sqrt((xt - event.xdata)**2 / 16 + (yt - event.ydata)**2)\n index = d.argsort()[:1][0]\n # position to far away\n if d[index] >= epsilon:\n return None\n index = int(index)\n return index",
"def getIndexPointX(event=None, plane=None):\n\n if event is None:\n return None\n if plane is None:\n return None\n if len(plane) < 2:\n return None\n\n xt = [i[1] for i in plane]\n index = int(bisect.bisect_left(xt, event.xdata) - 1)\n return index",
"def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1",
"def intrinsic_index_calc(df: pd.DataFrame):\n\n cur_index = 0\n df['Int_index'] = None\n df['Int_index'].iloc[0] = cur_index\n for i in range(len(df)):\n if df['Int_event'][i] in [-1, 1, -2, 2]:\n cur_index = cur_index + 1\n df['Int_index'].iloc[i] = cur_index\n\n return df",
"def _get_relative(self, event):\n delta_x = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventDeltaX)\n delta_y = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventDeltaY)\n return delta_x, delta_y",
"def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx",
"def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", 
\":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None",
"def position_in_operon(self):\n if self.transcription_units:\n tu_lengths = [len(tu.location) for tu in self.transcription_units]\n longest_tu = self.transcription_units[int(np.argmax(tu_lengths))]\n if longest_tu.location.strand == 1:\n gene_starts = sorted([gene.location.start.position for gene in longest_tu.genes])\n this_gene_start = self.location.start.position\n else:\n gene_starts = sorted([gene.location.end.position for gene in longest_tu.genes])\n gene_starts.reverse()\n this_gene_start = self.location.end.position\n position = np.where(np.array(gene_starts) == this_gene_start)[0][0] + 1\n else:\n position = 1\n return position",
"def getEventsCounters (attack_df, events):\n n_11 = 0 \n n_12 = 0 \n n_21 = 0 \n n_22 = 0 \n event_type = 0\n for event in events:\n is_attack = False\n for ind in range (len(attack_df.index)):\n # attack happened\n if (event.start >= (attack_df[\"start_time\"][ind] + 0.5)\n and event.start < (attack_df[\"end_time\"][ind]) - 0.5) or (\n event.end > (attack_df[\"start_time\"][ind] + 0.5)) and (\n event.end <= (attack_df[\"end_time\"][ind] - 0.5)):\n if event.choise == True:\n n_11 += 1\n event_type = 1\n is_attack = True\n break\n else:\n event_type = 3\n n_21 += 1\n is_attack = True\n break\n if is_attack:\n continue\n else:\n if event.choise == True:\n event_type = 2\n n_12 += 1\n else:\n event_type = 4\n n_22 += 1\n\n return ([n_11, n_12, n_21, n_22], event_type)",
"def get_new_position(row_delta, column_delta):\n new_row = start_row - row_delta\n new_column = start_column + column_delta\n return new_row, new_column",
"def xy(event):\n return map(int, event.get_coords())",
"def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')",
"def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')",
"def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')",
"def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly",
"def _find_position(self, e):\n walk = self._data.first()\n while walk is not None and walk.element()._value != e:\n walk = self._data.after(walk)\n \n return walk",
"def find_startpos(self, searched_object:str):\r\n fak = 1 #< When the figure needs to be pushed to the right -> fak = 1 else fak = 0\r\n # The main figures spwan position beginns at index 14 and ends at size(self.look_up_table) - 9\r\n start_index = 14\r\n y = start_index \r\n end_index = -9\r\n for x in self.look_up_table[start_index : end_index]:\r\n # When the serached object is in the row then get the index of it\r\n if searched_object in x:\r\n x = x.index(searched_object)\r\n break\r\n y += 1\r\n # Pac-Man does not need to push to the right\r\n if searched_object == 'PACMAN':\r\n fak = 0\r\n return x * self.grid_size + fak * self.grid_size // 2, y * self.grid_size",
"def calc_position_relative_point(data, point):\n numframe = len(data[0])\n x_adj, y_adj = np.zeros((numframe, 1)), np.zeros((numframe, 1))\n for idx in range(numframe):\n x,y = data[0][idx], data[1][idx]\n x_adj[idx] = point[0] - x\n y_adj[idx] = point[1] - y\n return x_adj, y_adj",
"def event_to_x_y(self, event):\n\t\treturn (round(event.x / self.w_to_px), round((HEIGHT - event.y) / self.h_to_px))",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):",
"def getPosition(*args):"
] |
[
"0.6105091",
"0.5720752",
"0.5614691",
"0.5605947",
"0.55807626",
"0.5570309",
"0.5512276",
"0.5461457",
"0.5446376",
"0.53841436",
"0.5370126",
"0.5357768",
"0.53466797",
"0.52359855",
"0.52021855",
"0.51934916",
"0.5181264",
"0.5181264",
"0.5181264",
"0.50866765",
"0.5081355",
"0.5064781",
"0.5062241",
"0.50327486",
"0.5032035",
"0.5032035",
"0.5032035",
"0.5032035",
"0.5032035",
"0.5032035"
] |
0.69432586
|
0
|
Purpose NOTE THAT THIS FUNCTION IS INTENDED TO BE USED FOR THE `apply()` METHOD OF A PANDAS DATAFRAME WITH THE AXIS PARAMETER SET TO `"columns"` or `1`. The purpose of this function is to take the information about an event and return the ending point of the event as a list of [x, y] pitch coordinates. Something that is important to note is that the convention that the creators of the original event tracking data set followed was to indicate field positions from the perspective of the team initiating the corresponding event. Thus, for any given match, the positions listed may not refer to the same physical location even if their numeric values are the same. We chose to keep this convention because what matters more is the position of the event in the context of the initiating team's attack.
|
def event_ending_point_extractor(row) -> list:
    to_return = None
    # First, define the variables that we will need for the rest of this
    # function.
    positions_list = literal_eval(row["positions"])
    assert isinstance(positions_list, list)
    assert 1 <= len(positions_list) <= 2
    # Next, extract the ending position; fall back to the starting position
    # when the event has no ending point listed (i.e., a foul).
    raw_starting_x = positions_list[0].get("x")
    raw_starting_y = positions_list[0].get("y")
    try:
        raw_ending_x = positions_list[1].get("x")
        raw_ending_y = positions_list[1].get("y")
    except IndexError:
        raw_ending_x, raw_ending_y = raw_starting_x, raw_starting_y
    # Scale from the raw 0-100 range to the same 104 m x 68 m pitch used by
    # the starting-point extractor.
    ending_x = (raw_ending_x/100)*104
    ending_y = (raw_ending_y/100)*68
    # Finally, validate and return the result.
    to_return = [ending_x, ending_y]
    return to_return
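# Usage sketch (illustrative only; the row below is hypothetical). An event
# such as a foul has only one entry in "positions", so the IndexError branch
# falls back to the starting point.
#
#     foul_row = pd.Series({"positions": "[{'x': 25, 'y': 50}]"})
#     event_ending_point_extractor(foul_row)
#     # -> the starting point, scaled to the same pitch coordinates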
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def event_starting_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n raw_starting_x = positions_list[0].get(\"x\")\n raw_starting_y = positions_list[0].get(\"y\")\n\n starting_x = (raw_starting_x/100)*104\n starting_y = (raw_starting_y/100)*68\n\n # Finally, validate and return the result.\n to_return = [starting_x, starting_y]\n\n return to_return",
"def cluster_positions_extractor(\n cluster_events_df: pd.DataFrame) -> pd.DataFrame:\n to_return = None\n # First, validate the input data\n ipv.parameter_type_validator(expected_type=pd.DataFrame,\n parameter_var=cluster_events_df)\n normed = cluster_events_df.reset_index(drop=True)\n\n # Next, run the above two functions to get starting and ending positions\n starting_positions_series = normed.apply(\n func=event_starting_point_extractor, \n axis=\"columns\"\n )\n starting_positions_df = pd.DataFrame(\n data=starting_positions_series.tolist(),\n index=normed.index,\n columns=[\"starting_x\", \"starting_y\"]\n )\n\n ending_positions_series = normed.swifter.apply(\n func=event_ending_point_extractor,\n axis=\"columns\"\n )\n ending_positions_df = pd.DataFrame(\n data=ending_positions_series.tolist(),\n index=normed.index,\n columns=[\"ending_x\", \"ending_y\"]\n )\n\n # Create the new DataFrame that we will be returning.\n positions_df = pd.concat(\n objs=[normed.drop(columns=\"positions\"),\n starting_positions_df,\n ending_positions_df],\n axis=\"columns\",\n ignore_index=True\n )\n positions_df.rename(columns={0 : \"seq_id\",\n 1 : \"id\",\n 2 : \"matchId\",\n 3 : \"teamId\",\n 4 : \"starting_x\",\n 5 : \"starting_y\",\n 6 : \"ending_x\",\n 7 : \"ending_y\"},\n inplace=True)\n\n # Finally, validate and return the result\n ipv.parameter_type_validator(expected_type=pd.DataFrame,\n parameter_var=positions_df)\n to_return = positions_df\n\n return to_return",
"def adbGetEvent( self, pars ):\n \n\t( id1, id2, id3, flag ) = pars\n\n\tif id3 == 1:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t if ny <= nx:\n\t\t\ty.append( e[j,2] )\n\t\t\tny\t+= 1\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t x.append( e[j,2] )\n\t\t nx\t+= 1\n\n\telif id3 == 2:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t if ny <= nx:\n\t\t\ty.append( e[j,2] )\n\t\t\tny\t+= 1\n\t\t else:\n\t\t\ty[-1]\t= e[j,2]\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t x.append( e[j,2] )\n\t\t nx\t+= 1\n\n\telif id3 == 3:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= []\n\t y\t= []\n\t nx\t= 0\n\t ny\t= 0\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t y.append( e[j,2] )\n\t\t ny\t+= 1\n\t\tif e[j,0] == _EVENT_TS and nx < ny:\n\t\t n\t= ny - nx\n\t\t dx\t= 1. / n\n\t\t for i in range(1,n+1):\n\t\t\tx.append( e[j,2] - dx * (n-i) )\n\t\t nx\t+= n\n\n\telse:\n\n\t e\t= self.adb.get( 'logEvents' )\n\t x\t= self.adb.get( 'steps' )\n\t y\t= []\n\t for j in range(e.shape[0]):\n\t\tif e[j,0] == id1 and e[j,1] == id2:\n\t\t y.append( e[j,2] )\n\n\tif flag == 1:\n\t y\t= numarray.array( y, 'd' )\n\t y\t= numarray.maximum( y, 1.e-20 )\n\n steps = self.adb.get( 'steps' )\n times = self.adb.get( 'times' )\n tIncs = self.adb.get( 'timeIncs' )\n nSteps = len( steps )\n xt = []\n i = 0\n for j in range(len(x)):\n while i < nSteps and steps[i] < x[j]:\n i += 1\n t = times[i] + (x[j] - steps[i]) * tIncs[i]\n xt.append( t )\n \n\tx \t= numarray.array( x ).flat\n\txt \t= numarray.array( xt ).flat\n\ty \t= numarray.array( y ).flat\n return( x, xt, y )",
"def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx",
"def getIndexPoint(event=None, plane=None, epsilon=2):\n\n if event is None:\n return None\n if plane is None:\n return None\n if len(plane) == 0:\n return None\n\n xt = np.asarray([i[1] for i in plane])\n yt = np.asarray([i[0] for i in plane])\n d = np.sqrt((xt - event.xdata)**2 / 16 + (yt - event.ydata)**2)\n index = d.argsort()[:1][0]\n # position to far away\n if d[index] >= epsilon:\n return None\n index = int(index)\n return index",
"def getEventLocation():\n global currentToken\n global currentChunk\n global currentSentence\n if currentSentence is not None:\n if currentToken is not None and currentToken.isAdjToken(): #if not currentChunk\n position = currentToken.position\n logger.debug(\"Event position obtained from AdkToken: \"+str(position))\n else:\n position = currentChunk.position\n logger.debug(\"Event position obtained from Chunk: \"+str(position))\n return position\n else:\n debug.error(\"No position for current Event\")",
"def _find_position(self, e):\n walk = self._data.first()\n while walk is not None and walk.element()._value != e:\n walk = self._data.after(walk)\n \n return walk",
"def getEventsCounters (attack_df, events):\n n_11 = 0 \n n_12 = 0 \n n_21 = 0 \n n_22 = 0 \n event_type = 0\n for event in events:\n is_attack = False\n for ind in range (len(attack_df.index)):\n # attack happened\n if (event.start >= (attack_df[\"start_time\"][ind] + 0.5)\n and event.start < (attack_df[\"end_time\"][ind]) - 0.5) or (\n event.end > (attack_df[\"start_time\"][ind] + 0.5)) and (\n event.end <= (attack_df[\"end_time\"][ind] - 0.5)):\n if event.choise == True:\n n_11 += 1\n event_type = 1\n is_attack = True\n break\n else:\n event_type = 3\n n_21 += 1\n is_attack = True\n break\n if is_attack:\n continue\n else:\n if event.choise == True:\n event_type = 2\n n_12 += 1\n else:\n event_type = 4\n n_22 += 1\n\n return ([n_11, n_12, n_21, n_22], event_type)",
"def events(self, game_id: int) -> DataFrame[Any]:",
"def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", 
\":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None",
"def numEvents(self):\n offsets = self.baxH5._offsetsByHole[self.holeNumber]\n return offsets[1] - offsets[0]",
"def position_in_operon(self):\n if self.transcription_units:\n tu_lengths = [len(tu.location) for tu in self.transcription_units]\n longest_tu = self.transcription_units[int(np.argmax(tu_lengths))]\n if longest_tu.location.strand == 1:\n gene_starts = sorted([gene.location.start.position for gene in longest_tu.genes])\n this_gene_start = self.location.start.position\n else:\n gene_starts = sorted([gene.location.end.position for gene in longest_tu.genes])\n gene_starts.reverse()\n this_gene_start = self.location.end.position\n position = np.where(np.array(gene_starts) == this_gene_start)[0][0] + 1\n else:\n position = 1\n return position",
"def intrinsic_index_calc(df: pd.DataFrame):\n\n cur_index = 0\n df['Int_index'] = None\n df['Int_index'].iloc[0] = cur_index\n for i in range(len(df)):\n if df['Int_event'][i] in [-1, 1, -2, 2]:\n cur_index = cur_index + 1\n df['Int_index'].iloc[i] = cur_index\n\n return df",
"def get_new_position(row_delta, column_delta):\n new_row = start_row - row_delta\n new_column = start_column + column_delta\n return new_row, new_column",
"def get_special_point(power,events,borders,eventName,numericValue):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n #around turn_on\n i = 0 \n count = 0\n event_index = []\n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date-borders[0]))\n end = str(datetime.fromtimestamp(date+borders[1]))\n serie = Series.from_array(power[(power['time']>=start)&(power['time']<=end)]['value'])\n if len(serie)>0:\n event_index.append(serie.index[int(len(serie)/2)])\n count += 1\n i += 1\n print(\"number of\", eventName ,\"in groudtruth and power=\",count)\n return event_index,[numericValue]*len(event_index)",
"def _get_relative(self, event):\n delta_x = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventDeltaX)\n delta_y = Quartz.CGEventGetIntegerValueField(\n event, Quartz.kCGMouseEventDeltaY)\n return delta_x, delta_y",
"def get_highest_event (self, list_of_event_pos):\n highest = None\n highest_distance = 100\n \n part_of_speech_list = self.tree.pos()\n for i in xrange(len(list_of_event_pos)):\n event_pos = list_of_event_pos[i]\n try:\n distance = len(self.tree.leaf_treeposition(event_pos))\n \n if distance < highest_distance:\n highest_distance = distance\n highest = i\n elif distance == highest_distance:\n try:\n highest_POS = part_of_speech_list[list_of_event_pos[highest]][1]\n current_POS = part_of_speech_list[list_of_event_pos[i]][1]\n \"\"\"\n If the current event is actually a verb, it should \n replace the highest event with the same high\n \"\"\"\n if highest_POS[0] != 'V' and current_POS[0] == 'V':\n highest_distance = distance\n highest = i\n except Exception:\n logger.warn(\"Problem in comparing part of speech of two \\\n highest event candidate\")\n except IndexError as ie:\n logger.warn(\"Index error\")\n logger.info('Event pos %d' %event_pos)\n logger.info('Tree length %d' %len(self.tree.leaves()))\n logger.info(str(self.tree))\n return highest",
"def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx",
"def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1",
"def xy(event):\n return map(int, event.get_coords())",
"def frame_index_to_pts(frame: int, start_pt: int, diff_per_frame: int) -> int:\r\n return start_pt + frame * diff_per_frame",
"def coord_match(self, new_coords):\n all_events = []\n gene = self.gene\n\n for orig_cord in self.negative_coords:\n compare_f = orig_cord[0] + orig_cord[-1]\n compare_t = new_coords[0] + new_coords[-1]\n mm_f = orig_cord[2]\n mm_t = new_coords[1]\n\n if compare_f == compare_t and mm_f < mm_t:\n eventid = ('{}:{}-{}:{}-{}:{}-{}:{}-{}:{}'\n .format(gene.chr, orig_cord[0], orig_cord[1], orig_cord[2], orig_cord[3],\n new_coords[0], new_coords[1], new_coords[2], new_coords[3], gene.strand))\n all_events.append([eventid, orig_cord])\n return all_events",
"def eventNumber(self):\n raise NotImplementedError",
"def parse_score_fp_event(self):\n score = -1\n if self.last_event_code == DEFS.EF_FPFTR:\n score = self.last_payload_data[0]\n return score",
"def frame(self, frame):\n if self.vertical:\n cell = ((frame-1)//self.rows)+1\n row = frame-(cell-1)*self.rows\n else:\n row = ((frame-1)//self.cells)+1\n cell = frame-(row-1)*self.cells\n\n return cell, row",
"def event_to_x_y(self, event):\n\t\treturn (round(event.x / self.w_to_px), round((HEIGHT - event.y) / self.h_to_px))",
"def getIndexPointX(event=None, plane=None):\n\n if event is None:\n return None\n if plane is None:\n return None\n if len(plane) < 2:\n return None\n\n xt = [i[1] for i in plane]\n index = int(bisect.bisect_left(xt, event.xdata) - 1)\n return index",
"def get_pos_index(self):\n return [self.row-1, self.col-1]",
"def _get_past_tense_event(self):\n return f'{self.event_type}d' if self.event_type[-1] == 'e' else f'{self.event_type}ed'",
"def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0"
] |
[
"0.61686385",
"0.5415751",
"0.5356658",
"0.52927125",
"0.51529205",
"0.51526916",
"0.5141919",
"0.5124614",
"0.5096792",
"0.5095719",
"0.50302047",
"0.50110096",
"0.500243",
"0.49752328",
"0.4970546",
"0.49601305",
"0.49375775",
"0.49232593",
"0.48982283",
"0.48747092",
"0.48539612",
"0.48432496",
"0.48264444",
"0.48228136",
"0.48134485",
"0.48002934",
"0.47997072",
"0.47989503",
"0.47601473",
"0.47490168"
] |
0.68056196
|
0
|
Purpose: The purpose of this function is to take a DataFrame that contains all of the events of set piece sequences that belong to a particular cluster of interest and create a new DataFrame that, for each event, explicitly lists its starting point and ending point on the soccer pitch.
|
def cluster_positions_extractor(
cluster_events_df: pd.DataFrame) -> pd.DataFrame:
to_return = None
# First, validate the input data
ipv.parameter_type_validator(expected_type=pd.DataFrame,
parameter_var=cluster_events_df)
normed = cluster_events_df.reset_index(drop=True)
# Next, run the above two functions to get starting and ending positions
starting_positions_series = normed.apply(
func=event_starting_point_extractor,
axis="columns"
)
starting_positions_df = pd.DataFrame(
data=starting_positions_series.tolist(),
index=normed.index,
columns=["starting_x", "starting_y"]
)
ending_positions_series = normed.swifter.apply(
func=event_ending_point_extractor,
axis="columns"
)
ending_positions_df = pd.DataFrame(
data=ending_positions_series.tolist(),
index=normed.index,
columns=["ending_x", "ending_y"]
)
# Create the new DataFrame that we will be returning.
positions_df = pd.concat(
objs=[normed.drop(columns="positions"),
starting_positions_df,
ending_positions_df],
axis="columns",
ignore_index=True
)
positions_df.rename(columns={0 : "seq_id",
1 : "id",
2 : "matchId",
3 : "teamId",
4 : "starting_x",
5 : "starting_y",
6 : "ending_x",
7 : "ending_y"},
inplace=True)
# Finally, validate and return the result
ipv.parameter_type_validator(expected_type=pd.DataFrame,
parameter_var=positions_df)
to_return = positions_df
return to_return
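
A minimal, self-contained sketch of the same extract-and-concatenate pattern is shown below. The sample data, the positions format, and the inline lambdas standing in for event_starting_point_extractor / event_ending_point_extractor are all hypothetical; the real helpers and the ipv validator are defined elsewhere in the project.

import pandas as pd

# Hypothetical cluster events: seq_id/id/matchId/teamId plus a "positions"
# column holding [start, end] coordinate dicts for each event.
events = pd.DataFrame({
    "seq_id": [0, 0],
    "id": [101, 102],
    "matchId": [5, 5],
    "teamId": [12, 12],
    "positions": [
        [{"x": 30, "y": 40}, {"x": 55, "y": 60}],
        [{"x": 55, "y": 60}, {"x": 80, "y": 35}],
    ],
})

# Stand-ins for the project's extractor helpers: take (x, y) of the first and
# last entry of each event's positions list.
start_xy = events["positions"].apply(lambda p: (p[0]["x"], p[0]["y"]))
end_xy = events["positions"].apply(lambda p: (p[-1]["x"], p[-1]["y"]))

# Drop the raw positions column and append the extracted coordinates.
positions_df = pd.concat(
    [events.drop(columns="positions"),
     pd.DataFrame(start_xy.tolist(), columns=["starting_x", "starting_y"]),
     pd.DataFrame(end_xy.tolist(), columns=["ending_x", "ending_y"])],
    axis="columns",
)
print(positions_df)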
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df",
"def create_rows(subject_event_df, parsing_method):\n if parsing_method >= 1: # parse using seconds\n second_interval = int(parsing_method)\n start_time = parse(subject_event_df[\"TimeStamp\"].iloc[0])\n total_seconds = (parse(subject_event_df[\"TimeStamp\"].iloc[-1]) - start_time).total_seconds()\n sequence_indices = generate_time_series_indices(subject_event_df[\"TimeStamp\"], total_seconds=total_seconds, interval=second_interval)\n block = subject_event_df.iloc[sequence_indices, :]\n elif 0 <= parsing_method < 1:\n portion = min(0.5, parsing_method)\n indices_per_frame = (subject_event_df.shape[0] * portion)\n frames = int(1.0 / portion)\n sequence_indices = []\n for frame_num in range(frames):\n start_index = int(frame_num * indices_per_frame)\n end_index = int((frame_num + 1) * indices_per_frame)\n sequence_indices.append(end_index)\n block = subject_event_df.iloc[sequence_indices, :]\n else:\n print(\"Undefined parsing method: %s\" % parsing_method)\n block = pd.DataFrame()\n return block",
"def _spawn_runways() -> pd.DataFrame:\n\n n = NUMBER_OF_RUNWAYS\n runway_data = np.empty((n, 5))\n\n if not n % 2:\n for i, N in enumerate(range(1, n, 2)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH) / 2\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_data[i + n // 2, 0] = - x\n runway_data[i + n // 2, 1] = y_base\n runway_data[i + n // 2, 2] = - x\n runway_data[i + n // 2, 3] = y_top\n runway_data[i + n // 2, 4] = 0\n\n else:\n for i, N in enumerate(range(- n // 2 + 1, n // 2 + 1)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH)\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_info = pd.DataFrame(runway_data)\n return runway_info",
"def cluster_data(df, bus):\n rowNbr = 1\n df = df[(df.Bus == bus) | (df.Bus == -1)]\n size = df.shape[0]\n itr = df.iterrows()\n event_index = 0\n dict_clu = create_dict(size)\n row = next(itr)[1]\n ExternalTrigger = 0\n while rowNbr < size:\n timestamp = row.Time\n if row.Bus == -1:\n ExternalTrigger = row.Time\n row = next(itr)[1]\n rowNbr = rowNbr + 1\n else:\n wChTemp = [-1, 0]\n gChTemp = [-1, 0]\n wADC = 0\n wM = 0\n gADC = 0\n gM = 0\n while timestamp == row.Time and rowNbr < size:\n Channel = row.Channel\n if Channel < 80:\n wADC = wADC + row.ADC\n wM = wM + 1\n if row.ADC > wChTemp[1]:\n wChTemp[0] = Channel\n wChTemp[1] = row.ADC\n else:\n gADC = gADC + row.ADC\n gM = gM + 1\n if row.ADC > gChTemp[1]:\n gChTemp[0] = Channel\n gChTemp[1] = row.ADC\n row = next(itr)[1]\n rowNbr = rowNbr + 1\n \n wCh = wChTemp[0] \n gCh = gChTemp[0]\n \n dict_clu['ToF'][event_index] = timestamp - ExternalTrigger\n dict_clu['Time'][event_index] = timestamp\n dict_clu['wCh'][event_index] = wCh\n dict_clu['wADC'][event_index] = wADC\n dict_clu['wM'][event_index] = wM\n dict_clu['gCh'][event_index] = gCh\n dict_clu['gADC'][event_index] = gADC\n dict_clu['gM'][event_index] = gM\n \n event_index = event_index + 1\n \n if rowNbr % 100000 == 0:\n print('Progress: ' + str(round(((rowNbr)/size),2)*100) + ' %')\n print('Number of events: ' + str(event_index) + '\\n')\n \n df_clu = pd.DataFrame(dict_clu)\n df_clu = df_clu.drop(range(event_index, size, 1))\n return df_clu",
"def create_blocks(subject_event_df, parsing_method):\n all_blocks = []\n if parsing_method >= 1: # parse using seconds\n second_interval = int(parsing_method)\n start_time = parse(subject_event_df[\"TimeStamp\"].iloc[0])\n total_seconds = (parse(subject_event_df[\"TimeStamp\"].iloc[-1]) - start_time).total_seconds()\n sequence_indices = generate_time_series_indices(subject_event_df[\"TimeStamp\"], total_seconds=total_seconds, interval=second_interval)\n for i in range(1, len(sequence_indices)):\n block = subject_event_df.iloc[sequence_indices[i-1]:sequence_indices[i], :]\n all_blocks.append(block.copy())\n elif 0 <= parsing_method < 1:\n portion = min(0.5, parsing_method)\n indices_per_frame = (subject_event_df.shape[0] * portion)\n frames = int(1.0 / portion)\n for frame_num in range(frames):\n start_index = int(frame_num * indices_per_frame)\n end_index = int((frame_num+1) * indices_per_frame)\n block = subject_event_df.iloc[start_index:end_index, :]\n all_blocks.append(block.copy())\n else:\n print(\"Undefined parsing method: %s\" % parsing_method)\n return all_blocks",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def speed_map_segs_to_df(seg_list):\n # Put segment list into DataFrame\n df = pd.DataFrame(seg_list)\n\n # Seperate the start and end coords from pathLocs\n temp_df = pd.DataFrame(df['pathLocs'].to_list())\n temp_df.rename({0: 'start', 1: 'end'}, axis=1, inplace=True)\n\n # Put start coords into a dataframe\n start_coords = pd.DataFrame(temp_df['start'].to_list())\n start_coords.columns = start_coords.columns.str.capitalize()\n start_coords = start_coords.add_prefix('start')\n\n # Put end coords into a dataframe\n end_coords = pd.DataFrame(temp_df['end'].to_list())\n end_coords.columns = end_coords.columns.str.capitalize()\n end_coords = end_coords.add_prefix('end')\n\n # Drop columns we don't care about\n df = df.drop(['pathLocs', 'toStop', 'fromStop'], axis=1, errors='ignore')\n\n # Combine the dataframes side by side\n return pd.concat([start_coords, end_coords, df], axis=1)",
"def cumulative_event_blocks(event_filename, activity_filename, interval, add_columns, keep_list):\n event_df = pd.read_csv(event_filename)\n act_df = pd.read_csv(activity_filename)\n omit_list = [e for e in list(event_df[\"TestSubject\"].unique()) if \"1301\" not in e] # removing partial agency students\n subject_blocks = create_subject_event_blocks(event_df, omit_list=omit_list)\n subject_counts = pd.Series()\n supervised_examples = pd.DataFrame()\n\n for subject in subject_blocks.keys():\n if subject in keep_list:\n add_student_data = get_student_data(act_df, subject, add_list=add_columns)\n start_time = parse(subject_blocks[subject].loc[:, \"TimeStamp\"].iloc[0])\n subject_blocks[subject].loc[:, \"GameTime\"] = subject_blocks[subject].loc[:, \"TimeStamp\"].apply(lambda cell: (parse(cell) - start_time).total_seconds())\n parsed_subject_block = create_rows(subject_blocks[subject], parsing_method=interval)\n parsed_subject_block.index = list(range(parsed_subject_block.shape[0]))\n subject_counts[subject] = parsed_subject_block.shape[0]\n for act_feat in list(add_student_data.index):\n parsed_subject_block[act_feat] = pd.Series([add_student_data[act_feat]] * parsed_subject_block.shape[0])\n parsed_subject_block[\"IntervalID\"] = pd.Series([e+1 for e in range(parsed_subject_block.shape[0])])\n supervised_examples = pd.concat([supervised_examples, parsed_subject_block], axis=0)\n supervised_examples.index = list(range(supervised_examples.shape[0]))\n return supervised_examples, subject_counts",
"def start_dataframe(block_zones, block_guid, block_name, block_pt, block_layer):\n # make an empty list to populate with block objects\n blocks = []\n # connect gh wires to python block classes\n for i, z in enumerate(block_zones):\n b = Block()\n b.guid = block_guid[i]\n b.name = block_name[i]\n b.point = block_pt[i]\n b.layer = block_layer[i]\n b.phase = find_phase(b.layer)\n b.zones = z\n b.x = b.point[0]\n b.y = b.point[1]\n b.z = b.point[2]\n b.floor = find_floor(str(z))\n b.elevation = find_elevation(str(z))\n b.swing_drop = get_drop(str(z))\n b.priority = find_priority(str(z))\n b.access = get_access(str(z))\n b.survey = get_survey(str(z))\n\n # populate list of blocks\n blocks.append(b)\n\n # turn the above list to a pandas dataframe\n df = pd.DataFrame([vars(f) for f in blocks])\n\n # append a columns to df to track drop sort order\n df[\"drop_sort\"] = df.swing_drop.apply(lambda x: nat_sort(x, df))\n\n # further specify dataframe sort order\n df = df.sort_values([\"access\", \"survey\", \"elevation\", \"floor\", \"drop_sort\", \"y\", \"x\"],\n ascending=[False, True, True, False, True, False, True])\n df.reset_index(inplace=True, drop=True)\n df[\"new_order\"] = df.index + 1\n\n # append columns\n df[\"instance\"] = df.groupby(\"name\").cumcount() + 1\n df[\"sample\"] = 0\n df.loc[df.instance == 1, \"sample\"] = 1\n\n # create the survey name/bumper sticker name\n df[\"survey_name\"] = df.swing_drop + \"-\" + df.floor.map(str) + \"-\" + df.name + \"[\" + df.new_order.map(str) + \"]\"\n # df[\"survey_name\"] = f\"{df.swing_drop}-{df.floor.map(str)-{df.name}[{df.new_order.map(str)}]}\"\n return df",
"def filterEvents(intervals_dates,list_infected,distance):\n d=distance\n list_gpsevents=[]\n for z in range(len(intervals_dates)-1):\n print(\"Interval: \",intervals_dates[z], \"y\", intervals_dates[z+1])\n infected,uninfected=getTrazaTimestamp(intervals_dates[z],intervals_dates[z+1],GPSrecords,list_infected)\n events_gps = nearest_neighbor(infected, uninfected, d)\n events_gps = events_gps.drop(['geometry','closest_stop_geom'], axis=1)\n print(len(events_gps))\n if(len(events_gps)!=0):\n list_gpsevents.append(events_gps.reset_index(drop=True))\n else:\n events_gps=pd.DataFrame()\n list_gpsevents.append(events_gps)\n #GPSevents=pd.concat(list_gpsevents).reset_index(drop=True)\n #return GPSevents\n return list_gpsevents",
"def create_lineups(self):\n\t\tnum_skaters = len(self.skaters_df.index)\n\t\tnum_goalies = len(self.goalies_df.index)\n\t\tteams = list(set(self.skaters_df['team'].values))\n\t\tnum_teams = len(teams)\n\n\t\t#Create player position indicators so you know which position they are playing\n\t\tpositions = {'C':[], 'W':[], 'D':[]}\n\t\tfor pos in self.skaters_df.loc[:, 'pos']:\n\t\t\tfor key in positions:\n\t\t\t\tpositions[key].append(1 if key in pos else 0)\n\t\t\n\t\t#Create player line indicators so you know which line by their team they are on\n\t\tteam_lines = []\n\t\tfor i, line in enumerate(self.skaters_df.loc[:, 'line']):\n\t\t\tplayer_line = []\n\t\t\tif int(line) == 1:\n\t\t\t\tplayer_line.extend((1, 0, 0, 0))\n\t\t\telif int(line) == 2:\n\t\t\t\tplayer_line.extend((0, 1, 0, 0))\n\t\t\telif int(line) == 3:\n\t\t\t\tplayer_line.extend((0, 0, 1, 0))\n\t\t\telif int(line) == 4:\n\t\t\t\tplayer_line.extend((0, 0, 0, 1))\n\t\t\telse:\n\t\t\t\tplayer_line.extend((0, 0, 0, 0))\n\t\t\tplayer_lines = []\n\t\t\tfor team in teams:\n\t\t\t\tif self.skaters_df.loc[i, 'team'] == team:\n\t\t\t\t\tplayer_lines.extend(player_line)\n\t\t\t\telse:\n\t\t\t\t\tplayer_lines.extend((0, 0, 0, 0))\n\t\t\tteam_lines.append(player_lines)\n\t\tnum_lines = len(team_lines[0])\n\t\t\n\t\t#NOTE: Maybe add PP line indicators\n\n\t\t#Create player team indicators so you know which team they are on\n\t\tskaters_teams = []\n\t\tfor player_team in self.skaters_df.loc[:, 'team']:\n\t\t\tskaters_teams.append([1 if player_team == team else 0 for team in teams])\n\n\t\t#Create goalie opponent indicators so you know who the goalie is opposing\n\t\tgoalies_opponents = []\n\t\tfor player_opp in self.skaters_df.loc[:, 'opp']:\n\t\t\tgoalies_opponents.append([1 if player_opp == team else 0 for team in self.goalies_df.loc[:, 'team']])\n\n\t\t#Generate the lineups\n\t\tlineups = []\n\t\tfor _ in tqdm(range(1, self.num_lineups+1)):\n\t\t\tlineup = self.type_1(lineups, positions, team_lines, skaters_teams, goalies_opponents, num_skaters, num_goalies, num_teams, num_lines)\n\t\t\tif lineup:\n\t\t\t\tlineups.append(lineup)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t#Fill the lineups with player names\n\t\tself.fill_lineups(lineups, positions, num_skaters, num_goalies)",
"def construct_game_dataframe(game):\n\n # points as index\n # each stat as a column\n # columns: passes, possessions, turnovers, blocks, starting_fence, we_scored\n # from read_frame: starting_fence, ourscore_EOP, theirscore_EOP\n # to be calculated: passes, possessions, turnovers, blocks, we_scored\n\n from .models import Point, Possession\n from django_pandas.io import read_frame\n\n logger = logging.getLogger(__name__)\n\n game_points = game.points.all().order_by('point_ID')\n\n # generate initial dataframe with some columns\n df = read_frame(game_points,\n fieldnames=['startingfence', 'ourscore_EOP', 'theirscore_EOP'],\n index_col='point_ID')\n\n # assign(colname=data) ; data must be a series or series-like object\n\n # bool - did we score this point\n we_scored = pd.Series([bool_we_scored(point) for point in game_points],\n index=df.index)\n\n # goals we scored\n goals = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=GOALS).count() for point in game_points],\n index=df.index)\n\n # goals they scored\n opp_goals = pd.Series([get_events_by_point(point, opposition_events=2).filter(action__in=GOALS).count() for point in game_points],\n index=df.index)\n\n # callahans we threw\n callahans_thrown = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=CALLAHANS).count() for point in game_points],\n index=df.index)\n\n # callahans we caught\n opp_callahans_thrown = pd.Series([get_events_by_point(point, opposition_events=2).filter(action__in=CALLAHANS).count() for point in game_points],\n index=df.index)\n\n # number of passes we made\n passes = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=PASSES).count() for point in game_points],\n index=df.index)\n\n # number of TOTAL possessions\n total_possessions = pd.Series([point.possessions.no_cessation().all().count() for point in game_points],\n index=df.index)\n\n # number of turnovers we had\n turnovers = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=TURNOVERS).count() for point in game_points],\n index=df.index)\n\n # blocks we had\n blocks = pd.Series([get_events_by_point(point, opposition_events=0).filter(action__in=BLOCKS).count() for point in game_points],\n index=df.index)\n\n # turnovers opponent had\n opp_turns = pd.Series([get_events_by_point(point, opposition_events=2).filter(action__in=TURNOVERS).count() for point in game_points],\n index=df.index)\n\n # our possessions end on:\n # our goals + their blocks + our turnovers + their callahans scored (ours thrown)\n # in our stats, their blocks are not recorded only our turnovers\n possessions = goals + turnovers + callahans_thrown\n\n # their possessions end on:\n # their goals + our blocks + their turnovers + our callahans scored\n opp_possessions = opp_goals + blocks + opp_turns + opp_callahans_thrown\n\n # check our possession calculations\n pos_testframe = pd.concat([total_possessions - (possessions + opp_possessions)], axis=1)\n zeroframe = pd.DataFrame(0, index=pos_testframe.index, columns=pos_testframe.columns)\n if not zeroframe.equals(pos_testframe):\n logger.critical('error in possession calculation, below should be all zeros')\n logger.critical(pos_testframe)\n\n df = df.assign(we_scored=we_scored)\n df = df.assign(goals=goals)\n df = df.assign(passes=passes)\n df = df.assign(turnovers=turnovers)\n df = df.assign(blocks=blocks)\n df = df.assign(possessions=possessions)\n df = df.assign(callahans=opp_callahans_thrown)\n\n df = df.assign(opp_goals=opp_goals)\n df = 
df.assign(opp_turns=opp_turns)\n df = df.assign(opp_poss=opp_possessions)\n df = df.assign(opp_callahans=callahans_thrown)\n\n return df",
"def trial_events_from_cili(df, trial_events):\r\n # select rows\r\n trial_event_times = df[df.label.isin(trial_events)].reset_index()\r\n # create a trial id column\r\n trial_event_times['trial_id'] = pd.to_numeric(trial_event_times.content, errors='raise')\r\n # reshape dataframe\r\n trial_event_times = trial_event_times.pivot_table(index='trial_id',columns='label', values='index')\r\n \r\n # sort columns\r\n trial_event_times = trial_event_times.reindex(columns = trial_events)\r\n \r\n return trial_event_times",
"def samples_timesteps_features(dataframe, columns, start_date, timesteps=72, \n steps_ahead=24, window_days=100, train_percent=80.):\n \n def overlap_windows(dataset, timesteps, steps_ahead):\n \"\"\" Create overlaping window of time-series data\n \n Parameters\n ----------\n dataset: pd.DataFrame\n time-series pandas dataset\n timesteps: int\n number of time steps from the past for creating output arrays\n steps_ahead: int\n number of time steps into the future for making predictions\n \n Returns\n -------\n X, y: np.array\n input and output 3-d arrays of overlaping time windows\n \"\"\"\n X = []; y = []\n \n start = 0\n for i in range(len(dataset)):\n # Define the end of the input sequence\n in_end = start + timesteps\n out_end = in_end + steps_ahead\n # Ensure that there is enough data\n if out_end <= len(dataset):\n X.append(dataset[start:in_end, :])\n # First column holds load values\n y.append(dataset[in_end:out_end, 0])\n # Move along one time step\n start += 1\n \n # Convert list to np.array\n X = np.asarray(X)\n y = np.asarray(y)\n \n return X, y\n\n\n data = dataframe.copy()\n \n if window_days*24 > data.values.shape[0]:\n raise ValueError('Variable window_days has too large value: {}*24h = {} > {}, which is more than there is data!'.format(window_days, window_days*24, \n data.values.shape[0]))\n \n # Training period\n # ---------------\n train_percent = train_percent/100.\n st = pd.to_datetime(start_date) # start date\n et = st + dt.timedelta(days=int(train_percent*window_days)) # end date\n train = data.loc[st:et].values\n \n # Standardize and transform training data set\n mean_std_values = {}\n for i, column in enumerate(columns):\n # Calculate mean and standard deviation only\n # from the training data set values\n mu = train[:,i].mean() # axis=0\n sd = train[:,i].std()\n mean_std_values[column] = (mu, sd)\n # Standardize training data\n train[:,i] = (train[:,i] - mu)/sd\n \n # Create overlapping windows with training data\n X_train, y_train = overlap_windows(train, timesteps, steps_ahead)\n \n # Testing / Validation period\n # ---------------------------\n sv = et \n ev = sv + dt.timedelta(days=int((1-train_percent)*window_days)+1)\n test = data.loc[sv:ev].values\n \n # Transform testing/validation data set\n for i, column in enumerate(columns):\n # Use mean and standard deviation from the\n # training data set\n mu = mean_std_values[column][0]\n sd = mean_std_values[column][1]\n # Standardize test data\n test[:,i] = (test[:,i] - mu)/sd\n \n # Create overlaping windows with test data\n X_test, y_test = overlap_windows(test, timesteps, steps_ahead)\n \n return mean_std_values, X_train, y_train, X_test, y_test",
"def add_data(self, df, window=\"none\", nperseg=None, noverlap=None):\n\n # Column names - omit column 1 (timestamp/time)\n channels = df.columns[1:].astype(str)\n\n # Calculate amplitude spectrum\n # amps = [np.abs(np.fft.rfft(df[channel]) ** 2) for channel in channels]\n #\n # # Add 2d arrays to dictionary\n # for i, channel in enumerate(channels):\n # if channel not in self.spectrograms:\n # self.spectrograms[channel] = amps[i]\n # else:\n # self.spectrograms[channel] = np.column_stack([self.spectrograms[channel], amps[i]])\n\n if isinstance(df.iloc[0, 0], pd.Timestamp):\n fs = 1 / ((df.iloc[1, 0] - df.iloc[0, 0]).total_seconds())\n else:\n fs = 1 / (df.iloc[1, 0] - df.iloc[0, 0])\n\n window = window.lower()\n if window == \"none\":\n window = \"boxcar\"\n\n # Calculate number of segment overlap points - set nperseg to length of sample if not provided\n n = len(df)\n if nperseg:\n noverlap = nperseg * noverlap // 100\n else:\n nperseg = n\n\n if nperseg <= n:\n # Calculate PSD using Welch method\n try:\n self.freq, psd = calc_psd(\n data=df.iloc[:, 1:].T.values,\n fs=fs,\n window=window,\n nperseg=nperseg,\n noverlap=noverlap,\n )\n except Exception:\n raise Exception\n # Sample is too short, can't compute PSD\n else:\n # Just in case the first file happens to be too short,\n # calculate the expected number of zero points to create\n if self.expected_length == 0:\n if n % 2 == 0:\n self.expected_length = nperseg // 2 + 1\n else:\n self.expected_length = int(nperseg / 2 + 1)\n\n # Create a dummy row of zeros for the no PSD event\n dummy_row = np.zeros(self.expected_length)\n\n # Add 2d arrays to dictionary\n for i, channel in enumerate(channels):\n if channel not in self.spectrograms:\n self.spectrograms[channel] = psd[i]\n self.expected_length = len(self.freq)\n else:\n try:\n self.spectrograms[channel] = np.row_stack([self.spectrograms[channel], psd[i]])\n except:\n self.spectrograms[channel] = np.row_stack(\n [self.spectrograms[channel], dummy_row]\n )\n msg = (\n f\"Error during spectrograms processing:\\n\\n\"\n f\"Length of sample is {len(df)} which is less than the \"\n f\"expected length of {nperseg} used per PSD ensemble. \"\n f\"Set a spectral sample length that does not result in such a \"\n f\"short sample data length when processing the tail of a file.\"\n )\n print(f\"Spectral screening warning: {msg}\")\n # TODO: Compile warnings to control object to report to GUI at the end and write to Screening Report\n # raise ValueError(msg)",
"def sample_series(self, series, append_frame=None):\n\n columns, values = self.get_readings(series)\n\n dataframe = DataFrame(values, columns=columns)\n dataframe = self.format_index(dataframe, self.ENERGY_DB_INDEX)\n\n # https://pandas.pydata.org/pandas-docs/stable/merging.html\n if append_frame is not None:\n # dataframe = pandas.concat([dataframe, input_frame], axis=1, join='inner', join_axes=[input_frame.index])\n dataframe = pandas.merge(append_frame, dataframe, on=['time', 'time'])\n # print(dataframe)\n\n return dataframe",
"def condense_blocks_to_aoi_series(subject_blocks, cumulative, difference, add_data):\n cumulative_series = initialize_feature_series(add_data)\n sum_keys = [e for e in list(cumulative_series.index) if e not in list(add_data.index)]\n examples_dataframe = pd.DataFrame()\n labels = []\n aoi_keys = list(DURATION_TARGET_DICT.keys())\n for block in subject_blocks:\n if block.shape[0] > 0:\n # Condense the block into a series of (summation) features\n feature_series = initialize_feature_series(add_data) # Depends on globally defined variables\n feature_series.loc[\"Game Time\"] = (parse(block[\"TimeStamp\"].iloc[-1]) - parse(block[\"TimeStamp\"].iloc[0])).total_seconds()\n for i,row in block.iterrows():\n if row[\"Event\"] == \"AOI\":\n aoi_key = _convert_target_category(row[\"Target\"])\n if aoi_key != \"UNIDENTIFIED AOI_TARGET\":\n feature_series[aoi_key] += row[\"Duration\"]\n feature_series[\"Fixations/Sec\"] += 1\n cumulative_series.loc[sum_keys] += feature_series.loc[sum_keys]\n cumulative_series.loc[\"Game Time\"] += feature_series.loc[\"Game Time\"]\n\n # Get the game score label for the block with either difference or last\n try:\n if difference:\n label = block.loc[:, \"CumulativeGameScore\"].iloc[-1] - block.loc[:, \"CumulativeGameScore\"].iloc[0]\n else:\n label = block.loc[:, \"CumulativeGameScore\"].iloc[-1]\n except IndexError:\n print(block)\n label = 0.0\n labels.append(label)\n\n # Calculate proportions and append to dataframe using either cumulative or block specific features\n fixation_sum = np.sum(feature_series.loc[aoi_keys])\n if cumulative and fixation_sum > 0:\n proportion_cumulative_series = cumulative_series.copy()\n proportion_cumulative_series.loc[aoi_keys] = cumulative_series.loc[aoi_keys] / np.sum(cumulative_series.loc[aoi_keys])\n proportion_cumulative_series.loc[\"Fixations/Sec\"] = proportion_cumulative_series.loc[\"Fixations/Sec\"] / proportion_cumulative_series.loc[\"Game Time\"]\n examples_dataframe = pd.concat([examples_dataframe, proportion_cumulative_series.copy()], axis=1)\n elif fixation_sum > 0:\n proportion_feature_series = feature_series.copy()\n proportion_feature_series.loc[aoi_keys] = feature_series.loc[aoi_keys] / np.sum(feature_series.loc[aoi_keys])\n proportion_feature_series.loc[\"Fixations/Sec\"] = proportion_feature_series.loc[\"Fixations/Sec\"] / proportion_feature_series.loc[\"Game Time\"]\n examples_dataframe = pd.concat([examples_dataframe, proportion_feature_series.copy()], axis=1)\n\n # Adding labels as a series in the dataframe\n examples_dataframe = examples_dataframe.T\n examples_dataframe.index = list(range(examples_dataframe.shape[0]))\n examples_dataframe[\"Label\"] = pd.Series(labels)\n return examples_dataframe",
"def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts - off at end of slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(ml_screen_refs.shape)\n print(type(ml_screen_refs))\n ml_screen_refs = ml_screen_refs.reshape(-1, 2)\n ml_world_refs = ml_world_refs.reshape(-1, 2)\n print(ml_th)\n\n 
blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(fp_screen_refs.shape)\n print(type(fp_screen_refs))\n fp_screen_refs = fp_screen_refs.reshape(-1, 2)\n fp_world_refs = fp_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[idx]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n #master_steergaze.to_feather(datafilepath)",
"def startrek_starships_specs():\n pdf = pd.DataFrame({\n 'uid': [\n 'NCC-1701',\n 'NCC-74656',\n 'NCC-1031',\n 'NCC-1764'\n ],\n 'warp': [\n 9.2,\n 9.975,\n 9.9,\n 9.2\n ]\n })\n return pdf",
"def create_window(dataframe):\n\n window_dict = {}\n for i, val in enumerate(dataframe['line']):\n e1 = re.findall('<e1>(.*?)</e2>', val)\n before = re.findall('\\w* ?<e1>', val)\n after = re.findall('</e2> ?\\w*', val)\n bef = before[0].replace('<e1>', '')\n aft = after[0].replace('</e2>', '')\n s = e1[0].replace('</e1>', '').replace('<e2>', '')\n window_dict[i] = bef+s+aft\n\n\n window_dataframe = create_dataframe(window_dict, ['window'])\n\n dataframe['line window'] = window_dataframe['window']\n return dataframe",
"def to_epochs(event_data, resolution=3, val_col=\"tag\"):\n if val_col != \"tag\":\n logger.warning(\"Non-tag val_col should generally not be used. Epoch conversions is last step.\"\n \"Are you sure you wish to continue?\")\n # First indentify allocations. Windows should be split and\n # merged using the individual classes. Long-form data\n # holder with contain index start end dur tutples\n holder = [[]]\n # i = 1\n for num, (index, row) in enumerate(event_data.iterrows()):\n if num % 100 == 0:\n print(f\"Handling row {num}\")\n s, e = row.start, row.end\n while s < e:\n # print(s, e)\n # i += 1\n # if i > 1000:\n # break\n cur = holder[-1]\n # Determine how much time is in the current row\n if not cur:\n cur_time = 0\n else:\n cur_time = sum(i[-1] for i in cur)\n # Amount of time that needs to be added\n to_find = resolution - cur_time\n # print(s, e, cur_time, to_find)\n # If the instance is full,\n if to_find <= 10e-8:\n # print(\"APPEND\")\n holder.append([])\n continue\n\n # Add that much time to the current holder from the current event if possible\n overlap = min(e, s + timedelta(seconds=to_find))\n # print(overlap)\n cur.append((index, s, overlap, (overlap - s).total_seconds()))\n s = overlap\n # Then reconstruct a dataframe from such (Breakdown-> compile)\n print(f\"Reconstructing holder of length {len(holder)}\")\n rows = []\n for interval_num, intervals in enumerate(holder):\n # window here is thus the start to end\n window = (intervals[0][1], intervals[-1][2])\n # Calculate the highest tag for the window\n activpal_event_dist = defaultdict(int)\n tag_dist = defaultdict(int)\n # Also the total steps for the epoch in the meantime\n epoch_steps = 0\n for index, start, end, duration in intervals:\n row = event_data.loc[index, :]\n activpal_event_dist[row[\"activpal_event\"]] += duration\n tag_dist[row[val_col]] += duration\n # Partial steps\n # print(row)\n epoch_steps += row[\"steps\"] * (end-start).total_seconds() / row[\"duration\"]\n\n event = max(activpal_event_dist.items(), key=lambda x: x[1])[0]\n tag = max(tag_dist.items(), key=lambda x: x[1])[0]\n logger.debug(f\"tag_dist{tag_dist}, selected_tag={tag}\")\n epoch_start = intervals[0][1]\n epoch_end = intervals[-1][2]\n epoch_duration = (epoch_end - epoch_start).total_seconds()\n data = {\n val_col: tag,\n \"activpal_event\": event,\n \"steps\": epoch_steps,\n \"cadence\": epoch_steps / epoch_duration * 60,\n \"start\": epoch_start,\n \"end\": epoch_end,\n \"duration\": epoch_duration,\n }\n rows.append(data)\n\n return pd.DataFrame(rows)",
"def events(self, game_id: int) -> DataFrame[Any]:",
"def onlinedata(star):\n data = None\n if not isinstance(star, list):\n star = [star]\n for s in star:\n # Stacking the results one after each in a numpy array.\n s = correctname(s)\n print(('Star : {0}'.format(s)))\n d = query(s)\n if data is None:\n data = np.array(d)\n else:\n data = np.hstack((data, d))\n df = pd.DataFrame(data)\n df = correctcoordinates(df)\n return df",
"def create_annotation(raw):\n annotation_pandas = pd.DataFrame(columns=[\"onset\", \"duration\", \"description\"])\n for idx, event in enumerate(raw.annotations):\n annotation_pandas.loc[idx] = [\n event[\"onset\"],\n event[\"duration\"],\n event[\"description\"],\n ]\n return annotation_pandas",
"def cluster_stats(df):\r\n pattern = list(df.iloc[0])[-2]\r\n n_days = len(pattern)\r\n \r\n cls = [(day + 1, hour) for day in range(n_days) for hour in range(24)]\r\n tp = pd.DataFrame(columns = cls)\r\n tp.columns = pd.MultiIndex.from_tuples(tp.columns, names = ['day', 'hour'])\r\n tp.index.name = 'cluster'\r\n \r\n for (key, value) in df.groupby('cluster'):\r\n d, total = np.zeros((n_days, 24)), len(value)\r\n for arr in value.iloc[:, :-2].values:\r\n for i, ax in enumerate(np.split(arr, n_days)):\r\n ax = np.array([[0, 1][x > 0] for x in ax])\r\n d[i] += ax\r\n d /= total\r\n s = pd.Series({(x + 1, y) : d[x][y] for x in range(n_days) for y in range(24)})\r\n s.name = key\r\n tp = tp.append(s)\r\n \r\n tp['pattern'] = [pattern] * len(tp)\r\n return tp",
"def start_pipeline(df):\n new_df = df.copy()\n new_df = new_df[[\"Title\", \"Genre\", \"Director\", \"Actors\", \"Plot\"]]\n return new_df",
"def construct_drawr_result_df(input_df, start_index, end_index, map_back, run_parameters):\n len_set_names = input_df.shape[1] - 1\n smooth_base = input_df.values[start_index:end_index, -1]\n smooth_base = smooth_base[:, np.newaxis]\n diff_smooth_spreadsheet = input_df.values[start_index:end_index, :-1] - smooth_base\n diff_smooth_spreadsheet /= np.abs(np.max(diff_smooth_spreadsheet, axis=0))\n\n diff_val = np.ravel(diff_smooth_spreadsheet)\n orig_val = np.ravel(input_df.values[start_index:end_index, :-1])\n set_name = np.array(list(input_df.columns.values[:-1]) * (end_index - start_index))\n input_gene_name = input_df.index.values[start_index:end_index]\n\n if map_back is True:\n map_df = pd.read_csv(run_parameters[\"gene_names_map\"], index_col=0, header=None, sep='\\t')\n input_gene_name = map_df.loc[input_gene_name].values\n ret_col = ['user_gene_set', 'gene_node_id', 'difference_score', 'query_score', 'baseline_score']\n else:\n ret_col = ['user_gene_set', 'property_gene_set', 'difference_score', 'query_score', 'baseline_score']\n new_gene_name = np.repeat(input_gene_name, len_set_names)\n base_val = np.repeat(input_df['base'].values[start_index:end_index], len_set_names)\n \n result_val = np.column_stack((set_name, new_gene_name, diff_val, orig_val, base_val))\n result_df = pd.DataFrame(result_val, columns=ret_col).sort_values(\"difference_score\", ascending=0)\n result_df = result_df[result_df['difference_score'] > 0.5]\n return result_df",
"def getAreas(df):\n\n df_plu = df[df[\"strand\"]==\"+\"]\n df_min = df[df[\"strand\"]==\"-\"]\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_min_LA = ThreePrimeArea(df_min)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_plu = pd.merge(df_plu_FA,df_plu_LA,on=\"name\")\n df_min = pd.merge(df_min_FA,df_min_LA,on=\"name\")\n df = pd.concat([df_plu,df_min])\n return df",
"def preprocess(df_kek):\n df = pd.DataFrame([])\n df['ETA'] = df_kek['ETA']\n df['EDA'] = df_kek['EDA']\n df['ESP'] = df['EDA'] / df['ETA']\n if 'p200' in df_kek.columns:\n df['p200'] = df_kek['p200']\n df['p500'] = df_kek['p500']\n df['p1000'] = df_kek['p1000']\n df['route_num'] = df_kek['route'].apply(lambda x: 0 if pd.isna(x) else len(polyline.decode(x)))\n df = pd.concat([df, add_time_features(set_time_by_timezone(df_kek))], axis=1)\n df = pd.concat([df, add_distance_features(df_kek)], axis=1)\n\n return df",
"def get_pre_df(temp_pre_df):\n \n event_time_max = temp_pre_df['event_time'].max()\n cat_dfs = []\n for num in np.arange(0,(1080/2)+1,30)[1:]:\n # making <= null i.e keeping >\n temp_pre_df.loc[temp_pre_df['event_time'] <= int(num), 'event_time'] = np.nan\n for col in ['event_name', 'specialty', 'plan_type']:\n cat_df = temp_pre_df.groupby([\"id\", col]).agg({\"event_time\": 'count'}).unstack(level=col)\n cat_df = cat_df/(event_time_max-num)\n cat_df.columns = ['__'.join(['normChange', col, name, str(int(num))]) for name in cat_df.columns.droplevel()]\n cat_dfs.append(cat_df)\n pre_df = pd.concat(cat_dfs, axis = 1) \n return pre_df.fillna(0)"
] |
[
"0.57482284",
"0.56927854",
"0.5451736",
"0.54411083",
"0.539259",
"0.53024316",
"0.5288239",
"0.5273695",
"0.5271883",
"0.52564204",
"0.5252375",
"0.5251818",
"0.52454084",
"0.5172984",
"0.5157079",
"0.51437426",
"0.50912386",
"0.50897837",
"0.5063519",
"0.50620985",
"0.5054893",
"0.50488377",
"0.50317687",
"0.5031266",
"0.50309384",
"0.50302356",
"0.50278497",
"0.49831375",
"0.49764442",
"0.49735487"
] |
0.6459534
|
0
|
Purpose: The purpose of this function is to take the positions (whether it is the starting or ending position, as specified by the user) of the events that comprise the sequences that belong to the cluster of interest and conduct a 2-dimensional binning so as to determine the spatial distribution of events in said cluster.
|
def cluster_positions_binning(
cluster_positions_df: pd.DataFrame, beginning_points=True) -> tuple:
to_return = None
# First, validate the input data
ipv.parameter_type_validator(expected_type=pd.DataFrame,
parameter_var=cluster_positions_df)
# Next, define the variables that we will need for the rest of the
# function.
field_bins = np.arange(0, 101, 1)
x_col = "starting_x" if beginning_points else "ending_x"
x_vals = cluster_positions_df[x_col].to_numpy()
y_col = "starting_y" if beginning_points else "ending_y"
y_vals = cluster_positions_df[y_col].to_numpy()
# Now we are ready to actually perform the 2D binning.
field_bin_counts, xbins, ybins = np.histogram2d(
x=x_vals,
y=y_vals,
bins=field_bins,
range=[[0, 100], [0, 100]],
density=True
)
# Finally, validate and return the result
assert all([isinstance(field_bin_counts, np.ndarray),
isinstance(xbins, np.ndarray),
isinstance(ybins, np.ndarray)])
assert xbins.shape == ybins.shape
assert field_bin_counts.shape[0] == xbins.size - 1
assert field_bin_counts.shape[0] == ybins.size - 1
to_return = (field_bin_counts, xbins, ybins)
return to_return
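
A short, self-contained sketch of the 2-D binning step on synthetic pitch coordinates follows (both axes assumed to span 0-100, matching the bin edges used above); the random data is illustrative only.

import numpy as np

rng = np.random.default_rng(0)
x_vals = rng.uniform(0, 100, size=500)   # e.g. starting_x of the cluster's events
y_vals = rng.uniform(0, 100, size=500)   # e.g. starting_y of the cluster's events

field_bins = np.arange(0, 101, 1)        # 100 one-unit bins per axis
field_bin_counts, xbins, ybins = np.histogram2d(
    x=x_vals,
    y=y_vals,
    bins=field_bins,
    range=[[0, 100], [0, 100]],
    density=True,
)
# field_bin_counts is a (100, 100) density grid; xbins and ybins hold the 101 bin edges.
print(field_bin_counts.shape, xbins.size, ybins.size)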
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n sorted_elements = np.sort(elements)\n\n bin_card = int(floor(elements.shape[0]/n_bins))\n\n bin_boundaries = [segment[0]]\n\n for i in range(1, n_bins):\n boundary_l = sorted_elements[i*bin_card - 1]\n boundary_r = sorted_elements[i * bin_card]\n boundary = (boundary_l+boundary_r)/2\n\n bin_boundaries.append(boundary)\n\n bin_boundaries.append(segment[1])\n\n return np.array(bin_boundaries)",
"def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", 
\":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None",
"def draw_bBox_from_cluster(cluster, ev_of_interest, all_events, pos_of_interest,\n prob_filter=0, min_dims=None, use_cluster_prob=False):\n cluster_img = build_img_from_clusters(ev_of_interest, cluster, pos_of_interest)\n prob_img = build_prob_img(all_events, pos_of_interest)\n full_img = build_3channels_img(all_events, pos_of_interest)\n\n probs = np.unique(cluster_img)\n for p in probs:\n if p == 0: continue\n h_indexes, w_indexes = np.where(cluster_img == p)\n start = w_indexes.min(), h_indexes.min()\n end = w_indexes.max(), h_indexes.max()\n\n box_h = end[1] - start[1]\n box_w = end[0] - start[0]\n h, w = min_dims if min_dims and len(min_dims) == 2 else (box_h, box_w)\n\n if use_cluster_prob:\n true_prob = p\n else:\n # instead of using prob in cluster (that ony consider elements of the cluster),\n # use the full image in the box slice to compute the probability inside the box\n box_slice = prob_img[start[1]:end[1], start[0]:end[0]]\n true_prob = box_slice.sum() / (box_slice > 0).sum()\n\n if true_prob >= prob_filter and h <= box_h and w <= box_w:\n draw_bBox(full_img, start, end, \"\", true_prob * 100, bbox_color=(255, 255, 255))\n # else:\n # full_img[h_indexes, w_indexes] = 0\n return full_img",
"def bindEvents(fusionEvents,divisionEvents, buff):\n #1/Finding correspondances\n fusion_indices = []\n fusion_labels = []\n fusion_labels_2 = [] # In label 2 says with which cell the disappearded one has\n for events,label in fusionEvents:\n index,osef,labels = events\n fusion_indices.append(index)\n fusion_labels.append(labels[0])\n fusion_labels_2.append(label)\n \n division_indices = []\n division_labels = []\n division_labels_2 = [] # Tells in which cell it is created\n for events,label in divisionEvents:\n index,osef,labels = events\n division_indices.append(index)\n division_labels.append(labels[0])\n division_labels_2.append(label)\n \n associated_division_list = []\n associated_indexes = []\n for i in fusion_indices:\n ind = next((x for x in division_indices if x>i),-1)\n if ind>0:\n associated_division_list.append((i,ind))\n corr_ind_fusion = fusion_indices.index(i)\n corr_ind_division = division_indices.index(ind)\n associated_indexes.append((corr_ind_fusion,corr_ind_division))\n\n \n #2/removing corresponding elements\n for j in range(len(associated_division_list)):\n index_fus, index_div = associated_indexes[j]\n if division_labels_2[index_div]==fusion_labels_2[index_fus]:\n #If they are not equal, means that the process of division/fusion \n #has not happened on the same blob and hence is not relevant\n big_label = division_labels_2[index_div]\n small_label = fusion_labels[index_fus]\n new_label = division_labels[index_div] #Replace after division this label by small label\n first_index = fusion_indices[index_fus]\n second_index = division_indices[index_div]\n \n for k in range(second_index-first_index):\n splitCell(buff,first_index+k,big_label,small_label)\n \n #Attribution of the new created cells to each one of the previous cells:\n #For this, we take the closest centroid\n #centroid of the big label\n last_image = buff[:,:,second_index]\n xs,ys = centroids2(last_image,[big_label,new_label])\n xs0,ys0 = centroids2(buff[:,:,second_index-1],[big_label,small_label])\n dist_regular = (xs0[0]-xs[0])**2 + (ys0[0]-ys[0])**2 + (xs0[1]-xs[1])**2 + (ys0[1]-ys[1])**2\n dist_inverted = (xs0[0]-xs[1])**2 + (ys0[0]-ys[1])**2 + (xs0[1]-xs[0])**2 + (ys0[1]-ys[0])**2\n \n if dist_regular>dist_inverted:\n print \"ca marche pas gael euh quoi?\"\n tmp_stack = buff[:,:,second_index:]\n tmp_stack[buff[:,:,second_index:]==big_label]=small_label\n tmp_stack[buff[:,:,second_index:]==new_label]=big_label\n buff[:,:,second_index:] = tmp_stack\n division_labels = [x if (x!=new_label and x!=big_label) else big_label if x==new_label else small_label for x in division_labels]\n fusion_labels = [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in fusion_labels]\n division_labels_2= [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in division_labels_2]\n fusion_labels_2= [x if x!=new_label and x!=big_label else big_label if x==new_label else small_label for x in fusion_labels_2]\n else:\n print \"ca marche bien gael\"\n \"\"\"Reassigning new labels\"\"\"\n tmp_stack = buff[:,:,second_index:]\n tmp_stack[tmp_stack==new_label] = small_label\n buff[:,:,second_index:] = tmp_stack\n division_labels = [x if x!=new_label else small_label for x in division_labels]\n fusion_labels = [x if x!=new_label else small_label for x in fusion_labels]\n division_labels_2 = [x if x!=new_label else small_label for x in division_labels_2]\n fusion_labels_2 = [x if x!=new_label else small_label for x in fusion_labels_2]",
"def create_bin_boundaries(config, epoch_df, data_type, obs_per_bin, verbose=False):\n \n edges = create_edges_set(config, epoch_df, data_type)\n \n boundaries = []\n for edge in edges:\n start, end, freq = edge\n bin_size = freq * obs_per_bin\n boundaries.append(np.arange(start, end, bin_size))\n boundaries = np.concatenate(boundaries)\n \n return boundaries",
"def bins_crossed(self, position_in_grid, endpoint_in_grid):\n bins_crossed = Set()\n if position_in_grid[0] == endpoint_in_grid[0]:\n # movement is in y direction\n for y_coord in self.get_range(\n position_in_grid[1],\n endpoint_in_grid[1],\n ):\n bins_crossed.add((position_in_grid[0], y_coord))\n elif position_in_grid[1] == endpoint_in_grid[1]:\n # movement is in x direction\n for x_coord in self.get_range(\n position_in_grid[0],\n endpoint_in_grid[0],\n ):\n bins_crossed.add((x_coord, position_in_grid[1]))\n\n else:\n raise ValueError(\"Diagonal movement\")\n\n return bins_crossed",
"def draw_bBox_from_clusters(cluster, ev_of_interest, all_events, pos_of_interest,\n prob_filter=0, min_dims=None, use_cluster_prob=False):\n # build region proposal from clusters\n rois = get_ROIs(cluster, [(e.x, e.y) for e in ev_of_interest], min_dims)\n # AFTER this, all list must have the same size as ROIS\n # and each position correspond to the rois coords in rois list\n\n # get the mean and sum probabilities for each class for every region\n rois_probs_sum, rois_probs_count = get_prob_inside_rois(rois, all_events)\n\n # classify the region using some strategy based on mean and sum values\n strategy = lambda x, y: x # this strategy selects the sum as the important metric\n strategy = lambda x, y: np.array(x, dtype=np.float)/y # this strategy selects the mean as the important metric\n classes, probs = classify_roi(rois_probs_sum, rois_probs_count, strategy)\n\n # filter rois\n filtered_rois = []\n for r, c, p in zip(rois, classes, probs):\n if p > prob_filter and (pos_of_interest is None or pos_of_interest == c):\n filtered_rois.append((r, c, p))\n\n # full_img = build_prob_img(all_events, pos_of_interest)\n full_img = build_3channels_img(all_events, pos_of_interest)\n\n for r, c, p in filtered_rois:\n x, y, w, h = r\n draw_bBox(full_img, (x, y), (x+w, y+h), str(c), p * 100, bbox_color=(255, 255, 255))\n return full_img",
"def grid_to_bins(grid, start_bin_val, end_bin_val):\n bin_centers = (grid[1:] + grid[:-1])/2.0\n bins = np.concatenate([[start_bin_val], bin_centers, [end_bin_val]])\n return bins",
"def edge_cluster_rectangle(cluster):\n x_starts = []\n x_stops = []\n y_starts = []\n y_stops = []\n for area in cluster:\n x, y, w, h = area\n x_starts.append(x)\n x_stops.append(x + w)\n y_starts.append(y)\n y_stops.append(y + h)\n return min(x_starts), min(y_starts), max(x_stops) - min(x_starts), max(y_stops) - min(y_starts)",
"def populate_grid(self):\n from cemc_cpp_code import hoshen_kopelman\n self.bins[:, :, :] = 0\n for atom in self.atoms:\n if atom.symbol in self.track_elements:\n n = self.get_bin(atom.index)\n self.bins[n[0], n[1], n[2]] += 1\n\n # Run the Hoshen-Kopelman algorithm to label the \n # bins into clusters\n self.clusters = hoshen_kopelman(self.bins)",
"def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False",
"def _get_end_points(self, segmented_instances, i, stats, idx):\n\n end_points=[]\n\n # find all points intersecting the bbox\n #(tl_x, th_y, width, height, area)\n label_num=i+1\n leftmost_x = stats['bbox'][i][cv2.CC_STAT_LEFT]\n topmost_y = stats['bbox'][i][cv2.CC_STAT_TOP]\n width = stats['bbox'][i][cv2.CC_STAT_WIDTH]\n height = stats['bbox'][i][cv2.CC_STAT_HEIGHT]\n bottom_most_y = topmost_y + height-1\n right_most_x = leftmost_x + width-1\n\n segmented_instances_copy=segmented_instances.copy()\n edge_points = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs[segmented_instances==label_num]=255\n cv2.rectangle(segmented_instances_copy,(leftmost_x, topmost_y), (right_most_x, bottom_most_y), 150, 2)\n\n #Get all points for the current stem segment\n label_points = np.argwhere(segmented_instances.copy()==label_num)\n\n # upper points from (tl_x,th_y) to (th_x, th_y) that instersect with the upper edge of the bouding box\n upper_points = [i for i in label_points if i[0]==topmost_y and i[1]>=leftmost_x and i[1]<=right_most_x]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(upper_points, edge_points, segs, 1)\n center_upper_pts = sorted(self._get_centeroids(x_pts))\n\n # left side points from (tl_x, tl_y) to (tl_x, th_y) that instersect with the left edge of the bouding box\n left_points = [i for i in label_points if i[1]==leftmost_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(left_points, edge_points, segs, 0)\n center_left_pts = sorted(self._get_centeroids(x_pts))\n\n #right side points form (th_x, tl_y) to (th_x, th_y) that instersect with the right edge of the bouding box\n right_points = [i for i in label_points if i[1]==right_most_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(right_points, edge_points, segs, 0)\n center_right_pts = sorted(self._get_centeroids(x_pts))\n\n #bottom points from (tl_x, tl_y) to (th_x,tl_y)\n bottom_points = [i for i in label_points if i[1]>=leftmost_x and i[1]<=right_most_x and i[0]==bottom_most_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(bottom_points, edge_points, segs, 1)\n center_bottom_pts = sorted(self._get_centeroids(x_pts))\n\n # If there are corner edges, get the centroid of that\n center_x_lb, center_y_lb, center_left_pts, center_bottom_pts = self._get_corner_centers(center_left_pts, \\\n center_bottom_pts, bottom_most_y, leftmost_x)\n if (center_x_lb != None) and (center_y_lb != None):\n end_points.append([center_x_lb, center_y_lb])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ur, center_y_ur, center_right_pts, center_upper_pts = self._get_corner_centers(center_right_pts, \\\n center_upper_pts, topmost_y, right_most_x)\n if (center_x_ur != None) and (center_y_ur != None):\n end_points.append([center_x_ur, center_y_ur])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n # If there are 
corner edges, get the centroid of that\n center_x_ul, center_y_ul, center_left_pts, center_upper_pts = self._get_corner_centers(center_left_pts, \\\n center_upper_pts, topmost_y, leftmost_x)\n if (center_x_ul != None) and (center_y_ul != None):\n end_points.append([center_x_ul, center_y_ul])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n\n # If there are corner edges, get the centroid of that\n center_x_br, center_y_br, center_right_pts, center_bottom_pts = self._get_corner_centers(center_right_pts, \\\n center_bottom_pts, bottom_most_y, right_most_x)\n if (center_x_br != None) and (center_y_br != None):\n end_points.append([center_x_br, center_y_br])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n #self.showme(segmented_instances_copy, 'bbox')\n\n return end_points",
"def get_cluster_bounds(i, cluster_bounds):\n con1 = np.where(i >= cluster_bounds)[0]\n j = con1[len(con1) -1]+1\n\n # for j in range(1, len(cluster_bounds)):\n # if(i < cluster_bounds[j] and i >= cluster_bounds[j-1]):\n # break\n return np.array([cluster_bounds[j-1], cluster_bounds[j]], dtype=np.int64)",
"def bin_sizing(self):\n\n self.log.info(\"Begin Re-Binning the Genome Space.\")\n new_list = []\n seg_num = 0\n\n for chrom in natsort.natsorted(self.seg_analyzer.chrom_list):\n self.log.debug(\"Binning Chromosome {0}\".format(chrom))\n\n # Some chromosomes have no segments.\n try:\n chrom_slice = \\\n self.seg_analyzer.seg_copy_array[self.seg_analyzer.seg_copy_array[:, 1] == chrom.encode()]\n seg_count = chrom_slice.shape[0]\n coord_start = int(chrom_slice[0, 2])\n except IndexError:\n continue\n\n for i in range((seg_count-1)):\n if (i+1) < seg_count and (i+1) % int(self.args.Combine_Segments) == 0:\n coord_stop = int(chrom_slice[i, 3])\n new_list.append([seg_num, chrom.encode(), coord_start, coord_stop])\n\n coord_start = int(chrom_slice[i+1, 2])\n seg_num += 1\n\n self.log.info(\"Genome Space Successfully Re-Binned.\")\n\n return numpy.array(new_list, dtype='object')",
"def _cluster_into_bins(eval_data, ref_data, num_clusters):\r\n\r\n cluster_data = np.vstack([eval_data, ref_data])\r\n kmeans = sklearn.cluster.MiniBatchKMeans(n_clusters=num_clusters, n_init=10)\r\n labels = kmeans.fit(cluster_data).labels_\r\n\r\n eval_labels = labels[:len(eval_data)]\r\n ref_labels = labels[len(eval_data):]\r\n\r\n eval_bins = np.histogram(eval_labels, bins=num_clusters,\r\n range=[0, num_clusters], density=True)[0]\r\n ref_bins = np.histogram(ref_labels, bins=num_clusters,\r\n range=[0, num_clusters], density=True)[0]\r\n return eval_bins, ref_bins",
"def plot_events_cluster(self, cluster, distrib=True, dataname='', timeres=None):\n\n def plot_notimeres():\n \"\"\"\n All time colapsed\n @return:\n \"\"\"\n cont = np.zeros(cluster.num_clusters())\n for i in range(self.dataset.shape[0]):\n posy = self.dataset[i][0]\n posx = self.dataset[i][1]\n ejem = np.array([[posy, posx]]).reshape(1,-1)\n ncl = cluster.predict(ejem)\n ncl = ncl[0]\n\n if distrib:\n cont[ncl] += 1\n else:\n cont[ncl] = 1\n\n if distrib:\n cont = cont / np.max(cont)\n for i in range(cont.shape[0]):\n if cont[i] > 0.01:\n cx = cluster.cluster_centers_[i][0]\n cy = cluster.cluster_centers_[i][1]\n mymap.circle_marker(location=[cx, cy],\n radius=cont[i] * circlesize,\n line_color='#000000',\n fill_color='#110000',\n popup=str(cont[i]), fill_opacity=0.4)\n else:\n for i in range(cont.shape[0]):\n if cont[i] > 0.01:\n cx = cluster.cluster_centers_[i][0]\n cy = cluster.cluster_centers_[i][1]\n\n mymap.circle_marker(location=[cx, cy],\n radius=30,\n line_color='#000000',\n fill_color='#110000',\n popup=str(cont[i]), fill_opacity=0.4)\n\n def plot_timeres(timeres):\n \"\"\"\n Geo points separated by the time resolution zones\n @return:\n \"\"\"\n tint = 24 / len(timeres.intervals)\n step = 255 / (tint + 1)\n\n cont = np.zeros(cluster.num_clusters())\n for i in range(self.dataset.shape[0]):\n posy = self.dataset[i][0]\n posx = self.dataset[i][1]\n ejem = np.array([[posy, posx]]).reshape(1,-1)\n ncl = cluster.predict(ejem)\n ncl = ncl[0]\n\n if distrib:\n cont[ncl] += 1\n else:\n cont[ncl] = 1\n\n # if distrib:\n # cont = cont / np.max(cont)\n # for i in range(cont.shape[0]):\n # if cont[i] > 0.01:\n # cx = cluster.cluster_centers_[i][0]\n # cy = cluster.cluster_centers_[i][1]\n # mymap.circle_marker(location=[cx, cy],\n # radius=cont[i] * circlesize,\n # line_color='#FF0000',\n # fill_color='#110000')\n # else:\n # for i in range(cont.shape[0]):\n # if cont[i] > 0.01:\n # cx = cluster.cluster_centers_[i][0]\n # cy = cluster.cluster_centers_[i][1]\n # mymap.circle_marker(location=[cx, cy],\n # radius=30,\n # line_color='#FF0000',\n # fill_color='#110000')\n for t in range(tint):\n color = '#' + (str(hex((t + 1) * step))[2:]) + \\\n (str(hex((t + 1) * step))[2:]) + 'FF' # (str(hex((t+1)*step))[2:])\n cont = np.zeros(cluster.num_clusters())\n for i in range(self.dataset.shape[0]):\n posy = self.dataset[i][0]\n posx = self.dataset[i][1]\n ejem = np.array([[posy, posx]]).reshape(1,-1)\n ncl = cluster.predict(ejem)\n ncl = ncl[0]\n\n _, evtime = timeres.discretize(self.dataset[i][2])\n\n if evtime == t:\n if distrib:\n cont[ncl] += 1\n else:\n cont[ncl] = 1\n if distrib:\n cont = cont / np.max(cont)\n for i in range(cont.shape[0]):\n if cont[i] > 0.01:\n cx = cluster.cluster_centers_[i][0]\n cy = cluster.cluster_centers_[i][1]\n mymap.circle_marker(location=[cx, cy],\n radius=cont[i] * circlesize,\n line_color=pltcolors[t],\n fill_color=pltcolors[t], #'#110000',\n popup=str(t) + '-' + str(cont[i]), fill_opacity=0.2)\n else:\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i] > 0.01:\n cx = cluster.cluster_centers_[i][0]\n cy = cluster.cluster_centers_[i][1]\n mymap.circle_marker(location=[cx, cy],\n radius=30,\n line_color=pltcolors[t],\n fill_color=pltcolors[t], #'#110000',\n popup=str(t) + '-' + str(cont[i]), fill_opacity=0.2)\n\n print 'Generating the events plot ...'\n circlesize = 60000 * cluster.radius\n\n # if timeres == 0:\n # fig = plt.figure()\n # ax = fig.add_subplot(111)\n\n minLat, maxLat, minLon, maxLon = self.city[1]\n # normLat = scale / 
(maxLat - minLat)\n # normLon = scale / (maxLon - minLon)\n mymap = folium.Map(location=[(minLat + maxLat) / 2.0, (minLon + maxLon) / 2.0], zoom_start=12, width=1200,\n height=1000)\n\n if timeres is None:\n plot_notimeres()\n else:\n plot_timeres(timeres)\n\n #today = time.strftime('%Y%m%d%H%M%S', time.localtime())\n nfile = self.application + '-' + dataname + '-Clust'\n if self.mnhh is not None and self.mnhh is not None:\n nfile += '-nusr' + str(self.mxhh) + '#' + str(self.mnhh)\n\n if timeres is not None:\n nfile += '-tr' + str(timeres.intervals)\n nfile += '-s' + str(cluster.radius)\n\n # if timeres == 0:\n # fig.savefig(homepath + 'Results/' + self.city[2] + '-' +\n # nfile + '.pdf', orientation='landscape', format='pdf')\n # plt.close()\n mymap.create_map(path=homepath + 'Results/clusters/' + self.city[2] + nfile + '.html')",
"def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges",
"def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n return np.array([segment[0] + i / n_bins * (segment[1] - segment[0])\n for i in range(n_bins)]\n + [float(segment[1])])",
"def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count",
"def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ",
"def cluster_positions_extractor(\n cluster_events_df: pd.DataFrame) -> pd.DataFrame:\n to_return = None\n # First, validate the input data\n ipv.parameter_type_validator(expected_type=pd.DataFrame,\n parameter_var=cluster_events_df)\n normed = cluster_events_df.reset_index(drop=True)\n\n # Next, run the above two functions to get starting and ending positions\n starting_positions_series = normed.apply(\n func=event_starting_point_extractor, \n axis=\"columns\"\n )\n starting_positions_df = pd.DataFrame(\n data=starting_positions_series.tolist(),\n index=normed.index,\n columns=[\"starting_x\", \"starting_y\"]\n )\n\n ending_positions_series = normed.swifter.apply(\n func=event_ending_point_extractor,\n axis=\"columns\"\n )\n ending_positions_df = pd.DataFrame(\n data=ending_positions_series.tolist(),\n index=normed.index,\n columns=[\"ending_x\", \"ending_y\"]\n )\n\n # Create the new DataFrame that we will be returning.\n positions_df = pd.concat(\n objs=[normed.drop(columns=\"positions\"),\n starting_positions_df,\n ending_positions_df],\n axis=\"columns\",\n ignore_index=True\n )\n positions_df.rename(columns={0 : \"seq_id\",\n 1 : \"id\",\n 2 : \"matchId\",\n 3 : \"teamId\",\n 4 : \"starting_x\",\n 5 : \"starting_y\",\n 6 : \"ending_x\",\n 7 : \"ending_y\"},\n inplace=True)\n\n # Finally, validate and return the result\n ipv.parameter_type_validator(expected_type=pd.DataFrame,\n parameter_var=positions_df)\n to_return = positions_df\n\n return to_return",
"def _hold_bounds(self):\n adc_channel = self.graph_renderer.channels[0]\n if self.sx2 > adc_channel.size():\n self.anchored = True\n\n if self.anchored:\n # anchor right side of the window to the last graph sample. so the graph always animates, grows out from\n # the right side of the window. (anchor sx2 to adc_channel.size())\n dx = self.sx2 - adc_channel.size()\n dxw = self.wsx2 - adc_channel.size()\n self.sx1 -= dx\n self.sx2 -= dx\n self.wsx1 -= dxw\n self.wsx2 -= dxw\n\n # eliminate integer overflow problems. only allow indices smaller than a 32bit integer value. and then divide\n # it by four just to be sure.. maybe it's not necessary, but maybe there are some other tricks used in the\n # graph rendering..\n bound = 0xffffffff / 4\n # hmm. this allows only 12 days of data with ~960Hz. time to go 64bit?\n self.sx1 = max(self.sx1, -bound)\n self.sy1 = max(self.sy1, -bound)\n self.sx1 = min(self.sx1, bound)\n self.sy1 = min(self.sy1, bound)\n self.sx2 = max(self.sx2, -bound)\n self.sy2 = max(self.sy2, -bound)\n self.sx2 = min(self.sx2, bound)\n self.sy2 = min(self.sy2, bound)\n self.wsx1 = max(self.wsx1, -bound)\n self.wsy1 = max(self.wsy1, -bound)\n self.wsx1 = min(self.wsx1, bound)\n self.wsy1 = min(self.wsy1, bound)\n self.wsx2 = max(self.wsx2, -bound)\n self.wsy2 = max(self.wsy2, -bound)\n self.wsx2 = min(self.wsx2, bound)\n self.wsy2 = min(self.wsy2, bound)\n\n # limit horizontal zoom to 2 samples. can't zoom in anymore if less than one sample stays on screen.\n # don't have time to implement and test line segment cutting, if one sample is outside the window, and another\n # is inside.\n if self.wsx2 - self.wsx1 < 2.:\n self.wsx2 = self.wsx1 + 2.\n if self.sx2 - self.sx1 < 2.:\n self.sx2 = self.sx1 + 2.\n\n #\n # limit vertical movement and vertical zoom\n #\n\n val_min = adc_channel.value_min\n val_max = adc_channel.value_max\n\n # allow offset of this percent/100 of the screen\n overlap = .30\n\n # top of the screen has smaller sample values than bottom of the screen. inverted graph.\n # sy1 is top pixel, sy2 bottom. bottom-left coordinat is (0, 0)\n if self.sy1 < self.sy2:\n val_top = val_min + (self.wsy1 - self.wsy2) * overlap\n val_bottom = val_max - (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 < val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 < val_top:\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy2 = val_bottom\n else:\n val_bottom = val_min - (self.wsy1 - self.wsy2) * overlap\n val_top = val_max + (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 > val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 > val_top:\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy2 = val_bottom",
"def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist",
"def grid_events(self, scale, threshold=100, distrib=False, dataname=''):\n\n def plot_notimeres(thres):\n \"\"\"\n All time colapsed\n @return:\n \"\"\"\n cont = np.zeros((scale, scale))\n for i in range(self.dataset.shape[0]):\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n\n cont[posx - 1, posy - 1] += 1\n\n lgeo = []\n\n for i in range(cont.shape[0]):\n for j in range(cont.shape[1]):\n if cont[i, j] >= thres:\n path = [(minLon + (i + 0.5) / normLon, maxLat - (scale - j - 1.5) / normLat),\n (minLon + (i + 0.5) / normLon, maxLat - (scale - j - 0.5) / normLat),\n (minLon + (i + 1.5) / normLon, maxLat - (scale - j - 0.5) / normLat),\n (minLon + (i + 1.5) / normLon, maxLat - (scale - j - 1.5) / normLat)\n ]\n lgeo.append(Feature(geometry=Polygon([path])))\n\n return lgeo\n\n minLat, maxLat, minLon, maxLon = self.city[1]\n normLat = scale / (maxLat - minLat)\n normLon = scale / (maxLon - minLon)\n mymap = folium.Map(location=[(minLat + maxLat) / 2.0, (minLon + maxLon) / 2.0], zoom_start=12, width=1400,\n height=1000)\n lgeog = plot_notimeres(threshold)\n\n nfile = self.application + '-' + dataname\n if self.mnhh is not None and self.mnhh is not None:\n nfile += '-nusr' + str(self.mxhh) + '-' + str(self.mnhh)\n nfile += '-s' + str(scale)\n nfile += '-tr' + str(threshold)\n\n geoc = FeatureCollection(lgeog)\n dump = geojson.dumps(geoc)\n jsfile = open(homepath + 'Results/' + nfile + '.json', 'w')\n jsfile.write(dump)\n jsfile.close()\n mymap.geo_json(geo_path=homepath + 'Results/' + nfile + '.json', fill_opacity=0.2)\n\n mymap.create_map(path=homepath + 'Results/' + self.city[2] + nfile + '.html')",
"def bounds(self, pos):",
"def update_binwise_positions(cnarr, segments=None, variants=None):\n cnarr = cnarr.copy()\n if segments:\n segments = segments.copy()\n seg_chroms = set(segments.chromosome.unique())\n if variants:\n variants = variants.copy()\n var_chroms = set(variants.chromosome.unique())\n\n # ENH: look into pandas groupby innards to get group indices\n for chrom in cnarr.chromosome.unique():\n # Enumerate bins, starting from 0\n # NB: plotted points will be at +0.5 offsets\n c_idx = cnarr.chromosome == chrom\n c_bins = cnarr[c_idx] # .copy()\n if segments and chrom in seg_chroms:\n # Match segment boundaries to enumerated bins\n c_seg_idx = (segments.chromosome == chrom).values\n seg_starts = np.searchsorted(\n c_bins.start.values, segments.start.values[c_seg_idx]\n )\n seg_ends = np.r_[seg_starts[1:], len(c_bins)]\n segments.data.loc[c_seg_idx, \"start\"] = seg_starts\n segments.data.loc[c_seg_idx, \"end\"] = seg_ends\n\n if variants and chrom in var_chroms:\n # Match variant positions to enumerated bins, and\n # add fractional increments to multiple variants within 1 bin\n c_varr_idx = (variants.chromosome == chrom).values\n c_varr_df = variants.data[c_varr_idx]\n # Get binwise start indices of the variants\n v_starts = np.searchsorted(c_bins.start.values, c_varr_df.start.values)\n # Overwrite runs of repeats with fractional increments,\n # adding the cumulative fraction to each repeat\n for idx, size in list(get_repeat_slices(v_starts)):\n v_starts[idx] += np.arange(size) / size\n variant_sizes = c_varr_df.end - c_varr_df.start\n variants.data.loc[c_varr_idx, \"start\"] = v_starts\n variants.data.loc[c_varr_idx, \"end\"] = v_starts + variant_sizes\n\n c_starts = np.arange(len(c_bins)) # c_idx.sum())\n c_ends = np.arange(1, len(c_bins) + 1)\n cnarr.data.loc[c_idx, \"start\"] = c_starts\n cnarr.data.loc[c_idx, \"end\"] = c_ends\n\n return cnarr, segments, variants",
"def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def cluster_testing_dist(agg1, agg2, partDiameter):\n agg2_temp = translate_aggregate(agg2, random_point_generator(calculate_LD(agg1), calculate_LD(agg2), calculate_COM(agg1), calculate_COM(agg2), partDiameter))\n agg2_temp = random_rotate_aggregate(agg2_temp)\n\n check = 1\n while check == 1:\n agg2_temp = translate_aggregate(agg2_temp, numpy.array((calculate_COM(agg1)-calculate_COM(agg2_temp))*0.01))\n check, index = test_collision(agg1, agg2_temp, partDiameter)\n \"\"\" Index from this part is not valid! Function returns '99' before collision happens.\n \"\"\"\n if (check == 2):\n # print(index)\n return numpy.linalg.norm(calculate_COM(agg1) - calculate_COM(agg2_temp)), numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[:,index])\n # return numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[0:3,index])\n break",
"def decluster(self, catalogue, config):\n # Get relevant parameters\n neq = len(catalogue.data['magnitude']) # Number of earthquakes\n # Get decimal year (needed for time windows)\n year_dec = decimal_year(\n catalogue.data['year'], catalogue.data['month'],\n catalogue.data['day'])\n # Get space and time windows corresponding to each event\n sw_space, sw_time = (\n config['time_distance_window'].calc(catalogue.data['magnitude']))\n # Initial Position Identifier\n eqid = np.arange(0, neq, 1)\n # Pre-allocate cluster index vectors\n vcl = np.zeros(neq, dtype=int)\n # Sort magnitudes into descending order\n id0 = np.flipud(np.argsort(catalogue.data['magnitude'],\n kind='heapsort'))\n longitude = catalogue.data['longitude'][id0]\n latitude = catalogue.data['latitude'][id0]\n sw_space = sw_space[id0]\n sw_time = sw_time[id0]\n year_dec = year_dec[id0]\n eqid = eqid[id0]\n flagvector = np.zeros(neq, dtype=int)\n # Begin cluster identification\n clust_index = 0\n for i in range(0, neq - 1):\n if vcl[i] == 0:\n # Find Events inside both fore- and aftershock time windows\n dt = year_dec - year_dec[i]\n vsel = np.logical_and(\n vcl == 0,\n np.logical_and(\n dt >= (-sw_time[i] * config['fs_time_prop']),\n dt <= sw_time[i]))\n # Of those events inside time window,\n # find those inside distance window\n vsel1 = haversine(longitude[vsel],\n latitude[vsel],\n longitude[i],\n latitude[i]) <= sw_space[i]\n vsel[vsel] = vsel1[:, 0]\n temp_vsel = np.copy(vsel)\n temp_vsel[i] = False\n if any(temp_vsel):\n # Allocate a cluster number\n vcl[vsel] = clust_index + 1\n flagvector[vsel] = 1\n # For those events in the cluster before the main event,\n # flagvector is equal to -1\n temp_vsel[dt >= 0.0] = False\n flagvector[temp_vsel] = -1\n flagvector[i] = 0\n clust_index += 1\n\n # Re-sort the catalog_matrix into original order\n id1 = np.argsort(eqid, kind='heapsort')\n eqid = eqid[id1]\n vcl = vcl[id1]\n flagvector = flagvector[id1]\n\n return vcl, flagvector",
"def find_centroid_for_each(self):"
] |
[
"0.56852734",
"0.5581037",
"0.55431163",
"0.54475576",
"0.5437623",
"0.54101694",
"0.5406706",
"0.53986126",
"0.5320614",
"0.53138363",
"0.5239041",
"0.5185479",
"0.51828754",
"0.51673204",
"0.51503664",
"0.51502055",
"0.5123758",
"0.5118751",
"0.51078063",
"0.5100034",
"0.5098178",
"0.50862473",
"0.50830996",
"0.5079069",
"0.5073205",
"0.50672376",
"0.5064213",
"0.5064027",
"0.5033009",
"0.5018138"
] |
0.5842902
|
0
|
Raise an exception for invalid tunnel range or malformed range.
|
def _parse_nexus_vni_range(self, tunnel_range):
for ident in tunnel_range:
if not self._is_valid_nexus_vni(ident):
raise exc.NetworkTunnelRangeError(
tunnel_range=tunnel_range,
error=_("%(id)s is not a valid Nexus VNI value.") %
{'id': ident})
if tunnel_range[1] < tunnel_range[0]:
raise exc.NetworkTunnelRangeError(
tunnel_range=tunnel_range,
error=_("End of tunnel range is less than start of "
"tunnel range."))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _check_one_range(r):\n if not _is_single_range(r):\n raise error.RangeSyntaxError(str(r))",
"def _check_range(r): \n if _is_single_range(r):\n _check_one_range(r)\n elif isinstance(r, collections.Sequence):\n for r2 in r:\n _check_one_range(r2)\n else:\n raise error.RangeSyntaxError(str(r))",
"def __check_args_val(self):\n if self.__min_range < 0:\n error_msg = \"min_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < 0:\n error_msg = \"max_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < self.__min_range:\n error_msg = \"max_range must be greater than or equal to min_range\"\n raise ValueError(error_msg)",
"def is_valid_range(parser, arg, minimum=0, maximum=100):\n if arg < minimum:\n parser.error(\"%s < %s\", arg, minimum)\n else:\n if arg > maximum:\n parser.error(\"%s > %s\", arg, maximum)\n\n return arg",
"def _validate_interval(interval: Interval) -> None:\n origin, end = interval\n\n if end < origin:\n raise ValueError(f\"Interval [{origin}, {end}] is not a proper one.\") # pragma: no cover",
"def isRangeValid(self) -> bool:\n ...",
"def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_",
"def is_in_range(value: float, lower_bound: float, upper_bound: float, err_string: str) -> None:\n if value < lower_bound or value > upper_bound:\n print('\\n' + err_string + '\\n')\n sys.exit(1)",
"def validate_timestamp(start, end):\n if start is not None and end is not None:\n start, end = str(start), str(end)\n if start > end:\n log.error(\"Invalid Data Range: {}, {}\".format(start, end))\n return 400",
"def _validate_port_range(self) -> Tuple[int, int]:\n\n lower_port = upper_port = 0\n port_range = self.port_range\n try:\n port_ranges = port_range.split(\"..\")\n\n lower_port = int(port_ranges[0])\n upper_port = int(port_ranges[1])\n\n port_range_size = upper_port - lower_port\n if port_range_size != 0:\n if port_range_size < min_port_range_size:\n self.log_and_raise(ValueError(f\"Port range validation failed for range: '{port_range}'. \"\n f\"Range size must be at least {min_port_range_size} as specified by \"\n \"env EG_MIN_PORT_RANGE_SIZE\"))\n\n # According to RFC 793, port is a 16-bit unsigned int. Which means the port\n # numbers must be in the range (0, 65535). However, within that range,\n # ports 0 - 1023 are called \"well-known ports\" and are typically reserved for\n # specific purposes. For example, 0 is reserved for random port assignment,\n # 80 is used for HTTP, 443 for TLS/SSL, 25 for SMTP, etc. But, there is\n # flexibility as one can choose any port with the aforementioned protocols.\n # Ports 1024 - 49151 are called \"user or registered ports\" that are bound to\n # services running on the server listening to client connections. And, ports\n # 49152 - 65535 are called \"dynamic or ephemeral ports\". A TCP connection\n # has two endpoints. Each endpoint consists of an IP address and a port number.\n # And, each connection is made up of a 4-tuple consisting of -- client-IP,\n # client-port, server-IP, and server-port. A service runs on a server with a\n # specific IP and is bound to a specific \"user or registered port\" that is\n # advertised for clients to connect. So, when a client connects to a service\n # running on a server, three out of 4-tuple - client-IP, client-port, server-IP -\n # are already known. To be able to serve multiple clients concurrently, the\n # server's IP stack assigns an ephemeral port for the connection to complete\n # the 4-tuple.\n #\n # In case of JEG, we will accept ports in the range 1024 - 65535 as these days\n # admins use dedicated hosts for individual services.\n def validate_port(port: int) -> None:\n if port < 1024 or port > 65535:\n self.log_and_raise(ValueError(f\"Invalid port range '{port_range}' specified. \"\n \"Range for valid port numbers is (1024, 65535).\"))\n validate_port(lower_port)\n validate_port(upper_port)\n except IndexError as ie:\n self.log_and_raise(RuntimeError(f\"Port range validation failed for range: '{port_range}'.\"), chained=ie)\n\n return lower_port, upper_port",
"def test_find_break_points_invalid_range(self):\r\n self.assertRaises(ValueError, self.mc._find_break_points, 1, 0, 5)\r\n self.assertRaises(ValueError, self.mc._find_break_points, 1, 1, 5)",
"def test_try_create_out_of_range_ip_in_network(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/out_of_range_ipv4_172_0_0_5_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Ip 172.0.0.5 not available for network 5.',\n response.data['detail'])",
"def test_get_offenses_with_invalid_range(self, make_request):\n action_params = {\"range\": \"-1-2\"}\n with self.assertRaises(PluginException):\n self.action.run(action_params)",
"def check_binning_parameter_range(x_min, x_max, ws_unit):\n if ws_unit == 'dSpacing' and not 0 < x_min < x_max < 20:\n # dspacing within (0, 20)\n x_range_is_wrong = True\n elif ws_unit == 'TOF' and not 1000 < x_min < x_max < 1000000:\n # TOF within (1000, 1000000)\n x_range_is_wrong = True\n elif ws_unit != 'dSpacing' and ws_unit != 'TOF':\n raise NotImplementedError('Impossible case for unit {}'.format(ws_unit))\n else:\n # good cases\n x_range_is_wrong = False\n\n if x_range_is_wrong:\n ero_msg = 'For {0}, X range ({1}, {2}) does not make sense' \\\n ''.format(ws_unit, x_min, x_max)\n print('[ERROR CAUSING CRASH] {}'.format(ero_msg))\n raise RuntimeError(ero_msg)\n\n return",
"def test_snmpset_value_out_of_range_error():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, oid='SNMPv2-MIB::sysName.0',\n value_type='s', value='Thiiiiiiiiiiiiiiiiiiiiiiiiiiiiis '\n 'sssssttttttttrrrriiiiiiiiiiiiiiinnnnnnnnnnnnng is '\n 'wwwwwwaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaayyyyyyyyyy '\n 'tttoooooooooooooooooooooooooooooooooooooooooooooo '\n 'lllooooooooooooooooooooooonnnnnnnnnnnnnnnnnnnggggg'\n ' !!!!!!!!!!!!!!!!!!!!!!!!!!!!', port=SNMP_SRV_PORT)\n assert 'Value out of range' in str(excinfo.value)",
"def test_bad_bounds(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"norm\", 0, 2, low=+2, high=-2, shape=(4, 4))\n with pytest.raises(ValueError):\n Real(\"yolo\", \"norm\", 0, 2, low=+2, high=+2, shape=(4, 4))",
"def error(number):\n \n if number >= 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value < 1 \\n\"",
"def error(number):\n \n if number >= 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value < 1 \\n\"",
"def _validate_val_range(self, proposal):\n val_range = proposal[\"value\"]\n if len(val_range) != 2:\n raise traitlets.TraitError(\"val_range must be of length 2.\")\n if val_range[0] > val_range[1]:\n raise traitlets.TraitError(\n \"val_range[0] must be smaller than val_range[1].\"\n )\n return val_range",
"def error(number):\n \n if number > 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value <= 1 \\n\"",
"def validate(self):\n if self._inc_begin is None:\n raise ValueError((\"TimeRange {self} missing begin point\")\n .format(self=self))\n if self._exc_end is None:\n raise ValueError((\"TimeRange {self} missing end point\")\n .format(self=self))",
"def testinvalidrange(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=0)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=6)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range='3')\n self.assertRaises(AbilityError, WeaponAbility, 'Changling', range=3)",
"def test_invalid_time_range(event_member):\n _, member, event_id = event_member\n current = date.today() + timedelta(days=6)\n start = datetime.combine(current, time(12, 30))\n end = start - timedelta(days=1)\n expect_error(edit, InputError, member.username, event_id,\n True, start, end)",
"def transmitRangeFailed(): \r\n global data\r\n DW1000.newTransmit()\r\n data[0] = C.RANGE_FAILED\r\n DW1000.setData(data, LEN_DATA)\r\n DW1000.startTransmit()",
"def out_of_range_check(self, guess, range):\r\n if ((guess<0) or (guess>=range)):\r\n return \"Input is out of range!\"\r\n else:\r\n return guess",
"def testSPFInvalidIPv4Range(self):\n spf_record = \"v=spf1 ip4:78.46.96.236/99 ~all\"\n domain = \"surftown.dk\"\n self.assertRaises(checkdmarc.SPFSyntaxError,\n checkdmarc.parse_spf_record, spf_record, domain)",
"def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))",
"def test_creation_bounds_not_inclusive():\n with pytest.raises(ValueError) as __:\n value = -42\n __ = param.Integer(value=value, hardbounds=[-42, 100], inclusive_bounds=[False, False])",
"def range_validator(value_str, args):\n \n assert len(args) == 5, \"Error: range_validator requires 5 arguments.\"\n a_type, lb, ub, allow_none, error_msg = args\n try:\n if allow_none and value_str == 'None':\n value = None\n else:\n value = a_type(value_str)\n except ValueError:\n raise InputException(error_msg + value_str)\n if (lb != None and value < lb) or (ub != None and value > ub):\n raise InputException(error_msg + value_str)\n return value",
"def check_params_are_valid(params):\n if params.index.duplicated().any():\n raise ValueError(\"No duplicates allowed in the index of params.\")\n\n invalid_bounds = params.query(\"lower_bound > value | upper_bound < value\")\n\n if len(invalid_bounds) > 0:\n raise ValueError(f\"value out of bounds for:\\n{invalid_bounds.index}\")"
] |
[
"0.6624532",
"0.6413814",
"0.6199758",
"0.61368984",
"0.6097965",
"0.607641",
"0.60197747",
"0.59626144",
"0.5958465",
"0.59397733",
"0.5909572",
"0.5895414",
"0.5871884",
"0.58117115",
"0.5792768",
"0.57913935",
"0.57842344",
"0.57842344",
"0.5781298",
"0.577995",
"0.576521",
"0.57485545",
"0.57255167",
"0.56767267",
"0.567406",
"0.56692994",
"0.56328005",
"0.5626911",
"0.56160706",
"0.5599799"
] |
0.68945503
|
0
|
Synchronize vxlan_allocations table with configured tunnel ranges.
|
def sync_allocations(self):
# determine current configured allocatable vnis
vxlan_vnis = set()
for tun_min, tun_max in self.tunnel_ranges:
vxlan_vnis |= set(six.moves.range(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
# remove from table unallocated tunnels not currently allocatable
# fetch results as list via all() because we'll be iterating
# through them twice
allocs = (session.query(nexus_models_v2.NexusVxlanAllocation).
with_lockmode("update").all())
# collect all vnis present in db
existing_vnis = set(alloc.vxlan_vni for alloc in allocs)
# collect those vnis that needs to be deleted from db
vnis_to_remove = [alloc.vxlan_vni for alloc in allocs
if (alloc.vxlan_vni not in vxlan_vnis and
not alloc.allocated)]
# Immediately delete vnis in chunks. This leaves no work for
# flush at the end of transaction
bulk_size = 100
chunked_vnis = (vnis_to_remove[i:i + bulk_size] for i in
range(0, len(vnis_to_remove), bulk_size))
for vni_list in chunked_vnis:
session.query(nexus_models_v2.NexusVxlanAllocation).filter(
nexus_models_v2.NexusVxlanAllocation.
vxlan_vni.in_(vni_list)).delete(
synchronize_session=False)
# collect vnis that need to be added
vnis = list(vxlan_vnis - existing_vnis)
chunked_vnis = (vnis[i:i + bulk_size] for i in
range(0, len(vnis), bulk_size))
for vni_list in chunked_vnis:
bulk = [{'vxlan_vni': vni, 'allocated': False}
for vni in vni_list]
session.execute(nexus_models_v2.NexusVxlanAllocation.
__table__.insert(), bulk)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def populate_ranges(self,):\n self.ranges = list()\n # coredump: info target shows all sections in full detail\n # live debug: only file-backed sections are shown\n targetinfo = gdb.execute(\"info target\", False, True)\n for line in targetinfo.splitlines():\n line = line.strip()\n if line.startswith('`'):\n line = line.split(\"'\")[1]\n source = line[1:]\n continue\n if not line.startswith(\"0x\"):\n continue\n\n start, dash, end, str_is, memtype = line.split(maxsplit=4)\n assert(dash == '-' and str_is == 'is')\n start = int(start, 16)\n end = int(end, 16)\n new_range = MemoryRange(start, end-start, source, memtype)\n startoverlap = self.get_range(start)\n endoverlap = self.get_range(end)\n\n if endoverlap == startoverlap:\n endoverlap = None\n\n #TODO: splitup and punch holes/replace\n if memtype.startswith('.'):\n # gdb reports loadXXX sections on top of file-backed sections of the binary\n # probably because the kernel maps writeable pages on top of them\n # Therefore, keep the more accurate description from the file-backed section\n if startoverlap is not None and startoverlap.memtype == MemoryType.General:\n previous, current = self.split_range_at(start)\n self.ranges.remove(current)\n startoverlap = None\n if endoverlap is not None and endoverlap.memtype == MemoryType.General:\n current, end = self.split_range_at(end)\n self.ranges.remove(current)\n endoverlap = None\n\n if startoverlap is not None and endoverlap is not None:\n print(\"Overlapping memory ranges: %s in %s -> %s\" %\n (new_range, str(startoverlap), str(endoverlap)))\n bisect.insort(self.ranges, new_range)\n\n # live target: run-time allocated memory and some file-backed sections\n # There typically is overlap with the 'info target' output, so give precedence\n # to the previously added ranges\n mappinginfo = gdb.execute(\"info proc mappings\", False, True)\n for line in mappinginfo.splitlines():\n line = line.strip()\n if not line.startswith(\"0x\"):\n continue\n\n items = line.split()\n if len(items) == 4:\n start, end, size, offset = items\n source = \"unknown\"\n elif len(items) == 5:\n start, end, size, offset, source = items\n else:\n print(\"Unexpected line when parsing 'info proc mappings': %s\" % line)\n continue\n\n start = int(start, 16)\n size = int(size, 16)\n end = int(end, 16)\n\n new_range = MemoryRange(start, size, source, source)\n self.tentative_add_range(new_range)",
"def _sync_ranges(self):\n with self.sphere.sphere_lock:\n self._sync_range(\n self.sphere.bai_1d_args, 'radial_range', 'numpoints',\n self.radialRange1D\n )\n self._sync_range(\n self.sphere.bai_2d_args, 'radial_range', 'npt_rad',\n self.radialRange2D\n )\n self._sync_range(\n self.sphere.bai_2d_args, 'azimuth_range', 'npt_azim',\n self.azimuthalRange2D\n )",
"def fusion_api_allocate_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.allocate(body, uri, api, headers)",
"def update_stp(self):\n\n # add active links from calculated spanning tree\n ports_to_remove = [conn.link1.port if conn.link1.switch == self.connection.dpid else conn.link2.port\n for conn in self.stp.unused_ports if\n conn.link1.switch == self.connection.dpid or conn.link2.switch == self.connection.dpid]\n ports_to_add = [conn.link1.port if conn.link1.switch == self.connection.dpid else\n conn.link2.port for conn in self.stp.legal_connections if\n conn.link1.switch == self.connection.dpid or conn.link2.switch == self.connection.dpid]\n all_switch_ports = [conn.link1.port if conn.link1.switch == self.connection.dpid else\n conn.link2.port for conn in self.stp.all_connections if\n conn.link1.switch == self.connection.dpid or conn.link2.switch == self.connection.dpid]\n\n self.ports_use = [port for port in self.ports_use if port not in ports_to_remove]\n # append lists, the ports STP give us doen't contain HOST links\n self.ports_use = list(set(self.ports_use + ports_to_add))\n self.host_ports = [port for port in self.ports_use if port not in all_switch_ports]\n # delete flows that send data to non active links\n self.stp.find_path()",
"def keep_vifs_bound():\n global EVT_TIMEOUT, NETSRS\n\n while bEndless:\n with xenapi_session() as x:\n try:\n vms = x.VM.get_all_records() # Get a list of VMs for multiple uses\n\n # If the host is in maintenance mode end it and auto start VMs\n host_ref = x.host.get_by_uuid(get_host_ref())\n if not x.host.get_enabled(host_ref):\n x.host.enable(host_ref) # End maintenance mode\n\n # Get a list of suitable VMs to start, ordered by ha restart priority\n autostart_list = [(vms[k]['order'], k, float(vms[k]['start_delay'])) for k in vms.keys() if (\n (not vms[k]['is_a_snapshot']) and\n (not vms[k]['is_a_template']) and\n (not vms[k]['is_control_domain']) and\n (('auto_poweron' in vms[k]['other_config']) and (vms[k]['other_config']['auto_poweron'])) and\n ('Halted' == vms[k]['power_state'])\n )]\n # We avoid .sort with a lambda to be able to delete the vms list\n from operator import itemgetter\n autostart_list.sort(key=itemgetter(0))\n\n # Attempt to start the VMs, while respecting the delays\n for i in range(len(autostart_list)):\n vm_ref = autostart_list[i][1]\n try:\n x.VM.start(vm_ref, False, False)\n except:\n pass\n finally:\n from time import sleep\n if i < (len(autostart_list) - 1):\n sleep(autostart_list[i][2])\n\n del autostart_list # Clean up\n\n # Find the ObscureRef of the target VM\n try:\n vmref = [k for k in vms.keys() if vms[k]['name_label'] == VMNAME][0]\n except:\n print('Unable to find a VM named \"{}\"'.format(VMNAME))\n exit(4)\n vifs = x.VM.get_VIFs(vmref)\n bNetworkFound = False\n for vif in vifs:\n if SSPNNAME == x.network.get_record(x.VIF.get_network(vif))['name_label']:\n bNetworkFound = True\n break\n if not bNetworkFound:\n print('Unable to find a network named \"{}\" attached to the \"{}\" VM'.format(SSPNNAME, VMNAME))\n exit(5)\n\n # Clean up\n del vifs\n del vms\n\n # Non-blocking listen for VM events\n token = '' # Initial token\n while bEndless:\n output = x.event_from(['VM'], token, EVT_TIMEOUT)\n token = output['token'] # Current token\n\n for event in output['events']:\n # Check the IP assigned to the VIFs of the target VM, if it's running\n if (('add' == event['operation']) or\n ('mod' == event['operation'])) and \\\n (vmref == event['ref']) and \\\n ('Running' == x.VM.get_power_state(vmref)):\n if 'snapshot' not in event:\n continue\n vifs = event['snapshot']['VIFs'] # Virtual interfaces list\n for vif in vifs:\n net = x.VIF.get_network(vif) # Network ref\n netrec = x.network.get_record(net) # Network record\n if SSPNNAME != netrec['name_label']:\n continue\n if rebind_ip_address(netrec['bridge']) and NETSRS:\n for sr in NETSRS:\n # Check if the destination is live for a maximum of 2 minutes and a half,\n # and if it goes live replug the target SRs\n (sr_uuid, pingTarget), = sr.items()\n p = Ping(pingTarget, 5000)\n if p.live(30):\n plug_pbds(x, sr_uuid)\n\n except (socket.error, XmlRPCFault):\n # Toolstack restarted/unavailable or SIGTERM'd\n pass",
"async def fix_alleles(limiter, data):\n \n # work on a copy, to prevent assignment warnings\n ref = data['ref'].copy()\n alt = data['alt'].copy()\n \n idx = ref.isnull()\n \n seqs = {}\n coords = [(x.chrom, x.pos, x.pos, x.build) for i, x in data[idx].iterrows()]\n async with trio.open_nursery() as nursery:\n for x in coords:\n nursery.start_soon(parallel_sequence, limiter, *x[:3], seqs, x[3])\n \n ref[idx] = [seqs[x] for x in coords]\n \n # add the reference base to insertions\n alt[idx] = ref[idx] + alt[idx]\n \n # make deletion alts VEP-compatible\n idx = alt.isnull()\n\n seqs = {}\n coords = [(x.chrom, x.pos - 1, x.pos - 1, x.build) for i, x in data[idx].iterrows()]\n async with trio.open_nursery() as nursery:\n for x in coords:\n nursery.start_soon(parallel_sequence, limiter, *x[:3], seqs, x[3])\n \n alt[idx] = [seqs[x] for x in coords]\n ref[idx] = alt[idx] + ref[idx]\n \n return ref, alt",
"def sync_with_database(self):\n # learn from db\n lports = self.nb_api.get_all(l2.LogicalPort)\n for lport in lports:\n port_id = \"{}:{}\".format(lport.lswitch.id, lport.id)\n self.cache_logical_port_by_port_id[port_id] = lport\n lrouters = self.nb_api.get_all(l3.LogicalRouter)\n for lrouter in lrouters:\n self.cache_logical_router_by_dpid[lrouter.id] = lrouter",
"def _link_allocations_to_block_trade(cls, allocations, block_trade):\n for allocation in allocations:\n cls._apply_trx_trade_to_allocation(allocation, block_trade)",
"def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)",
"def verify_intervlan_routing(self):\n for src in self.host_information:\n for dst in self.host_information:\n if dst > src:\n self.check_host_connectivity_by_id(src, dst)",
"def update_port_ip_address(self):\n leases = None\n req = dict(ip='0.0.0.0')\n instances = self.get_vms_for_this_req(**req)\n if instances is None:\n return\n\n for vm in instances:\n if not leases:\n # For the first time finding the leases file.\n leases = self._get_ip_leases()\n if not leases:\n # File does not exist.\n return\n\n for line in leases:\n if line.startswith('lease') and line.endswith('{\\n'):\n ip_addr = line.split()[1]\n if 'hardware ethernet' in line:\n if vm.mac == line.replace(';', '').split()[2]:\n LOG.info(_LI('Find IP address %(ip)s for %(mac)s'),\n {'ip': ip_addr, 'mac': vm.mac})\n try:\n rule_info = dict(ip=ip_addr, mac=vm.mac,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to update\"\n \"rules.\"))\n else:\n params = dict(columns=dict(ip=ip_addr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ip_addr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to '\n 'agent.'))",
"def clusterAndLink(\n observations,\n vx_range=[-0.1, 0.1],\n vy_range=[-0.1, 0.1],\n vx_bins=100,\n vy_bins=100,\n vx_values=None,\n vy_values=None,\n eps=0.005,\n min_obs=5,\n min_arc_length=1.0,\n alg=\"dbscan\",\n num_jobs=1,\n parallel_backend=\"cf\",\n):\n time_start_cluster = time.time()\n logger.info(\"Running velocity space clustering...\")\n\n if vx_values is None and vx_range is not None:\n vx = np.linspace(*vx_range, num=vx_bins)\n elif vx_values is None and vx_range is None:\n raise ValueError(\"Both vx_values and vx_range cannot be None.\")\n else:\n vx = vx_values\n vx_range = [vx_values[0], vx_values[-1]]\n vx_bins = len(vx)\n\n if vy_values is None and vy_range is not None:\n vy = np.linspace(*vy_range, num=vy_bins)\n elif vy_values is None and vy_range is None:\n raise ValueError(\"Both vy_values and vy_range cannot be None.\")\n else:\n vy = vy_values\n vy_range = [vy_values[0], vy_values[-1]]\n vy_bins = len(vy)\n\n if vx_values is None and vy_values is None:\n vxx, vyy = np.meshgrid(vx, vy)\n vxx = vxx.flatten()\n vyy = vyy.flatten()\n elif vx_values is not None and vy_values is not None:\n vxx = vx\n vyy = vy\n else:\n raise ValueError(\"\")\n\n logger.debug(\"X velocity range: {}\".format(vx_range))\n if vx_values is not None:\n logger.debug(\"X velocity values: {}\".format(vx_bins))\n else:\n logger.debug(\"X velocity bins: {}\".format(vx_bins))\n\n logger.debug(\"Y velocity range: {}\".format(vy_range))\n if vy_values is not None:\n logger.debug(\"Y velocity values: {}\".format(vy_bins))\n else:\n logger.debug(\"Y velocity bins: {}\".format(vy_bins))\n if vx_values is not None:\n logger.debug(\"User defined x velocity values: True\")\n else:\n logger.debug(\"User defined x velocity values: False\")\n if vy_values is not None:\n logger.debug(\"User defined y velocity values: True\")\n else:\n logger.debug(\"User defined y velocity values: False\")\n\n if vx_values is None and vy_values is None:\n logger.debug(\"Velocity grid size: {}\".format(vx_bins * vy_bins))\n else:\n logger.debug(\"Velocity grid size: {}\".format(vx_bins))\n logger.info(\"Max sample distance: {}\".format(eps))\n logger.info(\"Minimum samples: {}\".format(min_obs))\n\n possible_clusters = []\n if len(observations) > 0:\n # Extract useful quantities\n obs_ids = observations[\"obs_id\"].values\n theta_x = observations[\"theta_x_deg\"].values\n theta_y = observations[\"theta_y_deg\"].values\n mjd = observations[\"mjd_utc\"].values\n\n # Select detections in first exposure\n first = np.where(mjd == mjd.min())[0]\n mjd0 = mjd[first][0]\n dt = mjd - mjd0\n\n parallel, num_workers = _checkParallel(num_jobs, parallel_backend)\n if parallel:\n if parallel_backend == \"ray\":\n import ray\n\n if not ray.is_initialized():\n ray.init(address=\"auto\")\n\n clusterVelocity_worker_ray = ray.remote(clusterVelocity_worker)\n clusterVelocity_worker_ray = clusterVelocity_worker_ray.options(\n num_returns=1, num_cpus=1\n )\n\n # Put all arrays (which can be large) in ray's\n # local object store ahead of time\n obs_ids_oid = ray.put(obs_ids)\n theta_x_oid = ray.put(theta_x)\n theta_y_oid = ray.put(theta_y)\n dt_oid = ray.put(dt)\n\n p = []\n for vxi, vyi in zip(vxx, vyy):\n p.append(\n clusterVelocity_worker_ray.remote(\n vxi,\n vyi,\n obs_ids=obs_ids_oid,\n x=theta_x_oid,\n y=theta_y_oid,\n dt=dt_oid,\n eps=eps,\n min_obs=min_obs,\n min_arc_length=min_arc_length,\n alg=alg,\n )\n )\n possible_clusters = ray.get(p)\n\n elif parallel_backend == \"mp\":\n p = mp.Pool(processes=num_workers, initializer=_initWorker)\n 
possible_clusters = p.starmap(\n partial(\n clusterVelocity_worker,\n obs_ids=obs_ids,\n x=theta_x,\n y=theta_y,\n dt=dt,\n eps=eps,\n min_obs=min_obs,\n min_arc_length=min_arc_length,\n alg=alg,\n ),\n zip(vxx, vyy),\n )\n p.close()\n\n elif parallel_backend == \"cf\":\n with cf.ProcessPoolExecutor(\n max_workers=num_workers, initializer=_initWorker\n ) as executor:\n futures = []\n for vxi, vyi in zip(vxx, vyy):\n f = executor.submit(\n clusterVelocity_worker,\n vxi,\n vyi,\n obs_ids=obs_ids,\n x=theta_x,\n y=theta_y,\n dt=dt,\n eps=eps,\n min_obs=min_obs,\n min_arc_length=min_arc_length,\n alg=alg,\n )\n futures.append(f)\n\n possible_clusters = []\n for f in cf.as_completed(futures):\n possible_clusters.append(f.result())\n\n else:\n raise ValueError(\n \"Invalid parallel_backend: {}\".format(parallel_backend)\n )\n\n else:\n possible_clusters = []\n for vxi, vyi in zip(vxx, vyy):\n possible_clusters.append(\n clusterVelocity(\n obs_ids,\n theta_x,\n theta_y,\n dt,\n vxi,\n vyi,\n eps=eps,\n min_obs=min_obs,\n min_arc_length=min_arc_length,\n alg=alg,\n )\n )\n\n time_end_cluster = time.time()\n logger.info(\n \"Clustering completed in {:.3f} seconds.\".format(\n time_end_cluster - time_start_cluster\n )\n )\n\n logger.info(\"Restructuring clusters...\")\n time_start_restr = time.time()\n\n possible_clusters = pd.DataFrame({\"clusters\": possible_clusters})\n\n # Remove empty clusters\n possible_clusters = possible_clusters[~possible_clusters[\"clusters\"].isna()]\n\n if len(possible_clusters) != 0:\n ### The following code is a little messy, its a lot of pandas dataframe manipulation.\n ### I have tried doing an overhaul wherein the clusters and cluster_members dataframe are created per\n ### velocity combination in the clusterVelocity function. However, this adds an overhead in that function\n ### of ~ 1ms. 
So clustering 90,000 velocities takes 90 seconds longer which on small datasets is problematic.\n ### On large datasets, the effect is not as pronounced because the below code takes a while to run due to\n ### in-memory pandas dataframe restructuring.\n\n # Make DataFrame with cluster velocities so we can figure out which\n # velocities yielded clusters, add names to index so we can enable the join\n cluster_velocities = pd.DataFrame({\"vtheta_x\": vxx, \"vtheta_y\": vyy})\n cluster_velocities.index.set_names(\"velocity_id\", inplace=True)\n\n # Split lists of cluster ids into one column per cluster for each different velocity\n # then stack the result\n possible_clusters = pd.DataFrame(\n possible_clusters[\"clusters\"].values.tolist(), index=possible_clusters.index\n )\n possible_clusters = pd.DataFrame(possible_clusters.stack())\n possible_clusters.rename(columns={0: \"obs_ids\"}, inplace=True)\n possible_clusters = pd.DataFrame(\n possible_clusters[\"obs_ids\"].values.tolist(), index=possible_clusters.index\n )\n\n # Drop duplicate clusters\n possible_clusters.drop_duplicates(inplace=True)\n\n # Set index names\n possible_clusters.index.set_names([\"velocity_id\", \"cluster_id\"], inplace=True)\n\n # Reset index\n possible_clusters.reset_index(\"cluster_id\", drop=True, inplace=True)\n possible_clusters[\"cluster_id\"] = [\n str(uuid.uuid4().hex) for i in range(len(possible_clusters))\n ]\n\n # Make clusters DataFrame\n clusters = possible_clusters.join(cluster_velocities)\n clusters.reset_index(drop=True, inplace=True)\n clusters = clusters[[\"cluster_id\", \"vtheta_x\", \"vtheta_y\"]]\n\n # Make cluster_members DataFrame\n cluster_members = possible_clusters.reset_index(drop=True).copy()\n cluster_members.index = cluster_members[\"cluster_id\"]\n cluster_members.drop(\"cluster_id\", axis=1, inplace=True)\n cluster_members = pd.DataFrame(cluster_members.stack())\n cluster_members.rename(columns={0: \"obs_id\"}, inplace=True)\n cluster_members.reset_index(inplace=True)\n cluster_members.drop(\"level_1\", axis=1, inplace=True)\n\n # Calculate arc length and add it to the clusters dataframe\n cluster_members_time = cluster_members.merge(\n observations[[\"obs_id\", \"mjd_utc\"]], on=\"obs_id\", how=\"left\"\n )\n clusters_time = (\n cluster_members_time.groupby(by=[\"cluster_id\"])[\"mjd_utc\"]\n .apply(lambda x: x.max() - x.min())\n .to_frame()\n )\n clusters_time.reset_index(inplace=True)\n clusters_time.rename(columns={\"mjd_utc\": \"arc_length\"}, inplace=True)\n clusters = clusters.merge(\n clusters_time[[\"cluster_id\", \"arc_length\"]],\n on=\"cluster_id\",\n how=\"left\",\n )\n\n else:\n cluster_members = pd.DataFrame(columns=[\"cluster_id\", \"obs_id\"])\n clusters = pd.DataFrame(\n columns=[\"cluster_id\", \"vtheta_x\", \"vtheta_y\", \"arc_length\"]\n )\n\n time_end_restr = time.time()\n logger.info(\n \"Restructuring completed in {:.3f} seconds.\".format(\n time_end_restr - time_start_restr\n )\n )\n logger.info(\"Found {} clusters.\".format(len(clusters)))\n logger.info(\n \"Clustering and restructuring completed in {:.3f} seconds.\".format(\n time_end_restr - time_start_cluster\n )\n )\n\n return clusters, cluster_members",
"def test_add_node_after_wide_mv_with_range_deletions(self):\n\n session = self.prepare()\n\n session.execute(\"CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n for i in range(10):\n for j in range(100):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0:\n session.execute(\"DELETE FROM t WHERE id = {} AND v >= {} and v < {}\".format(i, j, j + 2))\n\n self.cluster.flush()\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session, \"SELECT * FROM t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session, \"SELECT * FROM t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n node4 = new_node(self.cluster, data_center=\"dc1\")\n node4.set_configuration_options(values={'max_mutation_size_in_kb': 25}) # CASSANDRA-11670\n logger.debug(\"Start join at {}\".format(time.strftime(\"%H:%M:%S\")))\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.migration_task_wait_in_seconds={}\".format(MIGRATION_WAIT)])\n\n session2 = self.patient_exclusive_cql_connection(node4)\n\n for i in range(10):\n for j in range(100):\n if j % 10 == 0 or (j - 1) % 10 == 0:\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])\n\n for i in range(10):\n for j in range(100, 110):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=j))\n\n for i in range(10):\n for j in range(110):\n if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):\n assert_none(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j))\n assert_none(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j))\n else:\n assert_one(session2, \"SELECT * FROM ks.t WHERE id = {} and v = {}\".format(i, j), [i, j])\n assert_one(session2, \"SELECT * FROM ks.t_by_v WHERE id = {} and v = {}\".format(i, j), [j, i])",
"def _base_test_insert_during_range_movement(self, rf):\n\n session = self.prepare(rf=rf)\n\n logger.debug(\"Creating table and view\")\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n logger.debug(\"Starting new node4 in write survey mode\")\n node4 = new_node(self.cluster, data_center=\"dc1\")\n # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below\n node4.start(wait_for_binary_proto=True, jvm_args=[\"-Dcassandra.write_survey=true\",\n \"-Dcassandra.batchlog.replay_timeout_in_ms=1\"])\n\n logger.debug(\"Insert data while node4 is joining\")\n\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v) VALUES ({id}, {v})\".format(id=i, v=-i))\n\n logger.debug(\"Finish joining node4\")\n node4.nodetool(\"join\")\n\n logger.debug('Replay batchlogs')\n time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)\n self._replay_batchlogs()\n\n logger.debug(\"Verify data\")\n for i in range(1000):\n assert_one(session, \"SELECT * FROM t_by_v WHERE v = {}\".format(-i), [-i, i])",
"def handle_timer(self):\n\n for dest in self.hosts_to_unused_ports:\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if api.current_time() != host.time_to_live] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n #Send the reachable routes (must be less than infinity)\n for dest in self.hosts_to_ports:\n if self.hosts_to_ports[dest].latency < INFINITY: \n distance_vector = self.hosts_to_ports[dest] \n host_latency = distance_vector.latency\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, host_latency)\n self.send(packet, distance_vector.port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, distance_vector.port)",
"def reconfigure_ml2_vlan_range(self):\n self.check_run('reconfigure_ml2_vlan_range')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('neutron')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n service_name = 'neutron-server'\n uptimes = self.get_service_uptime(controllers, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n self.env.make_snapshot(\"reconfigure_ml2_vlan_range\", is_make=True)",
"def sync_all_teams_coverage():\n teams = Team.objects.all()\n\n for team in teams:\n identifier = team.identifier\n\n sync_team_coverage.apply_async(args=(identifier, ))\n sync_team_cluster_stats.apply_async(args=(identifier, ))\n sync_team_advisory_stats.apply_async(args=(identifier, ))",
"def update_vluln_table():",
"def test_ipam_vlan_groups_partial_update(self):\n pass",
"def loop_vzones_update(self, send, stop):\n self.log.info(u\"= = > Internal loop to keep sync _pzones and _vzones started for {0} vzones.\".format(len(self._vzones)))\n\twhile not stop.isSet():\n for zone in self._vzones:\n\t for cle in PZONE_TO_VZONE:\n self._vzones[zone][cle] = self._pzones[self._vzones[zone][\"childs\"][0]][cle]\n diffparams = [param for param in self._vzones[zone] if self._vzones[zone][param] != self._vzones_old[zone][param]]\n self._vzones_old[zone] = self._vzones[zone].copy()\n if diffparams:\n for i,elt in enumerate(diffparams):\n val = elt, self._vzones[zone][elt]\n self.log.info(u\"= = > '{0}' : {1} update of {2} with value {3}\".format(zone,self._vzones[zone]['name'],elt,self._vzones[zone][elt]))\n send(zone, val)\n\t stop.wait(1)\n self.close()",
"def put(self):\n try:\n with transaction.atomic():\n data_list = self.data_list\n data_return = []\n # telnet test\n pool = ThreadPool(device_views.CLI_THREADPOOL_SIZE)\n res_telnet = pool.map(self.telnet_status_check, data_list)\n pool.close()\n pool.join()\n for x in res_telnet:\n device_pre = DevicesTmp.objects.get(device_id=x[1])\n telnet_res = x[0]\n data = {\n 'telnet_status': telnet_res,\n }\n serializer = DevicesTmpSerializer(device_pre, data=data, partial=True)\n if serializer.is_valid(Exception):\n serializer.save()\n # snmp test\n pool = ThreadPool(device_views.CLI_THREADPOOL_SIZE)\n res_snmp = pool.map(self.snmp_status_check, data_list)\n pool.close()\n pool.join()\n\n for x in res_snmp:\n device_pre = DevicesTmp.objects.get(device_id=x[1])\n snmp_res = x[0]\n data = {\n 'snmp_status': snmp_res,\n }\n serializer = DevicesTmpSerializer(device_pre, data=data, partial=True)\n if serializer.is_valid(Exception):\n serializer.save()\n data_return.append(serializer.data)\n data = {\n 'data': data_return,\n 'new_token': self.new_token,\n constants.STATUS: {\n constants.STATUS: constants.TRUE,\n constants.MESSAGE: constants.SUCCESS\n },\n }\n return api_return(data=data)\n except Exception, e:\n transaction.rollback()\n print e\n raise e",
"def update_agent_location_vector(self):\n\n for agent in self.agents:\n location = agent.getz()\n # print(location)\n if location[0] == 0:\n vectorized_agent_loc = location[1]\n elif location[0] == 1:\n vectorized_agent_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_agent_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_agent_loc = 12 + location[1]\n\n if agent.isBusy == False:\n # remove any location if it shows it as well\n self.agent_locations[0][vectorized_agent_loc] = 0\n continue\n else:\n self.agent_locations[0][vectorized_agent_loc] = 1\n if self.DEBUG:\n print('agent location vector is ', self.agent_locations)",
"def mostly_sync_mempools(rpc_connections, difference=50, wait=1, verbose=1):\n iterations = 0\n while True:\n iterations += 1\n pool = set(rpc_connections[0].getrawmempool())\n num_match = 1\n poolLen = [len(pool)]\n for i in range(1, len(rpc_connections)):\n tmp = set(rpc_connections[i].getrawmempool())\n if tmp == pool:\n num_match = num_match + 1\n if iterations > 10 and len(tmp.symmetric_difference(pool)) < difference:\n num_match = num_match + 1\n poolLen.append(len(tmp))\n if verbose:\n logging.info(\"sync mempool: \" + str(poolLen))\n if num_match == len(rpc_connections):\n break\n time.sleep(wait)",
"def fusion_api_allocate_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.allocate(body, uri, api, headers)",
"def _populate_new_notifications_triplets_single_shard(self, virt_imei_range_start, virt_imei_range_end, executor):\n with create_db_connection(self._config.db_config) as conn, conn.cursor() as cursor, CodeProfiler() as cp:\n notifications_imeis_shard = \\\n partition_utils.imei_shard_name(base_name=self._notifications_imei_new_tblname,\n virt_imei_range_start=virt_imei_range_start,\n virt_imei_range_end=virt_imei_range_end)\n notifications_triplets_shard = \\\n partition_utils.imei_shard_name(base_name=self._notifications_triplets_new_tblname,\n virt_imei_range_start=virt_imei_range_start,\n virt_imei_range_end=virt_imei_range_end)\n pairing_list_shard = partition_utils.imei_shard_name(base_name='historic_pairing_list',\n virt_imei_range_start=virt_imei_range_start,\n virt_imei_range_end=virt_imei_range_end)\n\n # Note: IMSI can't be NULL in pairing list as it is constrained in table DDL\n #\n # 20170504: Re: discussion with Daniel it was decided to exclude triplets with a NULL IMSI or MSISDN\n # from notifications. Without an MSISDN there is no easy way for an operator to contact the\n # subscriber. Without an IMSI we can't determine accurately whether they were already paired.\n # We expect both NULL IMSI and MSISDN to be transient, weird events that do not consistently\n # happen for the same subscriber. Therefore they will be notified anyway based on the non-NULL\n # IMSI/MSISDN that is expected to be seen either on the same day or at some point during the\n # configured lookback window.\n query = sql.SQL(\n \"\"\"INSERT INTO {notifications_triplets_shard}(imei_norm,\n virt_imei_shard,\n imsi,\n msisdn,\n block_date,\n reasons,\n is_valid,\n amnesty_granted,\n imei_norm_with_check_digit,\n home_operator,\n fallback_operators)\n SELECT imei_norm,\n FIRST(network_triplets.virt_imei_shard),\n imsi,\n msisdn,\n FIRST(block_date),\n FIRST(reasons),\n FIRST(is_valid),\n FIRST(amnesty_granted),\n FIRST(imei_norm_with_check_digit),\n FIRST(home_network_tbl.operator_id) AS home_operator,\n array_agg(DISTINCT network_triplets.operator_id)\n filter(WHERE network_triplets.operator_id IS NOT NULL) AS fallback_operators\n FROM {notifications_imeis_shard}\n INNER JOIN monthly_network_triplets_per_mno network_triplets\n USING (imei_norm)\n LEFT JOIN LATERAL ({home_network_query}) home_network_tbl\n ON TRUE\n WHERE NOT EXISTS (SELECT 1\n FROM {pairing_list_shard}\n WHERE end_date IS NULL\n AND imei_norm = network_triplets.imei_norm\n AND imsi = network_triplets.imsi)\n AND imei_norm IS NOT NULL\n AND imsi IS NOT NULL\n AND msisdn IS NOT NULL\n AND last_seen >= %(lookback_start_date)s\n AND first_seen < %(lookback_end_date)s\n AND network_triplets.virt_imei_shard >= %(virt_imei_range_start)s\n AND network_triplets.virt_imei_shard < %(virt_imei_range_end)s\n GROUP BY imei_norm,\n imsi,\n msisdn\n \"\"\").format(notifications_triplets_shard=sql.Identifier(notifications_triplets_shard), # noqa: Q447\n notifications_imeis_shard=sql.Identifier(notifications_imeis_shard),\n pairing_list_shard=sql.Identifier(pairing_list_shard),\n home_network_query=self._home_network_query)\n\n lookback_end_date = compute_analysis_end_date(conn, self._curr_date)\n lookback_start_date = lookback_end_date - datetime.timedelta(days=self._lookback_days)\n logger = logging.getLogger('dirbs.listgen')\n log_analysis_window(logger, lookback_start_date, lookback_end_date,\n start_message='Notifications lists using lookback window')\n cursor.execute(query, {'lookback_start_date': lookback_start_date,\n 'lookback_end_date': 
lookback_end_date,\n 'virt_imei_range_start': virt_imei_range_start,\n 'virt_imei_range_end': virt_imei_range_end})\n num_records = cursor.rowcount\n self._add_pk(conn, tblname=notifications_triplets_shard, pk_columns=['imei_norm', 'imsi', 'msisdn'])\n\n return num_records, cp.duration",
"def del_all_reservations(self):\n\n # locks the self.current_reservations data structure. This is done\n # because there is a thread that could access it concurrently.\n with self.update_lock:\n pass\n # PART 1, TASK 4.2 remove all the reservations ",
"def deploy_routing_table(self):\n for subnet, entry in self.tbl.items():\n if entry.neighbor_port:\n self.deploy_flow_entry(subnet=subnet, outport=entry.receive_port, dstport=entry.neighbor_port)",
"def table_allocations(\n self, id_value, id_type, start_date=None, end_date=None, freq=None\n ):\n\n start_date, end_date, freq = self.get_time_parameters(\n start_date, end_date, freq\n )\n\n try:\n df = self.get_allocations(id_value, id_type, start_date, end_date, freq)\n\n if id_type == \"project\" and \"ALL\" not in str(id_value):\n # add the project's missing people allocation\n if freq == \"D\":\n df[\"UNALLOCATED\"] = self.wim.project_peoplereq[id_value]\n else:\n df[\"UNALLOCATED\"] = (\n self.wim.project_peoplereq[id_value].resample(freq).mean()\n )\n\n elif id_type == \"person\" and \"ALL\" not in str(id_value):\n # add the person's total project assignment to the data frame\n if freq == \"D\":\n df[\"TOTAL\"] = self.wim.people_totals[id_value]\n else:\n df[\"TOTAL\"] = self.wim.people_totals[id_value].resample(freq).mean()\n\n df = self.format_date_index(df, freq)\n\n return self.highlight_allocations(df)\n\n except ValueError as e:\n print(e)",
"def createGRSAZ(gwtable, inputsubnets, Routetargets):\n ec2 = boto3.client(\"ec2\")\n elb = boto3.client('elb')\n\n #clean the inputsubnets\n vpcid = elb.describe_load_balancers(LoadBalancerNames=[elbname])['LoadBalancerDescriptions'][0]['VPCId']\n subnetsvpc = ec2.describe_subnets(Filters=[{'Name': \"vpc-id\", 'Values': [vpcid]}])\n notrealsubnets = set(inputsubnets)-set([s['SubnetId'] for s in subnetsvpc['Subnets']])\n if len(notrealsubnets) > 0:\n print('the following are not real subnets in your VPC: ', notrealsubnets)\n cleaninputsubnets = list(set(inputsubnets) - notrealsubnets)\n\n #find all the routing tables already associated with any healthy gws and their associated subnets \n rt2 = ec2.describe_route_tables(Filters=[{'Name': 'association.subnet-id', 'Values': cleaninputsubnets}])\n #disassociate subnets from RTs if used by gateway ...later\n\n M = []\n for r in rt2['RouteTables']:\n if set(Routetargets) <= set([rr['DestinationCidrBlock'] for rr in r['Routes'] if 'InstanceId' in rr.keys() and rr['InstanceId'] in [g[0] for g in gwtable if g[1] == 'InService']]):\n for s in [ass for ass in r['Associations'] if ass['SubnetId'] in cleaninputsubnets]:\n goodinstance = [rr['InstanceId'] for rr in r['Routes'] if 'InstanceId' in rr.keys() and rr['InstanceId'] in [g[0] for g in gwtable if g[1] == 'InService']].pop()\n M.append(tuple([goodinstance,\n r['RouteTableId'],\n s['SubnetId'],\n 1]))\n\n # add route tables that have the routes but no live GWs with index 2....we'll reuse these RTs and routes\n elif set(Routetargets) <= set([rr['DestinationCidrBlock'] for rr in r['Routes']]):\n for s in r['Associations']:\n M.append(tuple(['NoGW',\n r['RouteTableId'],\n s['SubnetId'],\n 2]))\n\n #add new RTs for any subnets that are not in the table. mark the GWs as NoGW and index at 3 so that we know that we need to add new routes\n subnets1 = ec2.describe_subnets(Filters=[{'Name': \"subnet-id\", 'Values': list(set([m[2] for m in M]) | set(cleaninputsubnets))}])\n subnets2 = {s['SubnetId']: s for s in subnets1['Subnets']}\n for sub in cleaninputsubnets:\n if not (sub in [m[2] for m in M]):\n if subnets2[sub]['VpcId'] == vpcid:\n rass = []\n for rt in rt2['RouteTables']:\n for ass in rt['Associations']:\n if ass['SubnetId'] == sub:\n rass.append(ass['RouteTableAssociationId'])\n if len(rass) > 0:\n ec2.disassociate_route_table(AssociationId=rass.pop())\n print('removed RT association from subnet ', sub)\n RTforS = ec2.create_route_table(VpcId=vpcid)['RouteTable']['RouteTableId']\n ec2.associate_route_table(SubnetId=sub, RouteTableId=RTforS)\n print('created route table ', RTforS, ' and associated it with subnet ', sub)\n M.append(tuple(['NoGW', RTforS, sub, 3]))\n else:\n print('Subnet ', sub, ' is in VPC ', subnets2[sub]['VpcId'], ' which is not in the same vpc as your gateways: (', vpcid, '). Ignoring!')\n \n # Convert to a list and add AZ info into table\n MM = [list(n) for n in set(M)]\n for r in MM:\n r.insert(3, subnets2[r[2]]['AvailabilityZone'])\n\n return MM",
"def array_update(self, table_list):\r\n for tbl in table_list:\r\n x = kit.SQL_pull('name, subject_id', tbl)\r\n r = {i[0]: i[1] for i in x}\r\n h = {i[1]: tbl for i in x}\r\n \r\n self.refference.update(r)\r\n self.home_table.update(h)\r\n \r\n self.counts[tbl] = len(x)"
] |
[
"0.524511",
"0.5159683",
"0.5064488",
"0.5003374",
"0.4980253",
"0.4976541",
"0.49230257",
"0.49073362",
"0.4881262",
"0.486306",
"0.48597613",
"0.48488852",
"0.48237342",
"0.47735462",
"0.47349444",
"0.4712839",
"0.47052684",
"0.46999648",
"0.4630769",
"0.46169844",
"0.46065456",
"0.46051174",
"0.4585807",
"0.45850605",
"0.45788246",
"0.45783818",
"0.45652613",
"0.45648518",
"0.45502213",
"0.45331824"
] |
0.8001009
|
0
|
Initializing the form. We have to break down the skills list that was passed in so that we can indicate what the valid choices are.
|
def __init__(self, *args, **kwargs):
# do pop first, so the parent doesn't get unexpected arguments.
skills = kwargs.pop('skills')
super(CharacterSkillForm, self).__init__(*args, **kwargs)
self.fields['skills'].choices = \
[(s.id, s.skill.name)
for header in skills
for s in header.headerskill_set.all()]
self.fields['headers'].choices = \
[(h.id, h.name) for h in skills]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_initial(self):\n initial = super(SkillsView, self).get_initial()\n ai = get_ai(\n self.request.session.get('token', False),\n self.kwargs['aiid']\n )\n initial = {\n 'skills': ai['linked_bots']\n }\n return initial",
"def __init__(self,\n quiz_size_slug=Quiz.DEFAULT_QUIZ_SIZE_SLUG,\n *args, **kwargs):\n super(QuizForm, self).__init__(*args, **kwargs)\n quiz_json = QuizJson()\n question_count = Quiz.get_question_count_for_slug(quiz_size_slug)\n self.question_count = question_count\n\n for question_no in range(0, question_count):\n question_no_str = str(question_no)\n question_no_2_chars = question_no_str.zfill(2)\n question_key = 'question_' + question_no_2_chars\n form_question_no_str = str(question_no + 1)\n question_text = quiz_json.get_question_text(question_no)\n label = form_question_no_str + '. ' + question_text\n radio_widget = forms.RadioSelect(attrs={'class': 'quiz_answer'})\n choices = quiz_json.get_choices(question_no)\n self.fields[question_key] = forms.ChoiceField(\n widget=radio_widget, label=label, choices=choices\n )",
"def _create_and_initialise_fields(self):\n for team in self.category.breaking_teams.all():\n self.fields[self._fieldname_remark(team)] = OptionalChoiceField(choices=BreakingTeam.REMARK_CHOICES, required=False)\n try:\n self.initial[self._fieldname_remark(team)] = self._bt(team).remark\n except KeyError:\n self.initial[self._fieldname_remark(team)] = None",
"def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()",
"def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'team_name': 'Team name',\n 'planning_deadline': 'planning_deadline',\n 'coaching_rep': 'coaching_rep',\n 'min_lunchbreak': 'min_lunchbreak',\n 'min_dinnerbreak': 'min_dinnerbreak',\n 'min_paidbreak': 'min_paidbreak'\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False",
"def __init__(self, *args, **kwargs):\n user = kwargs.pop('user')\n super(ChooseAppointmentForm, self).__init__(*args, **kwargs)\n if(user.first_name=='patient'):\n self.appointments = user.patient_appointment.all()\n appointment_partner = 'doctor' # patient is partnered with a doctor and vice versa\n else:\n self.appointments = user.doctor_appointment.all()\n appointment_partner = 'patient'\n choices = []\n\n for i, appointment in enumerate(self.appointments):\n partner_first_name = appointment.associated_patient.patient_user_profile.first_name if (appointment_partner=='patient') else appointment.associated_doctor.doctor_user_profile.first_name\n partner_last_name = appointment.associated_patient.patient_user_profile.last_name if (appointment_partner=='patient') else appointment.associated_doctor.doctor_user_profile.last_name\n choices.append((appointment, 'Appointment: {}, on {}, at {} with {} {}'\n .format(appointment.title, appointment.date, appointment.time, partner_first_name, partner_last_name)))\n\n self.fields['appointments'] = forms.ChoiceField(label=\"\", choices=choices, widget=forms.RadioSelect)",
"def make_form(self):",
"def get_initial(self, **kwargs):\n\n if not self.initial:\n # Get an intent if not prepared yet (get_form_kwargs) is calling\n # get_initial we don't want to run it multiple times\n intent = get_intent(\n self.request.session.get('token', False),\n self.kwargs['aiid'],\n self.kwargs['intent_name']\n )\n\n # Prepare data for the form\n intent['webhook'] = '' if intent['webhook'] is None else intent['webhook']['endpoint']\n intent['responses'] = settings.TOKENFIELD_DELIMITER.join(intent['responses'])\n intent['user_says'] = settings.TOKENFIELD_DELIMITER.join(intent['user_says'])\n\n intent['context_in'] = [\n {'variable': key, 'value': value} for key, value in intent['context_in'].items()\n ]\n\n intent['context_out'] = [\n {'variable': key, 'value': value} for key, value in intent['context_out'].items()\n ]\n\n for entity in intent['variables']:\n entity['prompts'] = settings.TOKENFIELD_DELIMITER.join(\n entity['prompts']\n )\n # If field is -1 (limitless lifetime) we leave it empty in UI\n turns = entity.get('lifetime_turns', -1)\n entity['lifetime_turns'] = '' if turns == -1 else turns\n\n self.initial = intent\n\n return super(IntentsUpdateView, self).get_initial(**kwargs)",
"def _async_init_form(self) -> FlowResult:\n\n apps_list = {k: f\"{v} ({k})\" if v else k for k, v in self._apps.items()}\n apps = [SelectOptionDict(value=APPS_NEW_ID, label=\"Add new\")] + [\n SelectOptionDict(value=k, label=v) for k, v in apps_list.items()\n ]\n rules = [RULES_NEW_ID] + list(self._state_det_rules)\n options = self.options\n\n data_schema = vol.Schema(\n {\n vol.Optional(CONF_APPS): SelectSelector(\n SelectSelectorConfig(options=apps, mode=SelectSelectorMode.DROPDOWN)\n ),\n vol.Optional(\n CONF_GET_SOURCES,\n default=options.get(CONF_GET_SOURCES, DEFAULT_GET_SOURCES),\n ): bool,\n vol.Optional(\n CONF_EXCLUDE_UNNAMED_APPS,\n default=options.get(\n CONF_EXCLUDE_UNNAMED_APPS, DEFAULT_EXCLUDE_UNNAMED_APPS\n ),\n ): bool,\n vol.Optional(\n CONF_SCREENCAP,\n default=options.get(CONF_SCREENCAP, DEFAULT_SCREENCAP),\n ): bool,\n vol.Optional(\n CONF_TURN_OFF_COMMAND,\n description={\n \"suggested_value\": options.get(CONF_TURN_OFF_COMMAND, \"\")\n },\n ): str,\n vol.Optional(\n CONF_TURN_ON_COMMAND,\n description={\n \"suggested_value\": options.get(CONF_TURN_ON_COMMAND, \"\")\n },\n ): str,\n vol.Optional(CONF_STATE_DETECTION_RULES): SelectSelector(\n SelectSelectorConfig(\n options=rules, mode=SelectSelectorMode.DROPDOWN\n )\n ),\n }\n )\n\n return self.async_show_form(step_id=\"init\", data_schema=data_schema)",
"def __initSkills(self):\n skills = self.teamparser.getPlayerSkills()\n try:\n skills = skills[(self.team, self.position)] #initial skills\n except KeyError, err:\n skills = []\n raise TypeError, \"Invalid Team/Position: \" + self.team\n for skill in skills:\n skobj = pyBBSkill.BBSkill(skill, self.skillparser)\n self.skills.append(skobj)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'business_name': 'Please enter your business name',\n 'user_type': 'Please select the type of user',\n 'phone': 'Phone Number',\n 'postcode': 'Postcode',\n 'city': 'City',\n 'street_address': 'Street Address',\n 'street_address2': 'Street Address 2',\n 'county': 'County',\n 'country': 'Country'\n }\n\n # to force cursor to start in business name field\n self.fields['business_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = False\n self.fields[field].label = placeholder\n self.fields[field].widget.attrs['class'] = 'form-control'",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'role_name': 'Role name',\n 'role_color': 'role color',\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False",
"def __init__(self):\r\n self.skills = {}\r\n self.orderedSkills = []",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'shift_name': 'Shift name',\n 'min_agents': 'Minimum Number of Agents',\n 'shift_start': 'Start time',\n 'shift_end': 'End time',\n 'weekday_sunday': 'Sunday',\n 'weekday_monday': 'Monday',\n 'weekday_tuesday': 'Tuesday',\n 'weekday_wednesday': 'Wednesday',\n 'weekday_thursday': 'Thursday',\n 'weekday_friday': 'Friday',\n 'weekday_saturday': 'Saturday'\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False",
"def __init__(self, *args, **kwargs):\n super(AddEventForm, self).__init__(*args)\n\n if kwargs.get('current_user') is not None:\n self.fields['speakers'].initial = kwargs.get('current_user')\n\n self.fields['speakers'].label_from_instance = self.label_from_instance",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['first_name'].required = False\n self.fields['last_name'].required = False\n self.fields['institution'].required = False\n self.fields['institution_logo'].required = False\n self.fields['allow_notifications'].required = False",
"def initialize_survey(self, **kwargs):",
"def get_form(self, form_class):\n form = super(InvitationCreateView, self).get_form(form_class)\n form.fields[\"receiver\"].queryset = self.get_available_invitees()\n form.fields[\"team\"].queryset = self.get_available_teams()\n form.fields[\"team\"].empty_label = None\n return form",
"def create_dummy_form(title,text,fill_choice=[],choice_length=[]):\n # fill it with blank for dummy choices\n count=0\n choices=[]\n while count < 8:\n choices.append(None)\n count+=1\n \n # fill choices based on value on fill_choice\n for i in fill_choice:\n try :\n length = choice_length[i]\n except IndexError :\n length = 10\n choices[i] = create_random_string(length)\n\n dummy_form=CreatePollQuestion(\n {\"question_title\":title,\n \"question_text\" :text,\n \"choice_1\":choices[0],\n \"choice_2\":choices[1],\n \"choice_3\":choices[2],\n \"choice_4\":choices[3],\n \"choice_5\":choices[4],\n \"choice_6\":choices[5],\n \"choice_7\":choices[6],\n \"choice_8\":choices[7],\n })\n\n return dummy_form",
"def get_form(self):\n # setup request layer\n self.request = TestRequest()\n # get add view\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newSpeciesTraits\")\n # update the form once to initialise all widgets\n form.update()\n # go through all widgets on the form and update the request with default values\n data = {}\n for widget in chain(\n form.widgets.values(),\n # skip standard plone groups\n #chain.from_iterable(g.widgets.values() for g in form.groups),\n chain.from_iterable(g.widgets.values() for g in form.param_groups)):\n data[widget.name] = widget.value\n data.update({\n 'form.widgets.IDublinCore.title': u\"My ST Experiment\",\n 'form.widgets.IDublinCore.description': u'This is my experiment description',\n 'form.widgets.algorithm': [self.algorithm.UID()],\n 'form.widgets.formula': u'Z ~ X + Y',\n 'form.widgets.data_table': [unicode(self.traitsds.UID())]\n })\n self.request.form.update(data)\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newSpeciesTraits\")\n return form",
"def __init__(self, *args, **kwargs):\n if 'instance' in kwargs:\n initial = kwargs.setdefault('initial', {})\n # The widget for a ModelMultipleChoiceField expects a list of primary key for the selected data.\n initial['members'] = [\n t.pk for t in kwargs['instance'].recipient_set.all()\n ]\n\n forms.ModelForm.__init__(self, *args, **kwargs)",
"def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')",
"def individual_formfields():\n # Instantiate Consent Tracker\n consent = s3db.auth_Consent(processing_types = VOL_CONSENT_OPTIONS)\n\n formfields = [utable.first_name,\n utable.last_name,\n Field(\"addr_L3\",\n label = T(\"Location\"),\n requires = IS_IN_SET(districts_and_uk),\n ),\n Field(\"addr_street\",\n label = T(\"Street Address\"),\n ),\n Field(\"addr_postcode\",\n label = T(\"Postcode\"),\n ),\n Field(\"mobile\",\n label = T(\"Contact Number (Preferred)\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (T(\"Contact Number (Preferred)\"),\n T(\"Ideally a Mobile Number, so that we can send you Text Messages.\")),\n ),\n ),\n Field(\"home\",\n label = T(\"Contact Number (Secondary)\"),\n requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),\n ),\n utable.email,\n utable[passfield],\n # Password Verification Field\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n ),\n\n # Skills\n s3db.hrm_multi_skill_id(empty = False,\n label = T(\"Volunteer Offer\"),\n ),\n Field(\"skills_details\",\n label = T(\"Please specify details\"),\n ),\n Field(\"certificates\", \"list:string\",\n label = T(\"Qualifications\"),\n requires = IS_IN_SET(certificates, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"experience\",\n label = T(\"Skills and Experience\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Co-ordination, Event Management, PCV qualified.\")\n ),\n Field(\"resources\",\n label = T(\"Offers of Resources\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Minibus.\")\n ),\n Field(\"where_operate\", \"list:string\",\n label = T(\"Where would you be willing to volunteer?\"),\n requires = IS_IN_SET(districts, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"travel\", \"integer\",\n label = T(\"Willing to Travel?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"slots\", \"list:string\",\n label = T(\"Times\"),\n requires = IS_IN_SET(slots, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"significant_physical\", \"integer\",\n label = T(\"That require significant physical activity (including lifting and carrying) and may involve being outdoors (e.g. clean up of affected properties)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"some_physical\", \"integer\",\n label = T(\"That require some physical activity and may involve being outdoors (e.g. door knocking)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"little_physical\", \"integer\",\n label = T(\"That require little physical activity and are based indoors (e.g. 
preparing refreshments)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"health_details\",\n label = T(\"If you wish, you can give us some further information on any fitness, medical or mobility issues that might limit the kind of activities you are able to volunteer for; this will help us to suggest suitable opportunities for you\"),\n ),\n Field(\"faith_requirements\", \"integer\",\n label = T(\"Do you have any faith requirements that you would like help with if you are coming to Support Cumbria?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"faith_requirements_details\",\n label = T(\"If Yes please outline\"),\n ),\n Field(\"emergency_contact_name\",\n label = T(\"Contact Name\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"emergency_contact_number\",\n label = T(\"Contact Number\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n ),\n Field(\"emergency_contact_relationship\",\n label = T(\"Relationship\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"workplace\", \"integer\",\n label = T(\"Are you volunteering under your workplace volunteering scheme?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"workplace_details\",\n label = T(\"If yes please name your employer\"),\n ),\n Field(\"dbs\", \"integer\",\n label = T(\"Are you DBS checked?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n #Field(\"convictions\", \"integer\",\n # label = T(\"Do you have any unspent convictions?\"),\n # comment = T(\"Please tick 'Yes' if you have any convictions that are not yet spent under the Rehabilitation of Offenders Act 1974. The term 'convictions' is used to refer to any sentence or disposal issued by a court. If all your convictions are spent, you can tick 'No'. If you're not sure if your convictions are unspent or spent, you can use a tool available at www.disclosurecalculator.org.uk and read guidance at hub.unlock.org.uk/roa\"),\n # requires = IS_IN_SET({0: T(\"No\"),\n # 1: T(\"Yes\"),\n # }),\n # widget = lambda f, v: \\\n # SQLFORM.widgets.radio.widget(f, v,\n # style=\"divs\"),\n # ),\n # Consent (GDPR + FOC)\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n required_fields = [\"first_name\",\n \"last_name\",\n \"addr_L3\",\n \"addr_street\",\n \"addr_postcode\",\n \"mobile\",\n \"emergency_contact\",\n \"where_operate\",\n ]\n\n return formfields, required_fields",
"def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'company_name': 'Company name',\n 'street_address1': 'Street Address 1',\n 'street_address2': 'Street Address 2',\n 'country': 'Country or State',\n 'postcode': 'Postcode',\n 'town_or_city': 'Town or City',\n 'payment': 'Paid for number of months',\n 'setting_daystart': 'Hour when your day starts',\n 'setting_dayend': 'hour when your day ends'\n }\n\n for field in self.fields:\n if field != 'country':\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n if field == 'setting_daystart' or field == 'setting_dayend' or field == 'payment':\n self.fields[field].widget.attrs['class'] = 'width-numbers'\n else:\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = placeholder",
"def initcomponentform():\n form = ComponentsForm()\n form.name.value = []\n form.id.value = []\n form.currentstock.value = []\n form.reorderlevel.value = []\n form.unitprice.value = []\n form.supplier.value = []\n form.location.value = []\n form.datasheet.value = []\n return form",
"def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )",
"def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self"
] |
[
"0.6526162",
"0.63097054",
"0.58881307",
"0.58718127",
"0.584773",
"0.57678336",
"0.57535213",
"0.5733922",
"0.5721101",
"0.5705405",
"0.5692461",
"0.56911916",
"0.5627025",
"0.559385",
"0.5533806",
"0.55180943",
"0.55096644",
"0.5508936",
"0.5490525",
"0.54798377",
"0.54447186",
"0.5436644",
"0.54342115",
"0.5433006",
"0.5425599",
"0.53930885",
"0.53740203",
"0.53706735",
"0.5367072",
"0.53578484"
] |
0.76746976
|
0
|
The size of the world communicator.
|
def world_size(self):
return self._wsize
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def world_size() -> int:\n return dist.get_world_size() if dist.is_initialized() else 1",
"def get_world_size() -> int:\n return collective.get_world_size()",
"def size():\n return int(os.environ['WORLD_SIZE'])",
"def get_world_size():\n if not torch.distributed.is_available():\n return 1\n if not torch.distributed.is_initialized():\n return 1\n return torch.distributed.get_world_size()",
"def get_world_size(backend) -> int:\n if backend != 'mpi':\n return int(os.environ.get('WORLD_SIZE', 1))\n else:\n return int(os.environ.get('OMPI_COMM_WORLD_SIZE', 1))",
"def world_size(self):\n if self.data_section is None:\n return None\n attrs = self.data_section.attrs\n if bool(attrs)==False:\n return None\n return attrs.get('world_size', None)",
"def getSize(self):\n\n return self.size",
"def getSize(self):\r\n return self.size",
"def get_size(self):\n return self.size",
"def get_size(self):\n return self.size",
"def get_size(self):\n return self.size",
"def size(self) -> int:\n return self._status['party_size'][0]",
"def getSize(self):\n return self.size",
"def size(self):\n return self.size_number",
"def size(self):\r\n return self.info().size",
"def get_size(self):\r\n return self._size",
"def getSize(self):\n return self.__size",
"def get_size(self):\r\n return self.__size",
"def length(self):\n return self.size",
"def get_size(self):\r\n\r\n return self._size",
"def get_size(self):\n return self.__size",
"def get_size(self):\n return self.__size",
"def get_size(self):\n return self._size",
"def get_size(self):\n return self._size",
"def size(self):\n return self.properties.get('size')",
"def size(self):\n return self.size",
"def size(self):\n return self.size",
"def size(self):\n return self.size",
"def __get_size(self):\n return self.__size",
"def size(self):\n\t\treturn self._size"
] |
[
"0.8183484",
"0.8161533",
"0.7968778",
"0.7838551",
"0.7544292",
"0.75160927",
"0.7281078",
"0.72509223",
"0.7210917",
"0.7210917",
"0.7210917",
"0.7210812",
"0.72054684",
"0.7197497",
"0.71910495",
"0.71712273",
"0.7170887",
"0.7160263",
"0.7158934",
"0.71567965",
"0.7153045",
"0.7153045",
"0.715248",
"0.715248",
"0.7143407",
"0.7130119",
"0.7130119",
"0.7130119",
"0.7125097",
"0.7114506"
] |
0.81939703
|
0
|
The rank of this process in the world communicator.
|
def world_rank(self):
return self._wrank
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_rank(self) -> int:\n return dist.get_rank()",
"def rank(self):\n return self._rank",
"def rank(self):\n return self._rank",
"def rank(self):\n return self._rank",
"def rank(self):\n return self._rank",
"def rank(self):\n return self._rank",
"def getRank(self):\n return self._rank",
"def comm_rank(self):\n return self._rcomm",
"def rank(self):\n return self.lib.calculate_rank()",
"def get_rank(self):\n return self.__rank",
"def getRank(self):\n return self.rank",
"def rank(self) -> int:\n return self._rank",
"def rank(self):\n if self._rank is None:\n self._rank = self.prufer_rank()\n return self._rank",
"def mpi_rank():\n comm = mpi4py.MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n return rank",
"def get_rank(self):\n return self.rank",
"def getRank(self):\r\n return self.rank",
"def get_rank(self):\n return int(self._rank)",
"def get_rank(self) -> int:\r\n return self.rank",
"def get_rank(self):\r\n return self.rank",
"def get_rank():\n if not torch.distributed.is_available():\n return 0\n if not torch.distributed.is_initialized():\n return 0\n return torch.distributed.get_rank()",
"def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0",
"def mpi_rank(self):\n return 0",
"def get_rank(self):\n \n if self.rank == None:\n self.rank = self.main_ranker(self.string)\n \n return self.rank",
"def local_rank():\n return int(os.environ['LOCAL_RANK'])",
"def get_global_rank(backend) -> int:\n if backend != 'mpi':\n return int(os.environ.get('RANK', 0))\n else:\n return int(os.environ.get('OMPI_COMM_WORLD_RANK', 0))",
"def rank():\n return int(os.environ['RANK'])",
"def rank(self):\n return self.matrix().rank()",
"def spatial_rank(self) -> int:\n return self.shape.spatial.rank",
"def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()",
"def get_rank() -> int:\n return collective.get_rank()"
] |
[
"0.78776574",
"0.7876336",
"0.7876336",
"0.7876336",
"0.7876336",
"0.7876336",
"0.78501487",
"0.78450763",
"0.7805618",
"0.7785066",
"0.7780536",
"0.7763916",
"0.77480894",
"0.7724303",
"0.7718263",
"0.771391",
"0.76907665",
"0.7675124",
"0.76343757",
"0.76057",
"0.7599256",
"0.75333965",
"0.7483848",
"0.74757785",
"0.740707",
"0.7390237",
"0.7270975",
"0.713765",
"0.711248",
"0.7055807"
] |
0.801047
|
0
|
The number of process groups.
|
def ngroups(self):
return self._ngroups
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def num_node_groups(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"num_node_groups\")",
"def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")",
"def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")",
"def getNumGroups(self):\n return len(np.unique(self._group_index))",
"def numprocesses(self):\n info = self.info()\n return info['max_processes']",
"def getNumEnergyGroups(self):\n return self.lib.numGroups",
"def getNumGroups(self):\n return _libsbml.ListOfGroups_getNumGroups(self)",
"def processes(self):\n return self._getint('processes')",
"def group_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"group_count\")",
"def getNumGroups(self):\n return _libsbml.GroupsModelPlugin_getNumGroups(self)",
"def num_processes(self):\n return 1",
"def num_processes(self, new_value):",
"def queue_size(self):\n return len(self.groups)",
"def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers",
"def num_processes():\n return 1",
"def getNumMembers(self):\n return _libsbml.Group_getNumMembers(self)",
"def numRunningProcesses():\n try:\n proc = subprocess.run(\"ps -Af|grep -i \\\"online2-wav-nnet3-latgen-faster\\\"\", stdout=subprocess.PIPE, shell=True)\n np = (len(str(proc.stdout).split(\"\\\\n\")) - 3)\n if(np == None):\n np = 0\n return np\n except Exception as e:\n\t\t Tools.writeException(\"numRunningProcesses\", e)\n return -1",
"def group_size(self):\n return self._gsize",
"def getGroupSize(Id):\r\n return \"Number of groups\"",
"def num_output_group(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumOutputGroups(self.handle, ctypes.byref(out)))\n return out.value",
"def get_num_instances(self):\n return len( self.get_instances_ids() )",
"def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed",
"def get_ncores(self):\n return self._ncores",
"def get_numpins(self):\n return self.numpins",
"def get_n_workers(self):\n return self.df.worker.nunique()",
"def __len__(self):\n return len(self.group_list)",
"def num_pipes(self):\n return len(self._link_reg.pipe_names)",
"def number_of_nodes(self):\n return int(self._data['number_of_nodes'])",
"def number_of_launches(self):\n return self._number_of_launches",
"def get_num_of_containers(self):\n Container.num_of_cntnrs = len(Container.containers)\n return self.num_of_cntnrs"
] |
[
"0.81979567",
"0.7825811",
"0.7825811",
"0.7593607",
"0.74162036",
"0.74027264",
"0.73958987",
"0.729504",
"0.7193732",
"0.71907777",
"0.7045195",
"0.7004456",
"0.69894195",
"0.69244087",
"0.68316674",
"0.678754",
"0.67854625",
"0.67679214",
"0.66554826",
"0.6641968",
"0.6623282",
"0.646443",
"0.6421052",
"0.6419943",
"0.6410801",
"0.6405248",
"0.6388438",
"0.6371426",
"0.6361861",
"0.6348719"
] |
0.83494246
|
0
|
The size of the group containing this process.
|
def group_size(self):
return self._gsize
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def queue_size(self):\n return len(self.groups)",
"def group_size_dist(self):\n return self.group_sizes() / self.group_sizes().sum()",
"def getGroupSize(Id):\r\n return \"Number of groups\"",
"def size(self):\n return self.size_number",
"def group_sizes(self):\n return self.g_sizes",
"def Laplace_group_size(self):\n return self.Size_integ_group",
"def size(self) -> int:\n return self.stat().size",
"def get_size(self):\n return get_dir_size(self.run_dir)",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\n return self._size",
"def size(self):\r\n return self.info().size",
"def get_size(self):\r\n\r\n return self._size",
"def get_size(self):\n return self._size",
"def get_size(self):\n return self._size"
] |
[
"0.7482762",
"0.7292368",
"0.7252224",
"0.7235323",
"0.72331035",
"0.71471894",
"0.7062642",
"0.6934623",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68790656",
"0.68775505",
"0.6861347",
"0.6858437",
"0.6858437"
] |
0.8637419
|
0
|
Distribute indivisible blocks of items between groups. Given some contiguous blocks of items which cannot be subdivided, distribute these blocks to the specified number of groups in a way which minimizes the maximum total items given to any group. Optionally weight the blocks by a power of their size when computing the distribution.
|
def distribute_discrete(sizes, groups, pow=1.0):
chunks = np.array(sizes, dtype=np.int64)
weights = np.power(chunks.astype(np.float64), pow)
max_per_proc = float(distribute_partition(weights.astype(np.int64), groups))
target = np.sum(weights) / groups
dist = []
off = 0
curweight = 0.0
proc = 0
for cur in range(0, weights.shape[0]):
if curweight + weights[cur] > max_per_proc:
dist.append( (off, cur-off) )
over = curweight - target
curweight = weights[cur] + over
off = cur
proc += 1
else:
curweight += weights[cur]
dist.append( (off, weights.shape[0]-off) )
if len(dist) != groups:
raise RuntimeError("Number of distributed groups different than number requested")
return dist
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def distribute_uniform(totalsize, groups):\n ret = []\n for i in range(groups):\n myn = totalsize // groups\n off = 0\n leftover = totalsize % groups\n if ( i < leftover ):\n myn = myn + 1\n off = i * myn\n else:\n off = ((myn + 1) * leftover) + (myn * (i - leftover))\n ret.append( (off, myn) )\n return ret",
"def divide_into_groups(items, max_group_size):\n if max_group_size <= 0:\n raise ValueError('maximum group size must be greater than 0')\n\n items_copy = deepcopy(items)\n groups = []\n\n while len(items_copy) > 0:\n\n # size of current group is either max number of items or\n # number of remaining items, whichever is smaller\n group_size = min(len(items_copy), max_group_size)\n\n # save the items for the current group\n new_group = items_copy[:group_size]\n groups.append(new_group)\n\n # remove the items from the list\n del items_copy[:group_size]\n\n return groups",
"def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk",
"def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))",
"def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk",
"def split(\n items: typing.List[typing.Any],\n sizes: typing.List[float],\n random_state: int = 42,\n stratify: typing.Sequence[typing.Hashable] = None,\n group: typing.Sequence[typing.Hashable] = None,\n preserve: typing.Sequence[typing.Optional[int]] = None,\n) -> typing.Sequence[typing.Any]:\n splits: typing.List[typing.List[typing.Any]] = [[] for _ in range(len(sizes))]\n if group is None:\n group = list(range(len(items)))\n if stratify is None:\n stratify = [0] * len(items)\n if preserve is not None:\n assert len(items) == len(\n preserve\n ), \"When preserve is provided, it must be the same length as items.\"\n for item, preserveIdx in zip(items, preserve):\n if preserveIdx is not None:\n splits[preserveIdx].append(item)\n ideal_counts = [s * len(items) for s in sizes]\n items, stratify, group = [\n [\n entry\n for entry, preserveIdx in zip(current_list, preserve)\n if preserveIdx is None\n ]\n for current_list in [items, stratify, group]\n ]\n if len(items) == 0:\n # There's nothing left to split.\n return splits\n # Rebalance sizes so that we shuffle the remaining\n # items into the splits to try and match the originally\n # desired sizes.\n offsets = [\n max(target - len(split), 0) for split, target in zip(splits, ideal_counts)\n ]\n sizes = [offset / sum(offsets) for offset in offsets]\n assert (\n 0.99 < sum(sizes) < 1.01\n ), f\"The sizes must add up to 1.0 (they added up to {sum(sizes)}).\"\n assert len(group) == len(items), \"group must be the same length as the collection.\"\n assert len(stratify) == len(\n items\n ), \"stratify must be the same length as the collection.\"\n rng = np.random.default_rng(seed=random_state)\n grouped = [\n {**dict(zip([\"idxs\", \"stratifiers\"], zip(*grouper))), \"group\": g}\n for g, grouper in groupby_unsorted(\n list(zip(range(len(stratify)), stratify)),\n key=lambda v: typing.cast(typing.Sequence[typing.Hashable], group)[v[0]],\n )\n ]\n hashes = {\n h: list(g)\n for h, g in groupby_unsorted(\n grouped, key=lambda g: hash(tuple(set(g[\"stratifiers\"])))\n )\n }\n for subgroups in hashes.values():\n for a, u in zip(\n rng.choice(len(sizes), size=len(subgroups), p=sizes),\n subgroups,\n ):\n splits[a].extend(items[idx] for idx in u[\"idxs\"])\n return splits",
"def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk",
"async def split_large_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: # noqa: PLR2004\n if split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n elif len(group.items) >= 300: # noqa: PLR2004\n if split_by_attribute(dbsession, group, \"concepts\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"subjects\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"materials\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"techniques\"):\n splitting = True\n elif split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n await dbsession.commit()\n progress.stop()",
"def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size",
"def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))",
"def chunks(item_list, n_items):\n for i in range(0, len(item_list), n_items):\n yield item_list[i : i + n_items]",
"def split_chunk_iter(chunk, sizes, neighbors, rng=None):\n assert len(chunk) > len(sizes), f\"{len(chunk)} !> {len(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # start by drawing three random items\n splits = [[c] for c in rng.sample(list(chunk), len(sizes))]\n unused = set(chunk) - set(sum(splits, []))\n max_iters = max(sizes) * len(sizes) # worst case\n for j in range(max_iters):\n i = j % len(sizes)\n size = sizes[i]\n split = splits[i]\n if len(split) == size:\n continue\n # get all of the neighbors of the split\n candidates = set()\n for c in split:\n candidates |= neighbors[c]\n # filter to unused cubes\n candidates = candidates & unused\n if not candidates:\n return None\n # Pick a candidate at random and add it\n choice = rng.choice(list(candidates))\n split.append(choice)\n unused.remove(choice)\n return splits",
"def grouped(iterable, n):\n batch_window = [None for _ in range(n)]\n cur_size = 0\n for item in iterable:\n batch_window[cur_size] = item\n cur_size += 1\n if cur_size >= n:\n batched = batch_window[:]\n batch_window = [None for _ in range(n)]\n cur_size = 0\n yield batched",
"def partition_data(total_sample_size, num_groups):\n individual_sample_size = total_sample_size/num_groups\n indices = np.arange(0, total_sample_size, 1)\n groups = np.zeros(len(indices))\n for i in range(1, num_groups):\n groups[int(individual_sample_size*i):int(individual_sample_size*(i+1))] = i\n if total_sample_size % num_groups != 0:\n groups[individual_sample_size*num_groups:] = -1\n randoms = np.random.random_sample(len(groups))\n ordered_randoms_and_reordered_groups = np.array(sorted(zip(randoms, groups)))\n groups = ordered_randoms_and_reordered_groups[:, 1]\n return groups",
"def generate_block_sizes(n_features, blocks, shorten_last_block=False):\n # If blocks is an int, divide in blocks of equal size.\n if isinstance(blocks, int):\n if n_features % blocks != 0 and not shorten_last_block:\n raise ValueError('The parameter \"n_features\" must be '\n f'divisible by \"blocks\" ({blocks})')\n\n div, mod = divmod(n_features, blocks)\n blocks = [blocks] * div\n if mod != 0:\n blocks += [mod]\n elif n_features != sum(blocks):\n raise ValueError('The sum of the block sizes must be equal to '\n f'\"n_features\" ({n_features}).')\n\n return blocks",
"def fit(blocks, size):\r\n assert(len(blocks) > 0)\r\n assert(size >= min_width(blocks))\r\n if len(blocks) == 1:\r\n return [grules.EMPTY * i + grules.FILLED * blocks[0] + grules.EMPTY * (size - blocks[0] - i) \\\r\n for i in range(size - blocks[0] + 1)]\r\n else:\r\n return [grules.EMPTY * (i - blocks[0]) + grules.FILLED * blocks[0] + grules.EMPTY + f2 \\\r\n for i in range(blocks[0], size - min_width(blocks[1:])) \\\r\n for f2 in fit(blocks[1:], size - i - 1)]",
"def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]",
"def group_layers_with_balanced_memory(inputs: List[torch.nn.Module],\n num_groups: int,\n summary: Optional[OrderedDict]) -> Generator:\n\n class Group:\n def __init__(self) -> None:\n self.length = 0\n self.memory_mbytes = 0.0\n self.indices: List[int] = list()\n\n num_layers = len(inputs)\n summary_values = [] if summary is None else list(summary.values())\n\n # Recursive function that collects memory requirement of each layer\n def get_memory(modules: List[torch.nn.Module], module_memory: List[float]) -> None:\n for module in modules:\n submodules = list(module.children())\n has_children = len(submodules) > 0\n if has_children:\n get_memory(submodules, module_memory)\n else:\n layer_summary = summary_values.pop(0)\n module_memory.append(layer_summary.output_memory_megabytes)\n if not (layer_summary.n_params == sum([np.prod(p.size()) for p in module.parameters()])):\n raise ValueError(\"The summary does not match with the layer information.\")\n\n def find_available_group(group_id: int,\n groups: List[Group],\n mem_to_be_allocated: float,\n max_mem_allowed: float) -> int:\n \"\"\"Finds the next available group to store input layer which is represented\n in terms of its memory (`mem_to_be_allocated`). The algorithm assigns the object\n to the current group if it is empty or it has enough capacity to store the layer.\n Otherwise the rest of groups are enquired. If none of the groups accept the incoming layer,\n it is assigned to the group with lowest memory load. This approach find a comprimise between\n sequentiality of layers and memory load balance.\"\"\"\n\n num_groups = len(groups)\n available_groups = [True for _ in range(num_groups)]\n group_mems = [groups[g_id].memory_mbytes for g_id in range(num_groups)]\n lowest_mem_group_id = group_mems.index(min(group_mems))\n\n while (groups[group_id].length > 0) and \\\n (mem_to_be_allocated + groups[group_id].memory_mbytes > max_mem_allowed):\n available_groups[group_id] = False\n if not any(available_groups):\n group_id = lowest_mem_group_id\n break\n group_id = (group_id + 1) % num_groups\n\n return group_id\n\n # Recursively traverse through the input modules and collect the memory information.\n model_memory = list()\n for layer in inputs:\n layer_memory: List[float] = list()\n get_memory([layer], layer_memory)\n model_memory.append(sum(layer_memory))\n group_max_mem_capacity = sum(model_memory) / float(num_groups)\n\n # Groups input layers by balancing the memory load of each group/device\n group_id = 0\n groups = [Group() for _ in range(num_groups)]\n for block_id in range(num_layers):\n current_memory_mbytes = model_memory[block_id]\n group_id = find_available_group(group_id, groups, current_memory_mbytes, group_max_mem_capacity)\n groups[group_id].memory_mbytes += current_memory_mbytes\n groups[group_id].length += int(1)\n groups[group_id].indices.append(block_id)\n\n # Return the groupped layers through a generator\n for group in groups:\n yield [inputs[ii] for ii in range(num_layers) if ii in group.indices]",
"def splitInBlocks (l, n):\n k = len(l) / n\n r = len(l) % n\n\n i = 0\n blocks = []\n while i < len(l):\n if len(blocks)<r:\n blocks.append(l[i:i+k+1])\n i += k+1\n else:\n blocks.append(l[i:i+k])\n i += k\n\n return blocks",
"def block_group(inputs,\n filters,\n block_fn,\n blocks,\n strides,\n is_training,\n name,\n pruning_method='baseline',\n init_method='baseline',\n data_format='channels_first',\n end_sparsity=0.,\n weight_decay=0.):\n with tf.name_scope(name):\n end_point = 'block_group_projection_%s' % name\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n strides,\n use_projection=True,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n\n for n in range(1, blocks):\n with tf.name_scope('block_group_%d' % n):\n end_point = '%s_%d_1' % (name, n)\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n 1,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n\n return tf.identity(inputs, name)",
"def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)",
"def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group",
"def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")",
"def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1):\n layers = []\n for i in range(num_blocks):\n layers.append(ConditionalChannelWeighting(self.in_channels, stride=stride, reduce_ratio=reduce_ratio, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, with_cp=self.with_cp))\n return nn.Sequential(*layers)",
"def splitList(itms, numGr):\n\ttcount = len(itms)\n\tcItems = list(itms)\n\tsz = int(len(cItems) / numGr)\n\tgroups = list()\n\tcount = 0\n\tfor i in range(numGr):\n\t\tif (i == numGr - 1):\n\t\t\tcsz = tcount - count\n\t\telse:\n\t\t\tcsz = sz + randint(-2, 2)\n\t\t\tcount += csz\n\t\tgr = list()\n\t\tfor j in range(csz):\n\t\t\tit = selectRandomFromList(cItems)\n\t\t\tgr.append(it)\t\n\t\t\tcItems.remove(it)\t\n\t\tgroups.append(gr)\n\treturn groups",
"def better_grouper(inputs, n):\n iters = [iter(inputs)] * n\n return zip(*iters)",
"def itergroups(groups, num=3):\n sortedgroups = sorted(groups, key=santamin)\n\n for c in sortedgroups:\n if num == 1:\n yield (c,)\n else:\n others = (\n t for t in sortedgroups\n if not set.intersection(t, c)\n )\n for subgrp in itergroups(others, num=num - 1):\n yield (c,) + subgrp",
"def grouper(iterable, block_size, fillvalue=None) -> list:\n args = [iter(iterable)] * block_size\n return zip_longest(*args, fillvalue=fillvalue)",
"def partition_cr(n_sample, size, n_cpus):\n\n # divid the block by n_cpu partitions, with size n_sample0\n # if the divided chunk is smaller than the requested chunk n_sample\n # use the requested chunk size\n n_sample0 = int(math.ceil(np.sqrt(size*size/n_cpus/2.)))\n if (n_sample0 > n_sample):\n n_sample = n_sample0\n\n block_id=[]\n nbatch = 0\n nbatch1 = 0\n nbatch2 = 0\n e1 = 0\n while (e1 < size):\n s1 = int(n_sample*nbatch1)\n e1 = int(np.min([s1 + n_sample, size]))\n nbatch2 = nbatch1\n nbatch1 += 1\n e2 = 0\n while (e2 <size):\n s2 = int(n_sample*nbatch2)\n e2 = int(np.min([s2 + n_sample, size]))\n block_id += [(s1, e1, s2, e2)]\n nbatch2 += 1\n nbatch += 1\n\n return block_id, nbatch"
] |
[
"0.689245",
"0.61154896",
"0.5870437",
"0.5869391",
"0.58351797",
"0.58224726",
"0.57872975",
"0.57095844",
"0.5696942",
"0.56960577",
"0.56809574",
"0.5656975",
"0.5649476",
"0.5628034",
"0.5624336",
"0.56135494",
"0.56011015",
"0.55793",
"0.5571817",
"0.5562723",
"0.55538625",
"0.5550281",
"0.5545399",
"0.553153",
"0.55199844",
"0.5513893",
"0.5497602",
"0.5491738",
"0.5486519",
"0.5465722"
] |
0.7222635
|
0
|
Uniformly distribute items between groups. Given some number of items and some number of groups, distribute the items between groups in the most uniform way possible.
|
def distribute_uniform(totalsize, groups):
ret = []
for i in range(groups):
myn = totalsize // groups
off = 0
leftover = totalsize % groups
if ( i < leftover ):
myn = myn + 1
off = i * myn
else:
off = ((myn + 1) * leftover) + (myn * (i - leftover))
ret.append( (off, myn) )
return ret
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def distribute_discrete(sizes, groups, pow=1.0):\n chunks = np.array(sizes, dtype=np.int64)\n weights = np.power(chunks.astype(np.float64), pow)\n max_per_proc = float(distribute_partition(weights.astype(np.int64), groups))\n\n target = np.sum(weights) / groups\n\n dist = []\n\n off = 0\n curweight = 0.0\n proc = 0\n for cur in range(0, weights.shape[0]):\n if curweight + weights[cur] > max_per_proc:\n dist.append( (off, cur-off) )\n over = curweight - target\n curweight = weights[cur] + over\n off = cur\n proc += 1\n else:\n curweight += weights[cur]\n\n dist.append( (off, weights.shape[0]-off) )\n\n if len(dist) != groups:\n raise RuntimeError(\"Number of distributed groups different than number requested\")\n\n return dist",
"def splitList(itms, numGr):\n\ttcount = len(itms)\n\tcItems = list(itms)\n\tsz = int(len(cItems) / numGr)\n\tgroups = list()\n\tcount = 0\n\tfor i in range(numGr):\n\t\tif (i == numGr - 1):\n\t\t\tcsz = tcount - count\n\t\telse:\n\t\t\tcsz = sz + randint(-2, 2)\n\t\t\tcount += csz\n\t\tgr = list()\n\t\tfor j in range(csz):\n\t\t\tit = selectRandomFromList(cItems)\n\t\t\tgr.append(it)\t\n\t\t\tcItems.remove(it)\t\n\t\tgroups.append(gr)\n\treturn groups",
"def split(\n items: typing.List[typing.Any],\n sizes: typing.List[float],\n random_state: int = 42,\n stratify: typing.Sequence[typing.Hashable] = None,\n group: typing.Sequence[typing.Hashable] = None,\n preserve: typing.Sequence[typing.Optional[int]] = None,\n) -> typing.Sequence[typing.Any]:\n splits: typing.List[typing.List[typing.Any]] = [[] for _ in range(len(sizes))]\n if group is None:\n group = list(range(len(items)))\n if stratify is None:\n stratify = [0] * len(items)\n if preserve is not None:\n assert len(items) == len(\n preserve\n ), \"When preserve is provided, it must be the same length as items.\"\n for item, preserveIdx in zip(items, preserve):\n if preserveIdx is not None:\n splits[preserveIdx].append(item)\n ideal_counts = [s * len(items) for s in sizes]\n items, stratify, group = [\n [\n entry\n for entry, preserveIdx in zip(current_list, preserve)\n if preserveIdx is None\n ]\n for current_list in [items, stratify, group]\n ]\n if len(items) == 0:\n # There's nothing left to split.\n return splits\n # Rebalance sizes so that we shuffle the remaining\n # items into the splits to try and match the originally\n # desired sizes.\n offsets = [\n max(target - len(split), 0) for split, target in zip(splits, ideal_counts)\n ]\n sizes = [offset / sum(offsets) for offset in offsets]\n assert (\n 0.99 < sum(sizes) < 1.01\n ), f\"The sizes must add up to 1.0 (they added up to {sum(sizes)}).\"\n assert len(group) == len(items), \"group must be the same length as the collection.\"\n assert len(stratify) == len(\n items\n ), \"stratify must be the same length as the collection.\"\n rng = np.random.default_rng(seed=random_state)\n grouped = [\n {**dict(zip([\"idxs\", \"stratifiers\"], zip(*grouper))), \"group\": g}\n for g, grouper in groupby_unsorted(\n list(zip(range(len(stratify)), stratify)),\n key=lambda v: typing.cast(typing.Sequence[typing.Hashable], group)[v[0]],\n )\n ]\n hashes = {\n h: list(g)\n for h, g in groupby_unsorted(\n grouped, key=lambda g: hash(tuple(set(g[\"stratifiers\"])))\n )\n }\n for subgroups in hashes.values():\n for a, u in zip(\n rng.choice(len(sizes), size=len(subgroups), p=sizes),\n subgroups,\n ):\n splits[a].extend(items[idx] for idx in u[\"idxs\"])\n return splits",
"def divide_into_groups(items, max_group_size):\n if max_group_size <= 0:\n raise ValueError('maximum group size must be greater than 0')\n\n items_copy = deepcopy(items)\n groups = []\n\n while len(items_copy) > 0:\n\n # size of current group is either max number of items or\n # number of remaining items, whichever is smaller\n group_size = min(len(items_copy), max_group_size)\n\n # save the items for the current group\n new_group = items_copy[:group_size]\n groups.append(new_group)\n\n # remove the items from the list\n del items_copy[:group_size]\n\n return groups",
"def partition_data(total_sample_size, num_groups):\n individual_sample_size = total_sample_size/num_groups\n indices = np.arange(0, total_sample_size, 1)\n groups = np.zeros(len(indices))\n for i in range(1, num_groups):\n groups[int(individual_sample_size*i):int(individual_sample_size*(i+1))] = i\n if total_sample_size % num_groups != 0:\n groups[individual_sample_size*num_groups:] = -1\n randoms = np.random.random_sample(len(groups))\n ordered_randoms_and_reordered_groups = np.array(sorted(zip(randoms, groups)))\n groups = ordered_randoms_and_reordered_groups[:, 1]\n return groups",
"def bootstrap_group(nsubj, ngroups):\n groupsize = nsubj\n samples = [(groupsize * np.random.rand(groupsize)).astype(np.int_)\n for i in range(ngroups)]\n return samples",
"def better_grouper(inputs, n):\n iters = [iter(inputs)] * n\n return zip(*iters)",
"def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):\n groups = sorted(set(group_id for item_id, group_id in items_and_groups))\n rng = np.random.RandomState(split_number)\n rng.shuffle(groups)\n\n split_boundaries = _compute_split_boundaries(split_probs, len(groups))\n group_id_to_split = {}\n for split_name, i_start, i_end in split_boundaries:\n for i in range(i_start, i_end):\n group_id_to_split[groups[i]] = split_name\n\n split_to_ids = collections.defaultdict(set)\n for item_id, group_id in items_and_groups:\n split = group_id_to_split[group_id]\n split_to_ids[split].add(item_id)\n\n return split_to_ids",
"def sort_and_distribute(array, splits=2):\n if not isinstance(array, (list,tuple)): raise TypeError(\"array must be a list\")\n if not isinstance(splits, int): raise TypeError(\"splits must be an integer\")\n remaining = sorted(array)\n if sys.version_info < (3, 0):\n myrange = xrange(splits)\n else:\n myrange = range(splits)\n groups = [[] for i in myrange]\n while len(remaining) > 0:\n for i in myrange:\n if len(remaining) > 0: groups[i].append(remaining.pop(0))\n return groups",
"def distribute_sampling(numSamples, localDevices=None, numChainsPerDevice=1):\n\n global globNumSamples\n\n # Determine number of samples per process\n samplesPerProcess = numSamples // commSize\n\n if rank < numSamples % commSize:\n samplesPerProcess += 1\n\n if localDevices is None:\n\n globNumSamples = numSamples\n\n return samplesPerProcess\n\n numChainsPerProcess = localDevices * numChainsPerDevice\n\n def spc(spp):\n return (spp + numChainsPerProcess - 1) // numChainsPerProcess\n\n a = numSamples % commSize\n globNumSamples = (a * spc(1 + numSamples // commSize) + (commSize - a) * spc(numSamples // commSize)) * numChainsPerProcess\n\n return spc(samplesPerProcess)",
"def naive_grouper(inputs, n):\n num_groups = len(inputs) // n\n return [tuple(inputs[i*n:(i+1)*n]) for i in range(num_groups)]",
"def generate(group, number, n):\n return [get_group(group, number) for i in xrange(n)]",
"def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def grouper(n, iterable, fillvalue=None):\n from itertools import zip_longest\n args = [iter(iterable)] * n\n return zip_longest(fillvalue=fillvalue, *args)",
"def split_group(nsubj, ngroups):\n groupsize = int(np.floor(nsubj / ngroups))\n rperm = np.argsort(np.random.rand(nsubj))\n samples = [rperm[i * groupsize: (i + 1) * groupsize]\n for i in range(ngroups)]\n return samples",
"def __call__(self, p, size=1):\n\n # Get a uniform distribution over the elements to fill each group.\n uni_gen = MappingDiscreteUniformDistributionGenerator(*self.args)\n\n # Draw the sizes for each group\n group_sizes = numpy.random.geometric(p, size)\n\n # Using the sizes draw element to fill groups up to the right size\n results = [\n uni_gen(group_sizes[i]) for i in nanshe.util.iters.irange(size)\n ]\n\n return(results)",
"def itergroups(groups, num=3):\n sortedgroups = sorted(groups, key=santamin)\n\n for c in sortedgroups:\n if num == 1:\n yield (c,)\n else:\n others = (\n t for t in sortedgroups\n if not set.intersection(t, c)\n )\n for subgrp in itergroups(others, num=num - 1):\n yield (c,) + subgrp",
"def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk",
"def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk",
"def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)",
"def distribute(\n self, direction=\"x\", spacing=100, separation=True, edge=\"center\"\n ) -> \"Group\":\n _distribute(\n elements=self.elements,\n direction=direction,\n spacing=spacing,\n separation=separation,\n edge=edge,\n )\n return self",
"def uniform_split(self, nr_agents):\n indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)",
"def group_norm(input, num_groups, weight, bias, eps=1e-5):\n return FunctionLib.apply(\n 'GroupNorm', input.device, [input, weight, bias],\n axis=1, group=num_groups, epsilon=eps)",
"def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)",
"def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)",
"def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(fillvalue=fillvalue, *args)",
"def group(n, iterable, fill_value = None):\n args = [iter(iterable)] * n\n return izip_longest(fillvalue = fill_value, *args)",
"def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk",
"async def generate_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n item_stmt = select(Item).filter(Item.group_id is None)\n count_stmt = select(func.count(Item.id)).filter(Item.group_id is None)\n count = await dbsession.execute(count_stmt)\n result = await dbsession.execute(item_stmt)\n categories = []\n with click.progressbar(\n result.scalars(), length=count.scalar_one(), label=\"Generating potential groups\"\n ) as progress:\n for item in progress:\n for category in item.attributes[\"_categories\"]:\n categories.append(category.lower())\n counts = [(cat, count) for cat, count in Counter(categories).most_common() if count >= 15] # noqa: PLR2004\n counts.sort(key=lambda c: c[1])\n max_groups = len(counts)\n with click.progressbar(length=max_groups, label=\"Generating groups\") as progress:\n while counts:\n category = counts[0][0]\n group_stmt = select(Group).filter(Group.value == category)\n result = await dbsession.execute(group_stmt)\n group = result.scalars().first()\n if group is None:\n group = Group(value=category, label=category[0].upper() + category[1:], split=\"basic\")\n dbsession.add(group)\n result = await dbsession.execute(item_stmt)\n for item in result.scalars():\n if category in item.attributes[\"_categories\"]:\n item.group = group\n await dbsession.commit()\n categories = []\n result = await dbsession.execute(item_stmt)\n for item in result.scalars():\n for category in item.attributes[\"_categories\"]:\n categories.append(category.lower())\n old_counts = len(counts)\n counts = [\n (cat, count) for cat, count in Counter(categories).most_common() if count >= 15 # noqa: PLR2004\n ]\n counts.sort(key=lambda c: c[1])\n progress.update(old_counts - len(counts))\n await dbsession.commit()",
"def chunks(item_list, n_items):\n for i in range(0, len(item_list), n_items):\n yield item_list[i : i + n_items]"
] |
[
"0.72016066",
"0.64487475",
"0.62581235",
"0.622412",
"0.61074805",
"0.58833826",
"0.5873367",
"0.58550215",
"0.57324255",
"0.57185954",
"0.5712346",
"0.5675752",
"0.5650044",
"0.5643228",
"0.5635769",
"0.5634285",
"0.562649",
"0.5610099",
"0.5546628",
"0.554625",
"0.5526702",
"0.5517434",
"0.5512002",
"0.5509805",
"0.5509805",
"0.55064386",
"0.5505194",
"0.5504798",
"0.5491463",
"0.54869545"
] |
0.798673
|
0
|
The toast.Comm over which the data is distributed.
|
def comm(self):
return self._comm
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_comm(self):\n return self.comm",
"def get_comm() -> Communication:\n return __default_comm",
"def GetComm(self):\n return _hypre.HypreParVector_GetComm(self)",
"def object_communicator():\n comm = MPI.COMM_WORLD",
"def GetComm(self):\n return _hypre.HypreParMatrix_GetComm(self)",
"def comm_port(self):\r\n return self._comm_port",
"def _get_toast(self):\n\n pass",
"def use_comm(comm: Communication = None):\n global __default_comm\n __default_comm = sanitize_comm(comm)",
"def receive( self, ): # combine with task?\r\n # for 232: breaks simulator\r\n\r\n data = self.com_driver.getRecString( )\r\n # below but did not fix\r\n# if data == \"\":\r\n# pass\r\n# else:\r\n# #self.gui.print_rec_string( data ) # this post could be a problem, lets put back in helper ??\r\n\r\n # this seems to be in gui, which may be right\r\n # if ( self.comm_log_file is not None ) and ( data not == \"\" ):\r\n # self.comm_log_file.write( data )\r\n return data",
"def serial(self):",
"def communicate():\n pass",
"def connect(self, mach) -> channel.Channel:\n self.console_uart = self.servo_get_tty()\n return mach.open_channel(\"picocom\", \"-q\", \"-b\", \"115200\",\n self.console_uart)",
"def COM(cmd,data): #Status: WIP\r\n #Desc CMD Target Address\r\n if cmd == 'U': #get update U\r\n parseLocal(data)\r\n if cmd == 'T':\r\n setETM(data)\r\n# rpc(addr,getLocals,addr, lTime, lSped, lLoca, lStat)\r\n elif cmd == 'M': #merge M\r\n setETM(data)\r\n merge()\r\n elif cmd == 'E': #help E multicasted\r\n setStatus(data)\r\n emergency()\r\n elif cmd == 'D':\r\n getDest()",
"def sendTemperature(self):\n if len(self.controller.myContainer.temperature) != 0:\n temp = sum(self.controller.myContainer.temperature) / len(self.controller.myContainer.temperature)\n else:\n temp = 0\n payload = ('{\"ts\": '+ str(int(time())) + ', \"temp\":' + str(temp) +\n '\"data\": { \"status\": ' + str(self.controller.status) + ', \"setpoint\": '+ str(self.controller.setpoint) + ' }}' )\n res, self.midTemp = self.client.publish(self.pubTemp, payload, qos=1, retain=False)\n if debug: print(\"Sent: \", payload , \"on\", self.pubTemp, \"mid: \", self.midTemp)\n self.controller.myContainer.resetTempAccumulators()\n\n filename = self.pubTemp.replace(\"/\", \"-\") + \".txt\"\n if self.storeTempLocal:\n f = open(filename, 'a+')\n f.write(self.lastTempPayload+\"\\n\")\n f.close()\n self.storeLocalTemp = True\n self.lastTempPayload = payload",
"def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)",
"def init_communications(self):\n from os.path import exists\n from serial import Serial\n import serial.tools.list_ports\n if self.ser is not None:\n try:\n info(\"Checking whether device is still responsive...\")\n self.ser.write(self.id_query)\n debug(\"%s: Sent %r\" % (self.ser.name,self.id_query))\n reply = self.read(count=self.id_reply_length)\n if not self.id_reply_valid(reply):\n debug(\"%s: %r: invalid reply %r\" % (self.ser.name,self.id_query,reply))\n info(\"%s: lost connection\" % self.ser.name)\n self.ser = None\n else: info(\"Device is still responsive.\")\n except Exception as msg:\n debug(\"%s: %s\" % (Exception,msg))\n self.ser = None\n\n if self.ser is None:\n devices = serial.tools.list_ports.comports()\n debug('devices: %r' % devices)\n for item in devices:\n debug('device: %r' % item)\n try:\n ser = Serial(item.device,baudrate=self.baudrate)\n ser.write(self.id_query)\n debug(\"%s: Sent %r\" % (ser.name,self.id_query))\n reply = self.read(count=self.id_reply_length,ser=ser)\n if self.id_reply_valid(reply):\n self.ser = ser\n info(\"Discovered device at %s based on reply %r\" % (self.ser.name,reply))\n break\n except Exception as msg:\n debug(\"%s: %s\" % (Exception,msg))\n if self.ser is not None: break",
"def onConnecting(self, transport_details):",
"def meta_trader_connector():\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(SOCKET_LOCAL_HOST)\n return socket",
"def do_local(self, *args):\n return self.do_scpi(':communicate:remote 0')",
"def import_measurements():\n\n print('Receive a transfer...')",
"def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r",
"def communicate(self, communicate_func=uniform_com_func):\n return communicate_func(self)",
"def init(self, msg_in = None, client = None):\r\n self.name = 'Oasis_DL'\r\n\r\n self.circular_buffers[b'act_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')\r\n self.circular_buffers[b'cmd_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')\r\n self.circular_buffers[b'fault'] = CBServer(size = (2,10000), var_type = 'float64')\r\n\r\n self.description = ''\r\n\r\n self.task_dictionary[0] = {b'function':driver.get_actual_temperature,b'name':b'act_temperature'}\r\n self.task_dictionary[1] = {b'function':driver.set_target_temperature,b'name':b'cmd_temperature'}\r\n self.task_dictionary[2] = {b'function':driver.get_faults,b'name':b'fault'}\r\n \r\n\r\n\r\n self.task_dictionary[10] = {b'function':driver.set_lower_limit,b'name':b'set_lower_limit'}\r\n self.task_dictionary[11] = {b'function':driver.get_lower_limit,b'name':b'get_lower_limit'}\r\n self.task_dictionary[12] = {b'function':driver.set_upper_limit,b'name':b'set_upper_limit'}\r\n self.task_dictionary[13] = {b'function':driver.get_upper_limit,b'name':b'get_upper_limit'}\r\n\r\n flag = False\r\n message = None\r\n err = ''\r\n flag, message, err = driver.init(), '', ''\r\n if flag:\r\n self.lower_limit = driver.device_dict[b'lower_limit']\r\n self.upper_limit = driver.device_dict[b'upper_limit']\r\n\r\n response = {}\r\n response[b'flag'] = flag\r\n response[b'message'] = message\r\n response[b'error'] = err\r\n return response",
"def __init__(self, commu_ip=\"127.0.0.1\", commu_port=6019, commu_volume=10, debug_handler=None):\n # type: (str, int, int, DebugHandler) -> None\n self.cumhelper = CUMHelper(commu_ip, commu_port)\n self.cumhelper.chvolume(commu_volume)\n self.debug_handler = debug_handler\n\n rospy.loginfo(\"CommUWrapper instance created.\")",
"def write_command(serial, comm, verbose = False, dt = None):\r\n if verbose and comm != \"\":\r\n if dt is None:\r\n print(\"{} \\t\\t-> {}\".format(comm, serial.port))\r\n else:\r\n print(\"{} \\t\\t-> {} at {:2.3f} ms\".format(comm, serial.port, dt))\r\n serial.write(comm.encode())",
"def send_robot_cmd(self, command, *args):\n \n \n if self.robot_commander is None:\n self.start_robot_publisher()\n time.sleep(.5)\n\n # choose which platform\n #if GlobalSettings.USE_TEGA:\n msg = TegaBehaviors.get_msg_from_behavior(command, args)\n #else:\n # msg = JiboBehaviors.get_msg_from_behavior(command, args)\n\n # add header\n self.robot_commander.publish(msg) # would be nice to guarantee message performance here\n #rospy.loginfo(msg)",
"def parent(self):\n return self._mpis.comm",
"def _send_message(self, *args, **kwargs):\n with self.comm_lock:\n return super(FrontendComm, self)._send_message(*args, **kwargs)",
"def send_serial_command(data):\n print(data)\n serial_command = data\n SERIAL_PARENT.send(serial_command)\n OUTGOING.append(serial_command)",
"def device(self):\n return self.broker.device(**{\"JobDetailID\": self.JobDetailID})"
] |
[
"0.61659884",
"0.6069794",
"0.5846124",
"0.5728337",
"0.5702727",
"0.5535399",
"0.53622645",
"0.5292387",
"0.51455116",
"0.5064816",
"0.5057292",
"0.49910793",
"0.49418244",
"0.49387032",
"0.49278933",
"0.4907674",
"0.49075994",
"0.4895052",
"0.48310533",
"0.48227745",
"0.48221517",
"0.47922882",
"0.47912186",
"0.47821826",
"0.47504163",
"0.47475272",
"0.47364154",
"0.47316435",
"0.471351",
"0.47104776"
] |
0.612058
|
1
|
Print information about the distributed data to the specified file handle. Only the rank 0 process writes.
|
def info(self, handle):
# Each process group gathers their output
groupstr = ""
procstr = ""
gcomm = self._comm.comm_group
wcomm = self._comm.comm_world
rcomm = self._comm.comm_rank
if wcomm.rank == 0:
handle.write("Data distributed over {} processes in {} groups\n".format(self._comm.world_size, self._comm.ngroups))
for ob in self.obs:
id = ob['id']
tod = ob['tod']
base = ob['baselines']
nse = ob['noise']
intrvl = ob['intervals']
if gcomm.rank == 0:
groupstr = "observation {}:\n".format(id)
groupstr = "{} {} total samples, {} detectors\n".format(groupstr, tod.total_samples, len(tod.detectors))
if intrvl is not None:
groupstr = "{} {} intervals:\n".format(groupstr, len(intrvl))
for it in intrvl:
groupstr = "{} {} --> {} ({} --> {})\n".format(groupstr, it.first, it.last, it.start, it.stop)
# rank zero of the group will print general information,
# and each process will get its statistics.
nsamp = tod.local_samples[1]
dets = tod.local_dets
procstr = " proc {}\n".format(gcomm.rank)
my_chunks = 1
if tod.local_chunks is not None:
my_chunks = tod.local_chunks[1]
procstr = "{} sample range {} --> {} in {} chunks:\n".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)
if tod.local_chunks is not None:
chkoff = tod.local_samples[0]
for chk in range(tod.local_chunks[1]):
abschk = tod.local_chunks[0] + chk
chkstart = chkoff
chkstop = chkstart + tod.total_chunks[abschk] - 1
procstr = "{} {} --> {}\n".format(procstr, chkstart, chkstop)
chkoff += tod.total_chunks[abschk]
if nsamp > 0:
stamps = tod.read_times(local_start=0, n=nsamp)
procstr = "{} timestamps {} --> {}\n".format(procstr, stamps[0], stamps[-1])
for dt in dets:
procstr = "{} det {}:\n".format(procstr, dt)
pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)
procstr = "{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\n".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])
data = tod.read(detector=dt, local_start=0, n=nsamp)
flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)
procstr = "{} {:.3e} ({}) --> {:.3e} ({})\n".format(procstr, data[0], flags[0], data[-1], flags[-1])
good = np.where((flags | common) == 0)[0]
procstr = "{} {} good samples\n".format(procstr, len(good))
min = np.min(data[good])
max = np.max(data[good])
mean = np.mean(data[good])
rms = np.std(data[good])
procstr = "{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\n".format(procstr, min, max, mean, rms)
for cname in tod.cache.keys():
procstr = "{} cache {}:\n".format(procstr, cname)
ref = tod.cache.reference(cname)
min = np.min(ref)
max = np.max(ref)
mean = np.mean(ref)
rms = np.std(ref)
procstr = "{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\n".format(procstr, min, max, mean, rms)
recvstr = ""
if gcomm.rank == 0:
groupstr = "{}{}".format(groupstr, procstr)
for p in range(1, gcomm.size):
if gcomm.rank == 0:
recvstr = gcomm.recv(source=p, tag=p)
groupstr = "{}{}".format(groupstr, recvstr)
elif p == gcomm.rank:
gcomm.send(procstr, dest=0, tag=p)
gcomm.barrier()
# the world rank 0 process collects output from all groups and
# writes to the handle
recvgrp = ""
if wcomm.rank == 0:
handle.write(groupstr)
for g in range(1, self._comm.ngroups):
if wcomm.rank == 0:
recvgrp = rcomm.recv(source=g, tag=g)
handle.write(recvgrp)
elif g == self._comm.group:
if gcomm.rank == 0:
rcomm.send(groupstr, dest=0, tag=g)
wcomm.barrier()
return
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_data():",
"def format(self, f):\n INFO = self.logger.info\n\n f.write(\"# Greenplum Database identifier for this master/segment.\\n\")\n f.write(\"# Do not change the contents of this file.\\n\")\n f.write('dbid = %d\\n' % self.dbid)\n INFO('wrote dbid: %d' % self.dbid)\n\n if self.standby_dbid:\n f.write('standby_dbid = %d\\n' % self.standby_dbid)\n INFO('wrote standby_dbid: %d' % self.standby_dbid)",
"def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):\r\n suffix = '' # for *s after the p-value\r\n try:\r\n x = prob.shape\r\n prob = prob[0]\r\n except:\r\n pass\r\n if prob < 0.001: suffix = ' ***'\r\n elif prob < 0.01: suffix = ' **'\r\n elif prob < 0.05: suffix = ' *'\r\n title = [['Name','N','Mean','SD','Min','Max']]\r\n lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],\r\n [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]\r\n if type(fname)<>StringType or len(fname)==0:\r\n print\r\n print statname\r\n print\r\n pstats.printcc(lofl)\r\n print\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix\r\n print\r\n else:\r\n file = open(fname,writemode)\r\n file.write('\\n'+statname+'\\n\\n')\r\n file.close()\r\n writecc(lofl,fname,'a')\r\n file = open(fname,'a')\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n file.write(pstats.list2string(['\\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\\n\\n']))\r\n file.close()\r\n return None",
"def output(d):\n try:\n current_worker().output(d)\n except AttributeError:\n pass",
"def write_output(self):",
"def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")",
"def report(self,f):\n f.write(\"sectorsize: %d\\n\" % self.sectorsize)\n for run in sorted(self.db):\n f.write(\" [@%8d ; %8d]\\n\" % (run.img_offset,run.bytes))\n f.write(\"total entries in database: %d\\n\\n\" % len(r))",
"def writeLog(pid):\n\tglobal processes,logfile,strikes,sleep\n\tproc = processes[pid]\n\tlogfile.write('[%s] %d %s %f%%cpu %f%%mem (over %d s): %s\\n'%(time.strftime('%b %d %H:%M:%S'),pid,proc.user,proc.cpu,proc.mem,proc.count*sleep,proc.command))",
"def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))",
"def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!",
"def dump(self, filename):\n LIB.mnt_grid_dump.argtypes = [POINTER(c_void_p), c_char_p]\n fm = filename.encode('utf-8')\n ier = LIB.mnt_grid_dump(self.obj, fm)\n if ier:\n error_handler(FILE, 'dump', ier)",
"def _output_stats(self, stats_str, fname=None):\n info = self.dic_info\n if fname is None:\n print(stats_str)\n print(\"DIC: %f\" % info[\"DIC\"])\n print(\"deviance: %f\" % info[\"deviance\"])\n print(\"pD: %f\" % info[\"pD\"])\n else:\n with open(fname, \"w\") as fd:\n fd.write(stats_str)\n fd.write(\"\\nDIC: %f\\n\" % info[\"DIC\"])\n fd.write(\"deviance: %f\\n\" % info[\"deviance\"])\n fd.write(\"pD: %f\" % info[\"pD\"])",
"def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")",
"def write(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_write(self, *args)",
"def gridftpout(self,pid,str):\n\t\tmessage = \"%s(%d): %s\" % (socket.getfqdn(),pid,str)\n\t\tsys.stdout.write(message)\n\t\thost = socket.getfqdn()",
"def print_info(self):\n\n self.logging.info(str(self.filename) + ':' + str(self.__info_dict))",
"def dbtrace_show_output(trace_object, output_file):\n\n pass",
"def print_on_master(self, msg: str, process_group: ProcessGroup = None):\n rank = dist.get_rank(group=process_group)\n if rank == 0:\n print(msg)",
"def print_stats_to_file(file, cnt_insts, cnt_vars):\n\n for arg in sys.argv:\n if arg == \"--insts\":\n file.write(str(cnt_insts) + \"\\n\")\n elif arg == \"--vars\":\n file.write(str(cnt_vars) + \"\\n\")",
"def PrintOutput(self):\n self.file_settings[\"file_name\"].SetString(self.file_name)\n file = TimeBasedAsciiFileWriterUtility(self.model_part, self.file_settings, self._GetHeader()).file\n for point, var_values in zip(self.found_positions, self.values):\n file.write(self._DataToString(point, var_values))\n file.close()",
"def print_log_summary(self, device_id, log_file, df_phys):\n if self.verbose:\n print(\n \"\\n---------------\",\n f\"\\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\\n\",\n )",
"def write():\n pass",
"def write_output(df, filename):\n\n logging.info('Writing output')\n\n feather.write_dataframe(df, filename)",
"def mpirun(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n print rank \n print size\n data = []\n dcds = self.getdcds()\n for i in range(0, len(dcds)):\n pid = i % size \n if pid == rank:\n dcd = dcds[i]\n dcdpath = self.d + \"/\" + dcd\n data.extend(self.metric(self.dcdtopsf(dcd), dcdpath))\n self.write(data)",
"def exposed_write_data(self, chunk_id, data):\n local_filename = self.chunk_filename(chunk_id)\n with open(local_filename, \"w\") as file:\n file.write(data)\n # self.handle_table[chunk_id] = local_filename",
"def logToFile(output, file): \r\n print( output, file=file )",
"def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)",
"def writedata(self,filename_): # 3\n res = self.__obj.writedata(filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def print_data(self, to_file=True):\n logging.info(\"Printing PData\")\n pprog = self.prog_logs\n cursor = pprog.find({})\n if to_file:\n ff = open(os.getcwd() + \"/progress_data.txt\", \"w+\")\n logging.info(\"Writing to file: \" + os.getcwd() + \"/progress_data.txt\")\n for doc in cursor:\n date = list(doc.keys())[1]\n info = {date: doc[date]}\n ff.writelines(json.dumps(info) + \"\\n\")\n ff.close()\n else:\n for doc in cursor:\n date = list(doc.keys())[1]\n info = {date: doc[date]}\n print(json.dumps(info))",
"def summary(self, verbosity=0, file=None):\n\n if type(file) == type(\"\"):\n f=open(file, \"w\")\n else: f= sys.stdout\n\n f.write(_(\"The number of vertices is %d. \") % self.number_of_vertices)\n f.write(_(\"The largest %s is %d.\\n\") % (self.degree_type, self.max_deg))\n f.write(\"\\nDegree distribution:\\n\")\n f.write(_(\" 0:%7.4f%%\\n\") % \\\n (self.n_0/self.number_of_vertices*100))\n\n column=1\n for degree, probability in self.dd:\n f.write(\" %5d:%7.4f%%\" % (degree, probability*100))\n if column == 5:\n f.write(\"\\n\")\n column=1\n else: column += 1\n f.write(\"\\n\")"
] |
[
"0.533129",
"0.5310704",
"0.5240423",
"0.5231672",
"0.5218582",
"0.51917535",
"0.5177688",
"0.51472735",
"0.5135112",
"0.51345277",
"0.51254356",
"0.5119048",
"0.5080074",
"0.5076334",
"0.5051235",
"0.5046454",
"0.50440407",
"0.5038391",
"0.50326645",
"0.5031325",
"0.50310373",
"0.50263727",
"0.5019236",
"0.4998892",
"0.49980772",
"0.49971068",
"0.4989198",
"0.4982769",
"0.49807185",
"0.49802408"
] |
0.5959142
|
0
|
Connect a method to this slot.
|
def connect(self, method):
key = (method.__func__, id(method.__self__))
self._dict[key] = method.__self__
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)",
"def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)",
"def connect_to_update(self, method):\n self._update_signal.connect(method)",
"def onConnect(self, *args):\n UAV_Logger.log(\"connect signal recieved\")\n self.socketIO.emit('CORE_SL_SLOTS_SET', Request(self.getModuleName()).newData({\"slots\":self.slots}).toDictionary())",
"def connect_slots(self):\n\n self.set_elf_sig.connect(self.set_elf)\n self.run_target_sig.connect(self.run_target)\n self.continue_target_sig.connect(self.continue_target)\n self.set_breakpoint_sig.connect(self.set_breakpoint)\n self.set_app_mode_sig.connect(self.set_app_mode)\n self.write_hex_value_sig.connect(self.write_word_to_address)",
"def connect(self):",
"def connect(self):",
"def _connect(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")",
"def connect(self, *args, **kwargs):",
"def connect(self):\n raise NotImplementedError",
"def connect(self):\n raise NotImplementedError",
"def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)",
"def connect(self) -> None:\n ...",
"def connect(self) -> None:",
"def connect(self, plug):",
"def connect(self, plug): # pragma: no cover\n raise NotImplementedError(\"The subclass has to define connect()\")",
"def connect(self, **kwargs):\n raise NotImplementedError",
"def set_method(self, method):\n self.method = method",
"def connect(self):\n pass",
"def _onconnect(self):\n\n pass",
"def slot(self, name):\n raise ClixxException(\"Not implemented.\")",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connect(self):\n pass",
"def connectButtons__(self):\n self.play_button.clicked.connect(self.play_slot__) # (1)\n self.stop_button.clicked.connect(self.stop_slot__) # (2)\n if self.zoom_to_fs_button is not None:\n self.zoom_to_fs_button.clicked.connect(self.zoom_to_fs_slot__)"
] |
[
"0.76193994",
"0.67152643",
"0.67026365",
"0.6241124",
"0.6180561",
"0.6174271",
"0.6174271",
"0.61111224",
"0.60996664",
"0.6064166",
"0.6064166",
"0.6021597",
"0.6018544",
"0.59857386",
"0.59537935",
"0.59430385",
"0.5863827",
"0.5855777",
"0.58509547",
"0.5847076",
"0.58200467",
"0.57805985",
"0.57805985",
"0.57805985",
"0.57805985",
"0.57805985",
"0.57805985",
"0.57805985",
"0.57805985",
"0.5758645"
] |
0.7031321
|
1
|
Disconnect a method from this slot.
|
def disconnect(self, method):
key = (method.__func__, id(method.__self__))
if key in self._dict:
del self._dict[key]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _disconnect(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")",
"def disconnect(self, slot):\n if self.is_connected(slot):\n self.slots.pop(self.slots.index(slot))",
"def disconnect(self):\n raise NotImplementedError('disconnect() is not implemented')",
"def disconnect(self):\n raise NotImplementedError",
"def disconnect(self):\n raise NotImplementedError",
"def _unregisterConnect(self, function):\n self._sig_connect.unsubscribe(function)",
"def disconnect(self) -> None:\n ...",
"def disconnect(self):\n _abstract()",
"def disconnect(self):\n _abstract()",
"def disconnect(self):",
"def unregisterDisconnect(self, function):\n self._sig_disconnect.unsubscribe(function)",
"def disconnect(self):\n # Nothing to do\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n pass",
"def disconnect(self):\n\n if self.connect:\n Disconnect(self.connect)",
"def device_disconnect(self):\n pass",
"def disconnect(self, signal=None, slot=None, transform=None, condition=None):\n if slot:\n self.connections[signal][condition].pop(slot, None)\n elif condition is not None:\n self.connections[signal].pop(condition, None)\n elif signal:\n self.connections.pop(signal, None)\n else:\n delattr(self, 'connections')",
"def disconnect(self):\n raise NotImplementedError(\"Implement in Headset child class\")",
"def unplug(self):\n return signal_base_unplug(self.obj)",
"def disconnect(self, connection):\n # Emit signal.\n plug_item = connection.plugItem\n\n nodzInst = self.scene().views()[0]\n if plug_item is not None:\n nodzInst.portDisconnected(plug_item.port(), self.port())\n\n # Remove connected plugs\n if connection.plugItem in self.connected_slots:\n self.connected_slots.remove(connection.plugItem)\n\n # Remove connections\n self.connections.remove(connection)",
"def disconnect(receiver, *args, **kwargs):\n _dispatcher.disconnect(receiver, *args, **kwargs)",
"def stop_slot__(self):\n print(\"stop_slot__\")\n self.valkkafs_manager.stop()",
"def __del__(self):\n self.disconnect()",
"def disconnect(self, receiver, *args, **kwargs):\n self._dispatcher.disconnect(receiver, *args, **kwargs)",
"def disconnect(self, callback):\n if not callable(callback):\n raise TypeError('Cannot disconnect a non-callable from a Signal')\n conn = self._make_connection(callback)\n self._connection_dead(conn)",
"def disconnect(self, connection):\n # Emit signal.\n socket_item = connection.socketItem\n\n nodzInst = self.scene().views()[0]\n if socket_item is not None:\n nodzInst.portDisconnected(self.port(), socket_item.port())\n\n # Remove connected socket from plug\n if connection.socketItem in self.connected_slots:\n self.connected_slots.remove(connection.socketItem)\n\n # Remove connection\n self.connections.remove(connection)",
"def disconnect(self, message, **kwargs):\n pass"
] |
[
"0.73672116",
"0.71853644",
"0.69441503",
"0.6936023",
"0.6936023",
"0.6859748",
"0.6830833",
"0.6797493",
"0.6797493",
"0.6711881",
"0.6648343",
"0.6539646",
"0.6523527",
"0.6523527",
"0.6523527",
"0.6523527",
"0.6523527",
"0.64713925",
"0.63750887",
"0.6365629",
"0.6272979",
"0.6259001",
"0.62554294",
"0.624831",
"0.62297904",
"0.61854255",
"0.6167697",
"0.6148324",
"0.6089661",
"0.60808843"
] |
0.7751953
|
0
|
To compute the geodesic distance to the walls using a fast-marching method
|
def compute_wall_distance(self):
phi = sp.ones(self.image_red.shape)
if (len(self.mask_id[0])>0):
phi[self.mask_id] = 0
self.wall_distance = skfmm.distance(phi, dx=self.pixel_size)
grad = sp.gradient(self.wall_distance,edge_order=2)
grad_X = grad[1]/self.pixel_size
grad_Y = grad[0]/self.pixel_size
norm = sp.sqrt(grad_X**2+grad_Y**2)
norm = (norm>0)*norm+(norm==0)*0.001
self.wall_grad_X = grad_X/norm
self.wall_grad_Y = grad_Y/norm
else:
self.wall_distance = 1.0e99*sp.ones(self.image_red.shape)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def perform_fast_marching(D,start_points):\n D_temp = np.copy(D)\n D_temp[start_points[0,:],start_points[1,:]] = 0\n return fmm.distance(D_temp) + 1e-15",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def test_distance():\n t0 = time.time()\n c1 = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n c2 = coord.CelestialCoord(0.234 * coord.radians, -1.093 * coord.radians)\n c3 = coord.CelestialCoord((pi + 0.234) * coord.radians, -0.342 * coord.radians)\n c4 = coord.CelestialCoord((pi + 0.234) * coord.radians, 0.832 * coord.radians)\n c5 = coord.CelestialCoord(1.832 * coord.radians, -0.723 * coord.radians)\n c6 = coord.CelestialCoord((0.234 + 2.3e-9) * coord.radians, (0.342 + 1.2e-9) * coord.radians)\n t1 = time.time()\n\n a1 = astropy.coordinates.SkyCoord(0.234 * units.radian, 0.342 * units.radian)\n a2 = astropy.coordinates.SkyCoord(0.234 * units.radian, -1.093 * units.radian)\n a3 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, -0.342 * units.radian)\n a4 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, 0.832 * units.radian)\n a5 = astropy.coordinates.SkyCoord(1.832 * units.radian, -0.723 * units.radian)\n a6 = astropy.coordinates.SkyCoord(0.234 + 2.3e-9, 0.342 + 1.2e-9, unit=units.radian)\n t2 = time.time()\n\n coord_dist = [c1.distanceTo(c).rad for c in [c2,c3,c4,c5,c6]]\n t3 = time.time()\n astropy_dist = [a1.separation(a).rad for a in [a2,a3,a4,a5,a6]]\n t4 = time.time()\n\n np.testing.assert_almost_equal(coord_dist, astropy_dist, decimal=12)\n # For the last one, the distance is rather small in radians, so test in arcsec\n np.testing.assert_almost_equal(coord_dist[-1] * (coord.radians/coord.arcsec),\n astropy_dist[-1] * (coord.radians/coord.arcsec), decimal=10)\n\n print('Compare times for distance calculations:')\n print(' Make CelestialCoords: t = ',t1-t0)\n print(' Make SkyCoords: t = ',t2-t1)\n print(' Calculate distances with Coord: t = ',t3-t2)\n print(' Calculate distances with Astropy: t = ',t4-t3)",
"def getDistances():\n\n # If there's a wall in the way then there's no edge that way (probably)\n\n wallL, edgeL = getDistance(-45) # Left\n wallF, edgeF = getDistance( 0) # Forward\n wallR, edgeR = getDistance( 45) # Right\n\n panTilt.pan() # Recenter\n\n return wallL, edgeL, wallF, edgeF, wallR, edgeR",
"def _geodesic_distance(mesh, face1, face2, edge):\n edge_center = (mesh.vertices[edge[0]] + mesh.vertices[edge[1]]) / 2\n return _list_length(_list_minus(edge_center, _face_center(mesh, face1))) + \\\n _list_length(_list_minus(edge_center, _face_center(mesh, face2)))",
"def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum",
"def test_warping_distance(self):\n t = np.linspace(0, 1, 1000)\n w1 = FDataGrid([t**5], t)\n w2 = FDataGrid([t**3], t)\n\n d = warping_distance(w1, w2)\n np.testing.assert_allclose(d, np.arccos(np.sqrt(15) / 4), atol=1e-3)\n\n d = warping_distance(w2, w2)\n np.testing.assert_allclose(d, 0, atol=2e-2)",
"def get_nodes_distance(dbpath,node1,node2,inst,stepname,nframe=-1):\n odb = openOdb(path=dbpath)\n _inst = odb.rootAssembly.instances[inst]\n ic = odb.rootAssembly.instances[inst].nodes\n us = odb.steps[stepname].frames[nframe].fieldOutputs['U'].getSubset(region=_inst).values\n xx1 = ic[node1-1].coordinates[0]+us[node1-1].data[0]\n yy1 = ic[node1-1].coordinates[1]+us[node1-1].data[1]\n xx2 = ic[node2-1].coordinates[0]+us[node2-1].data[0]\n yy2 = ic[node2-1].coordinates[1]+us[node2-1].data[1]\n if _inst.embeddedSpace == THREE_D:\n zz1 = ic[node1-1].coordinates[2]+us[node1-1].data[2]\n zz2 = ic[node2-1].coordinates[2]+us[node2-1].data[2]\n d = np.sqrt((xx2-xx1)**2 + (yy2-yy1)**2 + (zz2-zz1)**2)\n else:\n d = np.sqrt((xx2-xx1)**2+(yy2-yy1)**2)\n return d",
"def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output",
"def calculate_dist_from_eqm(distance_from_eqm, variable_mask):",
"def _earth_distance(time='now'):\n return get_earth(time).radius",
"def comparable_dist(zamg_id):\n station_lat, station_lon = stations[zamg_id]\n return (lat - station_lat) ** 2 + (lon - station_lon) ** 2",
"def _compute_dist_lat_lon(graph):\r\n R = 6371 # Radius of the earth in km\r\n for edge in graph.edges():\r\n node1, node2 = edge\r\n lon1 = np.radians(graph.nodes[node1]['Longitude'])\r\n lon2 = np.radians(graph.nodes[node2]['Longitude'])\r\n lat1 = np.radians(graph.nodes[node1]['Latitude'])\r\n lat2 = np.radians(graph.nodes[node2]['Latitude'])\r\n delta_lat = lat2 - lat1\r\n delta_lon = lon2 - lon1\r\n a = np.sin(delta_lat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * (np.sin(delta_lon / 2) ** 2)\r\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\r\n dist = np.round(R * c, 5)\r\n graph.edges[node1, node2]['length'] = dist",
"def calculate_distance(source,dest):\n\n ### Earth radius in miles\n R = 3960.0\n\n lat1, lon1 = source\n lat2, lon2 = dest\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n ### compute distance in spherical coordinates\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance",
"def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n \n for index_p in range(len(col_count)):\n for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output",
"def distances(self):",
"def _great_circle_distance_fast(ra1, dec1, ra2, dec2, threads):\n\n import numexpr as ne\n \n # terminology from the Vicenty formula - lambda and phi and\n # \"standpoint\" and \"forepoint\"\n lambs = np.radians(ra1)\n phis = np.radians(dec1)\n lambf = np.radians(ra2)\n phif = np.radians(dec2)\n \n dlamb = lambf - lambs\n\n #using numexpr\n #nthreads = ne.detect_number_of_cores()\n nthreads = threads\n ne.set_num_threads(nthreads)\n\n hold1=ne.evaluate('sin(phif)') #calculate these once instead of a few times!\n hold2=ne.evaluate('sin(phis)')\n hold3=ne.evaluate('cos(phif)')\n hold4=ne.evaluate('cos(dlamb)')\n hold5=ne.evaluate('cos(phis)')\n numera = ne.evaluate( 'hold3 * sin(dlamb)')\n numerb = ne.evaluate('hold5 * hold1 - hold2 * hold3 * hold4')\n numer = ne.evaluate('sqrt(numera**2 + numerb**2)')\n denom = ne.evaluate('hold2 * hold1 + hold5 * hold3 * hold4')\n pi=math.pi\n\n return ne.evaluate('(arctan2(numer, denom))*180.0/pi')",
"def test_equals_distance_clusters_rust():\n rust_result = rust_force.calculate_distance_between_two_clusters(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n rust_result_parallel = rust_force.calculate_distance_between_two_clusters_parallel(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n assert rust_result == rust_result_parallel",
"def computeDistance3D(imIn, imOut, grid=m3D.DEFAULT_GRID3D, edge=mamba.EMPTY):\r\n err = core3D.MB3D_Distanceb(imIn.mb3DIm, imOut.mb3DIm, grid.getCValue(), edge.id)\r\n err = m3D.convert3DErrorToMamba(err)\r\n mamba.raiseExceptionOnError(err)",
"def get_fast_distance(lat1, lon1, lat2, lon2):\n KM = 6371.393\n lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n c = 2 * np.arcsin(np.sqrt(a))\n distance = KM * c\n return distance",
"def calc_distances_from_central(cluster, embedding):\n\n return calc_distances_in_embedding(cluster, embedding)",
"def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)",
"def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, wghts2D, wghts3D",
"def calcDistanceOptimized(lat1, lon1, lat2, lon2):\n rad = 0.017453292519943\n yDistance = (lat2 - lat1) * 60.00721\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * 30.053965\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * 1852.00088832",
"def main():\n\n # Create an empty array to hold our points.\n n = gpuarray.zeros(shape=(x, y, z),\n dtype=gpuarray.vec.float3)\n\n # Populate the array with randomized points from the search space.\n for k in range(z):\n for j in range(y):\n for i in range(x):\n n[i, j, k] = gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-depth, depth))\n\n # Declare our elementwise CUDA kernel.\n mod = Elementwise(\n arguments=\"float3 pt, float3 *ns, float *rs\",\n operation=\"rs[i] = sqrt(pow(pt.x-ns[i].x,2)+pow(pt.y-ns[i].y,2)+pow(pt.z-ns[i].z,2))\",\n name=\"euclidean_distance\",\n preamble=\"#include <math.h>\"\n )\n\n # Declare an empty results array.\n r = gpuarray.zeros(shape=(50, 50, 2), dtype=numpy.float32)\n start = cuda.Event()\n end = cuda.Event()\n start.record()\n # Call the kernel with a randomize point from the search space.\n mod(gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-width, width)), n, r)\n end.record()\n end.synchronize()\n print((start.time_till(end)))\n print(r)",
"def _estimate_velocity_by_neigh(\n x_coords_metres, y_coords_metres, x_velocities_m_s01,\n y_velocities_m_s01, e_folding_radius_metres):\n\n if numpy.isnan(e_folding_radius_metres):\n neigh_radius_metres = numpy.inf\n else:\n neigh_radius_metres = 3 * e_folding_radius_metres\n\n orig_x_velocities_m_s01 = x_velocities_m_s01 + 0.\n orig_y_velocities_m_s01 = y_velocities_m_s01 + 0.\n\n nan_flags = numpy.logical_or(\n numpy.isnan(orig_x_velocities_m_s01),\n numpy.isnan(orig_y_velocities_m_s01)\n )\n nan_indices = numpy.where(nan_flags)[0]\n\n for this_index in nan_indices:\n if numpy.isnan(e_folding_radius_metres):\n these_neighbour_indices = numpy.where(numpy.invert(nan_flags))[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n x_velocities_m_s01[this_index] = numpy.mean(\n orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.mean(\n orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n continue\n\n these_x_diffs_metres = numpy.absolute(\n x_coords_metres[this_index] - x_coords_metres)\n these_y_diffs_metres = numpy.absolute(\n y_coords_metres[this_index] - y_coords_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_x_diffs_metres <= neigh_radius_metres,\n these_y_diffs_metres <= neigh_radius_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_neighbour_flags, numpy.invert(nan_flags)\n )\n\n these_neighbour_indices = numpy.where(these_neighbour_flags)[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n these_neighbour_dist_metres = numpy.sqrt(\n these_x_diffs_metres[these_neighbour_indices] ** 2 +\n these_y_diffs_metres[these_neighbour_indices] ** 2\n )\n\n these_neighbour_subindices = numpy.where(\n these_neighbour_dist_metres <= neigh_radius_metres\n )[0]\n if len(these_neighbour_subindices) == 0:\n continue\n\n these_neighbour_indices = these_neighbour_indices[\n these_neighbour_subindices]\n these_neighbour_dist_metres = these_neighbour_dist_metres[\n these_neighbour_subindices]\n\n these_weights = numpy.exp(\n -these_neighbour_dist_metres / e_folding_radius_metres\n )\n these_weights = these_weights / numpy.sum(these_weights)\n\n x_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n return x_velocities_m_s01, y_velocities_m_s01",
"def calcDistance(lat1, lon1, lat2, lon2):\n yDistance = (lat2 - lat1) * nauticalMilePerLat\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2) \n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * milesPerNauticalMile",
"def ToRasterDistance(inputgeodf, rastername, rasterconn, value, method):\n raster_data = PostgresRasterToArray(rastername, rasterconn)\n raster_tree = spatial.cKDTree(np.argwhere(raster_data[0] == value))\n input_rowcol = np.column_stack((np.round((raster_data[1][5] - inputgeodf.centroid.y)/raster_data[1][0]).values,\n np.round((inputgeodf.centroid.x - raster_data[1][2])/raster_data[1][0]).values))\n dist_method = 1 if method.lower() == 'manhattan' else 2\n return raster_tree.query(input_rowcol, p=dist_method)[0]*raster_data[1][0]",
"def redfearn(lat, lon, false_easting=None, false_northing=None,\r\n zone=None, central_meridian=None, scale_factor=None):\r\n\r\n\r\n from math import pi, sqrt, sin, cos, tan\r\n \r\n\r\n\r\n #GDA Specifications\r\n a = 6378137.0 #Semi major axis\r\n inverse_flattening = 298.257222101 #1/f\r\n if scale_factor is None:\r\n K0 = 0.9996 #Central scale factor \r\n else:\r\n K0 = scale_factor\r\n #print 'scale', K0\r\n zone_width = 6 #Degrees\r\n\r\n longitude_of_central_meridian_zone0 = -183 \r\n longitude_of_western_edge_zone0 = -186\r\n\r\n if false_easting is None:\r\n false_easting = 500000\r\n\r\n if false_northing is None:\r\n if lat < 0:\r\n false_northing = 10000000 #Southern hemisphere\r\n else:\r\n false_northing = 0 #Northern hemisphere)\r\n \r\n \r\n #Derived constants\r\n f = 1.0/inverse_flattening\r\n b = a*(1-f) #Semi minor axis\r\n\r\n e2 = 2*f - f*f# = f*(2-f) = (a^2-b^2/a^2 #Eccentricity\r\n e = sqrt(e2)\r\n e2_ = e2/(1-e2) # = (a^2-b^2)/b^2 #Second eccentricity\r\n e_ = sqrt(e2_)\r\n e4 = e2*e2\r\n e6 = e2*e4\r\n\r\n #Foot point latitude\r\n n = (a-b)/(a+b) #Same as e2 - why ?\r\n n2 = n*n\r\n n3 = n*n2\r\n n4 = n2*n2\r\n\r\n G = a*(1-n)*(1-n2)*(1+9*n2/4+225*n4/64)*pi/180\r\n\r\n\r\n phi = lat*pi/180 #Convert latitude to radians\r\n\r\n sinphi = sin(phi) \r\n sin2phi = sin(2*phi)\r\n sin4phi = sin(4*phi)\r\n sin6phi = sin(6*phi)\r\n\r\n cosphi = cos(phi)\r\n cosphi2 = cosphi*cosphi\r\n cosphi3 = cosphi*cosphi2\r\n cosphi4 = cosphi2*cosphi2\r\n cosphi5 = cosphi*cosphi4 \r\n cosphi6 = cosphi2*cosphi4\r\n cosphi7 = cosphi*cosphi6\r\n cosphi8 = cosphi4*cosphi4 \r\n\r\n t = tan(phi)\r\n t2 = t*t\r\n t4 = t2*t2\r\n t6 = t2*t4\r\n \r\n #Radius of Curvature\r\n rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5\r\n nu = a/(1-e2*sinphi*sinphi)**0.5\r\n psi = nu/rho\r\n psi2 = psi*psi\r\n psi3 = psi*psi2\r\n psi4 = psi2*psi2\r\n\r\n\r\n\r\n #Meridian distance\r\n\r\n A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256\r\n A2 = 3.0/8*(e2+e4/4+15*e6/128)\r\n A4 = 15.0/256*(e4+3*e6/4)\r\n A6 = 35*e6/3072\r\n \r\n term1 = a*A0*phi\r\n term2 = -a*A2*sin2phi\r\n term3 = a*A4*sin4phi\r\n term4 = -a*A6*sin6phi\r\n\r\n m = term1 + term2 + term3 + term4 #OK\r\n\r\n if zone is not None and central_meridian is not None:\r\n msg = 'You specified both zone and central_meridian. 
Provide only one of them'\r\n raise Exception, msg\r\n \r\n # Zone\r\n if zone is None:\r\n zone = int((lon - longitude_of_western_edge_zone0)/zone_width)\r\n\r\n # Central meridian\r\n if central_meridian is None:\r\n central_meridian = zone*zone_width+longitude_of_central_meridian_zone0\r\n else:\r\n zone = -1\r\n\r\n omega = (lon-central_meridian)*pi/180 #Relative longitude (radians)\r\n omega2 = omega*omega\r\n omega3 = omega*omega2\r\n omega4 = omega2*omega2\r\n omega5 = omega*omega4\r\n omega6 = omega3*omega3\r\n omega7 = omega*omega6\r\n omega8 = omega4*omega4\r\n \r\n #Northing\r\n term1 = nu*sinphi*cosphi*omega2/2 \r\n term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24\r\n term3 = nu*sinphi*cosphi5*\\\r\n (8*psi4*(11-24*t2)-28*psi3*(1-6*t2)+\\\r\n psi2*(1-32*t2)-psi*2*t2+t4-t2)*omega6/720\r\n term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320\r\n northing = false_northing + K0*(m + term1 + term2 + term3 + term4)\r\n\r\n #Easting\r\n term1 = nu*omega*cosphi\r\n term2 = nu*cosphi3*(psi-t2)*omega3/6\r\n term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120\r\n term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040\r\n easting = false_easting + K0*(term1 + term2 + term3 + term4)\r\n \r\n return zone, easting, northing"
] |
[
"0.6176286",
"0.6150726",
"0.6150726",
"0.60788697",
"0.5812341",
"0.57557344",
"0.57368165",
"0.5719741",
"0.5710146",
"0.56776214",
"0.56411487",
"0.5639875",
"0.5636927",
"0.56183845",
"0.5616496",
"0.5609934",
"0.5602868",
"0.559367",
"0.5593239",
"0.5589662",
"0.5570916",
"0.5567575",
"0.5559643",
"0.55481964",
"0.55276495",
"0.551984",
"0.5505389",
"0.5492445",
"0.5489179",
"0.54841423"
] |
0.63437414
|
0
|
To plot the wall distances
|
def plot_wall_dist(self,id=1,dpi=150):
fig = plt.figure(id)
ax1 = fig.add_subplot(111)
ax1.imshow(self.image,interpolation='nearest',
extent=[self.xmin,self.xmax,self.ymin,self.ymax], origin='lower')
ax1.imshow(self.wall_distance,interpolation='nearest',
extent=[self.xmin,self.xmax,self.ymin,self.ymax],alpha=0.7,
origin='lower')
step = 10
ax1.quiver(self.X[::step, ::step],self.Y[::step, ::step],
self.wall_grad_X[::step, ::step],
self.wall_grad_Y[::step, ::step])
#plt.savefig('.png',dpi=dpi)
plt.draw()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getDistances():\n\n # If there's a wall in the way then there's no edge that way (probably)\n\n wallL, edgeL = getDistance(-45) # Left\n wallF, edgeF = getDistance( 0) # Forward\n wallR, edgeR = getDistance( 45) # Right\n\n panTilt.pan() # Recenter\n\n return wallL, edgeL, wallF, edgeF, wallR, edgeR",
"def compute_wall_distance(self):\n phi = sp.ones(self.image_red.shape)\n if (len(self.mask_id[0])>0):\n phi[self.mask_id] = 0\n self.wall_distance = skfmm.distance(phi, dx=self.pixel_size)\n grad = sp.gradient(self.wall_distance,edge_order=2)\n grad_X = grad[1]/self.pixel_size\n grad_Y = grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.wall_grad_X = grad_X/norm\n self.wall_grad_Y = grad_Y/norm\n else:\n self.wall_distance = 1.0e99*sp.ones(self.image_red.shape)",
"def init_distance_plot(self, width=200, height=200, mode='fixed'):\n \"\"\"\n Initiates the neighbourhood composition plot.\n \"\"\"\n self.distance_quads = {}\n self.distance_plot = figure(title=\"Distance utility\",\n x_range=(0, 1),\n plot_width=width,\n sizing_mode=mode,\n output_backend=\"webgl\")\n\n fractions = self.composition_data(agent_type='household')\n for group in fractions.keys():\n\n hist, edges = np.histogram(fractions[group], density=True, bins=20)\n self.distance_quads[group] = \\\n self.distance_plot.quad(\n top=hist, bottom=0, left=edges[:-1], right=edges[1:],\n fill_color=group[:-1], line_color=\"white\", alpha=0.7,\n legend_label=group)",
"def find_wall(self, data):\n minimum = min(data.ranges)\n min_index = data.ranges.index(minimum)\n\n # convert angle to be centered around front laser \n # set differences and constants\n if min_index <= 180:\n diff_ang = min_index\n else:\n diff_ang = min_index - 360\n diff_dist = minimum - distance\n k_d_dist = 0.5\n k_d_ang = 0.01\n \n if min_index < 5 or min_index > 355:\n # Go forward until it reaches close to wall\n self.twist.angular.z = k_d_ang * diff_ang\n self.twist.linear.x = k_d_dist * diff_dist\n else:\n # keep turning until it faces closest wall\n self.twist.angular.z =k_d_ang * diff_ang\n if self.twist.linear.x > 0:\n self.twist.linear.x = k_d_dist * diff_dist",
"def vizualize_wall(self):\n\n\t\t#Points are converted from polar to cartesian here\n\t\tpoint1 = Point()\n\t\t#(-math.pi/4) represents the 45 degree rotation of the front point\n\t\t#from the front of the robot\n\t\tpoint1.x = math.cos((-math.pi/4))*self.front_point\n\t\tpoint1.y = math.sin((-math.pi/4))*self.front_point\n\t\tpoint2 = Point()\n\t\t#(-3*math.pi/4) represents the back point's 90 degree rotaion from\n\t\t#the front point\n\t\tpoint2.x = math.cos((-3*math.pi/4))*self.back_point\n\t\tpoint2.y = math.sin((-3*math.pi/4))*self.back_point\n\t\tmy_marker = Marker(type=Marker.LINE_STRIP)\n\t\tmy_marker.header.frame_id = \"base_link\"\n\t\tmy_marker.color.a = 1\n\t\tmy_marker.scale.x = .1\n\t\tmy_marker.points = [point1, point2]\n\t\tself.visualizer.publish(my_marker)",
"def distance_plot():\n day = \"20131210\"\n file = \"Data/matfiles/\" + day + \".mat\"\n object = MatReader(file)\n\n\n\n # ind1 = 2606\n # ind2 = 13940 + 1*7000\n\n ind1 = 0\n ind2 = 150000\n\n times = object.secondsB[ind1:ind2]\n\n xA = object.latA[ind1:ind2]\n yA = object.longA[ind1:ind2]\n zA = object.radA[ind1:ind2]\n\n xB = object.latB[ind1:ind2]\n yB = object.longB[ind1:ind2]\n zB = object.radB[ind1:ind2]\n\n xC = object.latC[ind1:ind2]\n yC = object.longC[ind1:ind2]\n zC = object.radC[ind1:ind2]\n\n mltA = object.mltA[ind1:ind2]\n mltB = object.mltB[ind1:ind2]\n\n\n dist_BA = object.great_circle_distance(xB, yB, zB, xA, yA, zA)\n dist_BC = object.great_circle_distance(xB, yB, zB, xC, yC, zC)\n dist_AC = object.great_circle_distance(xA, yA, zA, xC, yC, zC)\n\n\n plt.figure(0)\n plt.plot(times, dist_BA)\n plt.plot(times, dist_BC)\n plt.plot(times, dist_AC)\n plt.title(\"Distance over time\")\n plt.xlabel(\"Seconds since midnight UTC of sat B\")\n plt.ylabel(\"Distance [m]\")\n plt.legend([\"B - A\", \"B - C\", \"A - C\"])\n plt.grid(\"on\")\n # plt.savefig(\"Figures/matfigs/distance_over_time_\" + day + \".pdf\")\n plt.figure(1)\n\n plt.plot(times, xB - xA)\n plt.plot(times, xB - xC)\n plt.title(\"difference in latitude\")\n plt.xlabel(\"time of sat B [s]\")\n plt.ylabel(\"difference in latitude [degrees]\")\n plt.legend([\"B - A\", \"B - C\"])\n\n plt.figure(2)\n\n plt.plot(times, yB - yA)\n plt.plot(times, yB - yC)\n #plt.axis([0, 7000, -10, 10])\n plt.title(\"difference in longitude\")\n plt.xlabel(\"time of sat B [s]\")\n plt.ylabel(\"difference in latitude [degrees]\")\n plt.legend([\"B - A\", \"B - C\"])\n plt.show()\n\n # plt.plot(times, zB - zA)\n # plt.plot(times, zB - zC)\n # plt.title(\"Difference in altitude\")\n # plt.xlabel(\"time of sat B [s]\")\n # plt.ylabel(\"difference in altitude [m]\")\n # plt.legend([\"B - A\", \"B - C\"])\n # plt.show()\n\n plt.plot(xB, dist_BA)\n plt.plot(xB, dist_BC)\n plt.plot(xB, dist_AC)\n plt.xlabel(\"Latitude [Degrees]\")\n plt.ylabel(\"Distance [m]\")\n plt.title(\"Distance over latitude\")\n plt.grid(\"on\")\n plt.legend([\"B - A\", \"B - C\", \"A - C\"])\n plt.xticks([-90, -77, -70, -30, 0, 30, 70, 77, 90])\n plt.savefig(\"Figures/matfigs/distance_over_latitude_\" + day + \".pdf\")\n plt.show()\n\n mltdiff = mltA - mltB\n\n for i in range(len(mltdiff)):\n if mltdiff[i] > 24:\n mltdiff[i] = mltdiff[i] - 24\n elif mltdiff[i] < -24:\n mltdiff[i] = mltdiff[i] + 24\n\n\n print(\"mean distance B-A = %g m\" % np.mean(dist_BA))\n print(\"mean distance B-C = %g m\" % np.mean(dist_BC))\n\n print(\"velocity times timediff BA = %g\" % (object.BA_shift/2*np.mean(object.velA)))\n print(\"velocity times timediff BC = %g\" % (object.BC_shift/2*np.mean(object.velC)))\n print(np.min(dist_BA))\n print(object.BA_shift)\n print(object.BC_shift - object.BA_shift)\n print(np.mean(object.velA))",
"def add_walls(self):\n for x in range(self.width):\n self.add_thing(Wall(), (x, 0))\n self.add_thing(Wall(), (x, self.height - 1))\n\n for y in range(self.height):\n self.add_thing(Wall(), (0, y))\n self.add_thing(Wall(), (self.width - 1, y))",
"def visualise_distances(distances: pd.DataFrame\n ) -> None:\n \n # Initialise the heatmap\n hm = sns.heatmap(\n data=distances,\n annot=True,\n vmin=0,\n vmax=1,\n cmap=sns.color_palette(\"crest\", as_cmap=True)\n )\n \n # Handle the cut-off top and bottom that occasionally occurs\n bottom, top = hm.get_ylim()\n hm.set_ylim(bottom + 0.5, top - 0.5)\n \n # Save the heatmap to a file\n # hm.savefig(\"distance_heatmap.png\")\n sns.utils.plt.show()",
"def distances(self):",
"def draw_distance_map(graph):\n pos = nx.get_node_attributes(graph, 'pos')\n\n ax1 = plt.subplot(211)\n ax1.set_title('Not mirrored')\n ax1.xaxis.tick_top()\n ax1.yaxis.tick_left()\n ax1.yaxis.set_ticks(np.arange(-2000, 17000, 2000))\n ax1.xaxis.set_ticks(np.arange(-2000, 16000, 2000))\n nx.draw_networkx_nodes(graph, pos, node_color='g')\n nx.draw_networkx_labels(graph, pos, fontsize=10)\n plt.margins(0.1)\n\n ax2 = plt.subplot(212)\n ax2.set_title('Mirrored')\n ax2.invert_yaxis()\n ax2.xaxis.tick_top()\n ax2.yaxis.tick_left()\n ax2.yaxis.set_ticks(np.arange(-2000, 17000, 2000))\n ax2.xaxis.set_ticks(np.arange(-2000, 16000, 2000))\n nx.draw_networkx_nodes(graph, pos, node_color='r')\n nx.draw_networkx_labels(graph, pos, fontsize=10)\n plt.margins(0.1)\n\n plt.subplots_adjust(left=0.03, right=0.97, bottom=0.03, top=0.92)\n\n plt.show()",
"def _draw_walls(self, draw_grid):\n for yi, y in enumerate(self._grid):\n for xi, x in enumerate(y):\n for i, w in enumerate(x.walls):\n if i == 0 and w:\n draw_grid[yi * 2 + 1][xi * 2] = self._wall_color\n if i == 1 and w:\n draw_grid[yi * 2 + 1][xi * 2 + 2] = self._wall_color\n if i == 2 and w:\n draw_grid[yi * 2][xi * 2 + 1] = self._wall_color\n if i == 3 and w:\n draw_grid[yi * 2 + 2][xi * 2 + 1] = self._wall_color\n return draw_grid",
"def distance_between_wheels():",
"def walls_and_gridlines2_d(self):\n return self.container['walls_and_gridlines2_d']",
"def display_distances(self) -> None:\n self.svg.clear()\n for colnum, valtup in enumerate(self.dist_lst):\n epc, dist_val = valtup\n self.drawcolumn(colnum, epc, dist_val)",
"def draw_atom_disks(mol, weights, radius=0.05, step=0.001):\n x = np.arange(0, 1, step)\n y = np.arange(0, 1, step)\n X, Y = np.meshgrid(x, y)\n Z = np.zeros(X.shape)\n for i, (c_x, c_y) in mol._atomPs.items():\n base = (X - c_x) ** 2 + (Y - c_y) ** 2\n circle_mask = (base < radius**2)\n circle = circle_mask.astype('float') * weights[i]\n Z += circle\n return X, Y, Z",
"def walls(self):\n return self._walls",
"def walls(self):\n return self._walls",
"def extract_walls(img_array,x_scale,y_scale,wall_height):\n\n wall_th = 2\n length = 0\n wall_list = []\n\n #check for horizontal walls first\n for row in range(img_array.shape[0]):\n for col in range(img_array.shape[1]):\n \n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if left_edge(sec):\n #check two steps to the right\n next_sec = img_array.astype(int)[row:row+2, col+1:col+3]\n next_next_sec = img_array.astype(int)[row:row+2, col+2:col+4]\n\n #if horizontal wall, get coordinates and count length\n if is_wall(next_sec) and not right_edge(next_next_sec): \n #record corner coordinates\n x = col +1\n y = row\n while is_wall(next_sec):\n #start counting length across, until right edge found\n length +=1\n col +=1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list \n new_wall = Wall(x*x_scale,y*y_scale,length*x_scale,wall_th*y_scale,wall_height)\n wall_list.append(new_wall)\n length = 0\n\n #check for vertical walls\n for col in range(img_array.shape[1]):\n for row in range(img_array.shape[0]):\n\n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if top_edge(sec): \n #check two steps below\n next_sec = img_array.astype(int)[row+1:row+3, col:col+2]\n next_next_sec = img_array.astype(int)[row+2:row+4, col:col+2]\n\n #if vertical wall, get coordinates and count length\n if is_wall(next_sec) and is_wall(next_next_sec):\n x = col\n y = row\n while is_wall(next_sec):\n #start counting length downwards, until bottom edge found\n length += 1\n row += 1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list\n new_wall = Wall(x*x_scale,y*y_scale,wall_th*x_scale,length*y_scale, wall_height)\n wall_list.append(new_wall)\n length = 0\n\n return wall_list",
"def edistw_to_line(point, edge, walls):\r\n#\tif min(x1,x2) <= x <= max(x1,x2) and min(y1,y2) <= y <= max(y1,y2):\r\n#\t\treturn 0\r\n\t(x,y) = point\r\n\t((x1,y1),(x2,y2)) = edge\r\n\tif x1 == x2:\r\n\t\tds = [math.sqrt((x1-x)**2 + (y3-y)**2) \\\r\n\t\t\tfor y3 in range(min(y1,y2),max(y1,y2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x1,y3)), walls)]\r\n\telse:\r\n\t\tds = [math.sqrt((x3-x)**2 + (y1-y)**2) \\\r\n\t\t\tfor x3 in range(min(x1,x2),max(x1,x2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x3,y1)), walls)]\r\n\tds.append(infinity)\r\n\treturn min(ds)",
"def add_walls(self):\n for x in range(self.width + 1):\n if not self.some_things_at((x, 0), Wall):\n self.add_thing(Wall(), (x, 0))\n if not self.some_things_at((x, self.height), Wall):\n self.add_thing(Wall(), (x, self.height))\n\n for y in range(self.height + 1):\n if not self.some_things_at((0, y), Wall):\n self.add_thing(Wall(), (0, y))\n if not self.some_things_at((self.width, y), Wall):\n self.add_thing(Wall(), (self.width, y))\n #self.add_thing(Wumpus(),(1,3))\n #self.add_thing(Pit(),(3,3))\n #self.add_thing(Pit(),(3,1))\n #self.add_thing(Gold(),(2,3))\n #self.add_thing(Pit(),(4,4))",
"def getWalls(self):\n return self.data.layout.walls",
"def hourglass(funnel_width, hole_width, d_angle_from_horizon_to_wall, dist_between_anchors, diam, grain_height, **kwargs_ignore):\n # Do calculations for funnel\n r_angle = math.radians(d_angle_from_horizon_to_wall)\n tan_over_2 = math.tan(r_angle)/2.0\n height_funnel = funnel_width * tan_over_2\n hole_width += diam # trick to allow space so bottom anchors don't overlap, e.g. so a zero-width hole actually places anchors with their borders touching instead of with their centers touching\n height_hole = hole_width * tan_over_2\n xdist = dist_between_anchors * math.cos(r_angle)\n ydist = dist_between_anchors * math.sin(r_angle)\n # Centers of wall particles\n def get_anchor_centers(xstart, xend, xdist, left_side=True):\n # np.arange doesn't let you go from a greater to a lesser value, so invert twice to get that behavior\n if xstart > xend:\n dx = (xstart - xend)\n## xs = -np.arange(-xstart, -xend, xdist)\n else:\n dx = (xend - xstart)\n## xs = np.arange(xstart, xend, xdist)\n num_anchors = math.ceil(dx / xdist)\n xs = np.linspace(xstart, xend, num_anchors)\n return xs\n # Right wall equation: goes from ( (wf + wh)/2, hh - hf ) to (wf, 0)\n #m = (height_funnel - height_hole) / (funnel_width - (funnel_width + hole_width)/2.0)\n extra_funnel_extension = 0 # messes up hole alignment\n y_funnel_bottom = height_hole - height_funnel # y is negative\n xrs = get_anchor_centers((funnel_width + hole_width)/2.0, funnel_width + extra_funnel_extension, xdist)\n yrs = get_anchor_centers(y_funnel_bottom, extra_funnel_extension, ydist, False)\n # Left wall goes from (0, 0) to ( (wf - wh)/2, hh - hf)\n xls = get_anchor_centers(-extra_funnel_extension, (funnel_width - hole_width)/2.0, xdist)\n yls = get_anchor_centers(extra_funnel_extension, y_funnel_bottom, ydist, False)\n\n # Place anchors\n # NOTE: the order of adding matters when plotting anchor forces, so try to add from left to right\n c = Container()\n\n # Make side walls/anchors\n # NOTE: The anchors at y = 0 are placed by other code\n wall_ys = np.arange(diam, grain_height + diam, diam)\n for y in reversed(wall_ys): # high to low\n c.add_particle([0, y])\n\n def add_anchors(xs, ys):\n for x, y in zip(xs, ys):\n c.add_particle([x, y])\n add_anchors(xls, yls)\n add_anchors(xrs, yrs)\n\n # KLUDGE repeating sucks, but I want the anchor ixs in the right order\n for y in wall_ys: # low to high\n c.add_particle([funnel_width, y])\n\n anchor_ixs = range(c.num_particles)\n\n # Finally, add sand\n moldyn.add_triangle_lattice(c,\n (diam, funnel_width-diam),\n (diam, grain_height),\n diam, diam)\n return c, y_funnel_bottom, anchor_ixs",
"def plot_k_walls(k_walls, plot_range=None,\n plot_data_points=False,):\n pyplot.figure()\n pyplot.axes().set_aspect('equal')\n\n for k_wall in k_walls:\n xs = k_wall.get_xs() \n ys = k_wall.get_ys() \n pyplot.plot(xs, ys, '-', label=k_wall.identifier)\n\n if(plot_data_points == True):\n pyplot.plot(xs, ys, 'o', color='k', markersize=4)\n\n if plot_range is None:\n pyplot.autoscale(enable=True, axis='both', tight=None)\n else:\n [[x_min, x_max], [y_min, y_max]] = plot_range\n pyplot.xlim(x_min, x_max)\n pyplot.ylim(y_min, y_max)\n\n mpldatacursor.datacursor(\n formatter='{label}'.format,\n hover=True,\n )\n\n pyplot.show()",
"def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))",
"def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))",
"def plot_distances(self, params=None, **kw):\n from ...ipyutils import plot_data\n if params is None:\n params = self.collocation_points()\n if 'distances' in kw:\n distances = kw.pop('distances')\n else:\n distances = self.point_distances(params=params)\n return plot_data(\n params[:-1], distances,\n **insert_missing(\n kw,\n figsize=(4, 2.4), dpi=110,\n ylog=True,\n xlabel=r\"curve parameter $\\lambda$\",\n ylabel=r\"coordinate distance\",\n )\n )",
"def walls(self):\n return self.container['walls']",
"def draw_heaters(ax, windtunnel):\n draw_heater(ax, windtunnel.heater_l)\n draw_heater(ax, windtunnel.heater_r)",
"def _wall_pos_xyaxes(size):\n return [\n ((0., -size[1], 0.), (-1, 0, 0, 0, 0, 1)),\n ((0., size[1], 0.), (1, 0, 0, 0, 0, 1)),\n ((-size[0], 0., 0.), (0, 1, 0, 0, 0, 1)),\n ((size[0], 0., 0.), (0, -1, 0, 0, 0, 1)),\n ]",
"def twoEntryPoints(self):\n points = []\n if self.isRed:\n for x in range(self.midWidth, self.width):\n for y in range(0, self.height):\n if (x, y) not in self.walls:\n adjacent = self.adjacentValidPoints(x, y)\n if len(adjacent) == 2:\n points.append((x, y))\n else:\n for x in range(0, self.midWidth-1):\n for y in range(0, self.height):\n if(x, y) not in self.walls:\n adjacent = self.adjacentValidPoints(x, y)\n if len(adjacent) == 2:\n points.append((x, y))\n return points"
] |
[
"0.6642078",
"0.62512195",
"0.5998224",
"0.59888494",
"0.59229547",
"0.5896411",
"0.5847392",
"0.57994545",
"0.57985526",
"0.5769744",
"0.57665414",
"0.5710313",
"0.57037246",
"0.5697214",
"0.5687254",
"0.56732756",
"0.56732756",
"0.56475973",
"0.563784",
"0.5581875",
"0.5537967",
"0.551638",
"0.55144215",
"0.5503351",
"0.5503351",
"0.54841",
"0.5480307",
"0.5444718",
"0.5443573",
"0.5441358"
] |
0.7610848
|
0
|
Iterate over a list of servers and instantiate the Protocol class.
|
def set_servers(self, servers):
if isinstance(servers, six.string_types):
servers = [servers]
assert servers, "No memcached servers supplied"
self._servers = [Protocol(
server=server,
username=self.username,
password=self.password,
compression=self.compression,
socket_timeout=self.socket_timeout,
pickle_protocol=self.pickle_protocol,
pickler=self.pickler,
unpickler=self.unpickler,
tls_context=self.tls_context,
) for server in servers]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1",
"def set_servers(self, servers):\n\n\t\tself.servers = []\n\t\tself.buckets = []\n\n\t\tfor server_desc in servers:\n\t\t\tif type(server_desc) == tuple:\n\t\t\t\tserver_addr, weight = server_desc\n\t\t\telse:\n\t\t\t\tserver_addr, weight = server_desc, 1\n\n\t\t\tserver = _ServerConnection(server_addr, weight, self._debuglog)\n\n\t\t\tself.servers.append(server)\n\n\t\t\tfor _index in range(weight):\n\t\t\t\tself.buckets.append(server)",
"def add_servers(self, servers: List[Server]):\n pass",
"def connect_servers(self):\r\n\r\n for srvr in self.server_list_text:\r\n try:\r\n self.connect_server(srvr[\"serverID\"], srvr[\"serverIP\"], int(srvr[\"serverPort\"]), srvr[\"nickname\"])\r\n except Exception as exp:\r\n print(\"Error occurred.\\nWhy: {0}\".format(exp)) # TOOD: posílat takovéhle errory klientům\r",
"def __init__(self, target_src, target_dst, dst):\n self.client_list = list()\n self.server_list = list()\n if isinstance(target_src, list):\n if isinstance(dst, list):\n assert len(target_src) == len(dst)\n for i in range(0, len(dst)):\n self.client_list.append(IperfClient(target_src[i], dst[i]))\n else:\n for src in target_src:\n self.client_list.append(IperfClient(src, dst))\n if isinstance(target_dst, list):\n assert isinstance(dst, list)\n assert len(target_src) == len(target_dst)\n assert len(target_src) == len(dst)\n for dst in target_dst:\n self.server_list.append(IperfServer(dst))\n else:\n self.server_list.append(IperfServer(target_dst))\n else:\n assert not isinstance(target_dst, list)\n assert not isinstance(dst, list)\n self.client_list.append(IperfClient(target_src, dst))\n self.server_list.append(IperfServer(target_dst))",
"def objs(self):\n return (\n Nameserver.objects.create(\n domain=self.r, server='ns2.moot.ru'),\n Nameserver.objects.create(\n domain=self.r, server='ns5.moot.ru'),\n Nameserver.objects.create(\n domain=self.r, server=u'ns3.moot.ru'),\n Nameserver.objects.create(\n domain=self.b_f_r, server='n1.moot.ru'),\n Nameserver.objects.create(\n domain=self.b_f_r, server='ns2.moot.ru'),\n Nameserver.objects.create(\n domain=self.r, server='asdf.asdf'),\n )",
"def create_server(self, init_list):\n self.factory.initialize_resources(init_list)\n reactor.listenTCP(SERVER_PORT, self.factory)",
"def __init__(self):\n self._host = None\n self._port = None\n self._servers = []",
"def set_servers(self, servers):\r\n self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,\r\n socket_timeout=self.socket_timeout)\r\n for s in servers]\r\n self._init_buckets()",
"def start(self):\r\n for srv in self._servers:\r\n srv.start()",
"def __init__(self, server):\r\n self.server = server",
"def build_server_list(client, diff):\n print(\"INFO: Fetching each server not in 'Communication failure' anymore...\")\n servers = []\n for server in diff:\n servers.append(client.server(str(server.id)))\n return servers",
"def __init__(self, server_addr, server_port):",
"def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected",
"def __init__(self, protocol):\n\n # TODO: add client dictionary\n self._name = protocol['name']\n self._mode = protocol['mode']\n\n try:\n from minicps import __file__\n index = __file__.rfind('minicps')\n self._minicps_path = __file__[:index+7] + '/'\n\n except Exception as error:\n print('ERROR Protocol __init__ set _minicps_path: ', error)\n\n if self._mode > 0:\n # TODO: update server dict field: log\n self._server = protocol['server']\n else:\n self._server = {}",
"def __init__(self, hosts: List[IPv4Address], loop: asyncio.AbstractEventLoop):\n super().__init__()\n self.hosts = hosts\n self.loop = loop",
"def list_servers(self, request, paginate):\n raise NotImplementedError",
"def buildProtocol(self,addr):\r\n p = self.protocol(self.server)\r\n p.factory = self\r\n return p",
"def __new__(cls, conf, **kwargs):\n if isinstance(conf, Struct):\n dconf = conf.to_dict()\n\n else:\n dconf = conf\n\n dconf.pop('kind', None)\n ls_solvers = [(ls, Struct(**_conf) + Struct(kind=ls) + Struct(**dconf))\n for ls, _conf in cls._ls_solvers]\n\n return use_first_available(ls_solvers, **kwargs)",
"def connect_all(self, service=VoidService, config={}):\n return [s.connect(service, config) for s in self.servers]",
"def servers(self):\n response = self._request(\"GET\", [ROUTE_SERVERS])\n\n return CBWParser().parse_response(CBWServer, response)",
"def start_ircbot(server_list, operator):\n logging.info(\"Launching IRC bots\")\n for name, server_info in server_list.items():\n server, nick, channel, port, ssl = (\n server_info['server'],\n server_info.get('nickname', 'slight-ci'), \n server_info['channel'], \n server_info['port'], \n server_info.get('ssl', False)\n )\n sock = create_socket(server, port, ssl)\n if sock is not None:\n queue = messages.get_queue(sock)\n thread = threading.Thread(target=listen, args=(sock, queue, nick, channel, operator))\n thread.daemon = True\n thread.start()\n thread = threading.Thread(target=dispatch_messages, args=(sock, queue, channel))\n thread.daemon = True\n thread.start()",
"def set_servers(self, servers):\n kwargs = dict(io_loop = self.io_loop)\n #if self.connect_timeout:\n # kwargs['connect_timeout'] = self.connect_timeout \n if self.dead_retry:\n kwargs['dead_retry'] = self.dead_retry \n self.servers = [_Host(s, self.debuglog, **kwargs) for s in servers]\n self._init_buckets()",
"def get_all_servers(self) -> List[Server]:\n pass",
"def create_server_list(user, apikey, account_id, region=None, path=os.path.expanduser('~/.fabrackservers')):\n if region == 'uk':\n auth = (uk_authurl_v1_0, uk_authurl_v2_0)\n next_gen_dc = ['lon']\n else:\n auth = (us_authurl_v1_0, us_authurl_v2_0)\n next_gen_dc = ['ord', 'dfw']\n servers = []\n first_gen = openstack.compute.Client(username=user, apikey=apikey, \n auth_url=auth[1], service_type='compute')\n for server in first_gen.servers.list():\n servers.append({ 'name': server.name, 'addresses': server.addresses, \n 'generation': 1 })\n\n for r in next_gen_dc:\n next_gen = novaclient.Client(user, apikey, account_id, auth_url=auth[2], \n region_name=r, service_name='cloudServersOpenStack')\n for server in next_gen.servers.list():\n servers.append({ 'name': server.name, 'addresses': server.addresses, \n 'generation': 2 })\n with open(path, 'w') as fh:\n pickle.dump(servers, fh)\n return servers",
"def run(self):\n self._list_servers()",
"def __init__(self, services=None, verbose=False, responses=None):\n if not services:\n from moneywagon import ALL_SERVICES\n services = ALL_SERVICES\n\n self.services = []\n for ServiceClass in services:\n self.services.append(\n ServiceClass(verbose=verbose, responses=responses)\n )\n\n self.verbose = verbose\n self._successful_service = None # gets filled in after success\n self._failed_services = []",
"def _initialize_protocols(self):\n with open(str(pathlib.Path(__file__).parent / 'protocol.yml'), encoding='UTF-8') as handle:\n self._protocols = yaml.safe_load(handle)",
"def getServerInterfaces(self):\n return self.servers",
"def init_server_list(self, subsection_arch, subsection_id, server_range,\n output_list):\n\n # Init a list of servers to return.\n rtn = []\n\n # Loop through all servers in the arch.\n for i in range(server_range[0], server_range[1]):\n\n # Pass the ID of the server and Init a server.\n rtn.append(ServiceAgent(str(i), subsection_id, output_list))\n\n # Return the list.\n return rtn"
] |
[
"0.6665561",
"0.5961506",
"0.58802676",
"0.582022",
"0.57430696",
"0.55884355",
"0.55266905",
"0.55143803",
"0.5494216",
"0.54644495",
"0.54373556",
"0.53618836",
"0.5360595",
"0.53562003",
"0.5351459",
"0.534337",
"0.53352594",
"0.5334474",
"0.53176737",
"0.53134704",
"0.53105503",
"0.5309834",
"0.5308343",
"0.52998567",
"0.5293259",
"0.5246405",
"0.5238014",
"0.5235614",
"0.5231856",
"0.5225432"
] |
0.6542367
|
1
|
Tests for Average Bioequivalence.
|
def test_average_bioequivalence():
# See 10.2 Example from Chow et al.
h = Average(delta=0.223, stdev=0.40, margin=0.05,
alpha=0.05, power=0.8, known_stdev=True)
h.calculate()
# Chow has 21, but they have the wrong z_beta/2. It should be 1.28,
# not 0.84. When that is fixed, the correct n is 23
assert h.n == 23
assert h.power > 0.8
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")",
"def test_avg_entanglement_fidelity_ensemble():\n # Test on emsemble.\n probs = [1.]\n states = [np.eye(2) / 2.]\n # Test on pauli choi matrix.\n krauss_ops = initialize_pauli_examples(0.1, 0.2, 0.7)\n choi_matrix = sum([np.outer(np.ravel(x, order=\"F\"),\n np.conj(np.ravel(x, order=\"F\"))) for x in krauss_ops])\n choi_obj = ChoiQutip(choi_matrix, [1, 1], 2, 2)\n actual = choi_obj.average_entanglement_fidelity(probs, states)\n desired = np.ravel(states[0], \"F\").dot(choi_matrix.dot(np.ravel(states[0], \"F\")))\n assert np.abs(actual - desired) < 1e-5\n\n # Test on another ensemble\n probs = [0.25, 0.75]\n states = [np.eye(2), (np.eye(2) + 0.2 * np.array([[0., 1.], [1., 0.]])) / 2.]\n actual = choi_obj.average_entanglement_fidelity(probs, states)\n desired = np.ravel(states[0], \"F\").dot(choi_matrix.dot(np.ravel(states[0], \"F\"))) * probs[0]\n desired += np.ravel(states[1], \"F\").dot(choi_matrix.dot(np.ravel(states[1], \"F\"))) * probs[1]\n assert np.abs(actual - desired) < 1e-5\n\n kraus = [np.array([[0., 1.], [1., 0.]])]",
"def compare_averages(ave_stats):\n pass",
"def test_compare_averages_asymptotics(self):\n # load models\n models = [pybamm.lead_acid.LOQS(), pybamm.lead_acid.Full()]\n\n # load parameter values (same for all models)\n param = models[0].default_parameter_values\n param.update({\"Current function [A]\": 1})\n for model in models:\n param.process_model(model)\n\n # set mesh\n var_pts = {\"x_n\": 10, \"x_s\": 10, \"x_p\": 10}\n\n # discretise models\n for model in models:\n geometry = model.default_geometry\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n\n # solve model\n solutions = []\n t_eval = np.linspace(0, 3600 * 17, 100)\n for model in models:\n solution = pybamm.CasadiSolver().solve(model, t_eval)\n solutions.append(solution)\n\n # test averages\n comparison = StandardOutputComparison(solutions)\n comparison.test_averages()",
"def test_mean(self):\n pass",
"def test_mean(self):\n pass",
"def test_ACE(self): \n self.assertFloatEqual(ACE(array([2,0])), 1.0, eps=0.001)\n # next: just returns the number of species when all are abundant\n self.assertFloatEqual(ACE(array([12,0,9])), 2.0, eps=0.001)\n self.assertFloatEqual(ACE(array([12,2,8])), 3.0, eps=0.001)\n self.assertFloatEqual(ACE(array([12,2,1])), 4.0, eps=0.001)\n self.assertFloatEqual(ACE(array([12,1,2,1])), 7.0, eps=0.001)\n self.assertFloatEqual(ACE(array([12,3,2,1])), 4.6, eps=0.001)\n self.assertFloatEqual(ACE(array([12,3,6,1,10])), 5.62749672, eps=0.001)",
"def test_avgeraging(self):\n\n num_ensemble = 10\n enn = networks.MLPEnsembleEnn(\n output_sizes=[1],\n num_ensemble=num_ensemble,\n )\n\n dummy_metrics = {'a': 0, 'b': 1}\n # A dummy loss fn that returns the normalized index as loss and two constant\n # metrics. Index is random but normalized such that its mean is 1.\n single_loss_fn = DummySingleIndexLossFn(num_ensemble, dummy_metrics)\n\n num_index_samples = 100\n loss_fn = average_single_index_loss(single_loss_fn, num_index_samples)\n dummy_batch = base.Batch(np.ones([1, 1]), np.ones([1, 1]))\n loss, metrics = loss_fn(\n enn=enn, params=dict(), batch=dummy_batch, key=jax.random.PRNGKey(0))\n\n # Since the single loss has mean 1 the averaged loss also has mean 1 a\n # variance proportional to 1/np.sqrt(num_index_samples).\n self.assertAlmostEqual(\n loss,\n 1.0,\n delta=5 / np.sqrt(num_index_samples),\n msg=f'Expected loss to be ~1.0 but it is {loss}')\n self.assertDictEqual(\n metrics, dummy_metrics,\n f'expected metrics to be {dummy_metrics} but it is {metrics}')",
"def test_population_bioequivalence():\n\n # See 10.3 Example from Chow et al.\n h = Population(l=-0.2966, stdev_11=0.2, stdev_tt=math.sqrt(0.17),\n stdev_tr=math.sqrt(0.17), stdev_bt=0.4, stdev_br=0.4,\n rho=0.75, alpha=0.05, power=0.8)\n h.calculate()\n assert h.n == 12",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)",
"def average_percent_error(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n return np.mean(np.abs(test_data - truth_data) / truth_data)",
"def test_average():\n assert uniplot.analysis.average_len(\n uniplot.parse.uniprot_seqrecords(TEST_UNIPROT)\n ) == 302.72222222222223",
"def test_interfacial_current_average(self):\n\n np.testing.assert_array_almost_equal(\n np.mean(\n self.a_n(self.t, self.x_n)\n * (self.j_n(self.t, self.x_n) + self.j_n_sei(self.t, self.x_n)),\n axis=0,\n ),\n self.i_cell / self.l_n,\n decimal=4,\n )\n np.testing.assert_array_almost_equal(\n np.mean(\n self.a_p(self.t, self.x_p)\n * (self.j_p(self.t, self.x_p) + self.j_p_sei(self.t, self.x_p)),\n axis=0,\n ),\n -self.i_cell / self.l_p,\n decimal=4,\n )",
"def test_preds_average():\n pred_1 = np.array([[0.1, 0.3, 0.1, 0.5], [0.9, 0.05, 0.025, 0.025]])\n pred_2 = np.array([[0.6, 0.1, 0.2, 0.1], [0.8, 0.1, 0.05, 0.05]])\n av = preds_average([pred_1, pred_2], [0.9, 0.1])\n assert (av == np.array([3, 0])).all()",
"def testMeansClose(self):\n mean_sim = {}\n mean_ana = {}\n expected_sim = {0.01: 168.0, 0.1: 327.2, 0.5: 433.8, 0.9: 466.0}\n expected_ana = {0.01: 175.0, 0.1: 328.9, 0.5: 430.2, 0.9: 457.5}\n for k in self.species_richnesses_sim.keys():\n mean_sim[k] = float(sum(self.species_richnesses_sim[k])) / float(len(self.species_richnesses_sim[k]))\n mean_ana[k] = float(sum(self.species_richnesses_ana[k])) / float(len(self.species_richnesses_ana[k]))\n self.assertEqual(expected_sim, mean_sim)\n self.assertEqual(expected_ana, mean_ana)",
"def test_avg_mean(forecasters):\n y = make_forecasting_problem()\n forecaster = EnsembleForecaster(forecasters)\n forecaster.fit(y, fh=[1, 2, 3])\n mean_pred = forecaster.predict()\n\n forecaster_1 = EnsembleForecaster(forecasters, aggfunc=\"mean\", weights=[1, 1])\n forecaster_1.fit(y, fh=[1, 2, 3])\n avg_pred = forecaster_1.predict()\n\n pd.testing.assert_series_equal(mean_pred, avg_pred)",
"def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")",
"def test_is_average(self):\n avg_orders = Decimal(self._uncertain_demand.average_orders)\n self.assertEqual(avg_orders, 50)",
"def test_gene_essentiality_from_data_qualitative(combined_dataframe):\n comparative_dataframe, exp = essential.prepare_qualitative_comparison(\n combined_dataframe\n )\n assert len(comparative_dataframe[comparative_dataframe[\"true_positives\"] == 1]) == 3",
"def test_properlyAveraged(self):\n r0 = self.singleReader\n r1 = DetectorReader(DET_FILES['bwr1'])\n r1.read()\n for detName in self.sampler.detectors:\n expectedTallies, expectedErrors = (_getExpectedAverages(\n r0.detectors[detName], r1.detectors[detName]))\n uniq = self.sampler.detectors[detName]\n assert_allclose(uniq.tallies, expectedTallies, err_msg='tallies',\n **TOLERANCES['tallies'])\n assert_allclose(uniq.errors, expectedErrors, err_msg='errrors',\n **TOLERANCES['errors'])",
"def test_compare_alpha_diversities(self):\r\n # test 'Dose' at 480 inputs\r\n category = 'Dose'\r\n depth = 480\r\n test_type = 'parametric'\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n\r\n # hardcoded order of the terms in the keys otherwise would comps fail\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (1.1746048668554037, 0.44899351189030801),\r\n ('1xDose', '2xDose'): (1.7650193854830403, 0.17574514418562981),\r\n ('Control', '1xDose'): (0.43618805086434992, 0.7052689260099092)}\r\n\r\n # test each key in expected results -- this won't catch if\r\n # obs_tcomps has extra entries, but test that via the next call\r\n for k in exp_tcomps:\r\n assert_almost_equal(exp_tcomps[k], obs_tcomps[k])\r\n self.assertEqual(set(exp_tcomps.keys()), set(obs_tcomps.keys()))\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n exp_ad_avgs = {'1xDose': (3.2511951575216664, 0.18664627928763661),\r\n '2xDose': (2.7539647172550001, 0.30099438035250015),\r\n 'Control': (3.3663303519925001, 0.0)}\r\n for k in exp_ad_avgs:\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])\r\n\r\n # test 'Dose' at 480 inputs with nonparametric test\r\n seed(0) # set the seed to reproduce random MC pvals\r\n category = 'Dose'\r\n depth = 480\r\n test_type = 'nonparametric'\r\n num_permutations = 100\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type, num_permutations=num_permutations)\r\n exp_tcomps = {('1xDose', '2xDose'): (1.7650193854830403, 0.13),\r\n ('Control', '1xDose'): (0.43618805086434992, 0.83), ('Control',\r\n '2xDose'): (1.1746048668554037, 0.62)}\r\n # test each key in expected results -- this won't catch if\r\n # obs_tcomps has extra entries, but test that via the next call\r\n for k in exp_tcomps:\r\n assert_almost_equal(exp_tcomps[k], obs_tcomps[k])\r\n self.assertEqual(set(exp_tcomps.keys()), set(obs_tcomps.keys()))\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n exp_ad_avgs = {'Control': (\r\n 3.3663303519925001,\r\n 0.0),\r\n '1xDose': (3.2511951575216664,\r\n 0.18664627928763661),\r\n '2xDose': (2.7539647172550001,\r\n 0.30099438035250015)}\r\n\r\n for k in exp_ad_avgs:\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])\r\n\r\n # test it works with NA values\r\n # test 'Dose' at 500 inputs with paramteric test\r\n category = 'Dose'\r\n depth = 500\r\n test_type = 'parametric'\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (-0.63668873339963239, 0.63906168713487699),\r\n ('1xDose', '2xDose'): (None, None),\r\n ('Control', '1xDose'): (None, None)}\r\n for obs, exp in izip(obs_tcomps, exp_tcomps):\r\n self.assertEqual(obs, exp)\r\n \r\n # test that it works with nonparametric test - this was erroring.\r\n seed(0)\r\n test_type = 'nonparametric'\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (-0.63668873339963239, 0.672),\r\n ('1xDose', '2xDose'): (None, None),\r\n ('Control', '1xDose'): (None, None)}\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n 
self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n for obs, exp in izip(obs_tcomps, exp_tcomps):\r\n self.assertEqual(obs, exp)\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n # will fail on nan comparison so avoid this\r\n exp_ad_avgs = {'1xDose': (nan, nan),\r\n '2xDose': (3.1955144893699998, 0.84206819489000018),\r\n 'Control': (2.2669008538500002, 0.0)}\r\n for k in exp_ad_avgs:\r\n if k != '1xDose':\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])\r\n if k == '1xDose':\r\n self.assertTrue(all(map(isnan, obs_ad_avgs[k])))\r\n\r\n # test that it works when no depth is passed\r\n category = 'Dose'\r\n depth = None # should return depth = 910\r\n test_type = 'parametric'\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n\r\n # hardcoded order of the terms in the keys otherwise would comps fail\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (3.3159701868634883, 0.1864642327553255),\r\n ('1xDose', '2xDose'): (-0.48227871733885291, 0.66260803238173183),\r\n ('Control', '1xDose'): (0.83283756452373126, 0.49255115337550748)}\r\n for obs, exp in izip(obs_tcomps, exp_tcomps):\r\n self.assertEqual(obs, exp)\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n exp_ad_avgs = {'1xDose': (2.6763340901916668, 0.36025734786901326),\r\n '2xDose': (2.8358041871949999, 0.04611264137749993),\r\n 'Control': (3.1006488615725001, 0.0)}\r\n for k in exp_ad_avgs:\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])",
"def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])",
"def test_liemdframe_get_average(self):\n\n ave = self.mdframe.get_average()\n\n # Unbound energies should be same value for all poses\n self.assertEqual(len(ave['coul_unbound'].unique()), 1)\n self.assertEqual(len(ave['coul_unbound']), 5)\n self.assertEqual(len(ave['vdw_unbound'].unique()), 1)\n self.assertEqual(len(ave['vdw_unbound']), 5)\n\n # Bound energies mostly unique for all poses\n self.assertEqual(len(ave['coul'].unique()), 5)\n self.assertEqual(len(ave['vdw'].unique()), 5)\n self.assertEqual(len(ave['coul']), 5)\n self.assertEqual(len(ave['vdw']), 5)\n\n # Check values\n self.assertListEqual(list(ave.poses), [1, 2, 3, 4, 5])\n self.assertListEqual(list(ave.cases), [1])\n self.assertAlmostEqual(ave['coul'][0], 60.12452, places=5)\n self.assertAlmostEqual(ave['vdw'][1], -53.78007, places=5)",
"def test_e(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'e'\n self.assertEqual(7, switch_average(user_dict, user_key.upper()))"
] |
[
"0.6671989",
"0.64432764",
"0.64178544",
"0.62681234",
"0.62358105",
"0.62358105",
"0.61920863",
"0.6122437",
"0.6073407",
"0.60704947",
"0.60704947",
"0.60704947",
"0.6024898",
"0.6024063",
"0.6024063",
"0.6024063",
"0.59498715",
"0.5922118",
"0.58959824",
"0.5873126",
"0.58328617",
"0.577348",
"0.5763123",
"0.57376295",
"0.5729476",
"0.5723763",
"0.56868964",
"0.56713796",
"0.56688964",
"0.5652847"
] |
0.8098406
|
0
|
Tests for Population Bioequivalence.
|
def test_population_bioequivalence():
# See 10.3 Example from Chow et al.
h = Population(l=-0.2966, stdev_11=0.2, stdev_tt=math.sqrt(0.17),
stdev_tr=math.sqrt(0.17), stdev_bt=0.4, stdev_br=0.4,
rho=0.75, alpha=0.05, power=0.8)
h.calculate()
assert h.n == 12
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_compare_genomes_2(self):\n self.pmr_gnm.annotation_status = \"final\"\n self.pmr_gnm.name = \"Trixie\"\n import_genome.compare_genomes(self.genome_pair, self.eval_flags)\n count = count_status(self.genome_pair, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.genome_pair.evaluations), 13)\n with self.subTest():\n self.assertEqual(count, 0)",
"def test_one_pop(data_: tuple, _is_pop: bool):\n x_bar = cls.get_mean(data_)\n s_x = cls.get_stdev(data_, is_population=_is_pop)\n n_x = cls.get_n(data_)\n return (x_bar - h0) / (s_x / sqrt(n_x))",
"def c_test_population_function(self, function):\r\n return 1",
"def testIsBiconnected(self):\n self.assertEqual(is_biconnected(self.G1), True)\n self.assertEqual(is_biconnected(self.G2), False)",
"def test_compare_genomes_1(self):\n import_genome.compare_genomes(self.genome_pair, self.eval_flags)\n count = count_status(self.genome_pair, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.genome_pair.evaluations), 12)\n with self.subTest():\n self.assertEqual(count, 0)",
"def test_create_population():\n pop = Population()\n assert len(pop.population) == POPULATION_SIZE\n assert isinstance(pop.population[0], Individual)",
"def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01",
"def test_relevance_with_itself():\n state = gen_state_cgpm(get_data_separated)\n assert np.allclose(state.relevance_probability(2, [2], 1), 1.0)",
"def test_make_pop(self, pop_size, cell_number, microcell_number):\n for i in [0, 1]:\n pe.Parameters.instance().use_ages = i\n # Population is initialised with no households\n pop_params = {\"population_size\": pop_size,\n \"cell_number\": cell_number,\n \"microcell_number\": microcell_number}\n test_pop = ToyPopulationFactory.make_pop(pop_params)\n\n total_people = 0\n count_non_empty_cells = 0\n for cell in test_pop.cells:\n for microcell in cell.microcells:\n total_people += len(microcell.persons)\n if len(cell.persons) > 0:\n count_non_empty_cells += 1\n # Test there is at least one non-empty cell\n self.assertTrue(count_non_empty_cells >= 1)\n # Test that everyone in the population has been assigned a\n # microcell\n self.assertEqual(total_people, pop_size)\n\n # Test a population class object is returned\n self.assertIsInstance(test_pop, pe.Population)",
"def test_genomic(self):\n self.c.execute(\"\"\"select expIds,expScores from genomic_test\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 1) # one probe\n self.assertEqual(rows[0][0], '0,1,2,3,4') # ordered by sample id\n values = map(lambda x: float(x), rows[0][1].split(',')) # scores are in correct order\n self.assertTrue(values[0] - 0.479005065149792 < self.tolerance)\n self.assertTrue(values[1] - 25.1 < self.tolerance)\n self.assertTrue(values[2] - 5.3 < self.tolerance)\n self.assertTrue(values[3] - 3.1 < self.tolerance)\n self.assertTrue(values[4] - -1.23 < self.tolerance)",
"def test_get_provenance(self):\n filename = 'mpciMeanImage.mlapdv_estimate.npy'\n provenance = MesoscopeFOV.get_provenance(filename)\n self.assertEqual('ESTIMATE', provenance.name)\n filename = 'mpciROIs.brainLocation_ccf_2017.npy'\n provenance = MesoscopeFOV.get_provenance(filename)\n self.assertEqual('HISTOLOGY', provenance.name)",
"def test_2():\n results = base_tests()\n correct = {\n \"Consequence\": \"synonymous_variant\",\n \"Codons\": \"tgC/tgT\",\n \"Amino_acids\": \"C\",\n \"Gene\": \"ENSG00000130164\",\n \"SYMBOL\": \"LDLR\",\n \"Feature\": \"ENST00000558013\",\n \"EXON\": \"2/18\",\n \"PolyPhen\": \"\",\n \"SIFT\": \"\",\n \"Protein_position\": \"27/858\",\n 'BIOTYPE\"': \"protein_coding\",\n }\n assert results[0] == correct",
"def test_avg_entanglement_fidelity_ensemble():\n # Test on emsemble.\n probs = [1.]\n states = [np.eye(2) / 2.]\n # Test on pauli choi matrix.\n krauss_ops = initialize_pauli_examples(0.1, 0.2, 0.7)\n choi_matrix = sum([np.outer(np.ravel(x, order=\"F\"),\n np.conj(np.ravel(x, order=\"F\"))) for x in krauss_ops])\n choi_obj = ChoiQutip(choi_matrix, [1, 1], 2, 2)\n actual = choi_obj.average_entanglement_fidelity(probs, states)\n desired = np.ravel(states[0], \"F\").dot(choi_matrix.dot(np.ravel(states[0], \"F\")))\n assert np.abs(actual - desired) < 1e-5\n\n # Test on another ensemble\n probs = [0.25, 0.75]\n states = [np.eye(2), (np.eye(2) + 0.2 * np.array([[0., 1.], [1., 0.]])) / 2.]\n actual = choi_obj.average_entanglement_fidelity(probs, states)\n desired = np.ravel(states[0], \"F\").dot(choi_matrix.dot(np.ravel(states[0], \"F\"))) * probs[0]\n desired += np.ravel(states[1], \"F\").dot(choi_matrix.dot(np.ravel(states[1], \"F\"))) * probs[1]\n assert np.abs(actual - desired) < 1e-5\n\n kraus = [np.array([[0., 1.], [1., 0.]])]",
"def testSanity(self):\n\t\tga = GA.GA(2,3)\n\t\tgenomes = ga.seedGenomes()\n\t\tself.assertEqual(len(genomes), 2, \n\t\t \"Wrong number of genomes\")\n\t\tself.assertEqual(len(genomes[0]), 3, \n\t\t \"Wrong size in genomes\")\n\t\t#print genomes\n\t\t#live and learn\n\t\tfitnesses = [23, 45]\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes2 = ga.createNextGeneration()\n\t\tself.assertEqual(len(genomes2), 2, \n \"Wrong number of genomes\")\n\t\tself.assertEqual(len(genomes2[0]), 3, \n \"Wrong size in genomes\")",
"def simulationTwoDrugsVirusPopulations():\n #TODO",
"def test_gene_essentiality_from_data_qualitative(combined_dataframe):\n comparative_dataframe, exp = essential.prepare_qualitative_comparison(\n combined_dataframe\n )\n assert len(comparative_dataframe[comparative_dataframe[\"true_positives\"] == 1]) == 3",
"def test_average_bioequivalence():\n\n # See 10.2 Example from Chow et al.\n h = Average(delta=0.223, stdev=0.40, margin=0.05,\n alpha=0.05, power=0.8, known_stdev=True)\n h.calculate()\n # Chow has 21, but they have the wrong z_beta/2. It should be 1.28,\n # not 0.84. When that is fixed, the correct n is 23\n assert h.n == 23\n assert h.power > 0.8",
"def check_proof(self):\n for gene in self.population:\n if gene.is_proof:\n print(gene.chromosome)\n for state in gene.coq_states:\n print(state)\n self.proofs.append(Gene(chromosome=gene.valid_tactics))",
"def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages",
"def test_fitness():\n herb1 = Herbivore(0)\n herb2 = Herbivore(80)\n nt.assert_not_equal(herb1.fitness, herb2.fitness)\n herb3 = Herbivore(20, 0)\n herb4 = Herbivore(20, 80)\n nt.assert_not_equal(herb3.fitness, herb4.fitness)",
"def eaSimple(population, toolbox, cxpb, mutpb, elitpb, ngen, stats=None,\n halloffame=None, verbose=__debug__):\n logbook = tools.Logbook()\n logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])\n # Evaluate the individuals with an invalid fitness\n\n for i in population:\n i.fitness.values = toolbox.evaluate(individual=i, hof=[])\n\n if halloffame is not None:\n halloffame.update(population)\n hof_store = tools.HallOfFame(5*len(population))\n hof_store.update(population)\n cop_po = population\n record = stats.compile(population) if stats else {}\n logbook.record(gen=0, nevals=len(population), **record)\n if verbose:\n print(logbook.stream)\n\n for gen in range(1, ngen + 1):\n\n #Select the next generation individuals by elitism\n elitismNum=int(elitpb * len(population))\n population_for_eli=[toolbox.clone(ind) for ind in population]\n offspringE = toolbox.selectElitism(population_for_eli, k=elitismNum)\n \n #print('e',offspringE[0].fitness.values)\n\n #print(len(offspringE))\n #print(offspringE[0].fitness.values)\n # Select the next generation individuals for crossover and mutation\n offspring = toolbox.select(population, len(population)-elitismNum)\n # Vary the pool of individuals\n offspring = varAnd(offspring, toolbox, cxpb, mutpb)\n # add offspring from elitism into current offspring\n #generate the next generation individuals\n \n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n #print(len(invalid_ind))\n \n for i in invalid_ind:\n i.fitness.values = toolbox.evaluate(individual=i, hof=cop_po)\n \n offspring[0:0]=offspringE\n \n # Update the hall of fame with the generated\n if halloffame is not None:\n halloffame.update(offspring)\n cop_po = offspring.copy()\n hof_store.update(offspring)\n for i in hof_store:\n cop_po.append(i)\n population[:] = offspring\n # Append the current generation statistics to the logbook\n record = stats.compile(population) if stats else {}\n #print(record)\n logbook.record(gen=gen, nevals=len(offspring), **record)\n #print(record)\n if verbose:\n print(logbook.stream) \n return population, logbook",
"def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit",
"def test_expectation():\n\n age = 1e-5\n ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])\n comp1 = SphereComponent(ass_pars1)\n ass_pars2 = np.array([100., 0, 0, 20, 0, 0, 5., 2., age])\n comp2 = SphereComponent(ass_pars2)\n starcounts = [100,100]\n synth_data = SynthData(pars=[ass_pars1, ass_pars2],\n starcounts=starcounts)\n synth_data.synthesise_everything()\n tabletool.convert_table_astro2cart(synth_data.table)\n\n true_memb_probs = np.zeros((np.sum(starcounts), 2))\n true_memb_probs[:starcounts[0], 0] = 1.\n true_memb_probs[starcounts[0]:, 1] = 1.\n\n # star_means, star_covs = tabletool.buildDataFromTable(synth_data.astr_table)\n # all_lnols = em.getAllLnOverlaps(\n # synth_data.astr_table, [comp1, comp2]\n # )\n\n fitted_memb_probs = em.expectation(\n tabletool.build_data_dict_from_table(synth_data.table),\n [comp1, comp2]\n )\n\n assert np.allclose(true_memb_probs, fitted_memb_probs, atol=1e-10)",
"def test_unique_genome(self):\n p1 = self.player()\n p2 = self.player()\n self.assertTrue(p1.genome is p2.genome)",
"def test_observed_species(self):\n c = array([4,3,4,0,1,0,2])\n obs = observed_species(c)\n exp = 5\n self.assertEqual(obs, exp)\n c = array([0,0,0])\n obs = observed_species(c)\n exp = 0\n self.assertEqual(obs, exp)\n self.assertEqual(observed_species(self.TestData), 9)",
"def test_bias_ih(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n gene1, gene2 = get_gru_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n for v in gene3.bias_ih:\n if v == 0:\n p1 = True\n elif v == 1:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(np.linalg.norm(gene3.bias_ih - gene1.bias_ih), 0)\n self.assertNotEqual(np.linalg.norm(gene3.bias_ih - gene2.bias_ih), 0)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertNotEqual(np.linalg.norm(gene3.bias_ih - gene1.bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene3.bias_ih - gene2.bias_ih), 0)",
"def test_conservation(self):\n self.c_s_tot = (\n self.c_s_n_tot(self.solution.t)\n + self.c_s_p_tot(self.solution.t)\n + self.c_SEI_n_tot(self.solution.t)\n + self.c_SEI_p_tot(self.solution.t)\n + self.c_Li_n_tot(self.solution.t)\n + self.c_Li_p_tot(self.solution.t)\n )\n diff = (self.c_s_tot[1:] - self.c_s_tot[:-1]) / self.c_s_tot[:-1]\n if \"profile\" in self.model.options[\"particle\"]:\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"surface form\"] == \"differential\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"SEI\"] == \"ec reaction limited\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=12)\n else:\n np.testing.assert_array_almost_equal(diff, 0, decimal=15)",
"def two_pop_var_test(datae,dataf,alpha):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Calculate Critical Regions\n F = var_e/var_f\n critical_region_left = scs.f.ppf(alpha-(alpha/2),df_e,df_f) \n critical_region_right = scs.f.ppf(1-alpha/2,df_e,df_f) \n \n # Decision \n if F < critical_region_left and F > critical_region_right:\n decision = 'Reject H0'\n return critical_region_left,critical_region_right,F,decision\n else:\n decision = 'Fail to Reject H0'\n return critical_region_left,critical_region_right,F,decision",
"def test_case_7():\n N = 50\n\n x = 0\n y = 0\n strength = 10000\n population = [[0 for i in range(N)] for j in range(N)]\n final_population = simulate_infection(population, x, y, strength)\n\n assert sum([len([i for i in row if i == -1]) for row in final_population]) == N * N",
"def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0"
] |
[
"0.6193102",
"0.61115116",
"0.6015876",
"0.59955764",
"0.5883219",
"0.58637136",
"0.5820107",
"0.5813707",
"0.5778114",
"0.5767895",
"0.57653195",
"0.57620484",
"0.573535",
"0.57301676",
"0.57206523",
"0.56827587",
"0.5657508",
"0.56414694",
"0.5628475",
"0.5628418",
"0.5615447",
"0.5606899",
"0.56052345",
"0.5594801",
"0.55775756",
"0.55750185",
"0.55646694",
"0.55505675",
"0.55387723",
"0.553694"
] |
0.78521746
|
0
|
get observables from stix2 json
|
def get_observables(stix_json, log):
objects = stix_json["objects"]
observables = []
for obj in objects:
observable = get_observable(obj, log)
if observable:
observables.append(observable)
return observables
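
A hedged usage sketch (the file name and logger are illustrative assumptions, and get_observable is expected to be available from the same module): parse a STIX 2 bundle from disk and collect the simplified observables.

import json
import logging

log = logging.getLogger(__name__)
with open("bundle.json") as f:  # hypothetical input file, not part of the original record
    stix_json = json.load(f)
observables = get_observables(stix_json, log)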
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_servables(self):\n if not self._server_started() is True:\n print('Server not started at host %s, port %d' % (self.host, self.port))\n sys.exit(0)\n else:\n headers = {'Content-Type': 'application/json'}\n url = 'http://'+self.host+':'+str(self.port)+'/servables'\n res = requests.get(url=url, headers=headers)\n res.content.decode(\"utf-8\")\n res_body = res.json()\n\n if res.status_code != requests.codes.ok:\n print(\"Request error! Status code: \", res.status_code)\n sys.exit(0)\n elif res_body['status'] != 0:\n print(res_body['err_msg'])\n sys.exit(0)\n else:\n return res_body['servables']",
"def observables(self):\r\n return self.__obs",
"def get_observable(stix_obj, log):\n res_obj = {}\n\n if stix_obj[u\"type\"] == \"relationship\":\n return None\n\n res_obj[u\"toxicity\"] = stix_obj.get(IBM_TOXICITY, \"\")\n res_obj[u\"relevance\"] = stix_obj.get(IBM_RELEVANCE, \"\")\n res_obj[u\"description\"] = get_observable_description(stix_obj, log)\n res_obj[u\"type\"] = get_observable_type(stix_obj, log)\n\n return res_obj",
"def get(self, base_url, observable, limit, credentials):\n\n url = url_join(base_url, self.filter(observable)) + f'&$top={limit}'\n\n response = get_data(url, credentials)\n\n return [\n self.sighting(observable, x) for x in response.get('value', [])\n ]",
"def get_json(self):\r\n self.get_recordrange()\r\n [\r\n self.json_data.setter(n, self.get_data(\"json\", x))\r\n for n, x in enumerate(tqdm(self.iterlist))\r\n ]",
"def filter(self, observable):",
"def read(self, ix=None):\n\n if ix is not None:\n sims = {ix: self.sims[int(ix)]} \n else:\n sims = self.sims\n\n dump = []\n for ix, sim in sims.iteritems():\n kv = {}\n for k, v in sim.iteritems():\n if k == 'async_result':\n if v.ready():\n try:\n kv['result'] = v.get()\n kv['status'] = True\n except Exception as e:\n kv['status'] = repr(e)\n else:\n kv['status'] = 'waiting'\n else:\n kv[k] = v\n dump.append(kv)\n\n return json.dumps(dump)",
"def test_that_method_should_return_json_on_success(self, request):\n result = None\n\n def on_next(v):\n nonlocal result\n result = v\n\n value = MagicMock()\n value.json.return_value = {'json': 'dict'}\n observable = rx.Observable.from_([value])\n request.return_value = observable\n r = rx_json('GET', 'http://google.com')\n\n r.subscribe(on_next=on_next)\n\n self.assertEqual({'json': 'dict'}, result)",
"def _data_list(json: 'a json'):\n data = json['Time Series (Daily)']\n return list(data.items())",
"def observables(self):\n return set(self._observable_map.keys())",
"def query(src, jq):\n for res in Query(jq).execute(loads(src)):\n if isinstance(res, (Null, Node)):\n yield pyjson.loads(str(res))\n else:\n yield [pyjson.loads(str(item)) for item in res]",
"def observatories():\n\n obs_db = {}\n\n obs_db['PWT-Oxford'] = { 'long':'-01:15:00', \\\n 'lat':'+51:45:00', \\\n 'altitude-metres':130.0, \\\n 'timezone':'Europe/London' }\n\n obs_db['LaPalma'] = { 'lat':'+28:45:00', \\\n 'long':'-17:53:00', \\\n 'altitude-metres':2326, \\\n 'timezone':'Atlantic/Canary' }\n \n obs_db['Paranal'] = { 'lat':'-24:37:00', \\\n 'long':'-70:24:00', \\\n 'altitude-metres':2635, \\\n 'timezone':'America/Santiago' }\n\n obs_db['LaSilla'] = { 'lat':'-29:15:00', \\\n 'long':'-70:44:00', \\\n 'altitude-metres':2380, \\\n 'timezone':'America/Santiago' }\n\n obs_db['MaunaKea'] = { 'lat':'+19:50:00', \\\n 'long':'-155:28:00', \\\n 'altitude-metres':4190, \\\n 'timezone':'Pacific/Honolulu' }\n \n obs_db['SidingSpring'] = { 'lat':'-31:16:00', \\\n 'long':'+149:04:00', \\\n 'altitude-metres':1149, \\\n 'timezone':'Australia/Sydney' }\n \n obs_db['KittPeak'] = { 'lat':'+31:58:00', \\\n 'long':'-111:36:00', \\\n 'altitude-metres':2096, \\\n 'timezone':'America/Phoenix' }\n\n obs_db['CalarAlto'] = { 'lat':'+37:13:25', \\\n 'long':'-2:32:47', \\\n 'altitude-metres':2168, \\\n 'timezone':'Europe/Madrid' }\n \n obs_db['Gemini-N'] = { 'lat':'+19:49:26', \\\n 'long':'-155:28:09', \\\n 'altitude-metres':4213, \\\n 'timezone':'Pacific/Honolulu' }\n\n obs_db['Gemini-S'] = { 'lat':'-30:14:27', \\\n 'long':'-70:44:12', \\\n 'altitude-metres':2722, \\\n 'timezone':'America/Santiago' }\n\n return obs_db",
"async def get(self) -> List[RowProxy]:",
"def __call__(self, **kwargs):\n return super()._call(\n kwargs, f=lambda x: [registry.getObject(xx, self.session) for xx in x]\n )",
"async def stations():\n with open(\"/data/station_list.json\") as j:\n data = json.load(j)\n return data",
"def list_models():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_models\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)",
"def get(self):\n generos = Genero.recuperar_todos_los_generos()\n lista_generos = []\n for genero in generos:\n lista_generos.append(genero.obtener_json())\n return lista_generos",
"def _do_decode_objects(self, content, observation):\n decoder = json.JSONDecoder()\n try:\n doc = decoder.decode(content)\n if not isinstance(doc, list):\n doc = [doc]\n observation.add_all_objects(doc)\n except ValueError as ex:\n error = 'Invalid JSON in response: %s' % content\n logging.getLogger(__name__).info('%s\\n%s\\n----------------\\n',\n error, traceback.format_exc())\n observation.add_error(JsonError(error, ex))\n return []\n\n return observation.objects",
"def loadObservables(config):\n import imp\n import inspect\n import os\n\n CLI = config.getFolder(\"CLI+\")\n robust = CLI.getTagBoolDefault(\"robust\",False)\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n # set global property\n if config.getTagBoolDefault(\"evaluateSubObservablesLazily\", False):\n QFramework.TQMultiObservable.gEvaluateSubObservablesLazily = True\n\n customobservables = False\n # for each snippet,\n for observablescript_TString in config.getTagVString(\"customObservables.snippets\"):\n QFramework.TQStringUtils.removeLeadingBlanks(observablescript_TString)\n QFramework.TQStringUtils.removeTrailingBlanks(observablescript_TString)\n QFramework.TQStringUtils.removeTrailingText(observablescript_TString, \".py\")\n observablescript = observablescript_TString.Data()\n if len(observablescript) == 0:\n QFramework.INFO(\"Skipping custom observable loading - no snippets provided\")\n continue\n found_modules = []\n observablesDirs = config.getTagVStandardString(\"customObservables.directories\")\n # search through the directories provided in the config\n for observablesPath in observablesDirs:\n module = QFramework.TQFolder.concatPaths(observablesPath, observablescript)+\".py\"\n module = common.findConfigPath(module, False, True)\n # findConfigPath returns \"\" if no module was found\n if len(module) > 0:\n # snippet was found in this directory -\n # add it's absolute path and the directory it was found in\n # to a list in tuple form\n found_modules.append((module,observablesPath))\n if len(found_modules) == 0:\n # check CommonAnalysisHelpers for an observable snippet as fall-back\n CAHObservablesDir = \"CommonAnalysisHelpers/share/observables\"\n observablesDirs.push_back(CAHObservablesDir)\n module = QFramework.TQFolder.concatPaths(CAHObservablesDir, observablescript) + \".py\"\n module = QFramework.TQPathManager.findFileFromEnvVarWithoutExecDir(module, \"CAFCOREDIR\", False)\n if len(module) > 0:\n found_modules.append((module, CAHAlgorithmsDir))\n\n # continue only if there was one match found\n if len(found_modules) == 0:\n QFramework.BREAK(\"No module found for '{:s}' in the custom observable directories provided:\\n{:s}\\n\".format(observablescript,', '.join(observablesDirs))+\n \"Please make sure that there exists a snippet by the name of '{:s}.py' available in one of them.\\n\".format(observablescript))\n elif len(found_modules) > 1:\n QFramework.BREAK(\"Ambiguity detected while resolving custom observable snippet location. 
Multiple modules found for {:s} in the custom observable directories provided:\\n{:s}\\n\".format(observablescript,', '.join(observablesDirs))+\n \"Consider placing the {:s}.py snippet only in a common directory if it's used by more than one (sub)analysis.\".format(observablescript))\n abs_path = found_modules[0][0]\n module_name = os.path.basename(abs_path).rstrip(\".py\")\n relative_path = QFramework.TQFolder.concatPaths(found_modules[0][1], observablescript)+\".py\"\n QFramework.START(\"l.\",\"loading custom observable instances from the '{:s}' snippet\".format(str(relative_path)))\n try:\n addobservables = imp.load_source(module_name, abs_path)\n\n argspec = inspect.getargspec(addobservables.addObservables)\n if 'config' in argspec.args:\n added = addobservables.addObservables(config=config)\n elif len(argspec.args) == 1:\n added = addobservables.addObservables(config)\n elif len(argspec.args) == 0:\n added = addobservables.addObservables()\n else:\n QFramework.BREAK(\"unable to add observable(s) from script '{:s}' - unknown arguments appeared: {:s}\".format(abs_path, str(argspec.args)))\n if added:\n QFramework.END(QFramework.TQMessageStream.OK)\n customobservables = True\n else:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to properly setup custom observables from '{:s}'\".format(abs_path))\n except IOError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to open file '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except NameError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"syntax error in observable snippet '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except AttributeError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"attribute error in observable snippet '{:s}' - please double-check!\\n\".format(abs_path)+\n \"If the message from python below is\\n'module' object has no attribute 'addObservables'\\nplease make sure that the snippet has the function addObservables() defined.\\n\"\n \"Message from python:\\n\"+str(error))\n return customobservables",
"def get(self):\r\n\t\treturn list(self)",
"def get_movies():\n\n # ouverture du fichier de notre liste de films\n with open(DATA_FILE,\"r\") as f:\n movies_list = json.load(f)\n\n # notre liste des instances\n movies = [Movie(m)for m in movies_list] \n return movies",
"def get(self, request, scene_id):\n\n scene_id = int(scene_id)\n scene = Scene.objects.prefetch_related(\"metroline_set__metrostation_set\",\n \"metroline_set__metrodepot_set\",\n \"metroline_set__metrotrack_set__startStation\",\n \"metroline_set__metrotrack_set__endStation\"). \\\n get(user=request.user, id=scene_id)\n\n lines = []\n for line in scene.metroline_set.all():\n lines.append(line.get_dict())\n\n connections_dict = []\n for connection in scene.metroconnection_set.all():\n connections_dict.append(connection.get_dict())\n\n response = {\"lines\": lines, \"connections\": connections_dict}\n\n Status.getJsonStatus(Status.OK, response)\n\n return JsonResponse(response, safe=False)",
"def get_stations():\n response = requests.get('https://api.hh.ru/metro/160')\n todos = json.loads(response.text)\n colors = {'CD0505': 'red'}\n all_stations_one_line = []\n\n for i in todos['lines']:\n all_stations_one_line = []\n\n for j in i['stations']:\n one_station = station.station()\n one_station.set_name(j['name'])\n one_station.set_color(colors.get(i['hex_color']))\n one_station.set_lat(j['lat'])\n one_station.set_lng(j['lng'])\n all_stations_one_line.append(one_station)\n return all_stations_one_line",
"def observerRead(self, x):\n pass",
"def get_obs(self):\n\n # Get Distance Object to Gripper and Objectposition from Service Call. Needs to be done a second time cause we need the distance and position after the Step execution\n distance_gripper_to_object, position_xyz_object = U.get_distance_gripper_to_object()\n object_pos_x = position_xyz_object[0]\n object_pos_y = position_xyz_object[1]\n object_pos_z = position_xyz_object[2]\n\n # Get Joints Data out of Subscriber\n joint_states = self.joints_state\n elbow_joint_state = joint_states.position[0]\n shoulder_lift_joint_state = joint_states.position[1]\n shoulder_pan_joint_state = joint_states.position[2]\n wrist_1_joint_state = joint_states.position[3]\n wrist_2_joint_state = joint_states.position[4]\n wrist_3_joint_state = joint_states.position[5]\n\n for joint in joint_states.position:\n if joint > 2 * math.pi or joint < -2 * math.pi:\n print(joint_states.name)\n print(np.around(joint_states.position, decimals=3))\n sys.exit(\"Joint exceeds limit\")\n\n # Get Contact Forces out of get_contact_force Functions to be able to take an average over some iterations otherwise chances are high that not both sensors are showing contact the same time\n contact_1_force = self.get_contact_force_1()\n contact_2_force = self.get_contact_force_2()\n\n # Stack all information into Observations List\n observation = []\n for obs_name in self._list_of_observations:\n if obs_name == \"distance_gripper_to_object\":\n observation.append(distance_gripper_to_object)\n elif obs_name == \"elbow_joint_state\":\n observation.append(elbow_joint_state)\n elif obs_name == \"shoulder_lift_joint_state\":\n observation.append(shoulder_lift_joint_state)\n elif obs_name == \"shoulder_pan_joint_state\":\n observation.append(shoulder_pan_joint_state)\n elif obs_name == \"wrist_1_joint_state\":\n observation.append(wrist_1_joint_state)\n elif obs_name == \"wrist_2_joint_state\":\n observation.append(wrist_2_joint_state)\n elif obs_name == \"wrist_3_joint_state\":\n observation.append(wrist_3_joint_state)\n elif obs_name == \"contact_1_force\":\n observation.append(contact_1_force)\n elif obs_name == \"contact_2_force\":\n observation.append(contact_2_force)\n elif obs_name == \"object_pos_x\":\n observation.append(object_pos_x)\n elif obs_name == \"object_pos_y\":\n observation.append(object_pos_y)\n elif obs_name == \"object_pos_z\":\n observation.append(object_pos_z)\n elif obs_name == \"object_type\":\n observation.append(self.object_type)\n elif obs_name == \"min_distance_gripper_to_object\":\n observation.append(self.min_distace)\n else:\n raise NameError('Observation Asked does not exist==' + str(obs_name))\n\n return observation",
"def subscribe(observer):",
"def subscribe(observer):",
"def jsons(self):\n if self._jsons is None:\n self.make_jsons()\n return self._jsons",
"def getAllObs(self):\n #print 'getAllOBS:',self.listener.allObs\n return self.listener.allObs",
"def get_all_songs():\r\n return [Song.song_json(song) for song in Song.query.all()]"
] |
[
"0.62193173",
"0.5815124",
"0.55947036",
"0.5513718",
"0.53893983",
"0.53819305",
"0.53428006",
"0.52789974",
"0.50996727",
"0.50031304",
"0.49881196",
"0.49829662",
"0.49760333",
"0.49739003",
"0.4947553",
"0.49138665",
"0.49131507",
"0.49004713",
"0.4892729",
"0.48071638",
"0.48034522",
"0.47890806",
"0.47762555",
"0.4753935",
"0.4744211",
"0.47252765",
"0.47252765",
"0.47195238",
"0.47141033",
"0.4700245"
] |
0.7748714
|
0
|
Get observable type for a stix object
|
def get_observable_type(stix_obj, log):
obj_type = stix_obj[u"type"]
if obj_type == u"observed-data":
#
        # so far every observed-data object has only one embedded object
        # if there are more, log the error
#
if len(stix_obj[u"objects"]) > 1:
log.error("Observed-data {} has {} objects!".format(stix_obj[u"id"], str(len(stix_obj[u"objects"]))))
# Only look at the first one
obj = stix_obj[u"objects"]["0"]
obj_type = obj[u"type"]
elif obj_type == u"indicator":
obj_type = INDICATOR_NAME_TYPE.get(stix_obj[u"name"], None)
if not obj_type:
#
            # Our INDICATOR_NAME_TYPE mapping is not complete
#
log.error("Not handling {}".format(str(stix_obj)))
obj_type = stix_obj[u"type"]
return obj_type
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_type ( self, object ):\n return self.type",
"def get_type ( self, object ):\n return self.type",
"def get_observable(stix_obj, log):\n res_obj = {}\n\n if stix_obj[u\"type\"] == \"relationship\":\n return None\n\n res_obj[u\"toxicity\"] = stix_obj.get(IBM_TOXICITY, \"\")\n res_obj[u\"relevance\"] = stix_obj.get(IBM_RELEVANCE, \"\")\n res_obj[u\"description\"] = get_observable_description(stix_obj, log)\n res_obj[u\"type\"] = get_observable_type(stix_obj, log)\n\n return res_obj",
"def to_observation_type(self) -> str:\n obstype = self._header[\"OBSTYPE\"].strip().lower()\n self._used_these_cards(\"OBSTYPE\")\n if obstype == \"object\":\n return \"science\"\n return obstype",
"def get_type (self):\n return self._stype",
"def object_type(self) -> str:\n return self._event.get('object_type')",
"def typ(rxn_class):\n return rxn_class[0]",
"def typename ( o ) :\n return type ( o ) .__name__",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def object_type(self) -> str:\n return pulumi.get(self, \"object_type\")",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type"
] |
[
"0.6467229",
"0.6467229",
"0.60474116",
"0.6035891",
"0.6021861",
"0.60110044",
"0.6003303",
"0.59633845",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5953436",
"0.5950566",
"0.5950566",
"0.5950566",
"0.5950566",
"0.5950566",
"0.5950566"
] |
0.8163599
|
0
|
Find a stix obj using obj_id
|
def find_object_by_id(stix_objects, obj_id):
ret_obj = None
for obj in stix_objects:
if obj["id"] == obj_id:
ret_obj = obj
break
return ret_obj
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_object(id):",
"def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")",
"def object_for(objectid):",
"def get_object_by_id(self, object_list, object_id):\n obj = None\n for i in object_list:\n if i.get_id() == object_id:\n obj = i\n break\n return obj",
"def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return objects[self._total[section][groupid]]\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n #self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return obj",
"def findByIndex(self, obj_index):\n return self.registry.findByIndex(obj_index)",
"def get_one_by_id(self, object, id):\n self.lock.acquire()\n result = self.__Session.query(object).get(id)\n self.lock.release()\n return result",
"def get_sobj(self):\n return self._std.FindObjectID(self.entry)",
"def find_by_id(cls, object_id):\n try:\n return mongo_db[cls.__collection__].find_one({\"_id\": ObjectId(object_id)})\n except InvalidId:\n # TODO: Log the exception\n print('Invalid bson id: {}'.format(object_id))\n return None",
"def __getitem__(self, objectId: str):\n return super()._getitem(\n objectId, f=lambda x: registry.getObject(x, self.session)\n )",
"def fetch_obj(type, id, error=404, new_id=False):\n if id is None:\n abort(error)\n obj_q = Session.query(type)\n obj = obj_q.get(int(id))\n #else:\n # obj = obj_q.filter(type.ID==int(id)).first()\n\n if obj is None:\n abort(error)\n return obj",
"def salesforce_get(self, obj_name, obj_id):\n self.builtin.log(f\"Getting {obj_name} with Id {obj_id}\")\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.get(obj_id)",
"def get_object(self, id_):\n return self._objects.get(id_, None)",
"def find_object_by_uuid(remote, uuid):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_FindObjectByUUID(uuid)\n remote.runCommand(cmd)\n result_val = mmapi.any_result()\n bFound = cmd.GetSceneCommandResult_FindObjectByUUID(cmd_key, result_val)\n return (bFound, result_val.i)",
"def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return self.wrap_object(objects[self._total[section][groupid]],\n section)\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return self.wrap_object(obj, section)",
"def find_object_by_name(remote, obj_name):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_FindObjectByName(obj_name)\n remote.runCommand(cmd)\n result_val = mmapi.any_result()\n bFound = cmd.GetSceneCommandResult_FindObjectByName(cmd_key, result_val)\n return (bFound, result_val.i)",
"def get_object(self, object_id, attributes_to_retrieve = None):\n obj_id = quote((\"%s\" % object_id).encode('utf8'), safe='')\n if (attributes_to_retrieve == None):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/%s\" % (self.url_index_name, obj_id), self.client.timeout)\n else:\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/%s?attributes=%s\" % (self.url_index_name, obj_id, attributes_to_retrieve), self.client.timeout)",
"def find_index(self, obj):\n return self.model.indexlist[obj]",
"def _find_object_id(self, cr, uid, context=None):\n object_id = context and context.get('object_id', False) or False\n ids = self.pool.get('ir.model').search(cr, uid, [('id', '=', object_id)])\n return ids and ids[0] or False",
"def find(cls, sid):\n cls.logger.info(\"Processing lookup for shopcart id %s ...\", sid)\n return cls.query.get(sid)",
"def find(self, objectclass, **kwargs):\n raise NotImplementedError",
"def find_by_id(cls, iid: int):\n return cls.query.filter_by(id=iid).first()",
"def _get_object(cls, pk):\n kwargs = {}\n try:\n kwargs['pk'] = int(pk)\n except Exception as e:\n if not cls.search_alternate:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n kwargs[f\"{cls.search_alternate}__iexact\"] = pk\n return get_object_or_404(cls.model().objects.all(), **kwargs)",
"def id(obj):\n return obj",
"def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')",
"def find_object(self, obj_type, obj_name):\n try:\n # Simply look it up by type and name.\n obj = self.model_map['object'][obj_type][obj_name][1]\n except KeyError:\n # No dice. This object doesn't exist in the model.\n obj = None\n\n return obj",
"def FindObject(self, tagged_address):\n raise NotImplementedError",
"def get(self, query_data=None, id_obj=None):\n if id_obj:\n return self.collection.find_one({'_id': id_obj})\n return self.collection.find_one(query_data)",
"def get_object(self, object_id):\r\n model = self.model\r\n object_id = model._meta.pk.to_python(object_id)\r\n return self.queryset().get(pk=object_id)",
"def get_object(self, oid):\n return self.request('get', safeformat('registry/objects/{:int}', oid))"
] |
[
"0.74366605",
"0.6660078",
"0.6613826",
"0.66030574",
"0.65227836",
"0.6500162",
"0.6412086",
"0.64099437",
"0.63752186",
"0.63266295",
"0.62988967",
"0.62979877",
"0.6282076",
"0.6247149",
"0.6232817",
"0.621489",
"0.6199346",
"0.6195045",
"0.61742485",
"0.61664426",
"0.6160441",
"0.6148691",
"0.614139",
"0.61351055",
"0.6116136",
"0.60951346",
"0.6094274",
"0.6092061",
"0.6062492",
"0.60611224"
] |
0.8400525
|
0
|
Find all leaf nodes below start_tag in the tag_table. Search from the start tag down through the tag tree until reaching children that have no children of their own. Return the combined Series of these children.
|
def find_children(start_tag, tag_table):
pure_child = pd.Series([])
parents = pd.Series([start_tag])
while parents.shape[0] > 0:
pure_child = pd.concat([pure_child,
parents[~parents
.isin(tag_table['Parent'])]])
parents = tag_table.loc[tag_table['Parent']
.isin(parents[parents
.isin(tag_table['Parent'])]),
'Child']
return pure_child
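
A minimal usage sketch (the tag_table below is an assumed toy example, not part of the original record): a three-row Parent/Child table where 'root' has child 'a', and 'a' has leaf children 'b' and 'c'.

import pandas as pd

tag_table = pd.DataFrame({'Parent': ['root', 'a', 'a'],
                          'Child': ['a', 'b', 'c']})
# Walking down from 'root' ends at the childless tags 'b' and 'c'
print(find_children('root', tag_table).tolist())  # ['b', 'c']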
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_children(search_tag, tag_list):\n list_return = []\n\n for tag in tag_list:\n if str(tag.parent) == str(search_tag):\n list_return.append(tag)\n list_return.extend(get_children(tag, tag_list))\n return list(set(list_return)) # This will return a list of unique elements",
"def expand_all(start, end, tags):\n if len(tags) == 1:\n for branch in expand(start, end, tags[0]):\n yield [branch]\n else:\n first, rest = tags[0], tags[1:]\n for middle in range(start+1, end+1-len(rest)): \n for first_branch in expand(start, middle, first):\n for rest_branches in expand_all(middle, end, rest):\n yield [first_branch] + rest_branches",
"def traverse_graph_start_end_extra_node(graph):\n\n # get tree with starting node tags\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n\n tree = traverse(graph, 0)\n\n # get ending node tags\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n end_tags = []\n ellipsis_id = 0 # assign an id to each ellipsis start and end nodes\n for pos_i, pos in enumerate(positions):\n if tree[pos].label().startswith(\"start\"):\n ellipsis_tag = tree[pos].label().split(\"start\")[-1]\n tree[pos].set_label(\"start\" + str(ellipsis_id))\n end_location = get_ellipsis_location(tree, ellipsis_tag)\n end_tag = \"end\" + str(ellipsis_id)\n end_tags.append((end_location, end_tag))\n ellipsis_id += 1\n\n # insert ending node tags\n for index, st in enumerate(tree.subtrees()):\n for end_location, end_tag in end_tags:\n if st.treeposition() == end_location:\n st.insert(index, ParentedTree(end_tag, []))\n\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append start tag to the previous node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n # append end tag to the parent of the current node\n elif tree[pos].label().startswith(\"end\"):\n parent_pos = tree[pos].parent().treeposition()\n tree[parent_pos].set_label(tree[parent_pos].label() + tree[pos].label())\n del tree[pos] \n\n # wrap each constituent that has end or start tags with extra nodes\n\n def add_extra_nodes(tree):\n children = []\n for subtree in tree:\n if type(subtree) == str:\n children.append(subtree)\n else:\n splits = re.split(\"(start|end)\", subtree.label())\n const_tag = splits[0]\n ellipsis_tag = \"\".join(splits[1:]) \n if len(ellipsis_tag) > 0:\n children.append(Tree(subtree.label(), [Tree(const_tag, [sst for sst in subtree])]))\n else:\n children.append(add_extra_nodes(subtree))\n\n return Tree(tree.label(), children)\n\n tree = add_extra_nodes(tree)\n\n return tree",
"def depth_first_traversal_iterative(self, start):\n try:\n res = []\n stack = Stack([start])\n track = set()\n while stack.top:\n cur_node = stack.pop()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in reversed(self.node_dict[cur_node]):\n stack.push(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res",
"def traverse_graph_end(graph):\n\n # get tree with starting node tags\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n\n tree = traverse(graph, 0)\n\n # get ending node tags\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n end_tags = []\n for pos_i, pos in enumerate(positions):\n if tree[pos].label().startswith(\"start\"):\n ellipsis_tag = tree[pos].label().split(\"start\")[-1]\n end_location = get_ellipsis_location(tree, ellipsis_tag)\n start_location = pos_i\n while tree[positions[start_location]].label().startswith(\"start\"):\n start_location -= 1\n end_tag = get_ellipsis_tag_from_tree(tree, positions[start_location])\n end_tags.append((end_location, end_tag))\n\n # insert ending node tags\n for index, st in enumerate(tree.subtrees()):\n for end_location, end_tag in end_tags:\n if st.treeposition() == end_location:\n st.insert(index, ParentedTree(end_tag, []))\n\n # delete starting node tags\n subtrees = [st for st in tree.subtrees()]\n reversed_subtrees = [st for st in reversed(subtrees)]\n for st in reversed_subtrees:\n if st.label().startswith(\"start\"):\n del tree[st.treeposition()]\n\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append ending node tag to the parent of the current node\n if tree[pos].label().startswith(\"end\"):\n parent_pos = tree[pos].parent().treeposition()\n tree[parent_pos].set_label(tree[parent_pos].label() + tree[pos].label())\n del tree[pos] \n\n return tree",
"def get_root_children(self):\n return self.browser.tags",
"def apply(self, nodes):\n if self.step == '.':\n return nodes\n elif self.step == '..':\n return [node.parent for node in nodes]\n\n result = []\n for node in nodes:\n if self.axis == AXIS_CHILD:\n found = node.findAll(recursive=False, **self.soup_args)\n elif self.axis == AXIS_DESCENDANT:\n found = node.findAll(recursive=True, **self.soup_args)\n elif self.axis == AXIS_ATTRIBUTE:\n try:\n found = [node[self.node_test]]\n except KeyError:\n found = []\n elif self.axis == AXIS_FOLLOWING_SIBLING:\n found = node.findNextSiblings(**self.soup_args)\n elif self.axis == AXIS_PRECEDING_SIBLING:\n # TODO: make sure that the result is reverse ordered\n found = node.findPreviousSiblings(**self.soup_args)\n elif self.axis == AXIS_FOLLOWING:\n # find the last descendant of this node\n last = node\n while (not isinstance(last, BeautifulSoup.NavigableString)) \\\n and (len(last.contents) > 0):\n last = last.contents[-1]\n found = last.findAllNext(**self.soup_args)\n elif self.axis == AXIS_ANCESTOR:\n found = node.findParents(**self.soup_args)\n\n # this should only be active if there is a position predicate\n # and the axis is not 'descendant'\n if self.index is not None:\n if found:\n if len(found) > self.index:\n found = [found[self.index]]\n else:\n found = []\n\n if found:\n for checker in self.checkers:\n found = filter(checker, found)\n result.extend(found)\n\n return result",
"def find_sub_node(tree, keys, limit_tags=['part', 'subpart'], force_tag=None):\n \"\"\" limit tags exists to prevent ambiguity between parts and section labels. however, sometimes we must treat\n parts etc like sections, for ranges etc \"\"\"\n node = tree\n xpath_query = \".//%s[%s]\"\n depth = lambda x: len(list(x.iterancestors()))\n shallowest = lambda nodes: nodes[0] if len(node) == 1 else sorted(map(lambda x: (x, depth(x)), nodes), key=itemgetter(1))[0][0]\n\n def get_closest(node, label):\n \"\"\" note: this is split between xpath and python for performance reasons (xpath too slow on ancestors) \"\"\"\n while True:\n try:\n tag = force_tag if force_tag else '*'\n nodes = node.xpath(xpath_query % (tag, labelize(label)))\n nodes = filter(lambda x: x.tag not in limit_tags and\n not len(set(map(lambda t: t.tag, x.iterancestors())).intersection(IGNORE_TRAVERSAL_TAGS)), nodes)\n return shallowest(nodes)\n\n except IndexError:\n node = node.getparent()\n if node is None or not len(node):\n raise StopIteration('no more parents')\n\n nodes = []\n\n try:\n for i, a in enumerate(keys):\n if a:\n adds = a.split('+')\n for add in adds:\n add = add.strip()\n if not add:\n continue\n elif '-' in add:\n # we can't assume any reasonable lexicographical ordering of labels, so instead\n # find first match and continue until last\n labels = [x.strip() for x in add.split('-')]\n # get first node\n start = get_closest(node, labels[0])\n last = get_closest(node, labels[1])\n # this sucks, having to start at start,\n tag = start.tag\n nodes.append(start)\n # try to find way to start iter at arbitrary node\n tree_iter = tree.iter(tag)\n current = None\n while True:\n current = next(tree_iter)\n if current == start:\n break\n while True:\n current = next(tree_iter)\n nodes.append(current)\n if current == last:\n break\n # find every tag that matches depth, until we match last\n else:\n nodes.append(get_closest(node, add.strip()))\n node = nodes\n if i < len(keys) - 1:\n node = nodes[-1]\n if not len(keys):\n nodes = [node]\n if not len(nodes):\n raise CustomException(\"Empty\")\n # remove ancestors\n ancestors = []\n for n in nodes:\n ancestors.extend(list(n.iterancestors()))\n nodes = [n for n in nodes if n not in ancestors]\n return nodes\n except (IndexError, StopIteration, AttributeError), e:\n raise CustomException(\"Path not found\")",
"def find_result(self):\n result = []\n current_node = self.start_node\n while current_node.children:\n values = []\n for i in current_node.children:\n values += current_node.children[i]\n # find removed cells and then find the direct next move\n removed_cells = max(values)[1]\n for key, value in current_node.children.items():\n for i in value:\n if len(i) == 3 and i[1] == removed_cells:\n current_node = key\n result.insert(0, (current_node, removed_cells))\n break\n if current_node == key:\n break\n return result",
"def traverse_graph_start_without_pos(graph):\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n \n tree = traverse(graph, 0)\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append starting_node tag to the previous non-terminal node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n while tree[rev_positions[prev_pos_i]].height() == 2:\n prev_pos_i += 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n\n return tree",
"def get_children_with_tag(self, tag):\n if tag in self._children_tag_table:\n result = self._children_tag_table[tag]\n else:\n result = []\n composite_children = [c for c in self._children.values() if isinstance(c, Composite)]\n grand_children = []\n for c in composite_children:\n grand_children += c.get_children_with_tag(tag)\n return result + grand_children",
"def getAllParents(start):\n parent = start.getParent()\n\n while parent:\n node = parent\n yield parent\n parent = node.getParent()",
"def traverse_graph_start(graph):\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n \n tree = traverse(graph, 0)\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append starting_node tag to the previous node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n\n return tree",
"def get_subgraph_between_nodes(self, start, end):\n nodes = set()\n nodes.add(start)\n\n to_visit = set()\n to_visit.add(start)\n\n while len(to_visit) > 0:\n current_visit = copy.copy(to_visit)\n for tv in current_visit:\n to_visit.remove(tv)\n if tv is not end:\n for s in self.successors(tv):\n to_visit.add(s)\n nodes.add(s)\n\n nodes.add(end)\n\n return self.subgraph(nodes)",
"def get_subtree(tree, start_node):\n \n plot_nodes = [start_node]\n finished=False\n while not finished:\n extra_nodes = []\n for node in plot_nodes:\n children = []\n if \"yes_branch\" in tree[node]:\n children.append(tree[node][\"yes_branch\"])\n if \"no_branch\" in tree[node]:\n children.append(tree[node][\"no_branch\"])\n for child in children:\n if child not in extra_nodes and child not in plot_nodes:\n extra_nodes.append(child)\n if extra_nodes == []:\n finished=True\n else:\n plot_nodes.extend(extra_nodes)\n \n sub_tree = {}\n for node in tree.keys():\n if node in plot_nodes:\n sub_tree[node] = tree[node]\n \n return sub_tree",
"def get_parents(tag):\n\n nonlocal depth\n depth = depth - 1\n\n if depth == 0:\n return\n\n if isinstance(tag, str):\n tag = self.tag_to_id[tag]\n\n if tag:\n\n parents = self.parent[tag] - self.categories\n for parent in parents:\n self.categories.add(parent)\n get_parents(parent)",
"def expand(start, end, tag):\n if end-start == 1:\n word = words[start]\n for leaf in lexicon:\n if leaf.tag == tag and leaf.word == word:\n yield leaf\n if tag in grammar:\n for tags in grammar[tag]:\n for branches in expand_all(start, end, tags):\n yield Tree(tag, branches)",
"def get_tree(self, root):\n root_index = self.__nodedata.index(root)\n last_index = root_index\n count = 0 if self.__rtags[root_index] and self.__ltags[root_index] else 1\n last_index += 1\n limit = len(self.__nodedata)\n\n while root_index < limit and count:\n if self.__rtags[root_index]:\n count += 1\n\n if self.__ltags[root_index]:\n count -= 1\n\n return self.__nodedata[root_index:last_index + 1]",
"def find_all(st, sub):\n\n if not sub: return None\n if sub[0] not in st.root.trans: return None\n \n found, i, s = False, 0, st.root\n scaned = 0 # length of the scaned\n while True:\n k, p, s = s.trans[sub[i]]\n len1, len2 = p-k+1, len(sub)-i\n if len1 >= len2:\n if st.text[k:k+len2] == sub[i:]:\n found, scaned = True, scaned+len1\n break\n else:\n if st.text[k:k+len1] == sub[i:i+len1]:\n i, scaned = i+len1, scaned+len1\n else: break\n if found:\n # shift_of_suffix = len(st.text) - len(suffix)\n leaf_depthes = get_leaf_depthes(s)\n return [len(st.text)-x-scaned for x in leaf_depthes]\n\n return None",
"def get_relations(bt, start=1):\n if not bt:\n return [], [], start # leaf\n roots, relations, index = get_relations(bt[0], start=start)\n rroots, rrelations, rindex = get_relations(bt[1], start=index + 1)\n roots.append(index)\n relations.extend(rrelations)\n relations.extend([(j, index) for j in rroots])\n return roots, relations, rindex",
"def get_tags(self, root, recursive=True):\n tags = []\n for child in root.children:\n tags.append(child)\n if recursive:\n tags += self.get_tags(child)\n return tags",
"def get_slice(the_tree,begin_element,end_element):\n all_elements=[e for e in the_tree.iter()]\n begin_index=all_elements.index(begin_element)\n end_index=all_elements.index(end_element)+1\n uniq_elements=[]\n for element in all_elements[begin_index:end_index]:\n tempset=set(uniq_elements)\n ancestors=set([e for e in element.iterancestors()])\n if len (tempset & ancestors) == 0:\n uniq_elements.append(element)\n return uniq_elements",
"def get_first_level_tags(self) -> Any:\n return self.orthanc.get_instance_first_level_tags(self.identifier)",
"def get_tags(self, root):\n tags = root.xpath(self.path)\n return tags if self.many else tags[:1]",
"def generateTreesRecursive(self, start: int, end: int) -> List[Optional[TreeNode]]:\n if start > end:\n return [None] # to set cur_root.left = None\n\n all_trees = []\n\n for val in range(start, end + 1):\n # cur_root = TreeNode(val)\n\n # get left subtrees (less than val)\n left_subtrees = self.generateTreesRecursive(start, val - 1)\n # get right subtrees (greater than val)\n right_subtrees = self.generateTreesRecursive(val + 1, end)\n\n for l_subtree in left_subtrees:\n for r_subtree in right_subtrees:\n cur_root = TreeNode(val)\n cur_root.left = l_subtree\n cur_root.right = r_subtree\n\n # cur_root is now the root of a BST\n all_trees.append(cur_root)\n\n return all_trees",
"def find_nodes_from_here(start_node, key):\n node_ = start_node\n yield from find_nodes(node_, key)\n while node_.parent:\n this_key_ = node_.key\n node_ = node_.parent\n if node_.key == key: # pragma: no branch\n yield node_\n for child_ in node_.children:\n if child_.key == this_key_: # pragma: no branch\n continue\n yield from find_nodes(child_, key)",
"def get_left_children(query_tree, out):\n qt = query_tree\n for parent in qt:\n # print parent, qt\n if len(qt[parent].keys()) > 0:\n children = qt[parent].keys()\n children.sort()\n # print \"Sorted Children\", children\n out.append(children[0])\n get_left_children({children[0]: qt[parent][children[0]]}, out)\n else:\n break",
"def depth_first_traversal(self, start):\n return self.recursive_dft(start, [])",
"def get_relations(bt, start=1):\n if not bt:\n return [], [], start # leaf\n lroots, lrelations, index = get_relations(bt[0], start=start)\n roots, relations, rindex = get_relations(bt[1], start=index + 1)\n roots.append(index)\n relations.extend(lrelations)\n relations.extend([(j, index) for j in lroots])\n return roots, relations, rindex",
"def s_leaves(node):\r\n if len(node.children)==0:\r\n return [node]\r\n else:\r\n desc_leaves = []\r\n for child in node.children:\r\n desc_leaves.extend(Node.s_leaves(child))\r\n return desc_leaves"
] |
[
"0.60712296",
"0.59759665",
"0.5792263",
"0.56499445",
"0.5448759",
"0.54347426",
"0.5383377",
"0.536103",
"0.5359288",
"0.52728033",
"0.52625084",
"0.5240432",
"0.5222117",
"0.5203426",
"0.51802504",
"0.5170244",
"0.5154103",
"0.5152585",
"0.5125588",
"0.5117449",
"0.5116381",
"0.5103964",
"0.5088831",
"0.50772107",
"0.5075297",
"0.5074781",
"0.5073921",
"0.5069604",
"0.50688016",
"0.5047541"
] |
0.7850898
|
0
|
Cleans up a slug by removing slug separator characters that occur at the beginning or end of a slug. If an alternate separator is used, it will also replace any instances of the default '-' separator with the new separator.
|
def _slug_strip(self,
value,
separator='-'):
separator = separator or ''
if separator == '-' or not separator:
re_sep = '-'
else:
re_sep = '(?:-|%s)' % re.escape(separator)
# Remove multiple instances and if an alternate separator is provided,
# replace the default '-' separator.
if separator != re_sep:
value = re.sub('%s+' % re_sep, separator, value)
# Remove separator from the beginning and end of the slug.
if separator:
if separator != '-':
re_sep = re.escape(separator)
value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
return value
# TODO implement the unique_slugify so the slug can be used as user url.
# def unique_slugify(self):
# slug = slugify(self.name)
#
# user = CustomUser.objects.get(slug=slug)
#
# while user is not None:
# slug += "-%i" % random.randint(1,2000)
# user = CustomUser.objects.get(slug=slug)
#
# self.slug = slug
# TODO test this code found online
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _slug_strip(value, separator='-'):\n separator = separator or ''\n if separator == '-' or not separator:\n re_sep = '-'\n else:\n re_sep = '(?:-|%s)' % re.escape(separator)\n # Remove multiple instances and if an alternate separator is provided,\n # replace the default '-' separator.\n if separator != re_sep:\n value = re.sub('%s+' % re_sep, separator, value)\n # Remove separator from the beginning and end of the slug.\n if separator:\n if separator != '-':\n re_sep = re.escape(separator)\n value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n return value",
"def slugify(value, delim=\"-\"):\n value = re.sub(r\"[^\\w\\s-]\", \"\", value).strip()\n value = re.sub(r\"[-\\s]+\", delim, value)\n return value",
"def slugify(value: str) -> str:\n value = re.sub(r\"[^\\w\\s-]\", \"\", value.lower())\n return re.sub(r\"[-\\s]+\", \"-\", value).strip(\"-_\")",
"def slugify(value, delimiter='-'):\n words = slug_pattern.split(value)\n return delimiter.join(filter(truth, words)).lower()",
"def slugify(value):\n value = re.sub('[^\\w\\s-]', '', value).strip()\n return re.sub('[-\\s]+', '-', value)",
"def deslugify(_slug):\n return string.capwords(_slug.replace('-', ' '))",
"def slugify(text):\n non_safe = [c for c in text if c in non_url_safe]\n if non_safe:\n for c in non_safe:\n text = text.replace(c, '')\n # Strip leading, trailing and multiple whitespace, convert remaining whitespace to _\n text = u'_'.join(text.split())\n return text",
"def slugify(s):\n s = reg_nonchar.sub('-', s).lower()\n s = reg_dashes.sub('-', s)\n s = reg_outer_dashes.sub('', s)\n return s",
"def slugify(text):\n concatenated = re.sub('\\s+', '-', text.lower())\n return re.sub('[^A-Za-z0-9_-]', '', concatenated)",
"def slugify(s: str) -> str:\n\n return re.sub(re_forbidden, ' ', s.strip()).strip().replace(' ', '-')",
"def slugify(value: Any, sep: str = \"-\") -> Optional[str]:\n text = stringify(value)\n if text is None:\n return None\n text = text.replace(sep, WS)\n # run this first because it'll give better results on special\n # characters.\n text = category_replace(text, SLUG_CATEGORIES)\n text = latinize_text(text, ascii=True)\n if text is None:\n return None\n text = text.lower()\n text = \"\".join([c for c in text if c in VALID_CHARS])\n text = collapse_spaces(text)\n if text is None or len(text) == 0:\n return None\n return text.replace(WS, sep)",
"def slugify(value):\n # import unicodedata\n # value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n # value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n # value = unicode(re.sub('[-\\s]+', '-', value))\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return value",
"def slugify(s):\n s = s.lower()\n\n for c in [' ', '-', '.', '/']:\n s = s.replace(c, '_')\n\n s = re.sub('\\W', '', s)\n s = s.replace('_', ' ')\n s = re.sub('\\s+', ' ', s)\n s = s.strip()\n s = s.replace(' ', '-')\n\n return s",
"def slugify(value):\n #import unicodedata\n #value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n return value",
"def get_slug(self, headline):\n exclude = set(string.punctuation)\n s = ''.join(ch for ch in headline if ch not in exclude)\n return s.lower().replace(\" \", \"-\")",
"def slugify(text: str) -> str:\n return text.strip().replace(', ', '-').replace(' ', '_').lower()",
"def slugify(value):\n value = unicode(str(value))\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return re.sub(r'[-\\s]+', '-', value)",
"def slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'(?u)[^-\\w .]', '', value).strip()\n value = value.replace(\":\", \"\")\n return value",
"def create_slug(val):\n slug = slugify(unidecode(val))\n for s in settings.DJCAT_SLUG_RESERVED:\n slug = slug.replace(s, '')\n return slug",
"def slugify2(value):\n try:\n value = unicodedata.normalize('NFC', value)\n value = downcode(value)\n value = unicodedata.normalize('NFD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return safestring.mark_safe(re.sub('[-\\s]+', '-', value))\n except:\n if settings.TEMPLATE_DEBUG:\n raise\n else:\n return u''",
"def slugify(value):\n import unicodedata\n import re\n from django.utils.safestring import mark_safe\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return mark_safe(re.sub('[-\\s]+', '-', value))",
"def slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = str(re.sub('[^\\w\\s-]', '', str(value)).strip().lower())\n return re.sub('[-\\s]+', '-', str(value))",
"def format_slug(text):\n allowed_chars = (\n \"abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\" # Alphabet\n \"01234567890\" # Numbers\n \"_-\" # Symbols\n )\n # Replace seperators with dash\n seperators = [\" \", \",\", \".\"]\n for sep in seperators:\n text = text.replace(sep, \"-\")\n # Strip unacceptable characters\n text = \"\".join([c for c in text if c in allowed_chars])\n # Enforce max length\n return truncate(text, max_len=50).lower()",
"def _slugify(value):\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = re.sub(r'[^\\w\\s-]', '', value.decode('utf-8', 'ignore'))\n value = value.strip().lower()\n value = re.sub(r'[-\\s]+', '-', value)\n return value",
"def _get_slug(str):\n return str.replace(\" \", \"_\")",
"def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x",
"def slugify(text, delim=u\"-\"):\n _punct_re = re.compile(r'[\\t !\"#$%&\\'()*\\-/<=>?@\\[\\\\\\]^_`{|},.]+')\n result = []\n for word in _punct_re.split(text.lower()):\n word = normalize(\"NFKD\", word).encode(\"ascii\", \"ignore\")\n if word:\n result.append(word)\n return unicode(delim.join(result))",
"def slugify(s):\n\treturn re.sub('[!@#$%^&*()\\\\\\\\/:.\"\"]+', '', s).replace(' ', '-').replace('--', '-').lower()",
"def slugify(value):\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n return re.sub('[-\\s]+', '-', value)",
"def slugify(value):\n return '_' + value.replace(' ', '-').lower()"
] |
[
"0.8093289",
"0.7083839",
"0.67627585",
"0.6718731",
"0.6622538",
"0.6551716",
"0.65302366",
"0.6511013",
"0.64927936",
"0.64924055",
"0.6492133",
"0.6430577",
"0.6415602",
"0.63980514",
"0.6385788",
"0.63794196",
"0.63383615",
"0.6315635",
"0.6314748",
"0.6311437",
"0.63062793",
"0.63028795",
"0.62895",
"0.6280222",
"0.62712306",
"0.6258698",
"0.6250235",
"0.6242315",
"0.6236451",
"0.62266684"
] |
0.78587174
|
1
|
Calculates and stores a unique slug of ``value`` for an instance. ``slug_field_name`` should be a string matching the name of the field to store the slug in (and the field to check against for uniqueness). ``queryset`` usually doesn't need to be explicitly provided - it'll default to using the ``.all()`` queryset from the model's default manager.
|
def unique_slugify(instance,
value,
slug_field_name='slug',
queryset=None,
slug_separator='-'):
slug_field = instance._meta.get_field(slug_field_name)
slug = getattr(instance, slug_field.attname)
slug_len = slug_field.max_length
# Sort out the initial slug, limiting its length if necessary.
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = instance._slug_strip(slug, slug_separator)
original_slug = slug
# Create the queryset if one wasn't explicitly provided and exclude the
# current instance from the queryset.
if queryset is None:
queryset = instance.__class__._default_manager.all()
if instance.pk:
queryset = queryset.exclude(pk=instance.pk)
# Find a unique slug. If one matches, add '-2' to the end and try again
# (then '-3', etc).
next_element = 2
while not slug or queryset.filter(**{slug_field_name: slug}):
slug = original_slug
end = '%s%s' % (slug_separator, next_element)
if slug_len and len(slug) + len(end) > slug_len:
slug = slug[:slug_len - len(end)]
slug = instance._slug_strip(slug, slug_separator)
slug = '%s%s' % (slug, end)
next_element += 1
setattr(instance, slug_field.attname, slug)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unique_slugify(instance, value, slug_field_name='slug', queryset=None,\n slug_separator='-'):\n slug_field = instance._meta.get_field(slug_field_name)\n\n slug = getattr(instance, slug_field.attname)\n slug_len = slug_field.max_length\n\n # Sort out the initial slug, limiting its length if necessary.\n slug = slugify(value)\n if slug_len:\n slug = slug[:slug_len]\n slug = _slug_strip(slug, slug_separator)\n original_slug = slug\n\n # Create the queryset if one wasn't explicitly provided and exclude the\n # current instance from the queryset.\n if queryset is None:\n queryset = instance.__class__._default_manager.all()\n if instance.pk:\n queryset = queryset.exclude(pk=instance.pk)\n\n # Find a unique slug. If one matches, add '-2' to the end and try again\n # (then '-3', etc).\n next_try = 2\n while not slug or queryset.filter(**{slug_field_name: slug}):\n slug = original_slug\n end = '%s%s' % (slug_separator, next_try)\n if slug_len and len(slug) + len(end) > slug_len:\n slug = slug[:slug_len-len(end)]\n slug = _slug_strip(slug, slug_separator)\n slug = '%s%s' % (slug, end)\n next_try += 1\n\n setattr(instance, slug_field.attname, slug)",
"def slug(self, value: str):\n if value and self.slug:\n raise Exception('Order slug should never be updated.')\n elif value:\n self._slug = value",
"def get_unique_slug(model_instance, slugable_field_name, slug_field_name):\n slug = slugify(getattr(model_instance, slugable_field_name))\n unique_slug = slug\n extension = 1\n ModelClass = model_instance.__class__\n\n while ModelClass._default_manager.filter(\n **{slug_field_name: unique_slug}\n ).exists():\n unique_slug = '{}-{}'.format(slug, extension)\n extension += 1\n\n return unique_slug",
"def _make_slug(\n self, instance: BaseModel,\n field: Optional[str] = None,\n fields: Optional[List[str]] = None) -> str:\n if field is not None:\n text = str(getattr(instance, field))\n if text is None or text == '':\n raise ValueError('There was no string to make a slug from')\n elif fields is not None:\n parts = [str(getattr(instance, field)) for field in fields]\n text = '-'.join(parts)\n if text is None or text.replace('-', '') == '':\n raise ValueError('There was no string to make a slug from')\n\n concrete_slug = slugify(text, to_lower=True)\n slug = concrete_slug\n counter = 1\n while self.q.filter_by(slug=slug).first() is not None:\n slug = '{}-{}'.format(concrete_slug, counter)\n counter += 1\n return slug",
"def _slug(self, value):\n if isinstance(value, basestring):\n try:\n converted = value\n except Exception, exception:\n logger.error(exception)\n raise\n elif isinstance(value, str):\n try:\n converted = unicode(value, \"utf-8\")\n except Exception, exception:\n logger.error(exception)\n raise\n elif isinstance(value, (int, long, float)):\n self.assertNotIsInstance(value, basestring)\n try:\n converted = str(value)\n converted = unicode(converted)\n except Exception, exception:\n logger.error(exception)\n raise\n else:\n self.assertNotIsInstance(value, basestring)\n try:\n converted = unicode(value)\n except Exception, exception:\n logger.error(exception)\n raise\n output = converted.lower().strip().replace(\" \", \"-\")\n output = re.sub(r'[^a-z0-9]+', '-', output).strip('-')\n output = re.sub(r'[-]+', '-', output)\n output = re.sub(r\"[^\\w-]\", \"\", output)\n if isinstance(output, basestring):\n number_of_spaces = output.count(\" \")\n if number_of_spaces == 0:\n return output\n else:\n return False",
"def set_unique_slug(self):\n set_unique_slug(self, based_on_field='name')",
"def slug(value):\n return slugify(value)",
"def _slug_strip(self,\n value,\n separator='-'):\n separator = separator or ''\n if separator == '-' or not separator:\n re_sep = '-'\n else:\n re_sep = '(?:-|%s)' % re.escape(separator)\n # Remove multiple instances and if an alternate separator is provided,\n # replace the default '-' separator.\n if separator != re_sep:\n value = re.sub('%s+' % re_sep, separator, value)\n # Remove separator from the beginning and end of the slug.\n if separator:\n if separator != '-':\n re_sep = re.escape(separator)\n value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n return value\n\n # TODO implement the unique_slugify so the slug can be used as user url.\n # def unique_slugify(self):\n # slug = slugify(self.name)\n #\n # user = CustomUser.objects.get(slug=slug)\n #\n # while user is not None:\n # slug += \"-%i\" % random.randint(1,2000)\n # user = CustomUser.objects.get(slug=slug)\n #\n # self.slug = slug\n\n # TODO test this code found online",
"def unique_slug(manager, slug_field, slug):\n max_length = manager.model._meta.get_field(slug_field).max_length\n slug = slug[:max_length]\n i = 0\n while True:\n if i > 0:\n if i > 1:\n slug = slug.rsplit(\"-\", 1)[0]\n # We need to keep the slug length under the slug fields max length. We need to\n # account for the length that is added by adding a random integer and `-`.\n slug = \"%s-%s\" % (slug[:max_length - len(str(i)) - 1], i)\n if not manager.filter(**{slug_field: slug}):\n break\n i += 1\n return slug",
"def generate_unique_slug(klass, field, instance=None):\n origin_slug = slugify(field)\n unique_slug = origin_slug\n numb = 1\n if instance is not None:\n while klass.objects.filter(slug=unique_slug).exclude(id=instance.id).exists():\n unique_slug = '%s-%d' % (origin_slug, numb)\n numb += 1\n else:\n while klass.objects.filter(slug=unique_slug).exists():\n unique_slug = '%s-%d' % (origin_slug, numb)\n numb += 1\n return unique_slug",
"def create_slug_from_tag_name(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = slugify(instance.name)",
"def slug_save(obj):\n if not obj.slug: # if there isn't a slug\n obj.slug = get_random_string(5) # create one\n slug_is_wrong = True\n while slug_is_wrong: # keep checking until we have a valid slug\n slug_is_wrong = False\n other_objs_with_slug = type(obj).objects.filter(slug=obj.slug)\n if len(other_objs_with_slug) > 0:\n # if any other objects have current slug\n slug_is_wrong = True\n if slug_is_wrong:\n # create another slug and check it again\n obj.slug = get_random_string(5)",
"def slugify(values, ensure_unique=False, **kwargs):\n slug_args = {'separator': '_'}\n slug_args.update(kwargs)\n\n if ensure_unique:\n new_values = tuple(pslugify(value, **slug_args) for value in values)\n return deduplicate(new_values, separator=slug_args['separator'])\n\n return tuple(pslugify(value, **slug_args) for value in values)",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)",
"def add_unique(self, field_name, value, **kwargs):\n self.properties.update(kwargs)\n model = self.model.get_subclass_model(**self.properties)\n\n self.array_validate(field_name, value, model)\n return self.array_process(field_name, value, operation_type='add_unique')",
"def unique_slug(model, slug, instance=None, reserved_slugs=[]):\n orig, apdx = split_slug_on_appendix(slug)\n for x in itertools.count(1):\n if instance:\n if not model.objects.filter(slug=slug).exclude(pk=instance.pk).exists() and slug not in reserved_slugs:\n break\n else:\n if not model.objects.filter(slug=slug).exists() and slug not in reserved_slugs:\n break\n slug = '{}{}{}'.format(orig, settings.DJCAT_SLUG_UNIQNUMBER_DELIMITER, x)\n return slug",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = self.generate_slug()\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = self.generate_slug()\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.slug = slugify(self.title)\n super(QueryH, self).save(*args, **kwargs)",
"def _slugify(value):\n import unicodedata\n if not isinstance(value, unicode):\n value = unicode(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(_slugify_strip_re.sub('', value).strip().lower())\n return _slugify_hyphenate_re.sub('-', value)",
"def create_slug_from_category_name(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = slugify(instance.name)",
"def get_unique_slug(instance):\n slug = slugify(instance.title)\n extension = 1\n ModelClass = instance.__class__\n\n while ModelClass.objects.filter(slug=slug).exists():\n slug = '{}-{}'.format(slug, extension)\n extension += 1\n\n return slug",
"def clean_slug(self):\n slug = self.cleaned_data.get('slug', None)\n if slug is None or len(slug) == 0 and 'title' in self.cleaned_data:\n slug = slugify(self.cleaned_data['title'])\n return slug",
"def unique_slug(s, model, num_chars=50):\n slug = slugify(s)\n slug = slug[:num_chars].strip('-')\n while True:\n dup = model.objects.filter(slug=slug)\n if not dup:\n return slug\n\n slug = slug[:39] + '-' + random_string(10)",
"def set_slug(self):\n if not self.slug:\n self.slug = slugify(self.name)[:50]",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = self.create_slug()\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.title)\n return super().save(*args, **kwargs)",
"def generate_slug(self):\n return slugify(self.name)",
"def to_slug(value, incoming=None, errors=\"strict\"):\r\n value = safe_decode(value, incoming, errors)\r\n # NOTE(aababilov): no need to use safe_(encode|decode) here:\r\n # encodings are always \"ascii\", error handling is always \"ignore\"\r\n # and types are always known (first: unicode; second: str)\r\n value = unicodedata.normalize(\"NFKD\", value).encode(\r\n \"ascii\", \"ignore\").decode(\"ascii\")\r\n value = SLUGIFY_STRIP_RE.sub(\"\", value).strip().lower()\r\n return SLUGIFY_HYPHENATE_RE.sub(\"-\", value)",
"def get_unique_slug_field_name(self, model):\n slug_fields = []\n for field in model._meta.fields:\n if field.unique and isinstance(field, SlugField):\n slug_fields.append(field)\n if len(slug_fields) == 1:\n return slug_fields[0]\n return None"
] |
[
"0.7676992",
"0.6218883",
"0.6207083",
"0.61557245",
"0.6017999",
"0.5916228",
"0.5716703",
"0.5674682",
"0.5611593",
"0.5576234",
"0.53269184",
"0.5287026",
"0.5262419",
"0.52261305",
"0.52043664",
"0.5179654",
"0.5138324",
"0.5138324",
"0.51121",
"0.5101838",
"0.50992155",
"0.5083046",
"0.50523716",
"0.5038269",
"0.50379634",
"0.50305307",
"0.50249714",
"0.5024019",
"0.5016509",
"0.49976012"
] |
0.7879291
|
0
|
iRep gc_content message calculate gc content over sequence windows
|
def _iRep_gc_content(seq, window = 5000, slide = 100):
# convert GC
replacements = {'G':1, 'C':1, 'A':0, 'T':0, 'N':0}
GC = [] # 1 if base is G or C, else 0
for base in seq:
try:
GC.append(replacements[base.upper()])
except:
GC.append(0)
# calculate gc content over sliding windows
i = 0
weights = np.ones(window)
table = defaultdict(list)
for gc in scipy.signal.fftconvolve(GC, weights, 'valid').tolist()[0::slide]:
table['index'].append(i)
table['GC_content'].append(gc/window)
i += slide
return pd.DataFrame(table)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gc_content(seq):\n return round( (seq.count('C') + seq.count('G')) / len(seq) * 100 , 6 )",
"def get_gc_content(sequence):\n # get the sequence length and \n # make all the sequence characters upper case\n seq_len, sequence = len(sequence), sequence.upper()\n # count all gs and cs\n c = sequence.count('C')\n g = sequence.count('G')\n # returns the gc content from a sequence\n # sum up the |Gs and Cs counts and divide \n # by the sequence length\n return round((c + g) / seq_len, 4)",
"def gc_content(self):\n if self.sequence is not None:\n return np.around(GC(self.sequence), decimals=2)\n else:\n return None",
"def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + sc['C'] + sc['G'] + sc['T']) * 100, 2)",
"def GC_Content(self):\n GC_content = lambda dna: (dna.count('G')+dna.count('C'))\\\n /self.length\n return round(GC_content(self.sequence),4)",
"def get_gc_content(sequence):\n len_seq = len(sequence) - sum(alternative_bases_counter(sequence).values())\n sequence = sequence.upper()\n c = sequence.count('C')\n g = sequence.count('G')\n return round((c + g) / len_seq, 4)",
"def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100",
"def get_at_content(gc):\n return 1 - gc",
"def get_at_content(gc):\n return 1 - gc",
"def gc_content_sequence_window(sequence, as_overlap=False, k=20):\n # make sequence upper case and getting the length of it\n sequence, seq_len = sequence.upper(), len(sequence)\n # the array-like object to collect the data\n gc_content = []\n # non overlap sequence length\n non_overlap = range(0, len(sequence) - k + 1, k)\n # overlap sequence length\n overlap = range(0, seq_len - k + 1)\n # overlap is needed\n if as_overlap:\n # iterates to the overlap region\n for i in overlap:\n # creates the substring to count the gc_content\n subseq = sequence[i:i + k]\n # count and sum up the Gs and Cs counts\n g_c = subseq.count('C') + subseq.count('G')\n # collect the data in the array container\n gc_content.append(round(g_c / len(subseq), 4) * 100)\n # if non overlap is choosed\n else:\n # iterates to the mon overlap region\n for j in non_overlap:\n # creates the substring to count the gc_content\n subseq = sequence[j:j + k]\n # count and sum up the Gs and Cs counts\n g_c = subseq.count('C') + subseq.count('G')\n # collect the data in the array container\n gc_content.append(round(g_c / len(subseq), 4) * 100)\n return gc_content",
"def gc_content_along_the_chain(dna_sequence, window_size):\n sub_sequences = extract_sub_sequences(dna_sequence, window_size)\n gc_results = []\n for sub_sequence in sub_sequences:\n gc_results.append(gc_content(sub_sequence))\n return gc_results",
"def gc_content(seq):\n result = float(str(seq).count('G') + str(seq).count('C'))/len(seq) *100\n return result",
"def _compute_gc_content(self, graph):\n gc_content = 0\n for node, data in graph.nodes_iter(data=True):\n if (data['label'] == 'G') or (data['label'] == 'C'):\n gc_content += 1\n gc_content = float(gc_content) / float(nx.number_of_nodes(graph))\n return gc_content",
"def get_gc_sliding(self, window=500):\n\n gc_res = []\n\n # Get contigID for each window position\n labels, xbars = self._get_window_labels(window)\n\n # Get complete sequence to calculate sliding window values\n complete_seq = \"\".join(self.contigs.values()).lower()\n\n for p, i in enumerate(range(0, len(complete_seq), window)):\n\n seq_window = complete_seq[i:i + window]\n\n # Get GC proportion\n gc_res.append(self._gc_prop(seq_window, len(seq_window)))\n\n return gc_res, labels, xbars",
"def gc_content_plot (self): \n if 'gc_content' not in self.fastqc_data or len(self.fastqc_data['gc_content']) == 0:\n log.debug('gc_content not found in FastQC reports')\n return None\n \n pconfig = {\n 'id': 'fastqc_gc_content_plot',\n 'title': 'Per Sequence GC Content',\n 'ylab': 'Count',\n 'xlab': '%GC',\n 'ymin': 0,\n 'xmax': 100,\n 'xmin': 0,\n 'yDecimals': False,\n 'tt_label': '<b>{point.x}% GC</b>: {point.y}',\n 'colors': self.get_status_cols('gc_content'),\n }\n self.sections.append({\n 'name': 'Per Sequence GC Content',\n 'anchor': 'fastqc_gc_content',\n 'content': '<p>The average GC content of reads. Normal random library typically have a roughly normal distribution of GC content. ' +\n 'See the <a href=\"http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/5%20Per%20Sequence%20GC%20Content.html\" target=\"_bkank\">FastQC help</a>.</p>' +\n self.plot_xy_data(self.fastqc_data['gc_content'], pconfig)\n })",
"def gc_var(sequence, as_overlap=False, k=20):\n # calculates the percent of gc content\n gc = get_gc_content(sequence) * 100\n # get the gc content in the window space as an array\n gc_i = np.array(gc_content_sequence_window(sequence, as_overlap, k=k))\n # get the len of the gc content in the window space\n len_gc_i = np.shape(gc_i)[0]\n # check the difference of each point \n dif = gc_i - gc\n return np.log((1 / len_gc_i) * sum(abs(dif)))",
"def get_gc_content(cst, nmsk, segs):\n assert isinstance(cst, ChromStruct)\n\n # load the reference chromosome\n # ref = fasta_array(cst.chrom, cst.refg_files)\n ref = fasta_array(cst.chrom, cst.ancs_files)\n\n # get the GC content at neutral sites for each segment\n gc = []\n for (start, end) in segs:\n cur_msk = nmsk[start:end]\n if not np.sum(cur_msk > 0):\n gc.append(0)\n else:\n cur_ref = ref[start:end]\n cur_neut = cur_ref[cur_msk > 0]\n gc_count = np.sum(np.in1d(cur_neut, ['C', 'G']))\n gc_fract = 1.0 * gc_count / len(cur_neut)\n gc.append(gc_fract)\n\n return np.array(gc)",
"def get_gc_content(regions, fasta):\r\n\tnuc_count = {\"T\":0, \"t\":0, \"A\":0, \"a\":0, \"G\":1, \"g\":1, \"C\":1, \"c\":1}\r\n\r\n\tgc = 0\r\n\ttotal = 0\r\n\tfasta_obj = pysam.FastaFile(fasta)\r\n\tfor region in regions:\r\n\t\tseq = fasta_obj.fetch(region.chrom, region.start, region.end)\r\n\t\tgc += sum([nuc_count.get(nuc, 0.5) for nuc in seq])\r\n\t\ttotal += region.end - region.start\r\n\tfasta_obj.close()\r\n\tgc_content = gc / float(total)\r\n\r\n\treturn(gc_content)",
"def gc_blocks(seq, block_size):\n\n # Make all capital\n seq = seq.upper()\n iterations = len(seq) // block_size\n\n # Iterate through finding the GC content\n gc = []\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n gc.append((block.count('G') + block.count('C')) / block_size)\n return tuple(gc)",
"def gc(self):\n g = self.seq.count('G')\n g += self.seq.count('g')\n c = self.seq.count('C')\n c += self.seq.count('c')\n return (g + c) / len(self.seq)",
"def compute_gc(seq): # seq should be a string\n num_GC = list(seq).count('g')+list(seq).count('c')+list(seq).count('G')+list(seq).count('C')\n amount_GC = num_GC/len(seq)\n return amount_GC",
"def gc_content(dna):\n seqlength = len(dna)\n\n # Count A and T nucleotides, including the W ambiguity base representing\n # either A or T\n atcount = dna.count('A') + dna.count('a') + \\\n dna.count('T') + dna.count('t') + \\\n dna.count('W') + dna.count('w')\n\n # Count C and G nucleotides, including the S ambiguity base representing\n # either C or G\n gccount = dna.count('C') + dna.count('c') + \\\n dna.count('G') + dna.count('g') + \\\n dna.count('S') + dna.count('s')\n\n # Count all other ambiguous nucleotides; most will be Ns, but occasionally\n # there will be other IUPAC ambiguity symbols\n ncount = seqlength - atcount - gccount\n\n if atcount + gccount == 0:\n assert ncount == seqlength\n gccontent = 0.0\n else:\n gccontent = float(gccount) / float(gccount + atcount)\n return gccontent",
"def find_GC_content(fasta_file_name):\n\twith open(fasta_file_name) as fasta:\n\t\tGC_content = {}\n\t\tfor line in fasta:\n\n\t\t\t# Each line (bar the last) ends with '\\n'\n\t\t\tloc_line = line.replace('\\n', '')\n\n\t\t\t# Finds '>' at opening of line (FASTA seq title)\n\t\t\tif re.match(r'^>', loc_line):\n\t\t\t\tGC_content[loc_line] = 0\n\t\t\t\tG_count = 0\n\t\t\t\tC_count = 0\n\t\t\t\tcount = 0\n\t\t\t\tcurrent = loc_line\n\t\t\telse:\n\t\t\t\tG_count += loc_line.count('G')\n\t\t\t\tC_count += loc_line.count('C')\n\t\t\t\tcount += len(loc_line)\n\t\t\t\tGC_content[current] = float((G_count + C_count)) / count\n\treturn GC_content",
"def getGC(self):\n numGC = self.sequence.upper().count(\"G\") + self.sequence.upper().count(\"C\")\n self.gc = float(numGC)/len(self.sequence)\n return self.gc",
"def gc_content(self, letters='CGS'):\n if len(self) == 0:\n denom = 1.\n else:\n denom = float(self._N)\n letters = [x.upper() for x in letters] + [x.lower() for x in letters]\n letters = list(set(letters))\n counter = sum(self._data.count(x) for x in letters)\n return 100. * counter / denom",
"def gc_frequency(self):\n result = str(self.seq).count(\"G\") + str(self.seq).count(\"C\")\n return result",
"def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))",
"def gc_map(seq, block_size, gc_thresh):\n\n # Get the GC content for each block\n gc_cont = gc_blocks(seq, block_size)\n\n # Iterate through the sequence adding the appropriate cased block_size\n new_seq = ''\n iterations = len(seq) // block_size\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n if gc_cont[i] >= gc_thresh:\n new_seq += block.upper()\n else:\n new_seq += block.lower()\n return new_seq",
"def normaliseHmmByReferenceGCContent(hmm, gcContent):\n for state in range(hmm.stateNumber):\n if state not in (2, 4): #Don't normalise GC content of insert states \n #(as they don't have any ref bases!)\n n = toMatrix(hmm.emissions[(SYMBOL_NUMBER**2) * \n state:(SYMBOL_NUMBER**2) * (state+1)])\n hmm.emissions[(SYMBOL_NUMBER**2) * state:(SYMBOL_NUMBER**2) * (state+1)] = \\\n fromMatrix(map(lambda i : map(lambda j : (n[i][j]/sum(n[i])) * \n (gcContent/2.0 if i in [1, 2] else (1.0-gcContent)/2.0), range(SYMBOL_NUMBER)), \n range(SYMBOL_NUMBER))) #Normalise",
"def get_gc_count(dataset):\n\n gc_count_dict = {}\n\n for sequence in SeqIO.parse(dataset, 'fasta'):\n c_count = sequence.seq.count('C')\n g_count = sequence.seq.count('G')\n gc_count = ((c_count + g_count)/len(sequence))*100\n gc_count_dict[sequence.id] = gc_count\n\n\n return gc_count_dict"
] |
[
"0.7346947",
"0.7220607",
"0.6958903",
"0.69466054",
"0.6928254",
"0.6875267",
"0.68252456",
"0.6799876",
"0.6799876",
"0.6786788",
"0.6718868",
"0.660639",
"0.6367929",
"0.6344572",
"0.62539244",
"0.6087192",
"0.60487205",
"0.60303813",
"0.59628457",
"0.59487265",
"0.58792704",
"0.5855243",
"0.56695545",
"0.56275046",
"0.54790545",
"0.5437367",
"0.53690124",
"0.5208273",
"0.5153181",
"0.51213485"
] |
0.7706852
|
0
|
RIGHT FROM iREP linear function for sorted coverage profile y = mx + b
|
def coverage_function(pars, X, data = None, printPs = False):
m = pars['m'].value
b = pars['b'].value
if printPs is True:
print('m: %s b: %s' % \
('{:,}'.format(int(m)), '{:,}'.format(int(b))))
results = [float(m * x) + b for x in X]
if data is None:
return np.asarray(results)
return np.asarray([y - data[i] for i, y in enumerate(results)]) # model - data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n vc=(np.arange(npix)-xc)/rstar*veq\n vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n return profile",
"def gaus_pol1(x, a, b, c, m, yint):\n return gaus(x, a, b, c) + lin(x, m, yint)",
"def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]",
"def poly_regression_cubic(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0",
"def nav_ipr(x):\n x2 = x*x\n s2 = np.sum(x2)\n if s2 < MACH_EPSILON:\n # Zero sum. Could happen for veeery small overall prevalence.\n return 0.\n else:\n return np.sum(x2 * x2 / (s2 * s2))",
"def s2profile(r,r0,A,B):\n x = r/r0\n res = A*4./(np.exp(x)+np.exp(-x))**2 + B\n return res",
"def apointbiserialr(x,y):\r\n TINY = 1e-30\r\n categories = pstats.aunique(x)\r\n data = pstats.aabut(x,y)\r\n if len(categories) <> 2:\r\n raise ValueError, \"Exactly 2 categories required (in x) for pointbiserialr().\"\r\n else: # there are 2 categories, continue\r\n codemap = pstats.aabut(categories,N.arange(2))\r\n recoded = pstats.arecode(data,codemap,0)\r\n x = pstats.alinexand(data,0,categories[0])\r\n y = pstats.alinexand(data,0,categories[1])\r\n xmean = amean(pstats.acolex(x,1))\r\n ymean = amean(pstats.acolex(y,1))\r\n n = len(data)\r\n adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))\r\n rpb = (ymean - xmean)/asamplestdev(pstats.acolex(data,1))*adjust\r\n df = n-2\r\n t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n return rpb, prob",
"def I(x, y, l, p):\n \n return 0.5 / (mu * c) * A0**2 * ( u(x, y, l, p) )**2",
"def fun(_, y):\n return np.array([-self.r * self.beta * y[1] * y[0] / self.N,\n self.r * self.beta * y[1] * y[0] / self.N - self.gamma * y[1],\n self.gamma * y[1]])",
"def linear(m, b, x, xx):\n y = m*(x - xx) + b\n return y",
"def objective(rp,n=5000,C=-2*10**11,a=300,b=1):\n l = log(rp)/n\n r = exp(l)\n rm1 = r-1\n return (rp-1)*((a-b*n)*rm1 + 1) - C*(rm1)*(rm1)\n #return rm1",
"def intrpf(xi,x,y):\n\n # calculate yi = p(xi) using Lagrange polynomial \n yi = ((xi-x[1])*(xi-x[2])/((x[0]-x[1])*(x[0]-x[2]))) * y[0]\\\n +((xi-x[0])*(xi-x[2])/((x[1]-x[0])*(x[1]-x[2]))) * y[1]\\\n +((xi-x[0])*(xi-x[1])/((x[2]-x[0])*(x[2]-x[1]))) * y[2]\n return yi",
"def y(df,x):\r\n x_p=np.array(df['Vertices'])\r\n y_p=np.array(df['DIxPRE 252'])\r\n cs = scipy.interpolate.splrep(x_p,y_p)\r\n return scipy.interpolate.splev(x,cs)",
"def coverage(self):\r\n return 0, 1",
"def bla_ipr(x):\n phi = x / np.sqrt(np.sum(x**2))\n return np.sum(phi**4)\n # if s2 < MACH_EPSILON:\n # # Zero sum. Could happen for veeery small overall prevalence.\n # return 0.\n # else:\n # return np.sum(x2 * x2 / (s2 * s2))",
"def cumulative_capacity_rule(_m, g, y):\r\n\r\n return sum(m.x_c[g, j] for j in m.Y if j <= y)",
"def evaluate(self, x):\n # Assign 'continuum'\n y = self[\"off\"] + self[\"lin\"] * x\n # Add Voigt lines\n for i in smo.range(self.n):\n p = str(i + 1)\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y",
"def Insurance(Md,X):\n u = X[iu]\n b = Md.b()\n return u/b - u/(1-u+u*b)",
"def lpointbiserialr(x,y):\r\n TINY = 1e-30\r\n if len(x) <> len(y):\r\n raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.'\r\n data = pstats.abut(x,y)\r\n categories = pstats.unique(x)\r\n if len(categories) <> 2:\r\n raise ValueError, \"Exactly 2 categories required for pointbiserialr().\"\r\n else: # there are 2 categories, continue\r\n codemap = pstats.abut(categories,range(2))\r\n recoded = pstats.recode(data,codemap,0)\r\n x = pstats.linexand(data,0,categories[0])\r\n y = pstats.linexand(data,0,categories[1])\r\n xmean = mean(pstats.colex(x,1))\r\n ymean = mean(pstats.colex(y,1))\r\n n = len(data)\r\n adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))\r\n rpb = (ymean - xmean)/samplestdev(pstats.colex(data,1))*adjust\r\n df = n-2\r\n t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))\r\n prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float\r\n return rpb, prob",
"def process_O2(x, lb, ub):\n x = x.abs()\n x.loc[(x <= 1) & (x > 0)] = x.loc[(x <= 1) & (x > 0)] * 100\n x.loc[(x <= 10) & (x > 1)] = x.loc[(x <= 10) & (x > 1)] * 10\n x.loc[(x <= lb ) | (x > ub)] = np.nan\n return x",
"def prediction_cost(a, y):\n return np.sum(-(y * np.log(a) + (1 - y) * np.log(1 - a)))",
"def alincc(x,y):\r\n x = N.ravel(x)\r\n y = N.ravel(y)\r\n covar = acov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n\r\n xvar = avar(x)*(len(x)-1)/float(len(x)) # correct denom to n\r\n yvar = avar(y)*(len(y)-1)/float(len(y)) # correct denom to n\r\n lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))\r\n return lincc",
"def lin(x, m, b):\n return m*x + b",
"def lin_eqn(l, m, ind_arr, aa, bb, cc):\n m[ind_arr] = -cc / bb\n l[ind_arr] = (x[ind_arr] - a[0][ind_arr] - a[2][ind_arr]\n * m[ind_arr]) / (a[1][ind_arr] + a[3][ind_arr] * m[ind_arr])",
"def evaluate(bounds , func):\n if len(bounds) != 2:\n raise ValueError(\"Bounds should contain 2 elements, found %d.\" % len(bounds))\n\n a = bounds[0]\n b = bounds[1]\n ya = func(a)\n yb = func((a+b)/2.)\n yc = func(b)\n I = (b-a) * (ya + 4. * yb + yc) / 6.\n return I",
"def prediction_cost(a, y):\n return np.sum((a - y) ** 2) / 2",
"def _find_b1(self, x, x_bar, y, y_bar):\n self.model['b1'] = (np.sum((x - x_bar) * (y - y_bar)) / np.sum((x - x_bar) ** 2))",
"def gini(y):\n p = _proba(y)\n return 1.0 - sum(list(map(lambda x: x * x, p)))",
"def fun(_, y):\n return np.array([-self.r * self.beta * y[2] * y[0] / self.N,\n self.r * self.beta * y[2] * y[0] / self.N - self.sigma * y[1],\n self.sigma * y[1] - self.gamma * y[2],\n self.gamma * y[2]])",
"def _evaluate(self, x, y):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n f = (1 - alpha) * self.xInterpolators[y_pos - 1](\n x\n ) + alpha * self.xInterpolators[y_pos](x)\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n f = np.zeros(m) + np.nan\n if y.size > 0:\n for i in range(1, self.y_n):\n c = y_pos == i\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n f[c] = (1 - alpha) * self.xInterpolators[i - 1](\n x[c]\n ) + alpha * self.xInterpolators[i](x[c])\n return f"
] |
[
"0.6025226",
"0.6005712",
"0.5694815",
"0.5658959",
"0.56120896",
"0.5553306",
"0.554806",
"0.5538556",
"0.5525831",
"0.55137885",
"0.5503041",
"0.55006784",
"0.54923874",
"0.5491246",
"0.54875684",
"0.5486014",
"0.54742515",
"0.54602444",
"0.5458907",
"0.54526603",
"0.5451693",
"0.54499465",
"0.54441845",
"0.544352",
"0.5429089",
"0.5415571",
"0.54112315",
"0.5405755",
"0.54033875",
"0.5377581"
] |
0.6396224
|
0
|
Run each component of the PASCAL VOC dataset formatter in sequence.
|
def begin(self):
print("Renaming images to VOC data format...")
self.renamer.rename()
print("Renaming Complete.")
print("Splitting the data in to training/validation/test sets and creating text files respectively...")
self.data_splitter.split()
print("Data Splitting Complete.")
print("Annotating images...")
self.annotation_maker.build()
print("Annotation Complete.")
print("VOC PASCAL Data Formatting Complete.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n (\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n ) = data_sequence_cli_parsing()\n\n if options.verbose:\n log.setLevel(logging.DEBUG)\n else:\n log.setLevel(logging.INFO)\n\n # Run the routine piping all the analysis steps\n rc = data_sequence(\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n )\n sys.exit(rc)",
"def pipeline_runner():\n # file_parser() # take raw data file and extract columns of interest. remove contaminants.\n entry_parser() # remove duplicates, faulty lines and format the whole thing normally.\n lfq_parser() # replace 0s in lfq reading with random small numbers for t testing purposes\n # open Rstudio and do T testing there\n ROutputFormatter() # reformat R output to something more appealing, add FDR and fold change values",
"def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)",
"def main():\r\n run_processes('tests.csv', 'labs.csv')",
"def run(self):\n\n self.load_file()\n self.cat_to_num()\n self.split()",
"def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")",
"def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()",
"def train_and_eva():\n for sol in _solvers:\n for sub_u_rate in _sub_u_rates:\n print(\"now processing \" + sol + \" \" + str(sub_u_rate))\n pu_first_stage_training(sol, sub_u_rate)\n first_stage_test(sol, sub_u_rate)\n print(\"\\n\\n\")",
"def process_example(self):\n\n name_files, transition_funcs = self.treat.get_transition_functions()\n for name_file, transition_func in zip(name_files, transition_funcs):\n print(f\"Name file: {name_file}\")\n self.afd(transition_func, self.q0, self.qfs, self.words)\n print('-'*50)",
"def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()",
"def test_calibration_1_vs_all_vis_api(experiment_to_use):\n experiment = experiment_to_use\n probabilities = experiment.probabilities\n viz_outputs = (\"pdf\", \"png\")\n with TemporaryDirectory() as tmpvizdir:\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = os.path.join(tmpvizdir, f\"*.{viz_output}\")\n visualize.calibration_1_vs_all(\n [probabilities, probabilities],\n experiment.ground_truth,\n experiment.ground_truth_metadata,\n experiment.output_feature_name,\n top_n_classes=[6],\n labels_limit=0,\n model_namess=[\"Model1\", \"Model2\"],\n output_directory=tmpvizdir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 5 == len(figure_cnt)",
"def run_pipeline() -> pd.DataFrame:\n\n print('Loading data...')\n data = load_data()\n print('Stage one processing...')\n text = data.text\n text_ = stage_one_preprocessing(text)\n data_ = data.copy()\n data_.text = text_\n #print('Splitting by sentences...')\n #data_ = split_by_sentences(data_)\n print('Stage two processing...')\n text_ = stage_two_preprocessing(data_.text)\n print('Stage three processing...')\n text_ = stage_three_preprocessing(text_)\n data_.text = text_\n print('Saving file...')\n data_.to_csv(r'./data/stage_three_text.csv')\n return data_",
"def process_examples(self):\n input_dir = self.input_directory\n counter_example_dir = self.counter_example_input_directory\n if input_dir is None:\n input_dir = Path.cwd() / \"examples\"\n if counter_example_dir is None:\n counter_example_dir = Path.cwd() / \"counter_examples\"\n for fmt in self.input_formats:\n input_examples = glob.glob(os.path.join(str(input_dir), f\"*.{fmt}\"))\n input_counter_examples = glob.glob(os.path.join(str(counter_example_dir), f\"*.{fmt}\"))\n if not input_counter_examples:\n logging.warning(\n f\"No counter examples found in {self.counter_example_input_directory}\"\n )\n self.process_examples_from_list(input_examples, fmt, False)\n self.process_examples_from_list(input_counter_examples, fmt, True)",
"def step(self):\r\n self.datacollector.collect(self)\r\n self.datacollector2.collect(self)\r\n self.datacollector3.collect(self)\r\n self.datacollector4.collect(self)\r\n self.datacollector5.collect(self)\r\n self.datacollector6.collect(self)\r\n self.datacollector7.collect(self)\r\n self.datacollector8.collect(self)\r\n self.datacollector9.collect(self)\r\n self.datacollector10.collect(self)\r\n self.datacollector11.collect(self)\r\n self.datacollector12.collect(self)\r\n self.datacollector13.collect(self)\r\n\r\n self.datacollector14.collect(self)\r\n self.datacollector15.collect(self)\r\n self.datacollector16.collect(self)\r\n self.datacollector17.collect(self)\r\n self.datacollector18.collect(self)\r\n self.datacollector19.collect(self)\r\n self.datacollector20.collect(self)\r\n self.datacollector21.collect(self)\r\n self.datacollector22.collect(self)\r\n self.datacollector23.collect(self)\r\n self.datacollector24.collect(self)\r\n self.datacollector25.collect(self)\r\n self.datacollector26.collect(self)\r\n self.schedule.step()",
"def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()",
"def preprocessing(dataset):\r\n # upload the processed time series data to its distinct numpy arrays\r\n print('')\r\n training_input = []\r\n training_output = []\r\n validation_input = []\r\n validation_output = []\r\n loop = tqdm.tqdm(total = len(dataset), position = 0, leave = False)\r\n for d in range(len(dataset)):\r\n loop.set_description('Packaging all processed time series data... ' .format(len(dataset)))\r\n time_series = dataset[d]\r\n if time_series.get_dataset_label() == \"TRAINING\":\r\n training_input.append(time_series.sampled_matrix())\r\n training_output.append(time_series.get_close_value())\r\n else:\r\n validation_input.append(time_series.sampled_matrix())\r\n validation_output.append(time_series.get_close_value())\r\n loop.update(1)\r\n\r\n training_input, training_output = np.array(training_input), np.array(training_output)\r\n training_input = np.reshape(training_input, (training_input.shape[0], training_input.shape[1], 1))\r\n validation_input, validation_output = np.array(validation_input), np.array(validation_output)\r\n validation_input = np.reshape(validation_input, (validation_input.shape[0], validation_input.shape[1], 1))\r\n print('\\n')\r\n loop.close()\r\n return training_input, training_output, validation_input, validation_output",
"def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))",
"def run_preprocessing(self, serie):\n pass",
"def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)",
"def evaluate_batch(self, pipelines):",
"def step(self, actions):\n\n representative_data = []\n original_data = []\n\n actions = np.array(actions).reshape(3, -1)\n\n for np_data, df_data, calculator, som, action in zip(self.np_data_list, self.df_data_list, self.calculators, self.som_objects, actions):\n\n representative_days, cluster_numbers = calculator.get_representative_days(\n som, np_data, action)\n\n representative_days = pd.DataFrame(representative_days)\n\n representative_days = self.wide_to_long(representative_days)\n approximation_calc = ApproximateData(df_data, 4)\n representative_days = ApproximateData(df_data, 4).get_load_duration_curve(\n representative_days, cluster_numbers)\n\n representative_data.append(representative_days)\n\n # original_days = approximation_calc.get_load_duration_curve(\n # year=\"2013\")\n\n\n\n # original_data.append(original_days)\n\n # metrics_calculator = Metrics(original_data[0], representative_data[0], original_data[1],\n # representative_data[1], original_data[2], representative_data[2], \"dc\")\n\n pv_original = pd.read_csv(\n '{}data/processed/resources/pv_processed.csv'.format(project_dir))\n wind_original = pd.read_csv(\n '{}data/processed/resources/onshore_processed.csv'.format(project_dir))\n load_original = pd.read_csv(\n '{}data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n\n pv_original_ldcs, wind_original_ldcs, load_original_ldcs = get_each_ldc(pv_original, wind_original, load_original)\n\n multi_year_metrics_calculator = MultiYearMetrics(pv_original_ldcs, representative_data[0], wind_original_ldcs, representative_data[1], load_original_ldcs, representative_data[2], self.year_start)\n multi_year_metrics = multi_year_metrics_calculator.get_multi_year_average_metrics(\"dc\")\n multi_year_metrics = multi_year_metrics.reset_index()\n # logger.debug(\"multi_year_metrics: \\n{}\".format(multi_year_metrics))\n\n nrmse = multi_year_metrics[multi_year_metrics['metric'] == 'nrmse dc'].iloc[0].value\n rae = multi_year_metrics[multi_year_metrics['metric'] == 'rae dc'].iloc[0].value\n correlation = multi_year_metrics[multi_year_metrics['metric'] == 'correlation'].iloc[0].value\n\n # error_metrics = metrics_calculator.get_mean_error_metrics()\n # nrmse = error_metrics.iloc[1].value\n # rae = error_metrics.iloc[2].value\n # correlation = error_metrics.iloc[0].value\n # reward = -error_metrics.value.sum()\n # logger.info(\"error_metrics: {}\".format(error_metrics))\n # logger.info(\"error_metrics: {}\".format(error_metrics.iloc[0]))\n\n # return reward\n return nrmse, rae, correlation",
"def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type == 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties",
"def _formatData(self):\r\n assert self._runData is not None\r\n\r\n # Getting Axes data into separate lists\r\n x=[]; y=[]; z=[]\r\n for i in range(len(self._runData)):\r\n ySet = []; xSet = []; zSet = []\r\n for _ in range(len(self._runData[i][1][0])):\r\n ySet.append(self._runData[i][0])\r\n y.append(ySet)\r\n xSet.append(self._runData[i][1][0])\r\n x.append(xSet)\r\n zSet.append(self._runData[i][1][1])\r\n z.append(zSet)\r\n\r\n # Reduce extra brackets\r\n xnew = []; znew = []\r\n for i in range(len(x)):\r\n xnew.append(x[i][0])\r\n znew.append(z[i][0])\r\n x = xnew; z = znew\r\n\r\n self._frequency = x\r\n self._voltages = y\r\n self._intensity = z",
"def test_pipeline_data(self, doc_list):\n summaries = []\n for item in doc_list:\n summaries.append(self.preprocess(item))\n return summaries",
"def run(self):\n self.simulate_test_data()\n self.pipeline_test_data()\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])",
"def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)",
"def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()",
"def cv_training(\n db: audformat.Database,\n partitioning: str,\n features: pd.DataFrame,\n normalization: str,\n root: str\n):\n\n df = db['covid'].df\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n folds = sorted(list(set([x.split('.')[-2] for x in db.tables if f'folds.{partitioning}' in x])))\n\n metrics = {\n 'F1': audmetric.unweighted_average_fscore,\n 'UAR': audmetric.unweighted_average_recall,\n 'ACC': audmetric.accuracy\n }\n\n if not os.path.exists(os.path.join(root, 'results.csv')):\n for fold in folds:\n\n def get_fold(db, fold_name):\n df = db[f'folds.{partitioning}.{fold}.{fold_name}'].df\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n return df\n df_train = get_fold(db, 'train')\n df_dev = get_fold(db, 'dev')\n df_test = get_fold(db, 'test')\n\n features = features.fillna(0)\n\n c_params = [\n .0001, \n .0005, \n .001, \n .005, \n .01, \n .05, \n .1, \n .5, \n 1\n ]\n\n steps = []\n if normalization == 'standard':\n # normalization performed on the fly for each fold\n steps.append(('scale', StandardScaler()))\n steps.append(('classify', SVC(kernel='rbf', probability=True)))\n\n max_f1 = 0\n best_c = None\n for c_param in audeer.progress_bar(\n c_params,\n total=len(c_params),\n desc='LOSO',\n disable=True\n ):\n \n clf = Pipeline(steps)\n clf.set_params(**{'classify__C': c_param})\n clf.fit(\n features.loc[df_train.index],\n df_train['covid'],\n )\n pred = clf.predict(features.loc[df_dev.index])\n f1_score = audmetric.unweighted_average_fscore(df_dev['covid'], pred)\n if f1_score > max_f1:\n max_f1 = f1_score\n best_c = c_param\n \n clf.set_params(**{'classify__C': best_c})\n clf.fit(\n features.loc[pd.concat((df_train, df_dev)).index],\n pd.concat((df_train, df_dev))['covid'],\n )\n joblib.dump(\n clf,\n os.path.join(root, f'clf.{fold}.pkl')\n )\n df.loc[df_test.index, 'predictions'] = clf.predict(features.loc[df_test.index])\n df.loc[df_test.index, 'probabilities'] = clf.predict_proba(features.loc[df_test.index])[:, 0]\n \n df.reset_index(inplace=True)\n df.to_csv(os.path.join(root, 'results.csv'), index=False)\n else:\n df = pd.read_csv(os.path.join(root, 'results.csv'))\n\n results = {\n key: metrics[key](df['covid'], df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'results.yaml'), 'w') as fp:\n yaml.dump(results, fp)\n\n file_df = df.groupby('file').apply(\n lambda x: pd.Series({\n 'covid': x['covid'].mode()[0],\n 'predictions': x['predictions'].mode()[0]\n })\n )\n\n results = {\n key: metrics[key](file_df['covid'], file_df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'speaker_results.yaml'), 'w') as fp:\n yaml.dump(results, fp)",
"def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)",
"def run(self):\n\n \"\"\" Detects labels given a GCS path. \"\"\"\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.enums.Feature.LABEL_DETECTION]\n operation = video_client.annotate_video(self.input()[0].path, \n features=features)\n print('\\nProcessing video for label annotations:\\n')\n \n result = operation.result(timeout=900)\n \n print(result)\n print('\\nFinished processing.')\n \n segment_labels = result.annotation_results[0].shot_label_annotations\n \n output_csv = \"\"\n for i, segment_label in enumerate(segment_labels):\n print('Video label description: {}'.format(\n segment_label.entity.description))\n for category_entity in segment_label.category_entities:\n print('\\tLabel category description: {}'.format(\n category_entity.description))\n \n for i, segment in enumerate(segment_label.segments):\n start_time = (segment.segment.start_time_offset.seconds +\n segment.segment.start_time_offset.nanos / 1e9)\n end_time = (segment.segment.end_time_offset.seconds +\n segment.segment.end_time_offset.nanos / 1e9)\n positions = '{}s to {}s'.format(start_time, end_time)\n confidence = segment.confidence\n print('\\tSegment {}: {}'.format(i, positions))\n print('\\tConfidence: {}'.format(confidence))\n \n output_csv_line = '{},{},{},{}\\n'.format(\n segment_label.entity.description, \n category_entity.description,\n start_time, \n end_time)\n output_csv = output_csv + output_csv_line\n print(output_csv_line)\n print('\\n')\n print('\\n\\n-------\\n') \n print(output_csv) \n \n # output data\n f = self.output().open('w')\n f.write(output_csv)\n f.close()"
] |
[
"0.5771342",
"0.5765575",
"0.5497768",
"0.548974",
"0.5480084",
"0.547106",
"0.5441647",
"0.5418743",
"0.54071593",
"0.53904974",
"0.5355923",
"0.5344643",
"0.53241676",
"0.53178394",
"0.5260527",
"0.5237012",
"0.5234183",
"0.5232217",
"0.5231144",
"0.52279514",
"0.52246046",
"0.5221334",
"0.521476",
"0.52140754",
"0.52031136",
"0.5196831",
"0.518517",
"0.5182438",
"0.51773936",
"0.5171729"
] |
0.5895718
|
0
|
Some groups were deleted, remove them from users' principals.
|
def on_groups_deleted(event):
permission_backend = event.request.registry.permission
for change in event.impacted_objects:
group = change["old"]
bucket_id = event.payload["bucket_id"]
group_uri = utils.instance_uri(event.request, "group", bucket_id=bucket_id, id=group["id"])
permission_backend.remove_principal(group_uri)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cleanup_user_groups(event):\n name = event.object.name\n\n if name.startswith(\"group:\"):\n principals = get_principals()\n users_groups = [p for p in principals if name in principals[p].groups]\n for user_or_group in users_groups:\n principals[user_or_group].groups.remove(name)\n\n DBSession.query(LocalGroup).filter(\n LocalGroup.principal_name == name).delete()",
"def __check_removed_groups(self) -> None:\n for group in Group.objects.all():\n if group.name not in main_app_groups:\n self.__delete_group(group)\n\n self.stdout.write(f'Removed {group} group')",
"def del_from_groups(self, username, groups):\n pass",
"def test_groups_group_users_delete(self):\n pass",
"def test_groups_group_users_delete(self):\n pass",
"def remove_groups(self, resolvables):\n memberships = [membership for membership in self.group_memberships]\n for g in [self._resolve_group(group) for group in resolvables]:\n done = False\n for membership in memberships:\n if membership.group.href == g.href:\n membership.delete()\n done = True\n\n if not done:\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % g.name,\n })",
"def test_groups_group_users_user_delete(self):\n pass",
"def test_groups_group_users_user_delete(self):\n pass",
"def test_resource_user_resource_remove_user_from_user_groups_delete(self):\n pass",
"def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def remove_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % group.name,\n })",
"def test_delete_groups(self):\n pass",
"def remove_from_group(_request, group_id, email):\n group = models.UserGroup.get_by_id(int(group_id))\n user_key = models.UserProfile.load(email).key()\n if group.users is None:\n group.users = []\n logging.warning('Group \"%s\" had a None users list' % group.name)\n group.users.remove(user_key)\n group.put()\n\n url = urlresolvers.reverse('views.admin.edit_user', args=[email])\n return http.HttpResponseRedirect(url)",
"def delete_group(user):\n return 'do some magic!'",
"def test_deluser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"group.adduser\", [self._group, self._user])\n self.assertTrue(self.run_function(\"group.deluser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertNotIn(self._user, str(group_info[\"members\"]))",
"def test_user_group_controller_delete(self):\n pass",
"def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')",
"def update_user_backward(apps, schema_editor):\n Group.objects.all().delete()",
"def delusers(self, args):\n\n if len(args) < 2:\n print(self.addusers.__doc__)\n return\n\n gname = args[0]\n users = args[1:]\n\n g = sr.group(gname)\n\n if not g.in_db:\n print(\"Group '%s' not found.\" % ( gname ))\n return\n\n not_members = g.user_rm( users )\n g.save()\n\n for uname in not_members:\n print(\"Unable to remove non-member '%s' from '%s'\" % ( gname, uname ))",
"def test_050_delete_user_from_group(self):\n\n testflow.step(\n \"Removing user %s from group %s\", TEST_USER1, TEST_GROUP1\n )\n assert MANAGE_CLI.run(\n 'userdel',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to remove user from group '%s'\" % TEST_GROUP1\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'userdel',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to remove nonexisting user from group\"\n\n testflow.step(\"Removing user %s from nonexistent group\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'userdel',\n 'nonsense',\n user=TEST_USER1\n )[0], \"Possible to remove user from nonexisting group\"",
"def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except ObjectDoesNotExist:\n pass",
"def delete_groups(self, roles):\n security_group_names = self._get_all_group_names()\n\n for role in roles:\n role_group_name = self.group_name_for_role(role)\n if role_group_name in security_group_names:\n self.ec2Connection.delete_security_group(role_group_name)\n cluster_group_name = self.get_cluster_group_name()\n if cluster_group_name in security_group_names:\n self.ec2Connection.delete_security_group(cluster_group_name)",
"def delete_algorithm_groups_hook(*_, instance: Algorithm, using, **__):\n try:\n instance.editors_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.users_group.delete(using=using)\n except ObjectDoesNotExist:\n pass",
"def delete_groups_and_permissions_for_recipes(apps, schema_editor):\n Group = apps.get_model('auth', 'Group')\n Permission = apps.get_model('auth', 'Permission')\n # Delete the recipe_submitters group.\n recipe_submitters = Group.objects.get(name='recipe_submitters')\n recipe_submitters.delete()\n # Remove permissions for recipes to the dcc groups.\n recipe_permissions = Permission.objects.filter(content_type__app_label='recipes',\n content_type__model__in=('unitrecipe', 'harmonizationrecipe'))\n developers = Group.objects.get(name='dcc_developers')\n developers.permissions.remove(*recipe_permissions)\n analysts = Group.objects.get(name='dcc_analysts')\n analysts.permissions.remove(*recipe_permissions)",
"def do_del_group(dbsync, group):\n pass",
"def deleteGroup(request):\n \n if request.method == 'POST':\n \n form = DeleteGroupForm(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n \n try:\n \n #Delete records from m2m of Users & Groups for selected groups\n for eachGroup in cd['group_id']:\n Group_User.objects.filter(group = eachGroup.id).delete()\n \n #Delete Group(s)\n for eachGroup in cd['group_id']:\n Group.objects.filter(id = eachGroup.id).delete()\n \n except:\n \n error = 'Unable to Delete Groups!'\n return render_to_response('deletegroup.html', \n {'form': form, 'error': error},\n context_instance=RequestContext(request))\n \n return HttpResponseRedirect('/deletegroup/success/')\n \n else:\n \n return render_to_response('deletegroup.html',\n {'form': form}, \n context_instance=RequestContext(request)) \n \n else:\n \n form = DeleteGroupForm()\n \n return render_to_response('deletegroup.html', \n {'form': form}, \n context_instance=RequestContext(request))",
"def test_groups_group_ref_delete(self):\n pass",
"def test_delete_group(self):\n pass",
"def test_delete_group(self):\n pass",
"def _delete_by_list(self, group_ids):\n path = '/members/%s/groups/remove' % self.member['member_id']\n data = {'group_ids': group_ids}\n if self.member.account.adapter.put(path, data):\n self._dict = dict(x for x in self._dict.items()\n if x[0] not in group_ids)"
] |
[
"0.7971463",
"0.7591969",
"0.7588629",
"0.73583055",
"0.73583055",
"0.7355232",
"0.7186501",
"0.7186501",
"0.7132678",
"0.7087664",
"0.705803",
"0.69757986",
"0.69535804",
"0.69411594",
"0.69188416",
"0.68842256",
"0.68559927",
"0.6846518",
"0.68274677",
"0.67699003",
"0.6699461",
"0.6668971",
"0.6639553",
"0.6633836",
"0.6615803",
"0.66115683",
"0.661068",
"0.66040117",
"0.66040117",
"0.66017765"
] |
0.7736013
|
1
|
Some groups were changed, update users' principals.
|
def on_groups_changed(event):
permission_backend = event.request.registry.permission
for change in event.impacted_objects:
if "old" in change:
existing_record_members = set(change["old"].get("members", []))
else:
existing_record_members = set()
group = change["new"]
group_uri = f"/buckets/{event.payload['bucket_id']}/groups/{group['id']}"
new_record_members = set(group.get("members", []))
new_members = new_record_members - existing_record_members
removed_members = existing_record_members - new_record_members
for member in new_members:
# Add the group to the member principal.
permission_backend.add_user_principal(member, group_uri)
for member in removed_members:
# Remove the group from the member principal.
permission_backend.remove_user_principal(member, group_uri)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def groups_update(self, mar, request):\n group_id = mar.viewed_user_auth.user_id\n member_ids_dict, owner_ids_dict = self._services.usergroup.LookupMembers(\n mar.cnxn, [group_id])\n owner_ids = owner_ids_dict.get(group_id, [])\n member_ids = member_ids_dict.get(group_id, [])\n if not permissions.CanEditGroup(\n mar.perms, mar.auth.effective_ids, owner_ids):\n raise permissions.PermissionException(\n 'The user is not allowed to edit this group.')\n\n group_settings = self._services.usergroup.GetGroupSettings(\n mar.cnxn, group_id)\n if (request.who_can_view_members or request.ext_group_type\n or request.last_sync_time or request.friend_projects):\n group_settings.who_can_view_members = (\n request.who_can_view_members or group_settings.who_can_view_members)\n group_settings.ext_group_type = (\n request.ext_group_type or group_settings.ext_group_type)\n group_settings.last_sync_time = (\n request.last_sync_time or group_settings.last_sync_time)\n if framework_constants.NO_VALUES in request.friend_projects:\n group_settings.friend_projects = []\n else:\n id_dict = self._services.project.LookupProjectIDs(\n mar.cnxn, request.friend_projects)\n group_settings.friend_projects = (\n list(id_dict.values()) or group_settings.friend_projects)\n self._services.usergroup.UpdateSettings(\n mar.cnxn, group_id, group_settings)\n\n if request.groupOwners or request.groupMembers:\n self._services.usergroup.RemoveMembers(\n mar.cnxn, group_id, owner_ids + member_ids)\n owners_dict = self._services.user.LookupUserIDs(\n mar.cnxn, request.groupOwners, autocreate=True)\n self._services.usergroup.UpdateMembers(\n mar.cnxn, group_id, list(owners_dict.values()), 'owner')\n members_dict = self._services.user.LookupUserIDs(\n mar.cnxn, request.groupMembers, autocreate=True)\n self._services.usergroup.UpdateMembers(\n mar.cnxn, group_id, list(members_dict.values()), 'member')\n\n return api_pb2_v1.GroupsUpdateResponse()",
"def test_user_group_controller_update(self):\n pass",
"def update_groups(self, groups):\n self.fetch_group_messages() # preload messages before updating groups\n self.groups = groups\n self.put()",
"def update_user_forward(apps, schema_editor):\n group = Group.objects.update_or_create(\n id=1,\n name=\"Administrator\"\n )\n Group.objects.update_or_create(\n id=2,\n name=\"Manager\"\n )\n Group.objects.update_or_create(\n id=3,\n name=\"Leader\"\n )\n Group.objects.update_or_create(\n id=4,\n name=\"Sale\"\n )",
"def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']",
"def update_user_backward(apps, schema_editor):\n Group.objects.all().delete()",
"def update_group_association(old_email, new_email):\n\n groups.update({'users': old_email},\n {'$set': {'users.$': new_email}},\n upsert=False,\n multi=True)",
"def __upgrade_group(self, group_old: Group, group_new: str) -> None:\n def upgrade_permissions(permissions_list_1: list, permissions_list_2: list, action) -> list:\n permissions_to_change = [\n permission_change\n for permission_change in permissions_list_1\n if permission_change not in permissions_list_2\n ]\n return self.__upgrade_group_permissions(group_old, permissions_to_change, action)\n\n messages = [f'Group {group_new} permission changes']\n\n permissions_from_db = [p.codename for p in group_old.permissions.all()]\n permissions_from_file = main_app_groups[group_new]\n\n # in db but not in file -> remove\n messages += upgrade_permissions(permissions_from_db, permissions_from_file, REMOVE)\n # in file but not in db -> add\n messages += upgrade_permissions(permissions_from_file, permissions_from_db, ADD)\n\n if len(messages) > 1:\n self.__print_messages(messages)",
"def test_update_group(self):\n pass",
"def __check_new_groups(self) -> None:\n for group in main_app_groups:\n try:\n group_old = Group.objects.get(name=group)\n self.__upgrade_group(group_old, group)\n except ObjectDoesNotExist: # need to create new group\n self.__create_new_group(group)\n\n self.stdout.write(f'Added new group {group} with {main_app_groups[group]} permissions')",
"def update_user():",
"def _migrate_users(correct_course_key, role):\r\n log.info(\r\n u'Giving %s users access to %s',\r\n group.name, correct_course_key\r\n )\r\n for user in orm['auth.user'].objects.filter(groups=group).all():\r\n entry = orm['student.courseaccessrole'](\r\n role=role,\r\n user=user,\r\n org=correct_course_key.org,\r\n course_id=correct_course_key,\r\n )\r\n try:\r\n entry.save()\r\n except IntegrityError:\r\n pass",
"def save_model(self, request, obj, form, change):\n add_groups, remove_groups = utils.get_m2m_changes(\n obj.groups,\n form.cleaned_data['groups']\n )\n\n super(UserAdmin, self).save_model(request, obj, form, change)\n\n log_user_groups(\n request.user,\n obj,\n (add_groups, remove_groups)\n )",
"def test_modify_group(self):\n # Add users\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n resp = self.app.post('/users', data=json.dumps(self.test_user2_data))\n assert resp.status_code == 200\n\n # Modify group 1 to add user 2\n resp = self.app.put('/groups/{}'.format(self.test_group1_groupid),\n data=json.dumps(self.test_group1_modify))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_user1_userid in data\n assert self.test_user2_userid in data\n\n # Check user2 to see if it has group1 listed\n resp = self.app.get('/users/{}'.format(self.test_user2_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert 'groups' in data\n assert self.test_group1_groupid in data['groups']",
"def update_groups(self, user_id, group_ids):\n user = self.get(user_id, raise_error=True)\n new_groups = (\n self.session\n .query(tables.Group)\n .filter(tables.Group.group_id.in_(group_ids))\n )\n user.groups = new_groups.all()\n self.session.flush()",
"def allowed_group_access_change(user, group):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return (user.has_perm(\"vnswww.group_change_any\")\n or (user.has_perm(\"vnswww.group_change_org\")\n and group.org == up.org))",
"def _migrate_users(correct_course_key, role, lower_org):\r\n for user in orm['auth.user'].objects.filter(groups=group).all():\r\n entry = orm['student.courseaccessrole'](\r\n role=role, user=user,\r\n org=correct_course_key.org, course_id=correct_course_key\r\n )\r\n try:\r\n entry.save()\r\n except IntegrityError:\r\n # already stored\r\n pass\r\n orgs[lower_org] = correct_course_key.org",
"async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))",
"def change_user_mailing_lists(sender, instance, action, reverse, model, pk_set, **kwargs):\n\tmail = instance.associated_user.email\n\tusername = instance.associated_user.first_name+\" \"+instance.associated_user.last_name\n\t#if groups are going to be added\n\tif action == \"post_add\":\n\t\tgroups = instance.groups_as_string\n\t\tgroups = groups.split(\", \")\n\t\t#put all added groups_as_string\n\t\tfor group in groups:\n\t\t \trequests.post(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members\".format(group),\n\t\t auth=('api', settings.MAILGUN_API_KEY),\n\t\t data={'subscribed': True,\n\t\t \t 'name':username,\n\t\t 'address': mail})\n\t#if groups are going to be removed\n\tif action == \"pre_clear\": \n\t\t#put the removed groups from a set in a list\n\t\tprevious = UserProfile.objects.get(pk=instance.pk)\n\t\tgrplst = previous.groups_as_string.split(\", \")\n\t\t#loop over list\n\t\tfor grp in grplst:\n\t\t\trequests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members/{}\".format(grp,mail),auth=('api', settings.MAILGUN_API_KEY))",
"def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)",
"def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})",
"def test_groups_group_users_put(self):\n pass",
"def test_groups_group_users_put(self):\n pass",
"def on_groups_deleted(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n group = change[\"old\"]\n bucket_id = event.payload[\"bucket_id\"]\n group_uri = utils.instance_uri(event.request, \"group\", bucket_id=bucket_id, id=group[\"id\"])\n\n permission_backend.remove_principal(group_uri)",
"def update_group_profile_members(instance, sender, **kwargs):\n from geonode.groups.models import GroupProfile, GroupMember\n if not instance.groups:\n return\n\n if instance == instance.get_anonymous():\n # The invited user cannot be anonymous\n return\n\n member_joined = []\n\n for user_group in instance.groups.all():\n try:\n group = GroupProfile.objects.get(group=user_group)\n member, created = GroupMember.objects.get_or_create(\n group=group,\n user=instance)\n # Give member role as default\n if not member.role:\n member.role = GroupMember.MEMBER\n member.save()\n member_joined.append(member)\n except GroupProfile.DoesNotExist:\n continue\n\n for group_member in GroupMember.objects.filter(user=instance):\n if group_member not in member_joined:\n group_member.delete()",
"def update_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n pass",
"def test_creator_in_group_can_update(self):\n\n self.client.login(username='notlogged', password='notlogged')\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_can_access(self, self.url,\n post_redirect_url=expected_url,\n data=self.data)\n\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, self.data['name'])\n self.assertEqual(updated_group.description, self.data['description'])\n self.assertIsNotNone(updated_group.last_edit_date)",
"def update_user():\n #TODO user update \n pass",
"def updateOrgAdmins(request):\n\n return updateRole('gsoc_org_admin')",
"def customer_group_put(group_info):\n related_groups = customer_group_get_related(group_info[\"group_id\"])\n\n now = datetime.datetime.now()\n f = '%Y-%m-%d %H:%M:%S'\n insert_time = now.strftime(f)\n\n result = {\"success\" : 1, \"message\" : \"Customer Company can not be Updated\"}\n\n for groups in related_groups:\n c_group_info = list(groups)\n #check for the roles\n c_g_id = c_group_info[0]\n c_g_role = c_group_info[1].split(\"(\")[1][:-1]\n c_g_name = c_group_info[1].split(\"(\")[0]\n new_c_g_name = group_info[\"group_name\"] + \"(\"+ c_g_role +\")\"\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n cursor = db.cursor()\n query = \"\"\"\n UPDATE `groups`\n SET\n `group_name` = \"%s\",\n `company_name` = \"%s\",\n `company_address` = \"%s\",\n `company_telephone` = \"%s\",\n `company_fax` = \"%s\",\n `company_website` = \"%s\",\n `company_sales_contact` = \"%s\",\n `company_purchase_contact` = \"%s\",\n `company_business` = \"%s\",\n `company_business_type` = \"%s\",\n `company_sales_email` = \"%s\",\n `company_purchase_email` = \"%s\",\n `company_reg_number` = \"%s\",\n `company_vat_number` = \"%s\",\n `description` = \"%s\"\n WHERE\n `group_id` = \"%s\"\n \"\"\" %(\n new_c_g_name, \n group_info[\"company_name\"],\n group_info[\"company_address\"],\n group_info[\"company_telephone\"],\n group_info[\"company_fax\"],\n group_info[\"company_website\"],\n group_info[\"company_sales_contact\"],\n group_info[\"company_purchase_contact\"],\n group_info[\"company_business\"],\n group_info[\"company_business_type\"],\n group_info[\"company_sales_email\"],\n group_info[\"company_purchase_email\"],\n group_info[\"company_reg_number\"],\n group_info[\"company_vat_number\"],\n group_info[\"description\"],\n c_g_id\n )\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Company Updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer Company can not be Updated. Error \\\"\\'%s\\'\\\" \\\n Query = %s\" % (e, query) }\n finally:\n cursor.close()\n db.close()\n return result"
] |
[
"0.6849886",
"0.6760221",
"0.66351527",
"0.65919024",
"0.6334146",
"0.62793535",
"0.6236597",
"0.6152543",
"0.61075574",
"0.60988516",
"0.60501474",
"0.60424423",
"0.6019404",
"0.6014997",
"0.5987296",
"0.5920208",
"0.5881497",
"0.5880628",
"0.5869055",
"0.5863141",
"0.5857216",
"0.5828813",
"0.5828813",
"0.5812772",
"0.5791933",
"0.5791485",
"0.5790367",
"0.5784231",
"0.57560056",
"0.57554024"
] |
0.740874
|
0
|
read_json takes a list of str with the names of all json files. Uses the json to generate Constituency objects that get appended to the Election constituency attribute
|
def read_json(self, json_files):
self.file_access.write_log("Attempting to read the json files {}".format(json_files))
for i in json_files:
self.constituency.append(self.file_access.read_election_json(i))
self.file_access.write_log("The {} json file has been added to the Constituency object".format(i))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data",
"def read_json():\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n \n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()",
"def load_data_set_from_json(json_path, ratio=0.7):\n train_doc_list = []\n train_category_list = []\n\n test_doc_list = []\n test_category_list = []\n if os.path.exists(json_path):\n with open(json_path, \"r\") as f:\n category_map = json.load(f)\n categories = category_map.keys()\n\n for category in categories:\n all_doc_list = category_map.get(category)\n length = len(all_doc_list)\n train_set_length = int(length * ratio)\n\n for i in range(length):\n if i < train_set_length:\n train_doc_list.append(all_doc_list[i])\n train_category_list.append(category)\n else:\n test_doc_list.append(all_doc_list[i])\n test_category_list.append(category)\n\n else:\n print(\"File doesn't exist, please run load_file_to_json first\")\n\n return train_doc_list, train_category_list, test_doc_list, test_category_list",
"def read_json():\n with open('clubs.json') as json_file:\n return json.load(json_file)",
"def read_json(self):\n # read in all json files in the input_path, that match the\n # algorithm_name and are not outputs\n for f in os.listdir(self.input_path):\n if(os.path.splitext(f)[1] == \".json\") and (os.path.basename(f).startswith(self.algorithm_name)) and (not os.path.basename(f).startswith(\"_\")):\n self.__input_jsons += [json.load(open(self.input_path + f))]",
"def read_json(self, json_name):\r\n with open(json_name, 'r') as infile:\r\n self.pet_file = json.load(infile) # load existing json file\r\n self.pet_file_name = json_name # set name to passed name\r",
"def from_coco_json(json_file, max_size=None, min_freq=None, tokenizer=None):\n\n assert os.path.exists(json_file), f\"{json_file} not exists\"\n coco = COCO(json_file)\n\n image_ids = coco.getImgIds()\n\n texts = []\n for image_id in image_ids:\n ann_ids = coco.getAnnIds(imgIds=image_id)\n anns = coco.loadAnns(ann_ids)\n texts += [i['caption'] for i in anns]\n\n return Vocabulary.from_list(\n texts, max_size=max_size, \n min_freq=min_freq, tokenizer=tokenizer)",
"def json_import(json_file,import_types=['Gradelist','Category','Grade'],inherit=False):\n try:\n import json\n except ImportError:\n warnings.warn('Failed to import json module. Cannot execute json_import')\n return\n if not hasattr(json_file,'read'):\n if not isinstance(json_file,basestring) or not os.path.exists(json_file):\n raise ValueError, 'Argument \\'json_file\\' is not readable, ' \\\n 'and could not be validated as a file path.'\n else:\n json_file = open(json_file)\n \n if not isinstance(import_types,list):\n if isinstance(import_types,basestring) and import_types in \\\n ['Gradelist','Category','Grade']:\n import_types = [import_types]\n else:\n raise ValueError, 'import_types argument not supported, should'\\\n ' be list including one or more classes from grading'\n \n decoder = json.JSONDecoder()\n json_decoded = decoder.decode(json_file.read())\n \n grading_list = json_decoded.values()[0]\n \n ret_list = []\n if not inherit:\n for obj in grading_list:\n if not isinstance(obj,dict):\n raise TypeError, 'Unexpected type in grading array: \\'{}\\''.format(type(obj))\n if 'type' not in obj:\n raise KeyError, 'Could not find \\'type\\' key in structure.'\n typ = obj.get('type')\n if typ not in ['Gradelist','Category','Grade']:\n raise ValueError, 'type value \"{}\" is not an accepted value.'.format(typ)\n if typ not in import_types:\n continue\n \n if 'name' not in obj:\n raise KeyError, 'Could not find \\'name\\' key in structure.'\n name = obj.get('name')\n \n attribs = obj.get('attribs',{})\n if not isinstance(attribs,dict):\n raise ValueError, 'attribs of \\'{}\\' is not an object as expected.'.format(name)\n \n if typ == 'Gradebook':\n identifs = obj.get('identifiers',{})\n user = obj.get('user')\n if not isinstance(identifs,dict):\n identifs = {}\n ret_list.append(Gradebook(name,user,identifiers=identifs))\n elif typ == 'Category':\n ret_list.append(Category(name,**attribs))\n elif typ == 'Grade':\n ret_list.append(Grade(name,**attribs))\n return ret_list\n #for inherit\n \n grbk = [x for x in grading_list if x.get('type','') == 'Gradebook']\n if not grbk:\n raise ValueError, 'json_import requires a Gradebook in the file for inherit-mode.'\n grbk = grbk[0]\n grbknm = grbk.pop('name')\n grbk = Gradebook(grbknm,grbk.get('user'),identifiers=grbk.get('identifiers',{}))\n cat_dict = {}\n for itm in grading_list:\n if itm.get('type','') == 'Category':\n if 'name' not in itm:\n raise ValueError, 'Category missing a name in JSON file.'\n attribs = itm.get('attribs',{})\n if 'parent' in itm and itm['parent'] == grbknm:\n pass\n else:\n dbg('Category \\'{}\\'s parent does not match Gradebook name')\n continue\n cat_dict[itm['name']]=Category(itm['name'],**attribs)\n grbk.add_category(*cat_dict.values())\n \n for itm in grading_list:\n if itm.get('type','') == 'Grade':\n if 'name' not in itm:\n raise ValueError, 'Grade missing a name in JSON file.'\n attribs = itm.get('attribs',{})\n if 'parent' not in itm or itm['parent'] not in cat_dict:\n dbg('Grade parent does not match Category name')\n continue\n grbk.add_grade(itm['parent'],Grade(itm['name'],**attribs))\n return grbk",
"def read_json():\n with open(\"Ratings.json\") as json_data:\n json_list = []\n for line in json_data:\n json_dict = json.loads(line)\n json_list.append(json_dict)\n return json_list",
"def test_get_categories_from_json():\n allocator = RecipeAllocator()\n allocator.load_data(\n orders_dir=\"tests/orders.json\", recipes_dir=\"tests/recipes.json\"\n )\n allocator.get_categories_from_json()\n assert list(allocator.portion_categories_dict.keys()) == [\n \"two_portions\",\n \"four_portions\",\n ] and list(allocator.recipe_categories_dict.keys()) == [\n \"two_recipes\",\n \"three_recipes\",\n \"four_recipes\",\n ]",
"def _load_jsons(self):\n items = []\n labels = []\n segms = []\n for split in self._splits:\n anno = os.path.join(self._root, 'annotations', split) + '.json'\n _coco = COCO(anno)\n self._coco.append(_coco)\n classes = [c['name'] for c in _coco.loadCats(_coco.getCatIds())]\n if not classes == self.classes:\n raise ValueError(\"Incompatible category names with COCO: \")\n assert classes == self.classes\n json_id_to_contiguous = {\n v: k for k, v in enumerate(_coco.getCatIds())}\n if self.json_id_to_contiguous is None:\n self.json_id_to_contiguous = json_id_to_contiguous\n self.contiguous_id_to_json = {\n v: k for k, v in self.json_id_to_contiguous.items()}\n else:\n assert self.json_id_to_contiguous == json_id_to_contiguous\n\n # iterate through the annotations\n image_ids = sorted(_coco.getImgIds())\n for entry in _coco.loadImgs(image_ids):\n filename = entry['file_name']\n dirname = split.split('_')[-1] # \"train\" or \"val\"\n abs_path = os.path.join(self._root, dirname, filename)\n if not os.path.exists(abs_path):\n raise IOError('Image: {} not exists.'.format(abs_path))\n label, segm = self._check_load_bbox(_coco, entry)\n # skip images without objects\n if self._skip_empty and label is None:\n continue\n items.append(abs_path)\n labels.append(label)\n segms.append(segm)\n return items, labels, segms",
"def load():\n try:\n with open('learn.json', 'r') as file:\n return json.load(file)\n except IOError:\n return []",
"def load_life(name):\n\tif not '.json' in name:\n\t\tname += '.json'\n\t\n\twith open(os.path.join(LIFE_DIR, name), 'r') as e:\n\t\treturn json.loads(''.join(e.readlines()))",
"def json_parsing():\n with open('countries.json') as f:\n countries = json.load(f)\n\n return countries",
"def from_json_file(filename, check_format=True):\n filename = os.path.abspath(filename)\n directory = os.path.dirname(filename)\n with open(filename, \"r\") as infile:\n return ExperimentListFactory.from_json(\n infile.read(), check_format=check_format, directory=directory\n )",
"def read_json(filename):\n with open(filename) as json_file:\n file = json.load(json_file)\n shape = file[\"NN_Shape\"]\n top_genes = file[\"Gene_List\"]\n weights = file[\"Weights\"]\n output_key = file[\"One-Hot_Encoded_Output\"]\n biases = file[\"Biases\"]\n return shape, top_genes, weights, output_key, biases",
"def load_from_file(cls):\n try:\n with open(cls.__name__ + '.json', 'r') as f:\n jstr = f.read()\n list_d = Base.from_json_string(jstr)\n list_o = []\n for item in list_d:\n list_o.append(cls.create(**item))\n return list_o\n except FileNotFoundError:\n return []",
"def read_ARC_JSON(filepath):\n \n # Open the JSON file and load it \n data = json.load(open(filepath))\n\n # Extract the train/test input/output grids. Each grid will be a\n # list of lists of ints. We convert to Numpy.\n train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]\n train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]\n test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]\n test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]\n\n return (train_input, train_output, test_input, test_output)",
"def load_occlusion_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):\n from pycocotools.coco import COCO\n import pycocotools.mask as mask_util\n\n timer = Timer()\n json_file = PathManager.get_local_path(json_file)\n with contextlib.redirect_stdout(io.StringIO()):\n coco_api = COCO(json_file)\n if timer.seconds() > 1:\n logger.info(\"Loading {} takes {:.2f} seconds.\".format(json_file, timer.seconds()))\n\n id_map = None\n if dataset_name is not None:\n meta = MetadataCatalog.get(dataset_name)\n cat_ids = sorted(coco_api.getCatIds())\n cats = coco_api.loadCats(cat_ids)\n # The categories in a custom json file may not be sorted.\n thing_classes = [c[\"name\"] for c in sorted(cats, key=lambda x: x[\"id\"])]\n meta.thing_classes = thing_classes\n\n # In COCO, certain category ids are artificially removed,\n # and by convention they are always ignored.\n # We deal with COCO's id issue and translate\n # the category ids to contiguous ids in [0, 80).\n\n # It works by looking at the \"categories\" field in the json, therefore\n # if users' own json also have incontiguous ids, we'll\n # apply this mapping as well but print a warning.\n if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):\n if \"coco\" not in dataset_name:\n logger.warning(\n \"\"\"\n Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.\n \"\"\"\n )\n id_map = {v: i for i, v in enumerate(cat_ids)}\n meta.thing_dataset_id_to_contiguous_id = id_map\n print(meta)\n\n # sort indices for reproducible results\n img_ids = sorted(list(coco_api.imgs.keys()))\n # imgs is a list of dicts, each looks something like:\n # {'license': 4,\n # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',\n # 'file_name': 'COCO_val2014_000000001268.jpg',\n # 'height': 427,\n # 'width': 640,\n # 'date_captured': '2013-11-17 05:57:24',\n # 'id': 1268}\n # imgs = coco_api.loadImgs(img_ids)\n # print(imgs[0])\n # anns is a list[list[dict]], where each dict is an annotation\n # record for an object. The inner list enumerates the objects in an image\n # and the outer list enumerates over images. 
Example of anns[0]:\n # [{'segmentation': [[192.81,\n # 247.09,\n # ...\n # 219.03,\n # 249.06]],\n # 'area': 1035.749,\n # 'iscrowd': 0,\n # 'image_id': 1268,\n # 'bbox': [192.81, 224.8, 74.73, 33.43],\n # 'category_id': 16,\n # 'id': 42986},\n # ...]\n # anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]\n keep_idx = []\n anns = []\n for img_id in img_ids:\n ann = coco_api.imgToAnns[img_id]\n if len(ann) > 0:\n anns.append(ann)\n keep_idx.append(img_id)\n imgs = coco_api.loadImgs(keep_idx)\n\n\n if \"minival\" not in json_file:\n # The popular valminusminival & minival annotations for COCO2014 contain this bug.\n # However the ratio of buggy annotations there is tiny and does not affect accuracy.\n # Therefore we explicitly white-list them.\n ann_ids = [ann[\"id\"] for anns_per_image in anns for ann in anns_per_image]\n assert len(set(ann_ids)) == len(ann_ids), \"Annotation ids in '{}' are not unique!\".format(\n json_file\n )\n\n imgs_anns = list(zip(imgs, anns))\n\n logger.info(\"Loaded {} images in COCO format from {}\".format(len(imgs_anns), json_file))\n print(\"Loaded {} images in COCO format from {}\".format(len(imgs_anns), json_file))\n\n dataset_dicts = []\n\n ann_keys = [\"bbox\", \"category_id\"] + (extra_annotation_keys or [])\n\n num_instances_without_valid_segmentation = 0\n\n for (img_dict, anno_dict_list) in imgs_anns:\n record = {}\n record[\"file_name\"] = os.path.join(image_root, img_dict[\"file_name\"])\n record[\"height\"] = img_dict[\"height\"]\n record[\"width\"] = img_dict[\"width\"]\n image_id = record[\"image_id\"] = img_dict[\"id\"]\n record[\"sem_seg_file_name\"] = os.path.join(image_root, img_dict[\"seg_map\"])\n\n objs = []\n for anno in anno_dict_list:\n # Check that the image_id in this annotation is the same as\n # the image_id we're looking at.\n # This fails only when the data parsing logic or the annotation file is buggy.\n\n # The original COCO valminusminival2014 & minival2014 annotation files\n # actually contains bugs that, together with certain ways of using COCO API,\n # can trigger this assertion.\n assert anno[\"image_id\"] == image_id\n\n assert anno.get(\"ignore\", 0) == 0\n\n obj = {key: anno[key] for key in ann_keys if key in anno}\n\n segm = anno.get(\"segmentation\", None)\n if segm:\n if isinstance(segm, str): # path\n mask_path = os.path.join(image_root, segm) # binary mask\n if not os.path.exists(mask_path):\n num_instances_without_valid_segmentation += 1\n continue\n segm = cv2.imread(mask_path, 0)\n # cv2.imshow('1', segm)\n # cv2.waitKey(0)\n segm = np.asfortranarray(segm)\n segm = mask_util.encode(segm)\n # print(segm)\n obj[\"segmentation\"] = segm\n\n segm_occagn = anno.get(\"segmentation_occagn\", None)\n if segm_occagn:\n if isinstance(segm_occagn, str):\n mask_occagn_path = os.path.join(image_root, segm_occagn) # binary mask\n if not os.path.exists(mask_occagn_path):\n continue\n # segm_occagn = cv2.imread(mask_occagn_path)\n # # cv2.imshow('1', segm_occagn)\n # # cv2.waitKey(0)\n # segm_occagn = np.asfortranarray(segm_occagn)\n # segm_occagn = mask_util.encode(segm_occagn)\n obj[\"segmentation_occagn\"] = segm_occagn\n\n # USER: we only load center here, load other kpts in DatasetMapper\n keypts = anno.get(\"center_2d\", None)\n if keypts:\n keypts.append(2)\n obj[\"keypoints\"] = keypts\n # if keypts: # list[list[float]]\n # keypts = np.array(keypts)\n # keypts = np.insert(keypts, 2, 2, axis=1).flatten().tolist()\n # obj[\"keypoints\"] = keypts\n\n obj[\"bbox_mode\"] = BoxMode.XYWH_ABS\n if id_map:\n 
obj[\"category_id\"] = id_map[obj[\"category_id\"]]\n objs.append(obj)\n record[\"annotations\"] = objs\n dataset_dicts.append(record)\n\n if num_instances_without_valid_segmentation > 0:\n logger.warning(\n \"Filtered out {} instances without valid segmentation. \"\n \"There might be issues in your dataset generation process.\".format(\n num_instances_without_valid_segmentation\n )\n )\n return dataset_dicts",
"def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))",
"def load_json_data(path, fraction=None, examples_per_class=None):\n with open(path, 'rb') as file:\n data = json.load(file)\n features = np.array(data[0]).astype(float)\n targets = np.array(data[1]).astype(int)\n\n return features, np.array([[]]), targets, np.array([])",
"def read_json(fullpath):\n import json\n \n with open(fullpath, \"r\") as json_file_readed:\n json_readed = json.load(json_file_readed)\n\n return json_readed",
"def import_incomes_from_json(json_path):\n with open(json_path, \"rt\") as f:\n expenses = json.load(f)\n for exp in expenses:\n exp_record = far_core.db.IncomeRecord(\n date=datetime.datetime.strptime(exp[0], \"%Y-%m-%dT%H:%M:%S.%fZ\").date(),\n amount=exp[1],\n category=exp[2],\n note=exp[3],\n account=exp[4],\n )\n db.session.add(exp_record)\n db.session.commit()",
"def load_from_file(cls):\n\n try:\n list_of_ins = []\n with open(cls.__name__ + '.json') as my_file:\n dicts = Base.from_json_string(my_file.read())\n for key in dicts:\n list_of_ins += [cls.create(**key)]\n return (list_of_ins)\n except:\n return ([])",
"def read_json(fn):\n with open(fn) as f:\n return json.load(f, object_hook=_operator_object_hook)",
"def reconstitute():\n with open(TEXT_FPATH, 'w') as txt:\n for jfpath in json_fpaths():\n with open(jfpath) as f:\n jstruct = json.load(f)\n\n for recipe in jstruct.keys():\n _reconstitute_recipe(txt, jstruct[recipe])",
"def load_from_file(cls):\n new_list = []\n try:\n with open(\"%s.json\" % cls.__name__, mode='r') as f:\n file = cls.from_json_string(f.read())\n for i in file:\n new_list.append(cls.create(**i))\n except Exception:\n pass\n return new_list",
"def load_stat(input):\n with open(input['json'], 'r', encoding=input['encoding']) as f:\n return json.load(f)",
"def load_json(corpus):\n global corpus_dir, u_path, candidates, unknowns, encoding, language\n corpus_dir += corpus\n m_file = open(os.path.join(corpus_dir, META_FILENAME), \"r\")\n meta_json = json.load(m_file)\n m_file.close()\n\n u_path += os.path.join(corpus_dir, meta_json[\"folder\"])\n encoding += meta_json[\"encoding\"]\n language += meta_json[\"language\"]\n candidates += [author[\"author-name\"]\n for author in meta_json[\"candidate-authors\"]]\n unknowns += [text[\"unknown-text\"] for text in meta_json[\"unknown-texts\"]]",
"def load_co_registration_data_from_json(filename: str) -> Dict[str, CoRegistrationData]:\n with open(filename, \"r\") as json_file:\n data = json.load(json_file)\n co_reg_data = {}\n for index, data in data.items():\n co_reg_data[index] = CoRegistrationData(\n name=str(data['name']),\n target_w=int(data['target_w']),\n target_h=int(data['target_h']),\n transform_matrix=np.array(data['transform_matrix']),\n moving_img_name=str(data['moving_img_name'])\n )\n return co_reg_data"
] |
[
"0.65061456",
"0.6114574",
"0.6057887",
"0.5964582",
"0.5941496",
"0.59386635",
"0.5897578",
"0.5862386",
"0.58360434",
"0.5798207",
"0.5793471",
"0.5754497",
"0.5673281",
"0.5623062",
"0.5598817",
"0.55820906",
"0.55776715",
"0.55578196",
"0.55525863",
"0.5548048",
"0.55441093",
"0.5531891",
"0.5527013",
"0.5524623",
"0.54843265",
"0.54810584",
"0.54718167",
"0.5456915",
"0.54508215",
"0.54482996"
] |
0.7654895
|
0
|
Confirm creation of a CountyMortgageData object from a CSV row.
|
def test_data_creation_from_base_row(self, mock_read_csv):
    f = StringIO(self.data_header + self.data_row)
    reader = csv.DictReader(f)
    mock_read_csv.return_value = reader
    load_values()
    self.assertEqual(CountyMortgageData.objects.count(), 1)
    county = CountyMortgageData.objects.first()
    fields = reader.fieldnames
    fields.pop(fields.index('fips')) # test string separately
    fields.pop(fields.index('open')) # 'open' is stored as 'total'
    fields.pop(fields.index('date')) # date must be parsed before testing
    self.assertEqual(county.fips, self.data_row_dict.get('fips'))
    open_value = int(self.data_row_dict.get('open'))
    self.assertEqual(county.total, open_value)
    target_date = parser.parse(self.data_row_dict['date']).date()
    self.assertEqual(county.date, target_date)
    for field in fields: # remaining fields can be tested in a loop
        self.assertEqual(
            getattr(county, field), int(self.data_row_dict.get(field)))
    # test computed values
    self.assertEqual(
        county.epoch,
        int(target_date.strftime('%s')) * 1000)
    self.assertEqual(
        county.percent_90,
        int(self.data_row_dict.get('ninety')) * 1.0 / open_value)
    self.assertEqual(
        county.percent_30_60,
        (int(self.data_row_dict.get('thirty')) +
         int(self.data_row_dict.get('sixty'))) * 1.0 / open_value)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def from_csv(self, user, row):\n if len(row) != 6:\n raise BadRequest(_(\"Invalid line\"))\n self.name = row[1].strip()\n self.target_host = row[2].strip()\n self.service, created = Service.objects.get_or_create(\n name=row[3].strip())\n self.enabled = (row[4].strip() == 'True')\n self.verify_recipients = (row[5].strip() == 'True')\n self.save(creator=user)",
"def __init__(self,csvrow):\n self.raw = csvrow\n data = csvrow.split(',')\n self.number = data[0]\n self.area = int(data[1])\n self.population = int(data[5])\n self.latitude = float(data[7])\n self.longitude = float(data[8])",
"def from_csv(self, user, row):\n if len(row) != 4:\n raise BadRequest(_(\"Invalid line\"))\n self.name = row[1].strip()\n try:\n self.target = RelayDomain.objects.get(name=row[2].strip())\n except RelayDomain.DoesNotExist:\n raise NotFound(_(\"Relay domain %s does not exist\" % row[2].strip()))\n self.enabled = (row[3].strip() == 'True')\n self.save(creator=user)",
"def test_import_csv_file_return_customer_object(self):\n\n self.assertIsInstance(self.customers[0], Customer)",
"def from_csv(self, user, row):\n if len(row) < 4:\n raise BadRequest(_(\"Invalid line\"))\n self.name = row[1].strip().lower()\n for model in [DomainAlias, Domain]:\n if model.objects.filter(name=self.name).exists():\n raise Conflict\n domname = row[2].strip()\n try:\n self.target = Domain.objects.get(name=domname)\n except Domain.DoesNotExist:\n raise BadRequest(_(\"Unknown domain %s\") % domname)\n core_signals.can_create_object.send(\n sender=\"import\", context=self.target, object_type=\"domain_aliases\")\n self.enabled = row[3].strip().lower() in [\"true\", \"1\", \"yes\", \"y\"]\n self.save(creator=user)",
"def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def _people_object_from_csv_row(row, header, distinct_id_index=None):\n distinct_id_index = (header.index(\"$distinct_id\") if distinct_id_index is None else distinct_id_index)\n props = Mixpanel._properties_from_csv_row(row, header, ['$distinct_id'])\n profile = {'$distinct_id': row[distinct_id_index], '$properties': props}\n return profile",
"def test_csv_import_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"",
"def seed_treasury_yield():\n with open(yield_csv, newline='') as yields:\n data_reader = csv.DictReader(yields, delimiter=',')\n # Read data\n for row in data_reader:\n TreasuryBill.objects.create(\n date=row['Date'],\n three_month=row['3 Mo'],\n six_month=row['6 Mo'],\n one_year=row['1 Yr'],\n five_year=row['5 Yr'],\n ten_year=row['10 Yr'],\n thirty_year=row['30 Yr']\n ) \n print('Seeded Treasury successfully!')\n return True",
"def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()",
"def process_csv(self, user: User, csv_file):\n self.db_session.rollback()\n csv = pandas.read_csv(StringIO(csv_file.read().decode('utf-8')))\n missing_cols = [col_name for col_name in CSV_SENSOR_MAP.values() if col_name not in csv.columns.values]\n if missing_cols:\n raise OBDControllerError(f'CSV is missing the following columns: {\", \".join(missing_cols)}')\n\n csv = csv[CSV_SENSOR_MAP.values()]\n start_datetime = self._resolve_date_from_csv_row(csv.iloc[0])\n gen_session_id = str(start_datetime.timestamp()).replace('.', '')[:12]\n\n if self.db_session.query(OBDSession).filter(OBDSession.id == gen_session_id).first():\n return\n\n session = OBDSession.create(self.db_session, id=gen_session_id, user_id=user.id, date=start_datetime)\n _ = CarState.create_from_csv(self.db_session, session, csv)\n self.db_session.commit()",
"def test_handle__success(self):\n self.c.handle(csv=self.test_csv, confirmed=\"y\")\n self.assertIn(\"Successfully loaded data\", self.c.stdout.getvalue())\n self.assertEqual(CountyLimit.objects.count(), 3233)",
"def test_valid_rows(self):\n file = SimpleUploadedFile(\n \"test.csv\",\n b\"msisdn,facility code,id type,id number,messaging consent,edd year,\"\n b\"edd month,edd day,baby dob year, baby dob month, baby dob day,language\\n\"\n b\"+27820001001,123456,said,9001010001088,true,2021,12,1,,,,afr\\n\",\n )\n form = MomConnectImportForm(\n data={\"source\": \"MomConnect Import\"}, files={\"file\": file}\n )\n self.assertTrue(form.is_valid())\n instance = form.save()\n self.assertEqual(instance.status, MomConnectImport.Status.VALIDATING)\n self.assertEqual(instance.errors.count(), 0)\n\n [row] = instance.rows.all()\n self.assertEqual(row.row_number, 2)\n self.assertEqual(row.msisdn, \"+27820001001\")\n self.assertEqual(row.facility_code, \"123456\")\n self.assertEqual(row.id_type, ImportRow.IDType.SAID)\n self.assertEqual(row.id_number, \"9001010001088\")\n self.assertEqual(row.messaging_consent, True)\n self.assertEqual(row.research_consent, False)\n self.assertEqual(row.edd_year, 2021)\n self.assertEqual(row.edd_month, 12)\n self.assertEqual(row.edd_day, 1)\n self.assertEqual(row.language, ImportRow.Language.AFR)\n\n self.validate_momconnect_import.delay.assert_called_once_with(instance.id)",
"def test_single_customer(self):\n\n create_invoice = single_customer(\"Susan Wong\", \"invoice.csv\")\n create_invoice(\"test_items.csv\")\n\n # Generate list of rentals\n with open('invoice.csv', 'r') as csvfile:\n rentals = []\n for row in csvfile:\n rentals.append(row)\n\n print(rentals)\n\n # Assert statements\n self.assertEqual(rentals[3], ('Susan Wong,AT92,Office Chair,13\\n'))\n self.assertEqual(rentals[4], ('Susan Wong,KE25,Espresso Machine,30\\n'))",
"def ingest_customer_csv(csv_path):\n # Create a CSV import generator (next yields one db row)\n import_generator = import_csv_gen(csv_path)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n try:\n data = next(import_generator)\n if len(data) != 6:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add document to database\n with Connection():\n customer = Customer(\n user_id=data[CUST_USERID],\n name=data[CUST_NAME],\n address=data[CUST_ADDRESS],\n zip_code=int(data[CUST_ZIPCODE]),\n phone_number=data[CUST_PHONE],\n email=data[CUST_EMAIL]\n )\n customer.save() # This will perform an insert\n except StopIteration:\n break",
"def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)",
"def test_csv_import_hotel_success(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n client.post(path, {\"title\": \"city\", \"csv_file\": file})\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"",
"def csvObj():\n CSV_URL = \"http://unitedstates.sunlightfoundation.com/legislators/legislators.csv\"\n s = requests.get(CSV_URL) # Download the csv using requests.\n reader = csv.DictReader(s.text.splitlines(), lineterminator=\"\\n\") # Use the dictreader to make a dictionary with the attribute name paired with the rows value for that attribute.\n name2twitter_id = {}\n for row in reader:\n if (row['in_office'] == \"1\" and row['twitter_id'] != \"\"):\n name = row['firstname'] + \" \" # Construct the name.\n if (row['middlename'] != \"\"): # Not all names have middle names.\n name += row['middlename'] + \" \"\n name += row['lastname']\n name2twitter_id[name] = row['twitter_id'] # Assign the name to their handle.\n del name2twitter_id[\"Tim Murphy\"] # This representative does not have an active twitter handle. \n name2twitter_id[\"Gregory W. Meeks\"] = \"RepGregoryMeeks\" # Insert this representatives twitter handle manually.\n return name2twitter_id",
"def test_import_csv_file_return_empty_customer_object(self):\n\n empty_customers = self.import_customer.import_customer_csv_file(self.empty_customer_file_path)\n self.assertEqual([], empty_customers)",
"def import_glucose_from_csv(user, csv_file):\n csv_data = []\n reader = csv.reader(csv_file.read().splitlines(), delimiter=',',\n quotechar='\"')\n for row in reader:\n csv_data.append([item.strip() for item in row])\n\n glucose_objects = []\n\n # Check if headers exists. Skip the first entry if true.\n header_check = ['value', 'category', 'date', 'time']\n first_row = [i.lower().strip() for i in csv_data[0]]\n if all(i in first_row for i in header_check):\n csv_data = csv_data[1:]\n\n for row in csv_data:\n # Let's do an extra check to make sure the row is not empty.\n if row:\n try:\n category = Category.objects.get(name__iexact=row[1].strip())\n except ObjectDoesNotExist:\n category = Category.objects.get(name__iexact='No Category'.strip())\n\n # Since we always store the value in mg/dL format in the db, we need\n # to make sure we convert it here if the user's setting is set to\n # mmol/L.\n if user.settings.glucose_unit.name == 'mmol/L':\n value = int(to_mg(row[0]))\n else:\n value = int(row[0])\n\n glucose_objects.append(Glucose(\n user=user,\n value=value,\n category=category,\n record_date=datetime.strptime(row[2], DATE_FORMAT),\n record_time=datetime.strptime(row[3], TIME_FORMAT),\n notes=row[4],\n ))\n\n Glucose.objects.bulk_create(glucose_objects)",
"def __init__(self, csv_path):\n # Checking files\n fdops.check_if_file_exists(csv_path)\n\n # loading proposal data as a data frame\n self._df = pd.read_csv(csv_path)\n\n # Dictionary containing proposal properties\n self.props = self._get_properties(csv_path)",
"def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]",
"def _process_csv_data(csv_file, user_data_map):\n with open(csv_file, 'r') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if len(row) < 2:\n print('The CSV file is not in expected format.')\n raise Exception\n user_data_map[row[1].lower()] = row[0]",
"def test_csv_no_callback(self):\n csvfile = testdata.create_csv({\n \"foo\": testdata.get_int(),\n \"bar\": testdata.get_words(),\n })\n self.assertEqual(1, len(csvfile))",
"def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()",
"def test_is_valid_row(self):\n dognames = student_submission.read_csv('./dognames.csv')\n\n self.assertTrue(student_submission.is_valid_row(dognames[999]),\n 'Your implementation seems wrong')\n self.assertTrue(student_submission.is_valid_row(dognames[999], year=2010),\n 'Your implementation seems wrong')\n self.assertTrue(student_submission.is_valid_row(dognames[999], sex='m'),\n 'Your implementation seems wrong')\n self.assertTrue(student_submission.is_valid_row(dognames[999], year=2010, sex='m'),\n 'Your implementation seems wrong')\n self.assertFalse(student_submission.is_valid_row(dognames[999], year=2006, sex='m'),\n 'Your implementation seems wrong')\n self.assertFalse(student_submission.is_valid_row(dognames[999], year=2010, sex='w'),\n 'Your implementation seems wrong')\n\n self.assertEqual(sum(student_submission.is_valid_row(dognames[i]) for i in range(len(dognames))), 6980,\n 'Your implementation seems wrong')\n\n self.assertEqual(sum(student_submission.is_valid_row(dognames[i], sex='w') for i in range(len(dognames))), 3549,\n 'Your implementation seems wrong')\n\n self.assertEqual(sum(student_submission.is_valid_row(dognames[i], year=2000) for i in range(len(dognames))), 118,\n 'Your implementation seems wrong')"
] |
[
"0.65647215",
"0.6546224",
"0.62184423",
"0.61489475",
"0.6088905",
"0.6068437",
"0.59123445",
"0.5874923",
"0.587123",
"0.58576244",
"0.5758785",
"0.5727914",
"0.5639902",
"0.56310624",
"0.5628611",
"0.5628605",
"0.5627368",
"0.5611206",
"0.5606662",
"0.5591287",
"0.5553421",
"0.55510676",
"0.55492777",
"0.55121934",
"0.5512027",
"0.54826504",
"0.5447559",
"0.5447177",
"0.5442588",
"0.5418869"
] |
0.66076845
|
0
|
Runs a backfill on a fixed number of objects.
|
def backfill(task, request, check_name, num_objects):
    check = getattr(checks, check_name)(request.db)
    target_object = getattr(packaging_models, check.hooked_object)
    query = request.db.query(target_object.id).limit(num_objects)
    request.log.info("Running backfill on %d %ss." % (num_objects, check.hooked_object))
    runs = set()
    for (elem_id,) in query:
        runs.update([f"{check_name}:{elem_id}"])
    malware_check_service = request.find_service_factory(IMalwareCheckService)
    malware_check = malware_check_service(None, request)
    malware_check.run_checks(runs, manually_triggered=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _grow(self): \n limit = 0\n #Iterating through the list to find the number of elements\n for i in xrange(len(self)):\n if self._items[i] != self._fillValue:\n #There's an element at index i, so update the limit\n limit = i\n \n #Only grow the array if the limit+1 and the physical size is the same.\n if limit+1 == len(self):\n temp = Array(len(self)*2)\n \n #Copy existing elements to the new Array\n for i in xrange(len(self)):\n temp[i] = self._items[i]\n \n #Initialize the new elements to the fillValue\n for j in xrange(len(self), len(self)*2):\n temp[j] = self._fillValue\n self._items = temp",
"def test_fill(self):\r\n for _ in range(SIZE):\r\n self.nb.add(_)\r\n\r\n self.assertFalse(self.nb.isEmpty())\r\n self.assertTrue(self.nb.isFull())\r\n self.assertEqual(5, len(self.nb))",
"def test_overFill(self):\r\n high = 15\r\n for _ in range(high):\r\n self.nb.add(_)\r\n\r\n self.assertFalse(self.nb.isEmpty())\r\n self.assertTrue(self.nb.isFull())\r\n self.assertEqual(5, len(self.nb))\r\n\r\n # check all are still present\r\n for _ in range(high-1, high - SIZE-1, -1):\r\n self.assertTrue(_ in self.nb)",
"def free_all(self, objs):\r\n\t\tmax_capacity = self.max_capacity\r\n\t\tfree_objects_size = len(self.free_objects)\r\n\t\tfor obj in objs:\r\n\t\t\tif obj is None:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tif free_objects_size < self.max_capacity:\r\n\t\t\t\tself.free_objects.append(obj)\r\n\t\t\t\tobj.reset()\r\n\r\n\t\t\tself.peak = max(self.peak, len(self.free_objects))",
"def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return",
"def execute_asset_backfill_iteration(\n backfill: \"PartitionBackfill\",\n logger: logging.Logger,\n workspace_process_context: IWorkspaceProcessContext,\n instance: DagsterInstance,\n) -> Iterable[None]:\n from dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\n\n workspace_context = workspace_process_context.create_request_context()\n unloadable_locations = _get_unloadable_location_names(workspace_context, logger)\n asset_graph = ExternalAssetGraph.from_workspace(workspace_context)\n\n if backfill.serialized_asset_backfill_data is None:\n check.failed(\"Asset backfill missing serialized_asset_backfill_data\")\n\n try:\n previous_asset_backfill_data = AssetBackfillData.from_serialized(\n backfill.serialized_asset_backfill_data, asset_graph, backfill.backfill_timestamp\n )\n except DagsterDefinitionChangedDeserializationError as ex:\n unloadable_locations_error = (\n \"This could be because it's inside a code location that's failing to load:\"\n f\" {unloadable_locations}\"\n if unloadable_locations\n else \"\"\n )\n if os.environ.get(\"DAGSTER_BACKFILL_RETRY_DEFINITION_CHANGED_ERROR\"):\n logger.error(\n f\"Backfill {backfill.backfill_id} was unable to continue due to a missing asset or\"\n \" partition in the asset graph. The backfill will resume once it is available\"\n f\" again.\\n{ex}. {unloadable_locations_error}\"\n )\n yield None\n return\n else:\n raise DagsterAssetBackfillDataLoadError(f\"{ex}. {unloadable_locations_error}\")\n\n backfill_start_time = utc_datetime_from_timestamp(backfill.backfill_timestamp)\n\n instance_queryer = CachingInstanceQueryer(\n instance=instance, asset_graph=asset_graph, evaluation_time=backfill_start_time\n )\n\n if backfill.status == BulkActionStatus.REQUESTED:\n result = None\n for result in execute_asset_backfill_iteration_inner(\n backfill_id=backfill.backfill_id,\n asset_backfill_data=previous_asset_backfill_data,\n instance_queryer=instance_queryer,\n asset_graph=asset_graph,\n run_tags=backfill.tags,\n backfill_start_time=backfill_start_time,\n ):\n yield None\n\n if not isinstance(result, AssetBackfillIterationResult):\n check.failed(\n \"Expected execute_asset_backfill_iteration_inner to return an\"\n \" AssetBackfillIterationResult\"\n )\n\n updated_asset_backfill_data = result.backfill_data\n\n if result.run_requests:\n for updated_asset_backfill_data in _submit_runs_and_update_backfill_in_chunks(\n instance,\n workspace_process_context,\n backfill.backfill_id,\n result,\n previous_asset_backfill_data,\n asset_graph,\n instance_queryer,\n ):\n yield None\n\n if not isinstance(updated_asset_backfill_data, AssetBackfillData):\n check.failed(\n \"Expected _submit_runs_and_update_backfill_in_chunks to return an\"\n \" AssetBackfillData object\"\n )\n\n # Update the backfill with new asset backfill data\n # Refetch, in case the backfill was canceled in the meantime\n backfill = cast(PartitionBackfill, instance.get_backfill(backfill.backfill_id))\n updated_backfill = backfill.with_asset_backfill_data(\n updated_asset_backfill_data, dynamic_partitions_store=instance\n )\n if updated_asset_backfill_data.is_complete():\n # The asset backfill is complete when all runs to be requested have finished (success,\n # failure, or cancellation). 
Since the AssetBackfillData object stores materialization states\n # per asset partition, the daemon continues to update the backfill data until all runs have\n # finished in order to display the final partition statuses in the UI.\n updated_backfill = updated_backfill.with_status(BulkActionStatus.COMPLETED)\n\n instance.update_backfill(updated_backfill)\n\n elif backfill.status == BulkActionStatus.CANCELING:\n if not instance.run_coordinator:\n check.failed(\"The instance must have a run coordinator in order to cancel runs\")\n\n # Query for cancelable runs, enforcing a limit on the number of runs to cancel in an iteration\n # as canceling runs incurs cost\n runs_to_cancel_in_iteration = instance.run_storage.get_run_ids(\n filters=RunsFilter(\n statuses=CANCELABLE_RUN_STATUSES,\n tags={\n BACKFILL_ID_TAG: backfill.backfill_id,\n },\n ),\n limit=MAX_RUNS_CANCELED_PER_ITERATION,\n )\n\n yield None\n\n if runs_to_cancel_in_iteration:\n for run_id in runs_to_cancel_in_iteration:\n instance.run_coordinator.cancel_run(run_id)\n yield None\n\n # Update the asset backfill data to contain the newly materialized/failed partitions.\n updated_asset_backfill_data = None\n for updated_asset_backfill_data in get_canceling_asset_backfill_iteration_data(\n backfill.backfill_id,\n previous_asset_backfill_data,\n instance_queryer,\n asset_graph,\n backfill_start_time,\n ):\n yield None\n\n if not isinstance(updated_asset_backfill_data, AssetBackfillData):\n check.failed(\n \"Expected get_canceling_asset_backfill_iteration_data to return a PartitionBackfill\"\n )\n\n updated_backfill = backfill.with_asset_backfill_data(\n updated_asset_backfill_data, dynamic_partitions_store=instance\n )\n # The asset backfill is successfully canceled when all requested runs have finished (success,\n # failure, or cancellation). Since the AssetBackfillData object stores materialization states\n # per asset partition, the daemon continues to update the backfill data until all runs have\n # finished in order to display the final partition statuses in the UI.\n if updated_asset_backfill_data.have_all_requested_runs_finished():\n updated_backfill = updated_backfill.with_status(BulkActionStatus.CANCELED)\n\n instance.update_backfill(updated_backfill)\n else:\n check.failed(f\"Unexpected backfill status: {backfill.status}\")",
"def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)",
"async def fill_orbit_with_garbage(self) -> NoReturn:\n\n # Wait for a year when the first rubbish will appear on the map\n delay_tick = get_garbage_delay_tics(self._current_year)\n while delay_tick is None:\n await sleep(5)\n delay_tick = get_garbage_delay_tics(self._current_year)\n\n rubbish_frames = [\n frame\n for name, frame in self._all_frames['rubbish'].items()\n if not name.startswith('rocket')\n ]\n\n max_y, max_x = get_canvas_size(self._canvas)\n rubbish_count = 0\n\n # This variable shows how much rubbish can be on the map simultaneously\n max_rubbish_count = max_x * max_y // min(frame.height * frame.width\n for frame in rubbish_frames)\n while True:\n await sleep(MapSettings.RUBBISH_COEFF)\n\n produce_next = False\n frame = rubbish_frames[\n random.randint(0, len(rubbish_frames) - 1)]\n start_x = random.randint(-frame.width + 2, max_x - 2)\n start_y = -frame.height\n rubbish_object = MapObject(frame, start_x, start_y)\n\n # Check that a new rubbish sample does not overlap existing\n # If it does, try to produce another sample.\n for existing_object in self._dynamic_objects.values():\n if rubbish_object & existing_object:\n produce_next = True\n break\n\n if produce_next:\n continue\n\n if rubbish_count > max_rubbish_count:\n # Reset count because objects with old IDs disappeared\n rubbish_count = 0\n else:\n rubbish_count += 1\n\n rubbish_id = f'rubbish_{rubbish_count}'\n self._dynamic_objects[rubbish_id] = rubbish_object\n self._coroutines.append(self.fly_garbage(rubbish_object,\n rubbish_id))\n await sleep(get_garbage_delay_tics(self._current_year))",
"def step(self):\n for c in self.spill_list:\n \n self._schedule.step()",
"def backfill_bulk(self, report):\n\n if report['origin_ts'] < report['oldest_ts']:\n\n # Determine poll sizing accounting for max_bin_size. Split\n # polling into a large batch of polls and then a final poll.\n required = int((report['oldest_ts'] - report['origin_ts']) / 60)\n final_poll_size = required % report['max_bin_size']\n total_polls_batch = int((\n (required - final_poll_size) / report['max_bin_size']))\n\n start = report['origin_ts']\n step = report['max_bin_size'] * 60\n\n delay = 1 # wait time before attmepting to re-poll after error\n stagger = 2 # delay co-efficient\n timeout = 10 # number of times to repoll before exception raised.\n\n # poll exchange REST endpoint for first bulk batch missing bars\n bars_to_store = []\n for i in range(total_polls_batch):\n try:\n bars = report['exchange'].get_bars_in_period(\n report['symbol'], start, report['max_bin_size'])\n for bar in bars:\n bars_to_store.append(bar)\n stagger = 2 # reset stagger to base after successful poll\n start += step # increment the starting poll timestamp\n time.sleep(stagger + 1)\n except Exception as e:\n # retry poll with an exponential delay after each error\n for i in range(timeout):\n try:\n time.sleep(delay)\n bars = (\n report['exchange'].get_bars_in_period(\n report['symbol'], start,\n report['max_bin_size']))\n for bar in bars:\n bars_to_store.append(bar)\n stagger = 2\n start += step\n break\n except Exception as e:\n delay *= stagger\n if i == timeout - 1:\n raise Exception(\"Polling timeout.\")\n\n # finish with a single poll for final_poll_size number of bars\n for i in range(timeout):\n try:\n time.sleep(delay)\n final_bars = report['exchange'].get_bars_in_period(\n report['symbol'], start, final_poll_size)\n for bar in final_bars:\n bars_to_store.append(bar)\n stagger = 2\n break\n except Exception as e:\n # retry poll with an exponential delay after each error\n delay *= stagger\n if i == timeout - 1:\n raise Exception(\"Polling timeout.\")\n\n # store bars, count how many stores\n query = {\"symbol\": report['symbol']}\n doc_count_before = (\n self.db_collections[report[\n 'exchange'].get_name()].count_documents(query))\n for bar in bars_to_store:\n try:\n self.db_collections[report['exchange']].insert_one(\n bar, upsert=True)\n except pymongo.errors.DuplicateKeyError:\n continue # skip duplicates if they exist\n doc_count_after = (\n self.db_collections[report['exchange']].count_documents(query))\n doc_count = doc_count_after - doc_count_before\n self.logger.debug(\n \"backfill_bulk() saved \" + str(doc_count) + \" bars.\")\n return True\n return False",
"def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)",
"def initial_falling_objects(count, obj_class, container_1, container_2=None, container_3=None,\r\n min_y=-2000, max_y=-200, argument=None):\r\n for i in range(count):\r\n if argument is None:\r\n funcobj = obj_class()\r\n else:\r\n funcobj = obj_class(argument)\r\n funcobj.rect.x = random.randrange(0, 1024)\r\n funcobj.rect.y = random.randrange(min_y, max_y)\r\n container_1.add(funcobj)\r\n if container_2 is not None:\r\n container_2.add(funcobj)\r\n if container_3 is not None:\r\n container_3.add(funcobj)",
"def fill_artifacts_at_runtime(self, args):\n for j in self.jobs:\n j.fill_artifacts_at_runtime(args)",
"def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 3 blocks (4 records), start with this and get both since we used them\n # separately in other tests\n self.create_sample_data_set_dir(\"node59p1_step2.dat\", TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data((PhsenParserDataParticle,PhsenControlDataParticle),\n 'test_data_1-2.txt.result.yml', count=4)\n\n # This file has had a section of data replaced with 0s (14171-14675),\n # replacing PH1236501_01D6u51F11341_5D_E538\n self.create_sample_data_set_dir('node59p1_step3.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data(PhsenParserDataParticle, 'test_data_3.txt.result.yml',\n count=5)\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data \n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data(PhsenParserDataParticle, 'test_data_4.txt.result.yml',\n count=1)\n\n # start over now using step 4\n self.driver.stop_sampling()\n # Reset the driver with no memento\n self.driver = self._get_driver_object(memento=None)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\",\n copy_metadata=False)\n self.assert_data((PhsenParserDataParticle,PhsenControlDataParticle),\n 'test_data_1-4.txt.result.yml', count=10)",
"def fill_array(data, size, fill_value=numpy.nan, push_back=True):\n\n if push_back:\n return numpy.append(data, numpy.repeat(fill_value, size - data.size))\n\n return numpy.append(numpy.repeat(fill_value, size - data.size), data)",
"def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable))) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)",
"def fill(self, filler):\n\n for x in range(self.__xmax):\n for y in range(self.__ymax):\n self.__data[(x,y)] = filler(x,y) % self.mod",
"def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable)) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)",
"def _next_generation(self, ranks):\n replace = ranks[:int(self.population_size * self.culling)]\n for idx in replace:\n self.population[idx] = self._create_offspring()",
"def backfill_gaps(self, report):\n\n # sort timestamps into sequential bins (to reduce # of polls)\n if len(report['gaps']) != 0:\n bins = [\n list(g) for k, g in groupby(\n sorted(report['gaps']),\n key=lambda n, c=count(0, 60): n - next(c))]\n\n # if any bins > max_bin_size, split them into smaller bins.\n # takes the old list\n bins = self.split_oversize_bins(bins, report['max_bin_size'])\n\n delay = 1 # wait time before attmepting to re-poll after error\n stagger = 2 # delay co-efficient\n timeout = 10 # number of times to repoll before exception raised.\n\n # poll exchange REST endpoint for replacement bars\n bars_to_store = []\n for i in bins:\n try:\n bars = report['exchange'].get_bars_in_period(\n report['symbol'], i[0], len(i))\n for bar in bars:\n bars_to_store.append(bar)\n stagger = 2 # reset stagger to base after successful poll\n time.sleep(stagger + 1)\n except Exception as e:\n # retry polling with an exponential delay after each error\n for i in range(timeout):\n try:\n time.sleep(delay + 1)\n bars = report['exchange'].get_bars_in_period(\n report['symbol'], i[0], len(i))\n for bar in bars:\n bars_to_store.append(bar)\n stagger = 2\n break\n except Exception as e:\n delay *= stagger\n if i == timeout - 1:\n raise Exception(\"Polling timeout.\")\n\n # Sanity check, check that the retreived bars match gaps\n timestamps = [i['timestamp'] for i in bars_to_store]\n timestamps = sorted(timestamps)\n bars = sorted(report['gaps'])\n if timestamps == bars:\n query = {\"symbol\": report['symbol']}\n doc_count_before = (\n self.db_collections[report[\n 'exchange'].get_name()].count_documents(query))\n for bar in bars_to_store:\n try:\n self.db_collections[\n report['exchange'].get_name()].insert_one(bar)\n except pymongo.errors.DuplicateKeyError:\n # Skip duplicates that exist in DB.\n continue\n doc_count_after = (\n self.db_collections[report[\n 'exchange'].get_name()].count_documents(query))\n doc_count = doc_count_after - doc_count_before\n self.logger.debug(\n \"Saved \" + str(doc_count) + \" missing \" +\n report['symbol'] + \" bars.\")\n return True\n else:\n raise Exception(\n \"Fetched bars do not match missing timestamps.\")\n else:\n # Return false if there is no missing data.\n return False",
"def grow(self):\r\n # Double the physical size if no more room for items\r\n # and add the fillValue to the new cells in the underlying list\r\n for count in range(len(self)):\r\n self._items.append(self._fillValue)",
"def _fill_time(beats, shard, *args, **kwargs):\n total = 0\n for delay in shard(*args, **kwargs):\n total += delay.count\n yield delay\n yield Beat(beats - total)",
"def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]",
"def step1(ball_list, step,borders,obstacle=None):\n\n index_list = range(len(ball_list))\n for i in index_list:\n ball_list[i].compute_refl(step,borders,obstacle)\n for j in index_list:\n if i!=j:\n ball_list[i].compute_coll(ball_list[j],step)\n return ball_list",
"def fill(self):\n for _ in range(Pyro4.config.THREADPOOL_MINTHREADS):\n if not self.attemptSpawn():\n break",
"def backfill_revlist(buildername, revisions, times=1, dry_run=False):\n new_revisions_list = []\n repo_name = query_repo_name_from_buildername(buildername)\n LOG.info(\"We want to find a successful job for '%s' in this range: [%s:%s]\" %\n (buildername, revisions[0], revisions[-1]))\n for rev in revisions:\n jobs = query_jobs(repo_name, rev)\n matching_jobs = _matching_jobs(buildername, jobs)\n successful_jobs = _status_summary(matching_jobs)[0]\n if successful_jobs > 0:\n LOG.info(\"The last succesful job for buildername '%s' is on %s\" %\n (buildername, rev))\n # We don't need to look any further in the list of revisions\n break\n else:\n new_revisions_list.append(rev)\n\n LOG.info(\"We only need to backfill %s\" % new_revisions_list)\n return new_revisions_list",
"def manyBalls(self):\n self.action.transaction(self.cardUid, 5)\n self.start()",
"def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 2 blocks, start with this and get both since we used them\n # separately in other tests \n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step2.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n # This file has had a section of DO data replaced with 0s\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step3.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_3.txt.result.yml',\n count=3\n )\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_4.txt.result.yml',\n count=1\n )\n\n # start over now, using step 4\n self.driver.stop_sampling()\n\n # Reset the driver with no memento\n self.memento = None\n self.driver = MflmDOSTADDataSetDriver(\n self._driver_config()['startup_config'],\n self.memento,\n self.data_callback,\n self.state_callback,\n self.event_callback,\n self.exception_callback)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-4.txt.result.yml',\n count=7\n )",
"def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)",
"def rslices(n, allow_empty=False):\n for _ in range(5):\n yield rslice(n, allow_empty)"
] |
[
"0.5782486",
"0.574313",
"0.55496156",
"0.54927135",
"0.5488854",
"0.54863733",
"0.5440862",
"0.5438565",
"0.5412206",
"0.5406161",
"0.540015",
"0.5367641",
"0.53450596",
"0.53275067",
"0.5306449",
"0.53035223",
"0.5301051",
"0.5275734",
"0.5271671",
"0.5263534",
"0.52617574",
"0.52274233",
"0.52095765",
"0.52064556",
"0.5199364",
"0.5157008",
"0.5152645",
"0.51391476",
"0.512765",
"0.5119959"
] |
0.5933235
|
0
|
Compares saved folders list with the current one
|
def get_folders_diff(self, folders):
    missing_folders = list(set(self.saved_folders).difference(set(folders)))
    added_folders = list(set(folders).difference(set(self.saved_folders)))
    if any([missing_folders, added_folders]):
        self.saved_folders = folders
    return missing_folders, added_folders
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cmp_directories(self, dir_1='./', dir_2='./'):\n dirs_cmp = filecmp.dircmp(dir_1, dir_2)\n list_dirs_json = dict()\n path_in = self.make_path_in(dir_1, dir_2)\n\n equal_files_json = self.equal_files_to_json(\n dirs_cmp.same_files,\n dir_1,\n dir_2\n )\n\n diff_files_json = self.diff_files_to_json(\n dirs_cmp.diff_files,\n dir_1,\n dir_2\n )\n only_in_one_json = self.only_in_one_to_json(\n dir_1,\n dirs_cmp.left_only,\n dir_2,\n dirs_cmp.right_only\n )\n common_dirs_json = self.common_dirs_to_json(\n dirs_cmp.common_dirs,\n dir_1,\n dir_2\n )\n\n all_lists_json = json.loads(\n json.dumps(\n list(\n equal_files_json +\n diff_files_json +\n only_in_one_json +\n common_dirs_json\n ),\n sort_keys=True))\n if dirs_cmp.common_dirs:\n list_dirs_json = self.internal_directories_json(\n dir_1,\n dir_2,\n dirs_cmp.common_dirs\n )\n list_dirs_json.update(\n dict({path_in: self.directory_to_json(path_in, all_lists_json)})\n )\n\n return list_dirs_json",
"def syncfolder():",
"def compare_and_copy(dst, compared):\n (folder_1, folder_2) = compared\n contents_1 = os.listdir(folder_1)\n contents_2 = os.listdir(folder_2)\n diff = list(set(contents_1) - set(contents_2))\n for files in diff:\n file_name = os.path.join(folder_1, files)\n if os.path.isfile(file_name) and files not in os.listdir(dst):\n shutil.copy(file_name, dst)",
"def get_files_diff(self, current_files_data):\n if self.saved_files_data:\n saved_files_paths, saved_files_hashes = zip(*self.saved_files_data.items())\n else:\n saved_files_paths, saved_files_hashes = [], []\n if current_files_data:\n current_files_paths, current_files_hashes = zip(*current_files_data.items())\n else:\n current_files_paths, current_files_hashes = [], []\n\n missing_files_paths = list(set(saved_files_paths).difference(set(current_files_paths)))\n missing_files_hashes = [self.saved_files_data[path] for path in missing_files_paths]\n added_files_paths = list(set(current_files_paths).difference(set(saved_files_paths)))\n added_files_hashes = [current_files_data[path] for path in added_files_paths]\n\n # get moved files paths\n moved_files_hashes = list(set(missing_files_hashes).intersection(set(added_files_hashes)))\n moved_files_paths = [\n json.dumps({\n 'from': self.get_file_path_by_hash(self.saved_files_data, hash_),\n 'to': self.get_file_path_by_hash(current_files_data, hash_)\n }) for hash_ in moved_files_hashes\n ]\n\n # get missing files paths\n missing_files_paths = [ # remove \"moved\" files paths\n self.get_file_path_by_hash(self.saved_files_data, hash_)\n for hash_ in missing_files_hashes if hash_ not in moved_files_hashes\n ]\n\n # get added files paths\n added_files_paths = [ # remove \"moved\" files paths\n self.get_file_path_by_hash(current_files_data, hash_)\n for hash_ in added_files_hashes if hash_ not in moved_files_hashes\n ]\n\n # get edited files paths\n remained_files_paths = list(set(saved_files_paths).intersection(set(current_files_paths)))\n for file_path in remained_files_paths:\n if self.saved_files_data[file_path] != current_files_data[file_path]: # compare hashes\n missing_files_paths.append(file_path)\n added_files_paths.append(file_path)\n\n if any([missing_files_paths, added_files_paths, moved_files_paths]):\n self.saved_files_data = current_files_data\n\n return missing_files_paths, added_files_paths, moved_files_paths",
"def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))",
"def test_full(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n result = listdir(os.path.join(dummy_folder, 'memes'), full_path=False)\n self.assertEqual(sorted(result), sorted(need_result))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), full_path=True)\n self.assertEqual(sorted(result), sorted(need_result_new))",
"def check_folder_state(self):\n while self:\n diff = self.get_diff()\n print(diff or 'No changes detected')\n if diff:\n self.parent.send_diff_data(diff)\n time.sleep(1)",
"def compare_tree(self):\n result = []\n \n pathA = os.path.join(self.testpath,'A')\n pathB = os.path.join(self.testpath,'B')\n\n filesA = [os.path.relpath(f,pathA) for f in self.tree(pathA)]\n filesB = [os.path.relpath(f,pathB) for f in self.tree(pathB)]\n\n filesAB = set(filesA).union(filesB)\n for fileAB in sorted(list(filesAB)):\n\n fileA = os.path.join(self.testpath,'A',fileAB)\n fileB = os.path.join(self.testpath,'B',fileAB)\n try:\n fileAtxt = open(fileA).read()\n except IOError:\n result.append( ('missing_inA',fileAB) )\n continue\n \n try:\n fileBtxt = open(fileB).read()\n except IOError:\n result.append( ('missing_inB',fileAB) )\n continue\n\n if not fileAtxt == fileBtxt:\n result.append( ('disagree',fileAB))\n \n return result",
"def same_folders(src1, src2):\n dcmp = dircmp(src1, src2)\n if dcmp.left_only or dcmp.right_only:\n return False\n for sub_dcmp in dcmp.subdirs.values():\n same_folders(sub_dcmp.left, sub_dcmp.right)\n\n return True",
"def sync_dir(self):\n\n # mark the trajectories that we have seen\n trajectories = os.listdir(self.trajectory_dir)\n \n for trajectory_file in trajectories:\n\n if trajectory_file not in self.seen_trajectories:\n\n created = self.upload_trajectory(trajectory_file)\n self.seen_trajectories.add(trajectory_file)\n\n if created is True:\n print \"Total of %s solved trajectories\" % \\\n SolvedTrajectory.objects.count(), created",
"def test_list_root(self):\n expected = [\"search1\", \"search2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}\".format(self.search.instance))]\n self.assertEqual(result, expected)",
"def files_in_folder(self):\n non_til = set()\n filesInFolder = []\n for f in self.find_all_files():\n newstr = f.replace(\"~\", \"\") \n if newstr in self.find_all_files():\n non_til.add(newstr)\n for fs in non_til:\n filesInFolder.append(fs)\n return filesInFolder",
"def diff_files(self):\n pdup = []\n # Print out files that are only found in the DB\n if self.comparison_info['dbonly']:\n print(\"Files only found in the database --------- \")\n for fname in sorted(self.comparison_info['dbonly']):\n fdb = self.files_from_db[fname]\n print(f\"\\t{fdb['path']}/{fname}\")\n\n # print out files that are only found on disk\n if self.comparison_info['diskonly']:\n print(\"\\nFiles only found on disk --------- \")\n for fname in sorted(self.comparison_info['diskonly']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fdisk['relpath']}/{fname}{addon}\")\n if self.comparison_info['pathdup']:\n print(\"\\n The following files had multiple paths on disk (path filesize):\")\n listing = {}\n for fname in self.comparison_info['pathdup']:\n pdup.append(fname)\n listing[self.comparison_info['pathdup']['relpath']] = self.comparison_info['pathdup']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different paths on disk and in the DB\n if self.comparison_info['path']:\n print(\"\\nPath mismatch (file name, db path, disk path) --------- \")\n for fname in sorted(self.comparison_info['path']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname}\\t{fdb['path']}\\t{fdisk['relpath']}{addon}\")\n if self.comparison_info['duplicates']:\n print(\" The following files have multiple disk paths on disk (path filesize):\")\n for fname in self.comparison_info['duplicates']:\n pdup.append(fname)\n listing[self.comparison_info['duplicates']['relpath']] = self.comparison_info['duplicates']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different file sizes on disk and in the DB\n if self.comparison_info['filesize']:\n print(\"\\nFilesize mismatch (File name, size in DB, size on disk) --------- \")\n for fname in sorted(self.comparison_info['filesize']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['filesize']} {fdisk['filesize']}\")\n\n # Print files that have different md5sum on disk and in DB\n if self.md5sum and 'md5sum' in self.comparison_info and self.comparison_info['md5sum']:\n print(\"\\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- \")\n for fname in sorted(self.comparison_info['md5sum']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['md5sum']} {fdisk['md5sum']}\")\n\n # Print out files that have multiple paths on disk\n if len(self.duplicates) > len(pdup):\n print(\"\\nThe following files have multiple disk paths on disk (path filesize):\")\n for dup in sorted(self.duplicates):\n if dup not in pdup:\n listing = {}\n for fls in self.duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_db 
and self.files_from_db[dup]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")\n\n # Print out files that have multiple endtries in the DB\n if self.db_duplicates:\n print(\"\\nThe following files have multiple entries in the database (path filesize):\")\n for dup in sorted(self.db_duplicates):\n listing = {}\n for fls in self.db_duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_disk and self.files_from_disk[dup]['path'] == pth:\n addon = \" (Disk Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")",
"def compare(self, name):\n result = []\n for i in (os.listdir(self.path)):\n if i != name:\n comparison = filecmp.cmp(self.path.__add__(i), self.path.__add__(name), shallow=False)\n if comparison:\n result.append(i)\n return result",
"def _find_changes(self):\n added = set()\n modified = set()\n existing_files = set()\n for dirpath_str, _, filenames in walk(str(self.path)):\n dirpath = Path(dirpath_str)\n for filename in filenames:\n if filename == DB_FILENAME:\n continue\n abs_filename = (dirpath / filename).absolute()\n if abs_filename in self.entries:\n entry = self.entries[abs_filename]\n existing_files.add(entry)\n st = lstat(str(abs_filename))\n if entry != st:\n modified.add(entry)\n else:\n try:\n entry = HashEntry(abs_filename)\n entry.update_attrs()\n added.add(entry)\n except FileNotFoundError:\n # If file was removed between listing and processing,\n # just treat it as if it never existed\n # We have nothing to compare it to anyway\n pass\n removed = set(self.entries.values()) - existing_files\n return added, removed, modified",
"def diffInLocalFiles():\n\taddedFiles = listdir(globals.LOCAL_SHARED_FILE_SPACE)\t#aka current files\n\tremovedFiles = globals.LOCAL_FILE_LIST\t\t\t\t\t#aka previously recorded files\n\t#TODO: this can be a lot more efficient\n\t\n\t#record files that appear in both lists\n\tcommonFiles = []\n\tfor file in removedFiles:\n\t\tif file in addedFiles:\n\t\t\tcommonFiles.append(file)\n\t\t\t\n\t#remove files that appear in both lists\n\tfor file in commonFiles:\n\t\taddedFiles.remove(file)\n\t\tremovedFiles.remove(file)\n\t\t\n\t#The files remaining in the respective list were either recently added or removed\n\tmessages = []\n\tfor file in removedFiles:\n\t\tmessages.append((globals.REMOVE_FILE, file))\t#these files not longer exist\n\tfor file in addedFiles:\n\t\tmessages.append((globals.ADD_FILE, file))\t\t#these files have been recently added\n\n\t#redefine list of local files\n\tglobals.LOCAL_FILE_LIST = listdir(globals.LOCAL_SHARED_FILE_SPACE)\n\treturn messages",
"def compare_files(self):\n\n first_backup_ids = set(self.first_source_data.keys())\n second_backup_ids = set(self.second_source_data.keys())\n\n for deleted_user_id in first_backup_ids.difference(second_backup_ids):\n self.changes[Constants.DELETED_USER]\\\n .append({\n Constants.ID: deleted_user_id,\n Constants.USER_TYPE: self.first_source_data[deleted_user_id][Constants.USER_TYPE]\n })\n\n for added_user_id in second_backup_ids.difference(first_backup_ids):\n self.changes[Constants.ADDED_USER]\\\n .append({\n Constants.ID: added_user_id,\n Constants.USER_TYPE: self.second_source_data[added_user_id][Constants.USER_TYPE]\n })\n\n for id in first_backup_ids.intersection(second_backup_ids):\n self.compare_id(id)",
"def generate_test_file_list(self):\n if self.test_list_checked:\n return self.test_list_cached\n\n final_test_list = self.__generate_test_file_list()\n\n self.test_list_cached = final_test_list\n self.test_list_checked = True\n return self.test_list_cached",
"def updateDiskFileList(self):\n\n if self.m_curPath:\n # Get me just the files please.\n for _, _, files in os.walk(self.m_curPath):\n break\n else:\n files = []\n\n files.sort()\n if files != self.m_diskNames:\n self.m_diskNames[:] = files\n self.m_newNames[:] = []\n\n self.populateFileList()",
"def load_folders(self):\n # Trying to store the folder hierarchy in the name...\n self.folder0 = Folder.objects.get(id=1)\n self.folder00 = Folder.objects.get(id=2)\n self.folder000 = Folder.objects.get(id=3)\n self.folder0000 = Folder.objects.get(id=4)",
"def test_filecompare(self):\n cmp = filecmp.dircmp(self.root_gold, self.root_target, ignore=[])\n self.recursive_dircmp(cmp)",
"def get_folder_list(self, folders: List[str]) -> List[str]:\n if self.current_folder != Path(self.current_folder.parts[0]):\n return [self.PARENT, *(sorted(folders))]\n return sorted(folders)",
"def check_appropriate_dirs(self, dirs):\n\n add_up = []\n\n for d in dirs:\n path = join(self.base_dir, d)\n files = [f for f in listdir(path) if isfile(join(path, f))]\n rcts = [f for f in files if f.startswith(self.reactant_pre) and f.endswith(\".mol\")]\n pros = [f for f in files if f.startswith(self.product_pre) and f.endswith(\".mol\")]\n\n rct_mols = [get_molecule(join(self.base_dir, d, r)) for r in rcts]\n pro_mols = [get_molecule(join(self.base_dir, d, p)) for p in pros]\n\n total_pro_length = sum([len(p) for p in pro_mols])\n total_rct_length = sum([len(r) for r in rct_mols])\n\n if total_pro_length == total_rct_length:\n add_up.append(d)\n\n return add_up",
"def previous_saves(self):\n if os.path.exists(self.results_dir):\n return sorted([x for x in Path(self.results_dir).glob(f'{self.model_name}checkpoint_*.pk')], key=lambda s: int(s.name.replace(f'{self.model_name}checkpoint_', '').replace('.pk', '')))\n else:\n return []",
"def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)",
"def test_list_kml_returns_kmls(self):\n with self.settings(MIDDLEWARE_CLASSES=self.fix_middleware(), KML_FILE_DIR=self.kml_file_dir):\n user = StaffUserFactory()\n ldv = self.initiate_view(user)\n base_path = settings.KML_FILE_DIR\n ldv.cache_dir_content(base_path)\n dirs = ldv.list_files()\n self.assertGreaterEqual(len(dirs), 1)\n for dir_name in dirs:\n dir_path = os.path.join(base_path, dir_name)\n self.assertTrue(os.path.isfile(dir_path))",
"def test_sharedstatedir(self):\n self.chck_triple('sharedstatedir')",
"def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)",
"def test_equality_function(self):\r\n self.assertFalse(directories_equal(self.version1_nodrafts, self.version0_nodrafts))\r\n self.assertFalse(directories_equal(self.version1_drafts_extra_branch, self.version1_drafts))",
"def find_benchmark_directories(self):\n for (benchmark, producer), result in self.results.items():\n pattern = benchmark + '_' + producer + '*'\n files = find_directory(pattern, self.root_folder)\n if files:\n # add just the latest one\n sorted_files = sorted(files)\n result.directory = sorted_files[-1]\n else:\n print('No benchmark directories found in ' + self.root_folder)"
] |
[
"0.6123963",
"0.6063844",
"0.6025782",
"0.6014728",
"0.5925235",
"0.5914503",
"0.5839932",
"0.5811035",
"0.57636356",
"0.57502395",
"0.5744658",
"0.5716511",
"0.56534886",
"0.5638854",
"0.56171244",
"0.5615707",
"0.5542048",
"0.5539833",
"0.55364084",
"0.55273443",
"0.55254096",
"0.5517578",
"0.5479084",
"0.5475699",
"0.5464317",
"0.5460631",
"0.54539317",
"0.5449862",
"0.5442013",
"0.54359597"
] |
0.6400781
|
0
|
Compares saved files data with the current files data
|
def get_files_diff(self, current_files_data):
if self.saved_files_data:
saved_files_paths, saved_files_hashes = zip(*self.saved_files_data.items())
else:
saved_files_paths, saved_files_hashes = [], []
if current_files_data:
current_files_paths, current_files_hashes = zip(*current_files_data.items())
else:
current_files_paths, current_files_hashes = [], []
missing_files_paths = list(set(saved_files_paths).difference(set(current_files_paths)))
missing_files_hashes = [self.saved_files_data[path] for path in missing_files_paths]
added_files_paths = list(set(current_files_paths).difference(set(saved_files_paths)))
added_files_hashes = [current_files_data[path] for path in added_files_paths]
# get moved files paths
moved_files_hashes = list(set(missing_files_hashes).intersection(set(added_files_hashes)))
moved_files_paths = [
json.dumps({
'from': self.get_file_path_by_hash(self.saved_files_data, hash_),
'to': self.get_file_path_by_hash(current_files_data, hash_)
}) for hash_ in moved_files_hashes
]
# get missing files paths
missing_files_paths = [ # remove "moved" files paths
self.get_file_path_by_hash(self.saved_files_data, hash_)
for hash_ in missing_files_hashes if hash_ not in moved_files_hashes
]
# get added files paths
added_files_paths = [ # remove "moved" files paths
self.get_file_path_by_hash(current_files_data, hash_)
for hash_ in added_files_hashes if hash_ not in moved_files_hashes
]
# get edited files paths
remained_files_paths = list(set(saved_files_paths).intersection(set(current_files_paths)))
for file_path in remained_files_paths:
if self.saved_files_data[file_path] != current_files_data[file_path]: # compare hashes
missing_files_paths.append(file_path)
added_files_paths.append(file_path)
if any([missing_files_paths, added_files_paths, moved_files_paths]):
self.saved_files_data = current_files_data
return missing_files_paths, added_files_paths, moved_files_paths
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compare_files(self):\n\n first_backup_ids = set(self.first_source_data.keys())\n second_backup_ids = set(self.second_source_data.keys())\n\n for deleted_user_id in first_backup_ids.difference(second_backup_ids):\n self.changes[Constants.DELETED_USER]\\\n .append({\n Constants.ID: deleted_user_id,\n Constants.USER_TYPE: self.first_source_data[deleted_user_id][Constants.USER_TYPE]\n })\n\n for added_user_id in second_backup_ids.difference(first_backup_ids):\n self.changes[Constants.ADDED_USER]\\\n .append({\n Constants.ID: added_user_id,\n Constants.USER_TYPE: self.second_source_data[added_user_id][Constants.USER_TYPE]\n })\n\n for id in first_backup_ids.intersection(second_backup_ids):\n self.compare_id(id)",
"def _compare_files(self, first_file, second_file):\n\n self.log.info('-' * 80)\n self.log.info('Compare files')\n\n code, out = cmd_exec(['cmp', str(first_file), str(second_file)], shell=False, log=self.log)\n if code:\n self.log.warning('md5 checksum IS NOT SAME with ffmpeg sw decode')\n self.log.warning(out)\n return False\n\n self.log.info('md5 checksum IS SAME with ffmpeg sw decode')\n return True",
"def test_verify_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Test verify for the file with --compare-data\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])",
"def load_files_to_compare(self):\n self.first_source_data = load_path(self.path1)\n self.second_source_data = load_path(self.path2)",
"def diff_files(self):\n pdup = []\n # Print out files that are only found in the DB\n if self.comparison_info['dbonly']:\n print(\"Files only found in the database --------- \")\n for fname in sorted(self.comparison_info['dbonly']):\n fdb = self.files_from_db[fname]\n print(f\"\\t{fdb['path']}/{fname}\")\n\n # print out files that are only found on disk\n if self.comparison_info['diskonly']:\n print(\"\\nFiles only found on disk --------- \")\n for fname in sorted(self.comparison_info['diskonly']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fdisk['relpath']}/{fname}{addon}\")\n if self.comparison_info['pathdup']:\n print(\"\\n The following files had multiple paths on disk (path filesize):\")\n listing = {}\n for fname in self.comparison_info['pathdup']:\n pdup.append(fname)\n listing[self.comparison_info['pathdup']['relpath']] = self.comparison_info['pathdup']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different paths on disk and in the DB\n if self.comparison_info['path']:\n print(\"\\nPath mismatch (file name, db path, disk path) --------- \")\n for fname in sorted(self.comparison_info['path']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname}\\t{fdb['path']}\\t{fdisk['relpath']}{addon}\")\n if self.comparison_info['duplicates']:\n print(\" The following files have multiple disk paths on disk (path filesize):\")\n for fname in self.comparison_info['duplicates']:\n pdup.append(fname)\n listing[self.comparison_info['duplicates']['relpath']] = self.comparison_info['duplicates']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different file sizes on disk and in the DB\n if self.comparison_info['filesize']:\n print(\"\\nFilesize mismatch (File name, size in DB, size on disk) --------- \")\n for fname in sorted(self.comparison_info['filesize']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['filesize']} {fdisk['filesize']}\")\n\n # Print files that have different md5sum on disk and in DB\n if self.md5sum and 'md5sum' in self.comparison_info and self.comparison_info['md5sum']:\n print(\"\\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- \")\n for fname in sorted(self.comparison_info['md5sum']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['md5sum']} {fdisk['md5sum']}\")\n\n # Print out files that have multiple paths on disk\n if len(self.duplicates) > len(pdup):\n print(\"\\nThe following files have multiple disk paths on disk (path filesize):\")\n for dup in sorted(self.duplicates):\n if dup not in pdup:\n listing = {}\n for fls in self.duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_db 
and self.files_from_db[dup]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")\n\n # Print out files that have multiple endtries in the DB\n if self.db_duplicates:\n print(\"\\nThe following files have multiple entries in the database (path filesize):\")\n for dup in sorted(self.db_duplicates):\n listing = {}\n for fls in self.db_duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_disk and self.files_from_disk[dup]['path'] == pth:\n addon = \" (Disk Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")",
"def compare(self):\n self.success = True\n\n # evaluate if comparison should be made\n if not self.make_comparison:\n return\n\n msgall = \"\"\n msg = sfmt.format(\"Comparison test\", self.name)\n print(msg)\n\n if self.action is not None:\n cpth = os.path.join(self.simpath, self.action)\n files_cmp = None\n if self.action.lower() == \"compare\":\n files_cmp = []\n files = os.listdir(cpth)\n for file in files:\n files_cmp.append(file)\n elif \"mf6\" in self.action:\n fpth = os.path.join(cpth, \"mfsim.nam\")\n cinp, self.coutp = get_mf6_files(fpth)\n\n head_extensions = (\n \"hds\",\n \"hed\",\n \"bhd\",\n \"ahd\",\n \"bin\",\n )\n if \"mf6_regression\" in self.action:\n success, msgall = self._compare_heads(\n msgall,\n extensions=head_extensions,\n )\n if not success:\n self.success = False\n # non-regression runs - for new features\n else:\n files1 = []\n files2 = []\n exfiles = []\n ipos = 0\n for file1 in self.outp:\n ext = os.path.splitext(file1)[1][1:]\n\n if ext.lower() in head_extensions:\n\n # simulation file\n pth = os.path.join(self.simpath, file1)\n files1.append(pth)\n\n # look for an exclusion file\n pth = os.path.join(self.simpath, file1 + \".ex\")\n if os.path.isfile(pth):\n exfiles.append(pth)\n else:\n exfiles.append(None)\n\n # Check to see if there is a corresponding compare file\n if files_cmp is not None:\n\n if file1 + \".cmp\" in files_cmp:\n # compare file\n idx = files_cmp.index(file1 + \".cmp\")\n pth = os.path.join(cpth, files_cmp[idx])\n files2.append(pth)\n txt = sfmt.format(\n f\"Comparison file {ipos + 1}\",\n os.path.basename(pth),\n )\n print(txt)\n else:\n if self.coutp is not None:\n for file2 in self.coutp:\n ext = os.path.splitext(file2)[1][1:]\n\n if ext.lower() in head_extensions:\n # simulation file\n pth = os.path.join(cpth, file2)\n files2.append(pth)\n\n else:\n files2.append(None)\n\n if self.nam_cmp is None:\n pth = None\n else:\n pth = os.path.join(cpth, self.nam_cmp)\n\n for ipos in range(len(files1)):\n file1 = files1[ipos]\n ext = os.path.splitext(file1)[1][1:].lower()\n outfile = os.path.splitext(os.path.basename(file1))[0]\n outfile = os.path.join(\n self.simpath, outfile + \".\" + ext + \".cmp.out\"\n )\n if files2 is None:\n file2 = None\n else:\n file2 = files2[ipos]\n\n # set exfile\n exfile = None\n if file2 is None:\n if len(exfiles) > 0:\n exfile = exfiles[ipos]\n if exfile is not None:\n txt = sfmt.format(\n f\"Exclusion file {ipos + 1}\",\n os.path.basename(exfile),\n )\n print(txt)\n\n # make comparison\n success_tst = compare_heads(\n None,\n pth,\n precision=\"double\",\n text=extdict[ext],\n outfile=outfile,\n files1=file1,\n files2=file2,\n htol=self.htol,\n difftol=True,\n # Change to true to have list of all nodes exceeding htol\n verbose=self.cmp_verbose,\n exfile=exfile,\n )\n msg = sfmt.format(\n f\"{extdict[ext]} comparison {ipos + 1}\",\n self.name,\n )\n print(msg)\n\n if not success_tst:\n self.success = False\n msgall += msg + \" ... FAILED\\n\"\n\n # compare concentrations\n if \"mf6_regression\" in self.action:\n success, msgall = self._compare_concentrations(msgall)\n if not success:\n self.success = False\n\n # compare cbc files\n if \"mf6_regression\" in self.action:\n cbc_extensions = (\n \"cbc\",\n \"bud\",\n )\n success, msgall = self._compare_budgets(\n msgall, extensions=cbc_extensions\n )\n if not success:\n self.success = False\n\n assert self.success, msgall\n return",
"def cmp(f1, f2):\n with open(f1) as f1, open(f2) as f2:\n return f1.read() == f2.read()",
"def fileCompare(a, b):\n if a[\"file_run\"] > b[\"file_run\"]:\n return 1\n elif a[\"file_run\"] == b[\"file_run\"]:\n if a[\"file_lumi\"] > b[\"file_lumi\"]:\n return 1\n elif a[\"file_lumi\"] == b[\"file_lumi\"]:\n if a[\"file_first_event\"] > b[\"file_first_event\"]:\n return 1\n if a[\"file_first_event\"] == b[\"file_first_event\"]:\n return 0\n\n return -1",
"def load_files(file1, file2):\n\n f = open(\"results/\" + file1, \"r\")\n results = json.loads(f.read())\n f.close()\n\n f = open(\"results/\" + file2, \"r\")\n previous_results = json.loads(f.read())\n f.close()\n\n compare(results, previous_results)",
"def same_file(wavecar1, wavecar2, wavecar3):\n same = False\n if (filecmp.cmp(wavecar1, wavecar2, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar2))\n same = True\n if (filecmp.cmp(wavecar1, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar3))\n same = True\n if (filecmp.cmp(wavecar2, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar2, wavecar3))\n same = True\n\n if same:\n print(\"It seems that you are using same files to do finite difference, exit\")\n print(\"\\tComment the 'same_file' checker if you know what you are doing\")\n raise SystemExit",
"def compare_db(compressed=True):\r\n #wipe demand file\r\n with open(\"{}DEMAND.txt\".format(db_folder_path), \"w\", encoding=\"utf8\") as demand_file:\r\n demand_file.write(str([]))\r\n #взять все файлы, которые есть в папке с дб,\r\n #и всем сделать compare_data, по их порядку создания\r\n if compressed:\r\n _db_files = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json.gz'.format(db_folder_path)), key=os.path.getctime)\r\n else:\r\n _db_files = sorted(glob.iglob('{}\\\\jsons\\\\DB_*.json'.format(db_folder_path)), key=os.path.getctime)\r\n #create temporary lists, that will hold db data\r\n _older_vend_data = []\r\n _newer_vend_data = []\r\n #\r\n _older_buy_data = []\r\n _newer_buy_data = []\r\n #iterate through all dbs\r\n for _n in range(len(_db_files)-1):\r\n print('comparing {} and {} db out of {}'.format(_n+1, _n+2, len(_db_files)))\r\n\r\n #get data from dbs\r\n if _older_vend_data == [] and _older_buy_data == []:\r\n #take older file\r\n get_vend_data(_db_files[_n], _older_vend_data, _older_buy_data)\r\n #compare it with newer file (+1)\r\n get_vend_data(_db_files[_n+1], _newer_vend_data, _newer_buy_data)\r\n #compare 2 datas and fill demand file\r\n compare_data(_older_vend_data, _newer_vend_data)\r\n compare_data(_older_buy_data, _newer_buy_data)\r\n #saving data that is already compared, and making it count as old one\r\n _older_vend_data = _newer_vend_data\r\n _older_buy_data = _newer_buy_data\r\n #wipe new data for next cycle\r\n _newer_vend_data = []\r\n _newer_buy_data = []",
"def compare_files(file1, file2):\n return filecmp.cmp(file1, file2)",
"def __compare_files(self, filename1, filename2):\n self.assertTrue(os.path.isfile(filename1))\n self.assertTrue(os.path.isfile(filename2))\n self.assertEqual(os.path.getsize(filename1), os.path.getsize(filename2))\n with open(filename1, \"rb\") as f1:\n with open(filename2, \"rb\") as f2:\n n_blocks = int(self.args.size) // self.max_block_size\n for i in range(n_blocks):\n self.assertEqual(f1.read(self.max_block_size), \\\n f2.read(self.max_block_size))\n remaining = int(self.args.size) % self.max_block_size\n if remaining > 0:\n self.assertEqual(f1.read(remaining), \\\n f2.read(remaining))",
"def compare_predictions():\n validation_labels = np.array(pd.read_csv(val_true_labels_dir + dataset_version + 'validation_labels.csv', index_col=0))\n validation_labels = np.reshape(validation_labels, (-1))\n\n diff_between_files = []\n also1s = []\n also2s = []\n for filename1 in os.listdir(val_predictions_dir):\n if filename1.endswith(\".csv\"):\n for filename2 in os.listdir(val_predictions_dir):\n if filename2.endswith(\".csv\"):\n if filename1 < filename2:\n wrong1 = 0\n wrong2 = 0\n diff_between = 0\n also1 = 0\n also2 = 0\n diff_corr1 = 0\n diff_corr2 = 0\n f1 = np.array(pd.read_csv(val_predictions_dir + filename1, index_col=0))\n f1 = np.reshape(f1, (-1))\n f2 = np.array(pd.read_csv(val_predictions_dir + filename2, index_col=0))\n f2 = np.reshape(f2, (-1))\n for line in range(f1.shape[0]):\n if f1[line] != validation_labels[line]:\n wrong1 += 1\n if f2[line] != validation_labels[line]:\n wrong2 += 1\n if f1[line] != f2[line]:\n diff_between += 1\n if f1[line] == validation_labels[line]:\n diff_corr1 += 1\n if f2[line] == validation_labels[line]:\n diff_corr2 += 1\n if f1[line] != validation_labels[line]:\n if f2[line] != validation_labels[line]:\n also2 += 1\n if f2[line] != validation_labels[line]:\n if f1[line] != validation_labels[line]:\n also1 += 1\n\n diff_between_files.append(diff_between)\n print(filename1)\n print('Wrongly predicted by 1: ' + str(100 * wrong1 / f1.shape[0]) + '%')\n print(filename2)\n print('Wrongly predicted by 2: ' + str(100 * wrong2 / f1.shape[0]) + '%')\n print()\n print('Differences between files: ' + str(100 * diff_between / f1.shape[0]) + '%')\n print(f'\\t of which correct by 1 {100 * diff_corr1 / diff_between}%, by 2 {100 * diff_corr2 / diff_between}%')\n also1s.append(also1 / wrong2)\n also2s.append(also2 / wrong1)\n print('Wrongly predicted by other among wrong ones: ' + str(100 * also2 / wrong1) + '%, ' + str(\n 100 * also1 / wrong2) + '%\\n\\n\\n')\n\n print('Max, min and avg differences between files:')\n print(str(100 * max(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * min(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * np.mean(diff_between_files) / validation_labels.shape[0]) + '%')\n\n print('\\nWrongly predicted by first that were also wrongly predicted by second:')\n print('Max: ' + str(100 * max(also2s)) + '%')\n print('Min: ' + str(100 * min(also2s)) + '%')\n print('Avg: ' + str(100 * np.mean(also2s)) + '%')\n\n print('\\nWrongly predicted by second that were also wrongly predicted by first:')\n print('Max: ' + str(100 * max(also1s)) + '%')\n print('Min: ' + str(100 * min(also1s)) + '%')\n print('Avg: ' + str(100 * np.mean(also1s)) + '%')",
"def checkFiles(self): \r\n mdate_filenames_list = []\r\n mdate_filenames_tuple = {}\r\n last24 = []\r\n now = datetime.datetime.now() \r\n noise,ft = file_type.split('.')\r\n ## note can do an entry bg color stoplight thing >24 hrs = red, 12-24 hrs = yellow < 12 = green nice little if loop\r\n for f in filenames_list:\r\n if os.path.isfile(f):\r\n lastmod_date = datetime.datetime.fromtimestamp(os.path.getmtime(f))\r\n mdate_filenames_tuple = lastmod_date, f\r\n mdate_filenames_list.append(mdate_filenames_tuple)\r\n \r\n if now - lastmod_date < file_age:\r\n \r\n #print (\"{} was last modified on {:%a %b %d %Y, %H:%M:%S, %Z}. Moving to 'destinaiton' transfer folder.\".format(f, lastmod_date))\r\n last24.append(f)\r\n shutil.copy2(f, destination)\r\n xferTime=time.time()\r\n \r\n fa = str(file_age) \r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n c.execute(\"INSERT INTO tbl_lastRun(col_timestamp, col_source, col_destination, col_file_type, col_file_age) VALUES (?,?,?,?,?)\",(xferTime, source, destination, ft, hrs))\r\n connection.commit()\r\n connection.close \r\n\r\n clear(self)\r\n ask_quit(self)",
"def compare_contents(lhs, rhs):\n for filename in (lhs, rhs):\n if not os.path.exists(filename):\n return False\n\n with open(lhs, \"r\") as lhs_file, open(rhs, \"r\") as rhs_file:\n return lhs_file.read() == rhs_file.read()",
"def cmpfile(file_left, file_right):\n nobv.visual_comparefile(file_left, file_right)",
"def compare_prediction(self, answer_dir, out_dir, filenames):\n from astropy.io import ascii\n\n for fn in filenames:\n new_fn = out_dir / fn\n old_fn = answer_dir / fn\n new_data = ascii.read(new_fn).as_array()\n old_data = ascii.read(old_fn).as_array()\n # Compare test run data to gold standard. Since we're loading from\n # ASCII text files here, floating-point comparisons will be different\n # at machine precision, others will be exact.\n for k, dt in new_data.dtype.descr:\n if \"f\" in dt:\n exception_catcher(\n assert_allclose,\n new_data[k],\n old_data[k],\n f\"Prediction arrays for {k}\",\n rtol=1.0e-5,\n )\n else:\n exception_catcher(\n assert_array_equal,\n new_data[k],\n old_data[k],\n f\"Prediction arrays for {k}\",\n )",
"def checkFiles(self):\n if self.user[\"Save\"] != \"\":\n self.of_exist = os.path.exists(os.path.join(self.user[\"Save\"], \"Of\"))\n self.back_of_exist = os.path.exists(\n os.path.join(self.user[\"Save\"], \"Back_Of\")\n )\n self.img_exist = os.path.exists(os.path.join(self.user[\"Save\"], \"Images\"))\n self.depth_exist = os.path.exists(os.path.join(self.user[\"Save\"], \"Depth\"))\n\n self.object_detection_dir_exist = os.path.exists(\n os.path.join(self.user[\"Save\"], \"ObjectDetection\")\n )\n\n self.gt_exist = self.user[\"GT\"] != \"\"\n\n self.create_super_pixel_label = (\n self.super_pixel_method != \"\"\n and not os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n )\n\n self.ui.c_crash_plot_video.setEnabled(self.ui.c_crash_plot.isChecked())\n self.ui.t_low.setEnabled(not self.ui.c_optimize.isChecked())\n self.ui.t_high.setEnabled(not self.ui.c_optimize.isChecked())\n self.ui.c_optimize.setEnabled(self.gt_exist)\n self.ui.c_error_plot.setEnabled(self.gt_exist)\n self.ui.c_error_plot_video.setEnabled(self.ui.c_error_plot.isChecked())\n self.ui.c_speed_plot_video.setEnabled(self.ui.c_speed_plot.isChecked())\n self.ui.c_super_pixel_video.setEnabled(\n self.ui.combo_superpixel.currentIndex() != 0\n )\n self.ui.c_csv.setEnabled(self.ui.c_error_plot.isChecked())\n\n if self.runRequirements():\n self.ui.b_run.setEnabled(True)\n else:\n self.ui.b_run.setEnabled(False)",
"def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result",
"def artist_comparisons():\n file_names = os.listdir(\"lyrics_files\")\n songs_dict = {song_file[:-8]: pickle.load(open(\"lyrics_files/\" + song_file, 'rb')) for song_file in file_names} # filenames end with _songs.p, so we use -8 to delete that\n artists = songs_dict.keys()\n output_dict = {}\n artist_pairs = []\n print \"Comparing artists\"\n for i in xrange(0, len(artists) - 1):\n for j in xrange(i + 1, len(artists)):\n artist_pairs.append((artists[i], artists[j]))\n for pair in artist_pairs:\n print pair\n output_dict[pair] = compare_artists(pair[0], pair[1], songs_dict)\n print output_dict[pair] \n pickle.dump(output_dict, open(\"artist_comparisons.p\", \"wb\"))\n print \"Pickled artist comparisons\"",
"def diff_it(self):\n data = {}\n differ = difflib.Differ()\n first = self.first.data.keys()\n first.sort()\n second = self.second.data.keys()\n second.sort()\n # Save data differently if file was measured only in first, only\n # in second or in both reports\n for item in differ.compare(first, second):\n fil = item[2:]\n mode = item[:1]\n if mode == ' ':\n data[fil] = [self.first.data[fil], self.second.data[fil]]\n elif mode == '+':\n data[fil] = [None, self.second.data[fil]]\n elif mode == '-':\n data[fil] = [self.first.data[fil], None]\n return collections.OrderedDict(sorted(data.items()))",
"def fileCmp (working, ref, compare_content=0, verbose=0):\n\tif verbose and working and ref:\n\t\tprint \"fileCmp\\n\\t working: %s\\n\\tref: %s\" % (\n\t\t\tworking.path or \"no working path\", \n\t\t\tref.path or \"no reference path\")\n\t\t\n\tflag = \"UNASSIGNED\"\n\tdebugging = 0\n\t\n\tif ref and not working:\n\t\tflag = \"missing\"\n\t\n\telif not ref: # or not os.path.exists(ref.path):\n\t\tflag = \"new\"\n\t\t\n\telif isinstance (working, JloFile):\n\t\t# print \"ref: %s\" % ref.__class__.__name__\n\t\tif debugging:\n\t\t\tif not working.equals (ref):\n\t\t\t\tprint \"working file is different\"\n\t\t\t\t\n\t\t\tif not working.newerthan (ref):\n\t\t\t\tprint \"working file has same date as ref\"\n\t\t\n\t\t\tif working.modtime == ref.modtime:\n\t\t\t\tprint \"mods dates match\"\n\t\t\telse:\n\t\t\t\t# print \"wrk: %d ref: %d\" % (working.modtime,ref.modtime)\n\t\t\t\tprint \"wrk: %s ref: %s\" % \\\n\t\t\t\t\t(working.ppDate (working.modtime),\n\t\t\t\t\t working.ppDate (ref.modtime))\n\t\t\n\t\tif compare_content:\n\t\t\tif working.equals (ref):\n\t\t\t\tflag = \"\"\n\t\t\telse:\n\t\t\t\tflag = \"modified\"\n\t\t\t\t\n\t\telse:\n\t\t\tflag = \"\"\n\n\t\t\t\n\t\t\t\n\t\t\t# elif not working.newerthan (ref):\n\t\t\t# flag = \"obsolete-check\"\n\t\t# elif working.newerthan (ref) and not working.equals (ref):\n\t\t\t# flag = \"modified\"\n\t\t# elif not working.equals (ref):\n\t\t\t# print \"not modified\"\n\t\t\t# flag = \"different\"\n\t\t# elif working.newerthan (ref):\n\t\t\t# flag = \"modified\"\n\tif verbose and working:\n\t\tprint \"%s --> %s\" % (working.name, flag)\n\treturn flag",
"def diffInLocalFiles():\n\taddedFiles = listdir(globals.LOCAL_SHARED_FILE_SPACE)\t#aka current files\n\tremovedFiles = globals.LOCAL_FILE_LIST\t\t\t\t\t#aka previously recorded files\n\t#TODO: this can be a lot more efficient\n\t\n\t#record files that appear in both lists\n\tcommonFiles = []\n\tfor file in removedFiles:\n\t\tif file in addedFiles:\n\t\t\tcommonFiles.append(file)\n\t\t\t\n\t#remove files that appear in both lists\n\tfor file in commonFiles:\n\t\taddedFiles.remove(file)\n\t\tremovedFiles.remove(file)\n\t\t\n\t#The files remaining in the respective list were either recently added or removed\n\tmessages = []\n\tfor file in removedFiles:\n\t\tmessages.append((globals.REMOVE_FILE, file))\t#these files not longer exist\n\tfor file in addedFiles:\n\t\tmessages.append((globals.ADD_FILE, file))\t\t#these files have been recently added\n\n\t#redefine list of local files\n\tglobals.LOCAL_FILE_LIST = listdir(globals.LOCAL_SHARED_FILE_SPACE)\n\treturn messages",
"def _find_changes(self):\n added = set()\n modified = set()\n existing_files = set()\n for dirpath_str, _, filenames in walk(str(self.path)):\n dirpath = Path(dirpath_str)\n for filename in filenames:\n if filename == DB_FILENAME:\n continue\n abs_filename = (dirpath / filename).absolute()\n if abs_filename in self.entries:\n entry = self.entries[abs_filename]\n existing_files.add(entry)\n st = lstat(str(abs_filename))\n if entry != st:\n modified.add(entry)\n else:\n try:\n entry = HashEntry(abs_filename)\n entry.update_attrs()\n added.add(entry)\n except FileNotFoundError:\n # If file was removed between listing and processing,\n # just treat it as if it never existed\n # We have nothing to compare it to anyway\n pass\n removed = set(self.entries.values()) - existing_files\n return added, removed, modified",
"def compareData(self) : \n logger.info('Comparing data ...')\n showAll = self.ui.showAll_checkBox.isChecked()\n selData = self.getSelectedRowData()\n\n if selData : \n assetName = selData[self.setCols.index('Asset Name')]\n rootLoc, rootExists = self.getRootLoc(assetName)\n \n if rootExists : \n abcAssetHero = str(self.ui.asset_lineEdit.text())\n abcShotHero = str(self.ui.shot_lineEdit.text())\n add = None \n remove = None\n\n if self.ui.compareCurrent_checkBox.isChecked() : \n if abcAssetHero : \n if self.ui.asset_radioButton.isChecked() : \n add, remove = sd_utils.compareLoc(rootLoc, abcAssetHero)\n\n if abcShotHero : \n if self.ui.shot_radioButton.isChecked() : \n add, remove = sd_utils.compareLoc(rootLoc, abcShotHero)\n\n else : \n add, remove = sd_utils.compareAbc(abcShotHero, abcAssetHero)\n\n self.ui.compare_listWidget.clear()\n \n if not showAll : \n if add : \n print 'add', add\n for item in add : \n self.addListWidgetItem(item, color=self.green)\n\n if remove : \n print 'remove', remove\n for item in remove : \n self.addListWidgetItem(item, color=self.red)\n\n if showAll : \n rootLocs = sd_utils.getSceneLocator(rootLoc)\n\n for item in rootLocs : \n color = [0, 0, 0]\n\n if item in remove : \n color = self.red \n\n self.addListWidgetItem(item, color=color)\n\n if add : \n for item in add : \n self.addListWidgetItem(item, color=self.green)\n\n else : \n logger.info('No set found')",
"def compare(file1, file2):\n\tfrom os.path import exists\n\tresult = False\n\t\n\tfile1 = adaptPath(file1)\n\tfile2 = adaptPath(file2)\n\t\n\t# If two files existing\n\tif exists(file1) and exists(file2):\n\t\t# If the date and size equal\n\t\tif getFileSize(file1) == getFileSize(file2):\n\t\t\ttry:\n\t\t\t\t# Read the content of first file\n\t\t\t\tcontent1 = open(file1, \"rb\").read()\n\t\t\t\ttry:\n\t\t\t\t\t# Read the content of second file\n\t\t\t\t\tcontent2 = open(file2, \"rb\").read()\n\t\t\t\t\t# If content differs\n\t\t\t\t\tif content1 == content2:\n\t\t\t\t\t\tresult = True\n\t\t\t\texcept IOError:\n\t\t\t\t\tpass\n\t\t\texcept IOError:\n\t\t\t\tpass\n\treturn result",
"def test_verify_compare_data_changed_source_file(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for edited file fails with --compare-data\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n self.assertEqual(e.exit_status, 1, str(e))\n else:\n self.fail('Expected CmdError not thrown')",
"def compare_tree(self):\n result = []\n \n pathA = os.path.join(self.testpath,'A')\n pathB = os.path.join(self.testpath,'B')\n\n filesA = [os.path.relpath(f,pathA) for f in self.tree(pathA)]\n filesB = [os.path.relpath(f,pathB) for f in self.tree(pathB)]\n\n filesAB = set(filesA).union(filesB)\n for fileAB in sorted(list(filesAB)):\n\n fileA = os.path.join(self.testpath,'A',fileAB)\n fileB = os.path.join(self.testpath,'B',fileAB)\n try:\n fileAtxt = open(fileA).read()\n except IOError:\n result.append( ('missing_inA',fileAB) )\n continue\n \n try:\n fileBtxt = open(fileB).read()\n except IOError:\n result.append( ('missing_inB',fileAB) )\n continue\n\n if not fileAtxt == fileBtxt:\n result.append( ('disagree',fileAB))\n \n return result",
"def do_comparex(self, str_arg):\n arg = validateString(str_arg)\n file1, fileset = arg.split(' ', 1)\n if len(fileset) == 0:\n self.resultFlag = False\n raise ValueError('Bad parameter. Please check your script.')\n if not os.path.isfile(file1):\n self.resultFlag = False\n raise ValueError(file1 + ' not exist, Please check your script.')\n # f_list=[pp1 for pp1 in fileset.split(' ') if pp1!='']\n for fn in fileset.split(' '):\n # print file1, f2\n if not os.path.isfile(fn):\n self.resultFlag = False\n raise ValueError(fn + ' not exist, Please check your script.')\n if self.__compareImage(file1, fn):\n self.resultFlag = True\n print('[Found match. %s and %s are identical.]' % (file1, fn))\n return\n print('[No match found.]')\n self.resultFlag = False"
] |
[
"0.68501765",
"0.66970176",
"0.6690322",
"0.64321995",
"0.6393277",
"0.6354407",
"0.6287291",
"0.62570655",
"0.6238033",
"0.6206817",
"0.61869234",
"0.61782014",
"0.6162856",
"0.6142978",
"0.61206084",
"0.6094404",
"0.6058897",
"0.60207516",
"0.60071945",
"0.5990195",
"0.5985952",
"0.59697634",
"0.5938081",
"0.5918018",
"0.59043753",
"0.5903485",
"0.58962864",
"0.5889646",
"0.5878108",
"0.58691955"
] |
0.67130214
|
1
|
Infinite checker that detects any file and folder changes
|
def check_folder_state(self):
while self:
diff = self.get_diff()
print(diff or 'No changes detected')
if diff:
self.parent.send_diff_data(diff)
time.sleep(1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def detect_changed_files(self) -> list[Path]:\n repos = [(self.open_repo(), self.git_directory)]\n # Check server and api dirs too\n # Normally these are ignored but we need to check these\n if (server_repo_path := Path(self.git_directory, \"Paper-Server\")).exists():\n repos.append((pygit2.Repository(str(server_repo_path)), server_repo_path))\n if (api_repo_path := Path(self.git_directory, \"Paper-API\")).exists():\n repos.append((pygit2.Repository(str(api_repo_path)), api_repo_path))\n changed = []\n for repo, repo_path in repos:\n changed.extend(p.relative_to(self.git_directory) for p in detect_changed_files(repo, repo_path))\n changed.sort()\n return changed",
"def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return 
os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}",
"def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)",
"def checkGit(directory):",
"def should_watch_dir(self, entry):\n return True",
"def test_filecompare(self):\n cmp = filecmp.dircmp(self.root_gold, self.root_target, ignore=[])\n self.recursive_dircmp(cmp)",
"def _find_changes(self):\n added = set()\n modified = set()\n existing_files = set()\n for dirpath_str, _, filenames in walk(str(self.path)):\n dirpath = Path(dirpath_str)\n for filename in filenames:\n if filename == DB_FILENAME:\n continue\n abs_filename = (dirpath / filename).absolute()\n if abs_filename in self.entries:\n entry = self.entries[abs_filename]\n existing_files.add(entry)\n st = lstat(str(abs_filename))\n if entry != st:\n modified.add(entry)\n else:\n try:\n entry = HashEntry(abs_filename)\n entry.update_attrs()\n added.add(entry)\n except FileNotFoundError:\n # If file was removed between listing and processing,\n # just treat it as if it never existed\n # We have nothing to compare it to anyway\n pass\n removed = set(self.entries.values()) - existing_files\n return added, removed, modified",
"def _findChangedFiles(self):\n changedFiles = []\n # calculate and update checksums always for ALL files\n for observedFile in self.observedFiles:\n if os.path.isfile(observedFile.filePath):\n currentChecksum = checksumFile(observedFile.filePath)\n else:\n currentChecksum = None\n # different values with None value checking\n if ((observedFile.lastChecksum is None\n and currentChecksum is not None)\n or observedFile.lastChecksum != currentChecksum):\n changedFiles.append(observedFile) # notify change\n observedFile.lastChecksum = currentChecksum # update checksum\n\n return changedFiles",
"def check(self):\n self.logger.info(\"Performing check ... (database file: '%s')\" % self.config.database)\n # read the database file\n try:\n f = open(self.config.database)\n data = json.load(f)\n f.close()\n except Exception, ex:\n self.logger.error(\"Could not read database file, reason: %s\" % ex)\n return 1\n # perform actual check against the database file\n # data: {file_path: {last_modif: <value>, last_modif_human: <value>}}\n for file_name, values in data.items():\n try:\n dt = os.path.getmtime(file_name)\n if dt != values[\"last_modif\"]:\n self._modified.append(file_name)\n except OSError:\n self._removed.append(file_name)\n # check actual files in the directory tree - check for newly\n # added files\n # get files currently in the directory - returns full file paths\n curr_file_names = helpers.get_files(path=self.config.watched_dir,\n file_mask=self.config.watch_masks,\n recursive=True)\n for file_name in curr_file_names:\n if file_name in self.config.ignore_list:\n continue\n encoded_file_name = unicode(file_name, \"utf-8\")\n if encoded_file_name not in data.keys():\n self._added.append(file_name)\n self.summarize()\n return 0",
"def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}",
"def _watchFolder(self):\n wm = pyinotify.WatchManager()\n wm.add_watch(self.gdocs_folder, pyinotify.IN_MODIFY, rec=True)\n \n handler = EventHandler(self)\n notifier = pyinotify.Notifier(wm, handler)\n \n print 'waiting for changes . . .'\n notifier.loop()",
"def verify_entire_folder_against_full_history_subcommand(root_path, verbose, ignore_list=None, ignore_spec_file=None):\n # command formerly known as \"check\"\n logger.verbose_logging = verbose\n\n if not os.path.isabs(root_path):\n root_path = os.path.join(os.getcwd(), root_path)\n\n logger.verbose(f'check folder at path: {root_path}')\n\n existing_history = MHLHistory.load_from_path(root_path)\n\n if len(existing_history.hash_lists) == 0:\n raise errors.NoMHLHistoryException(root_path)\n\n # we collect all paths we expect to find first and remove every path that we actually found while\n # traversing the file system, so this set will at the end contain the file paths not found in the file system\n not_found_paths = existing_history.set_of_file_paths()\n\n num_failed_verifications = 0\n num_new_files = 0\n\n ignore_spec = ignore.MHLIgnoreSpec(existing_history.latest_ignore_patterns(), ignore_list, ignore_spec_file)\n\n for folder_path, children in post_order_lexicographic(root_path, ignore_spec.get_path_spec()):\n for item_name, is_dir in children:\n file_path = os.path.join(folder_path, item_name)\n not_found_paths.discard(file_path)\n relative_path = existing_history.get_relative_file_path(file_path)\n history, history_relative_path = existing_history.find_history_for_path(relative_path)\n if is_dir:\n # TODO: find new directories here\n continue\n\n # check if there is an existing hash in the other generations and verify\n original_hash_entry = history.find_original_hash_entry_for_path(history_relative_path)\n\n # in case there is no original hash entry continue\n if original_hash_entry is None:\n logger.error(f'found new file {relative_path}')\n num_new_files += 1\n continue\n\n # create a new hash and compare it against the original hash entry\n current_hash = create_filehash(original_hash_entry.hash_format, file_path)\n if original_hash_entry.hash_string == current_hash:\n logger.verbose(f'verification of file {relative_path}: OK')\n else:\n logger.error(f'ERROR: hash mismatch for {relative_path} '\n f'old {original_hash_entry.hash_format}: {original_hash_entry.hash_string}, '\n f'new {original_hash_entry.hash_format}: {current_hash}')\n num_failed_verifications += 1\n\n exception = test_for_missing_files(not_found_paths, root_path, ignore_spec)\n if num_new_files > 0:\n exception = errors.NewFilesFoundException()\n if num_failed_verifications > 0:\n exception = errors.VerificationFailedException()\n\n if exception:\n raise exception",
"async def start_watching_roots(self):\n db.clear_visits(self.db_conn)\n for root in self.config.roots:\n await self.watch_tree(root)\n\n for path in db.get_unvisited_files(self.db_conn):\n print(path)\n await self.process_change(path, None)",
"def check_consistent(root):\n\n if path.islink(root) or not path.isdir(root):\n raise Check(\"projects root %s is not a directory\" % root)\n\n dot_exile = path.join(root, '.exile')\n if not path.lexists(dot_exile):\n raise Check(\"projects root %s has no .exile\" % root)\n if path.islink(dot_exile) or not path.isdir(dot_exile):\n raise Check(\".exile in projects root %s is not a directory\" % root)\n\n def check_link_target(project_name, folder_name):\n target = path.join(dot_exile, project_name, folder_name)\n if not path.lexists(target):\n raise Check(\"archived folder %s does not exist\" % target)\n if path.islink(target):\n raise Check(\"archived folder %s is itself a symlink\" % target)\n if not path.isdir(target):\n raise Check(\"archived folder %s is not a directory\" % target)\n \n def check_link_source(project_name, folder_name):\n target = path.join(dot_exile, project_name, folder_name)\n source = path.join(root, project_name, folder_name)\n if not path.islink(source):\n raise Check(\"source %s is not a symlink\" % source)\n if os.readlink(source) != target:\n msg = \"source %s does not point to target %s\"\n raise Check(msg % (source, target))\n \n for project_name in os.listdir(dot_exile):\n full_archived_project_path = path.join(dot_exile, project_name)\n folders = os.listdir(full_archived_project_path)\n if not folders:\n msg = \".exile project directory %s has no folders\"\n raise Check(msg % full_archived_project_path)\n for folder_name in folders:\n check_link_target(project_name, folder_name)\n check_link_source(project_name, folder_name)",
"def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. \n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide",
"def pass1(self, verbose):\n \n for root, dirs, files in os.walk(self.dir_to_check, topdown=False):\n t_size = 0\n for f in files:\n new_f = os.path.join(root,f) #complete path in case of homonyms\n size = os.path.getsize(new_f)\n t_size += size\n self.cache[new_f] = HumanReadableSize(size)\n t_size += sum ([self.cache[os.path.join(root,d)].val for d in dirs])\n self.cache[root] = HumanReadableSize(t_size)\n if verbose:\n print ('.................... Computing size of {}!'.format(root))\n \n #print (self.cache) #debugging",
"def check_unstaged_changes(self):\n pass",
"def _on_watch_changes(self, *changes):\n self.dirty = self._git.is_dirty()\n if self._watcher:\n for change in self._watcher.changes:\n for tracker in self._trackers:\n tracked_path = Path(self._git.working_dir) / change[\"path\"]\n if tracker.path.resolve() == tracked_path.resolve():\n tracker._on_file_change(None)\n return [\n dict(a_path=diff.a_path, b_path=diff.b_path, change_type=diff.change_type)\n for diff in self._git.index.diff(None)\n ] + [\n dict(a_path=None, b_path=ut, change_type=\"U\")\n for ut in self._git.untracked_files\n ]",
"def test_files(self):\r\n\r\n for path in self.get_files():\r\n self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path)) > self.start_time,\r\n msg='File not recently modified: %s' % os.path.basename(path))",
"def check_init_files_and_folders():\n\t#['cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', 'cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', 'color_detect_2.py', 'dedupe.py', 'detect_image_group_ku.py', 'detect_shape_5.py', 'get_cam_id_2.py', 'get_image_8.py', 'gui_hsv.py', 'knaps.py', 'knapsack_2.py', 'maps.html', 'program_detect_rectangle.zip', 'start_capture.py']\n\tfile_list=[\n\t#'cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', \n\t'models/cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', \n\t#'color_detect_2.py', \n\t#'dedupe.py', \n\t'detect_bus_haar_group.py', \n\t#'detect_shape_5.py', \n\t'get_cam_detail.py', \n\t'get_image.py', \n\t#'gui_hsv.py', \n\t#'knaps.py', \n\t#'knapsack_2.py', \n\t#'maps.html', \n\t#'program_detect_rectangle.zip', \n\t'start_wimb.py',\n\t'g.php',\n\t]\n\tdirectory_list=[\n\t'images',\n\t'images_bgs',\n\t'images_bgs_mask',\n\t#'images_bgs_result',\n\t'images_color',\n\t'images_haar',\n\t'images_haar_result',\n\t'images_number',\n\t'images_number_result',\n\t'models',\n\t'images_old',\n\t'text_number',\n\t]\n\t\n\tfor file_name in file_list: print 'file '+file_name+' existed: '+str(os.path.isfile(file_name))\n\tfor directory_name in directory_list: \n\t\tprint 'directory '+directory_name+' existed: '+str(os.path.isdir(directory_name))\n\t\tif not os.path.isdir(directory_name): \n\t\t\tos.makedirs(directory_name)\n\t\tif \"images\" in directory_name: shutil.copy(path+'/g.php',path+'/'+directory_name+'/g.php')",
"def diff_entire_folder_against_full_history_subcommand(root_path, verbose, ignore_list=None, ignore_spec_file=None):\n logger.verbose_logging = verbose\n\n if not os.path.isabs(root_path):\n root_path = os.path.join(os.getcwd(), root_path)\n\n logger.verbose(f'check folder at path: {root_path}')\n\n existing_history = MHLHistory.load_from_path(root_path)\n\n if len(existing_history.hash_lists) == 0:\n raise errors.NoMHLHistoryException(root_path)\n\n # we collect all paths we expect to find first and remove every path that we actually found while\n # traversing the file system, so this set will at the end contain the file paths not found in the file system\n not_found_paths = existing_history.set_of_file_paths()\n num_failed_verifications = 0\n num_new_files = 0\n\n ignore_spec = ignore.MHLIgnoreSpec(existing_history.latest_ignore_patterns(), ignore_list, ignore_spec_file)\n\n for folder_path, children in post_order_lexicographic(root_path, ignore_spec.get_path_spec()):\n for item_name, is_dir in children:\n file_path = os.path.join(folder_path, item_name)\n not_found_paths.discard(file_path)\n relative_path = existing_history.get_relative_file_path(file_path)\n history, history_relative_path = existing_history.find_history_for_path(relative_path)\n if is_dir:\n # TODO: find new directories here\n continue\n\n # check if there is an existing hash in the other generations and verify\n original_hash_entry = history.find_original_hash_entry_for_path(history_relative_path)\n\n # in case there is no original hash entry continue\n if original_hash_entry is None:\n logger.error(f'found new file {relative_path}')\n num_new_files += 1\n continue\n\n exception = test_for_missing_files(not_found_paths, root_path, ignore_spec)\n if num_new_files > 0:\n exception = errors.NewFilesFoundException()\n if num_failed_verifications > 0:\n exception = errors.VerificationFailedException()\n\n if exception:\n raise exception",
"def compare_old_and_new_change_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n ingreddirs = dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\",recipedir))\n for ingreddir in ingreddirs:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,ingreddir,\"change_status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"changestatusfiles\",recipedir,ingreddir,\"change_status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n rdict[recipedir][ingreddir]=\"send\"\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict",
"def _analyze(self):\n for _, self.subdirs, files in os.walk(self.path):\n if self.p.sort:\n self.subdirs.sort()\n files.sort()\n for f in files:\n self._analyze_file(fileextlow(f), f)\n break # stop walk() from entering subdirectories\n\n self.p.nr_dirs += 1\n if self.lossless or self.compressed or self.videos:\n if self.lossless or self.compressed:\n if not self.images:\n if self.p.warn_covers:\n print(f\"{W}{self.path}{R}: no cover file\")\n self.p.nr_no_cover += 1\n elif not have_valid_cover_name(self.images):\n if self.p.warn_covers:\n print(f\"{W}{self.path}{R}: wrong cover names\")\n self.p.nr_wrong_cover_name += 1\n if self.lossless:\n if self.compressed:\n self.p.nr_mixed_lossless_compressed += 1\n else:\n self.p.nr_lossless_dirs += 1\n\n if self.cue:\n if not self.lossless:\n if self.p.warn_cue:\n print(f\"{W}{self.path}{R}: cue but no lossless files\")\n self.p.nr_lossy_cue += 1\n elif not self.compressed:\n if len(self.cue) == 1:\n self.p.nr_cue += 1\n else:\n if self.p.warn_cue:\n print(f\"{W}{self.path}{R}: {len(self.cue)} cue files\")\n self.p.nr_multiple_cue += 1\n\n self.p.nr_media_dirs += 1\n self.p.nr_lossless += len(self.lossless)\n self.p.nr_compressed += len(self.compressed)\n self.p.nr_video_files += len(self.videos)\n self.p.nr_ignored += self.ignored\n self.p.unknown.update(self.unknown)\n else:\n if self.images and not self.subdirs:\n self.p.nr_only_images += 1\n else:\n self.p.nr_no_media_dirs += 1",
"def watch_for_file_changes(self, root_dir, callback):\n # type: (str, Callable[[], None]) -> None\n raise NotImplementedError(\"watch_for_file_changes\")",
"def _assets_are_stale(self, sourcedirectory, cachedirectory):\n comparison = filecmp.dircmp(sourcedirectory, cachedirectory, [], [])\n if comparison.left_only or comparison.right_only:\n # We have files in one directory and not the other\n return True\n if comparison.diff_files:\n # Some of the files have changed\n return True\n\n return False",
"def check_working_tree():\n result = _subprocess(['git', '--no-pager', 'diff', '--ignore-submodules=untracked'])\n if result:\n print(result)\n print(f\"Warning: Working tree contains changes to tracked files. Please commit or discard \"\n f\"your changes and try again.\")\n exit(1)",
"def is_rev_dirty(ctx: \"PlanemoCliContext\", directory: str) -> bool:\n return io.shell([\"git\", \"diff\", \"--quiet\"], cwd=directory) != 0",
"def verify(root_path, verbose, ignore_list, ignore_spec_file):\n #TODO distinguish different behavior\n verify_entire_folder_against_full_history_subcommand(root_path, verbose, ignore_list, ignore_spec_file)\n return",
"def _get_changed_paths(self):\n paths = set()\n while True:\n if not self._inotify_poll.poll(0):\n break\n\n self._inotify_events += os.read(self._inotify_fd, 1024)\n while len(self._inotify_events) > _INOTIFY_EVENT_SIZE:\n wd, mask, cookie, length = _INOTIFY_EVENT.unpack(\n self._inotify_events[:_INOTIFY_EVENT_SIZE])\n if len(self._inotify_events) < _INOTIFY_EVENT_SIZE + length:\n break\n\n name = self._inotify_events[\n _INOTIFY_EVENT_SIZE:_INOTIFY_EVENT_SIZE+length]\n name = name.rstrip('\\0')\n\n logging.debug('wd=%s, mask=%s, cookie=%s, length=%s, name=%r',\n wd, hex(mask), cookie, length, name)\n\n self._inotify_events = self._inotify_events[_INOTIFY_EVENT_SIZE+length:]\n\n if mask & IN_IGNORED:\n continue\n try:\n directory = self._watch_to_directory[wd]\n except KeyError:\n logging.debug('Watch deleted for watch descriptor=%d', wd)\n continue\n\n path = os.path.join(directory, name)\n if os.path.isdir(path) or path in self._directory_to_watch_descriptor:\n if mask & IN_DELETE:\n self._remove_watch_for_path(path)\n elif mask & IN_MOVED_FROM:\n self._remove_watch_for_path(path)\n elif mask & IN_CREATE:\n self._add_watch_for_path(path)\n elif mask & IN_MOVED_TO:\n self._add_watch_for_path(path)\n if path not in paths:\n paths.add(path)\n return paths",
"def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n files_list.append(output[a].replace('\\n', ''))\n return files_list"
] |
[
"0.649415",
"0.6353063",
"0.6351424",
"0.63136977",
"0.6285552",
"0.6275514",
"0.62463176",
"0.621994",
"0.6208854",
"0.6180186",
"0.61614585",
"0.61113906",
"0.60959333",
"0.60786444",
"0.6073537",
"0.60468596",
"0.60426575",
"0.6011889",
"0.6000292",
"0.59920996",
"0.5940317",
"0.5937484",
"0.5929546",
"0.59268886",
"0.59110194",
"0.5906403",
"0.59055334",
"0.58969504",
"0.589054",
"0.5890166"
] |
0.7357654
|
0
|
get top k largest elements from each corresponding row of matrix
|
import numpy as np

def get_top_k(matrix, k):
    assert k <= matrix.shape[1]
    # column indices of the k largest entries in each row (unordered within a row, via argpartition)
    col_inds = np.argpartition(matrix, -k)[:, -k:].flatten()
    row_inds = np.repeat(range(matrix.shape[0]), k)
    vals = matrix[row_inds, col_inds]
    return vals, col_inds
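As an illustration only (not part of the original record; it reuses the function and NumPy import above), a minimal usage sketch:

matrix = np.array([[1, 9, 3, 7],
                   [8, 2, 6, 4]])
vals, col_inds = get_top_k(matrix, k=2)
# vals is a flat array of length rows*k: each row's two largest values, rows concatenated
# (order within a row is not guaranteed, since np.argpartition does not fully sort).
# col_inds holds the matching column indices, flattened the same way.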
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fetch_top_k(vect, mat, k):\n resultant = np.dot(mat, vect)\n arglist = np.argsort(resultant)\n arglist = arglist[-1:(-1 - k):-1]\n return arglist, resultant",
"def top_k(m, k):\n ml = m.tolil()\n ms = [_top_k(d, r, k) for d, r in zip(ml.data, ml.rows)]\n return zip(*ms)",
"def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops",
"def _sort_rows(matrix, num_rows):\n tmatrix = array_ops.transpose(matrix, [1, 0])\n sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]\n return array_ops.transpose(sorted_tmatrix, [1, 0])",
"def indices_of_top_k(arr, k):\n return np.sort(np.argpartition(np.array(arr), -k)[-k:])",
"def get_top_values(weights, top_k=4):\n top_idx = np.argsort(weights)[-top_k:]\n top_idx = np.flip(top_idx)\n top_values = [weights[i] for i in top_idx]\n return top_idx, top_values",
"def topk(vec, k):\n vec = torch.topk(vec, k)\n return vec.view(-1).data.tolist()",
"def tflite_top_k_probs(probs, k):\n\n if k > 0:\n return np.flip(probs[0].argsort()[-k:])\n else:\n return np.flip(probs[0].argsort())",
"def sorted_top_k(item_counts, k):\n # Partitioning runs in O(d) time.\n top_k_unsorted = np.argpartition(-item_counts, k - 1)[:k]\n # Sorting highest k counts runs in O(k * log(k)) time.\n sorting_order = np.argsort(item_counts[top_k_unsorted])[::-1]\n return top_k_unsorted[sorting_order]",
"def find_top_k(predictions, boxes, k):\r\n\r\n if predictions.shape[0] == 0:\r\n predictions2 = torch.Tensor([]).to(device)\r\n labels2 = torch.Tensor([]).to(device)\r\n boxes2 = torch.Tensor([]).to(device)\r\n scores2 = torch.Tensor([]).to(device)\r\n\r\n else:\r\n predictions0 = predictions\r\n scores0 = torch.max(predictions0, dim=1)[0]\r\n labels0 = torch.argmax(predictions0, dim=1)\r\n boxes0 = boxes\r\n\r\n sort = torch.argsort(scores0, descending=True)\r\n boxes1, labels1, scores1, predictions1 = boxes0[sort], labels0[sort], scores0[sort], predictions0[sort]\r\n\r\n boxes2, labels2, scores2, predictions2 = boxes1[:k], labels1[:k] + 1, scores1[:k], predictions1[:k]\r\n\r\n return predictions2, boxes2, labels2, scores2",
"def as_top_k(\n self,\n k,\n matrix,\n type_name,\n simplify_unitsize_minibatch = True\n ):\n if k < 1:\n raise ValueError('k must be positive but it is %d' % k)\n result = []\n num_entity_sets = matrix.shape[0]\n # Find the indices with the highest valued weights.\n top_k_idx = np.flip(np.argsort(matrix, axis=1)[:, -k:], axis=1)\n row_index = np.arange(num_entity_sets).repeat(k)\n column_index = top_k_idx.reshape(-1)\n # Slice, reshape, and sort descending.\n top_k_weights = np.flip(\n np.sort(\n matrix[row_index, column_index].reshape(num_entity_sets, k),\n axis=1),\n axis=1)\n # Convert column indices into entities.\n for indices, weights in zip(top_k_idx, top_k_weights):\n entities = [\n self.get_entity_name(entity_index, type_name)\n for entity_index in indices\n ]\n result.append(list(zip(entities, weights)))\n if simplify_unitsize_minibatch and len(result) == 1:\n return result[0]\n else:\n return result",
"def _my_top_k(x, k):\n if k > 10:\n return tf.nn.top_k(x, k)\n values = []\n indices = []\n depth = tf.shape(x)[1]\n for i in range(k):\n values.append(tf.reduce_max(x, 1))\n argmax = tf.argmax(x, 1)\n indices.append(argmax)\n if i + 1 < k:\n x += tf.one_hot(argmax, depth, -1e9)\n return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))",
"def partition_arg_topK(matrix, K, axis=0):\n a_part = np.argpartition(matrix, K, axis=axis)\n if axis == 0:\n row_index = np.arange(matrix.shape[1 - axis])\n a_sec_argsort_K = np.argsort(matrix[a_part[0:K, :], row_index], axis=axis)\n return a_part[0:K, :][a_sec_argsort_K, row_index]\n else:\n column_index = np.arange(matrix.shape[1 - axis])[:, None]\n a_sec_argsort_K = np.argsort(matrix[column_index, a_part[:, 0:K]], axis=axis)\n return a_part[:, 0:K][column_index, a_sec_argsort_K]",
"def tf_top_k_probs(probs, k):\n\n if k > 0:\n return probs.argsort()[-k:][::-1]\n else:\n return probs.argsort()[:][::-1]",
"def top_k(self, k = 1):\n\t if self.shapley_rank == {}:\n\t \treturn []\n\n\t n = self.nodes\n\t topknodes = []\n\t i = 0\n\t count = 0\n\t while count < k and not i == n:\n\t if self.shapley_rank[i][0] not in topknodes and not self.is_adj(self.shapley_rank[i][0], topknodes):\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t i = 0\n\t if not count == k:\n\t while not count == k:\n\t if self.shapley_rank[i][0] not in topknodes:\n\t topknodes.append(self.shapley_rank[i][0])\n\t count += 1\n\t i += 1\n\t return topknodes",
"def order_preserving_k_max(input_tensor, k):\n ndims = input_tensor.shape.ndims\n \n # get indices of topk elements\n indices = tf.nn.top_k(input_tensor, k, sorted=False).indices#shape [d1,d2..,dn-1,k]\n # sort indices of topk elements\n indices = tf.nn.top_k(indices, k, sorted=True).values#shape [d1,d2..,dn-1,k]\n indices = tf.expand_dims(indices, axis=ndims)#shape [d1,d2..,dn-1,1,k]\n\n # build supporting indices for first n-1 dims\n support = tf.meshgrid(*[tf.range(tf.shape(input_tensor)[d])\n for d in xrange(ndims-1)], indexing='ij')#see numpy.meshgrid\n support = tf.stack(support, axis=ndims-1)#shape [d1,d2..,dn-1,ndims-1]\n support = tf.expand_dims(support, axis=ndims-1)#shape [d1,d2..,dn-1,1,ndims-1]\n support = tf.tile(support, [1]*(ndims-1)+[k, 1])#shape [d1,d2..,dn-1,k,ndims-1]\n\n full_indices = tf.concat([support, indices], axis=ndims)#shape [d1,d2..,dn-1,k,ndims]\n output = tf.gather_nd(input_tensor, full_indices)\n \n return output",
"def more_than_just_topk(result_table, k, metric):\n num_rows = result_table.shape[0]\n\n # No suggestion if all rows already in the result\n if k >= num_rows or k == -1:\n return\n\n # standard deviation of top k rows\n standard_deviation_topk = None\n if k == 1:\n standard_deviation_topk = 0\n else:\n standard_deviation_topk = result_table[:k][metric].std()\n\n # lower bound & upper bound for the value of metric \n val_lower_bound = result_table[metric][k - 1] - standard_deviation_topk * constants.MORE_THAN_JUST_TOPK_THRESHOLD\n val_upper_bound = result_table[metric][k - 1] + standard_deviation_topk * constants.MORE_THAN_JUST_TOPK_THRESHOLD\n\n # init the k in suggested query as k in original query\n new_k = k\n confidence_score = 0\n\n for row in range(k, num_rows):\n # value of metric at row \n val = result_table[metric][row]\n if val_lower_bound <= val and val <= val_upper_bound:\n new_k = row + 1\n else:\n break\n\n if standard_deviation_topk == 0:\n return\n\n confidence_score = abs(result_table[metric][new_k - 1] - result_table[metric][k - 1]) / standard_deviation_topk\n\n if new_k != k:\n change_list = {'topKLimit':new_k}\n suggestion = {}\n suggestion['change_list'] = change_list\n suggestion['suggestion'] = 'value of ' + metric + ' in some rows after the top-k is similar to the Kth row'\n suggestion['confidence_score'] = confidence_score\n suggestion['oversight'] = enums.Oversights.MORE_THAN_JUST_TOPK\n return suggestion\n else:\n return",
"def _topk(vec, k):\n # on a gpu, sorting is faster than pytorch's topk method\n #topkIndices = torch.sort(vec**2)[1][-k:]\n # however, torch.topk is more space efficient\n\n # topk on cuda returns what looks like uninitialized memory if\n # vals has nan values in it\n # saving to a zero-initialized output array instead of using the\n # output of topk appears to solve this problem\n topkVals = torch.zeros(k, device=vec.device)\n topkIndices = torch.zeros(k, device=vec.device).long()\n torch.topk(vec**2, k, sorted=False, out=(topkVals, topkIndices))\n\n ret = torch.zeros_like(vec)\n if len(vec.size()) == 1:\n ret[topkIndices] = vec[topkIndices]\n elif len(vec.size()) == 2:\n rows = torch.arange(vec.size()[0]).view(-1,1)\n ret[rows, topkIndices] = vec[rows, topkIndices]\n return ret",
"def top_k(input, k=1, sorted=True, index_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin\n return gen_nn_ops.top_kv2(\n input, k=k, sorted=sorted, index_type=index_type, name=name\n )",
"def top_k(indices, words, k):\n inds = np.argpartition(indices, -k)[-k:]\n topkwords = words[inds]\n topkvals = indices[inds]\n top = [(word, val) for word, val in zip(topkwords, topkvals)]\n top = sorted(top, key=lambda t: t[1], reverse=True)\n return top",
"def top_n_array_2d(array_2d, top_n):\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d, axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1) # (N, 3)",
"def _get_top_k_movies(self, similarity, movie_id, k):\n return [\n self._get_movies()[str(x+1)]\n for x in np.argsort(similarity[movie_id-1,:])[:-k-1:-1]\n ]",
"def topN_array_2d(array_2d, topN=None):\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d, axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:topN]\n column_indices = column_indices[::-1][:topN]\n sorted_values = array_2d[row_indices, column_indices]\n sorted_triples = zip(row_indices, column_indices, sorted_values)\n return sorted_triples",
"def similarity_matrix_topk(item_score_matrix, topk=100,verbose=False):\n\n assert(item_score_matrix.shape[0] == item_score_matrix.shape[1]), \"the score matrix is not square\"\n start_time = time.time()\n if verbose:\n print(\"generaing topk matrix\")\n\n # iterate over each column and keep only the topk similar items\n\n data, row_indices, col_indptr = [], [], []\n item_score_matrix = item_score_matrix.tocsc()\n n_items = item_score_matrix.shape[0]\n\n for item_index in range(item_score_matrix.shape[0]):\n col_indptr.append(len(data))\n start_position = item_score_matrix.indptr[item_index]\n end_position = item_score_matrix.indptr[item_index+1]\n col_data = item_score_matrix.data[start_position:end_position]\n col_row_index = item_score_matrix.indices[start_position:end_position]\n\n index_sorted = np.argsort(col_data)\n topk_index = index_sorted[-topk:]\n\n data.extend(col_data[topk_index])\n row_indices.extend(col_row_index[topk_index])\n col_indptr.append(len(data))\n\n result = sps.csc_matrix((data, row_indices, col_indptr), shape=(n_items, n_items), dtype=np.float32)\n result = result.tocsr()\n\n if verbose:\n print(\"sparse topk matrix generated in {:.2f} seconds\".format(time.time()-start_time))\n\n return result",
"def batchtopk(\n probs: np.ndarray, k: Optional[int] = None, axis: int = -1, reverse: bool = False\n) -> Tuple[np.ndarray, np.ndarray]:\n\n if k is not None and k <= 0:\n raise ValueError(\"k must be larger than 0. Use None to chose all elements.\")\n\n if axis != -1:\n raise ValueError(\"Only last axis supported atm\")\n\n if len(probs.shape) <= 1:\n raise ValueError(\"probs must be at least 2-dimensional\")\n\n if reverse:\n sign = -1\n else:\n sign = 1\n\n indices = np.argsort(sign * probs, axis=-1) # use argpartition?\n probs = np.take_along_axis(probs, indices[..., :k], axis=-1)\n\n return indices, probs",
"def getTopK(counter, tup, k=25):\n adj_list = [] #list of tuples that co occur with tup at least once\n for t in counter.relgram_map[tup]:\n adj_list.append((tup, t)) #add all that appear after tup\n\n\n for i in counter.relgram_map: #find any that appear before tup\n for j in counter.relgram_map[i]:\n if j == tup and i not in adj_list: \n adj_list.append((i, tup))\n\n scores = [(x, SCP(counter, x[0], x[1])) for x in adj_list] \n return sorted(scores, key=lambda x: x[1], reverse=True)",
"def top_indices(preds, num):\n sort_preds = np.sort(preds, 1)\n sort_preds = np.flip(sort_preds)\n sort_index = np.argsort(preds, 1)\n sort_index = np.flip(sort_index)\n\n print(f\"Top {num} results:\")\n for i in range(num):\n print(sort_index[0][i], sort_preds[0][i])\n\n return 0",
"def get_top_k_items(\n dataframe, col_user=DEFAULT_USER_COL, col_rating=DEFAULT_RATING_COL, k=DEFAULT_K\n):\n # Sort dataframe by col_user and (top k) col_rating\n top_k_items = (\n dataframe.groupby(col_user, as_index=False)\n .apply(lambda x: x.nlargest(k, col_rating))\n .reset_index(drop=True)\n )\n # Add ranks\n top_k_items[\"rank\"] = top_k_items.groupby(col_user, sort=False).cumcount() + 1\n return top_k_items",
"def _get_k_largest(lst, k):\n sorted_lst = sorted([(val, index) for index, val in enumerate(lst)])\n return list(reversed(sorted_lst[-k:]))",
"def get_top_k(similarity, question_ids, paragraph_ids, k):\n n_questions = similarity.shape[0]\n idxs = [np.argsort(similarity[row,:])[-k:][::-1] for row in range(n_questions)]\n out = {question_ids[i]:np.array(paragraph_ids)[idxs[i]] for i in range(n_questions)}\n return out"
] |
[
"0.78078854",
"0.7482105",
"0.7286839",
"0.71473676",
"0.7088986",
"0.6973559",
"0.6957784",
"0.6926557",
"0.68686914",
"0.68671626",
"0.6864181",
"0.6754134",
"0.66880155",
"0.6621688",
"0.65671676",
"0.6561875",
"0.65489244",
"0.6497595",
"0.646024",
"0.6455032",
"0.64490044",
"0.64245725",
"0.6325811",
"0.63233703",
"0.63141423",
"0.63015854",
"0.6283851",
"0.6283687",
"0.62677515",
"0.6267142"
] |
0.76114184
|
1
|
get k random elements from each corresponding row of matrix
|
import numpy as np

def get_random_k(matrix, k):
    assert k <= matrix.shape[1]
    # sample k column indices per row (with replacement, since np.random.choice defaults to replace=True)
    col_inds = np.array([np.random.choice(matrix.shape[1], k) for _ in range(matrix.shape[0])]).flatten()
    row_inds = np.repeat(range(matrix.shape[0]), k)
    vals = matrix[row_inds, col_inds]
    return vals, col_inds
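Purely illustrative sketch (reuses the function and NumPy import above; note that np.random.choice samples columns with replacement by default, so a row may repeat an index):

matrix = np.arange(12).reshape(3, 4)
vals, col_inds = get_random_k(matrix, k=2)
# vals has length 3*2: two randomly sampled entries per row, rows concatenated.
# col_inds are the sampled column indices in the same flattened order.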
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _randomnk(m, n, k):\n a = np.random.random((m, n))\n for i in range(k, n):\n pars = np.random.random(k)\n a[:, i] = a[:, :k] @ pars\n return a",
"def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]",
"def randCent(data,k):\n index = set()\n while len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids",
"def get_random_centroids(data, k) :\r\n centroids = []\r\n columns = np.size(data, axis=1)\r\n ranges = []\r\n for i in range(columns) :\r\n ranges.append([np.min(data[:,i]), np.max(data[:,i])])\r\n \r\n for i in range(k) :\r\n centroid = []\r\n for span in ranges :\r\n centroid.append(np.random.uniform(span[0], span[1]))\r\n centroids.append(centroid)\r\n \r\n return np.matrix(centroids)",
"def random_cluster_matrix(shape):\n N, k = shape\n A = np.zeros((N, k))\n for row in A: \n row[randint(0, k - 1)] = 1\n\n return A",
"def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return",
"def creaCent(data,k):\n\n Cen = random.sample(data[:,:].tolist(),k=k)\n return np.asarray(Cen)",
"def act(self):\n return np.random.randint(self.k)",
"def _sample(self, k, with_replacement, weights):\n indices = np.random.choice(\n self.num_rows, k, replace=with_replacement, p=weights)\n return [self.rows[i] for i in indices]",
"def build_k_indices(num_row, k_fold, seed):\n #num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)",
"def get_initial_means(array, k):\n idx = np.random.choice(array.shape[0], k, replace=False)\n return array[idx]",
"def random_indices(n,k):\n return np.sort(np.random.permutation(n)[:k])",
"def rand_k(self, k):\n\n k_N = self.prior.k_0 + self.counts[k]\n v_N = self.prior.v_0 + self.counts[k]\n m_N = self.m_N_numerators[k]/k_N\n S_N = self.S_N_partials[k] - k_N*np.square(m_N)\n\n mean = np.zeros(self.D)\n var = np.zeros(self.D)\n\n for i in range(self.D):\n var[i] = invchisquared_sample(v_N, S_N[i]/v_N, 1)[0]\n mean[i] = np.random.normal(m_N[i], np.sqrt(var[i]/k_N))\n\n return mean, var",
"def init_centroids(self, points , k):\n centroids = points.copy()\n numpy.random.shuffle(centroids)\n return centroids[0:k,:]",
"def initialize_k_mediods(data, k):\n return random.sample(range(len(data)), k)",
"def random_dist(k):\n res = [random.random() for i in range(k)]\n s = sum(res)\n return [v/s for v in res]",
"def random_weight(N, k, self_link=True):\n row = np.arange(N*k, dtype=np.int) / k\n col = np.random.randint(0, N, N*k)\n weight = np.ones(N*k)\n w = sparse.csr_matrix((weight, (row, col)), shape=(N, N), dtype=np.int8)\n if self_link:\n w += sparse.identity(N, dtype=np.int8)\n return w",
"def init_centroids(X,K):\n c = random.sample(list(X),K)\n return c",
"def sample_X(self, m, n):\n return np.random.permutation(m)[:n]",
"def getRandomCentroids(data_points, k):\n return random.sample(data_points, k)",
"def generate_initial_centroids(k, data):\n centroids = []\n used_indexes = []\n while len(centroids) < k:\n random_index = random.randint(0, len(data) - 1)\n if random_index not in used_indexes:\n centroids.append(data[random_index])\n used_indexes.append(random_index)\n return centroids",
"def kmm_init(X, m = 10):\r\n\r\n # compute the distances\r\n XXT = np.dot(X, X.T)\r\n D = (-2.*XXT + np.diag(XXT)[:,np.newaxis] + np.diag(XXT)[np.newaxis,:])\r\n\r\n # select the first point\r\n s = np.random.permutation(X.shape[0])[0]\r\n inducing = [s]\r\n prob = D[s]/D[s].sum()\r\n\r\n for z in range(m-1):\r\n s = np.random.multinomial(1, prob.flatten()).argmax()\r\n inducing.append(s)\r\n prob = D[s]/D[s].sum()\r\n\r\n inducing = np.array(inducing)\r\n return X[inducing]",
"def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)",
"def k_rank_approximate(doc_matrix, k):\n return []",
"def kmm_init(X, m = 10):\n\n # compute the distances\n XXT = np.dot(X, X.T)\n D = (-2.*XXT + np.diag(XXT)[:,np.newaxis] + np.diag(XXT)[np.newaxis,:])\n\n # select the first point\n s = np.random.permutation(X.shape[0])[0]\n inducing = [s]\n prob = D[s]/D[s].sum()\n\n for z in range(m-1):\n s = np.random.multinomial(1, prob.flatten()).argmax()\n inducing.append(s)\n prob = D[s]/D[s].sum()\n\n inducing = np.array(inducing)\n return X[inducing]",
"def sample_n_k(n, k):\n\n if not 0 <= k <= n:\n raise ValueError(\"Sample larger than population or is negative\")\n if k == 0:\n return np.empty((0,), dtype=np.int64)\n elif 3 * k >= n:\n return np.random.choice(n, k, replace=False)\n else:\n result = np.random.choice(n, 2 * k)\n selected = set()\n selected_add = selected.add\n j = k\n for i in range(k):\n x = result[i]\n while x in selected:\n x = result[i] = result[j]\n j += 1\n if j == 2 * k:\n # This is slow, but it rarely happens.\n result[k:] = np.random.choice(n, k)\n j = k\n selected_add(x)\n return result[:k]",
"def random_sample(population, k):\r\n \r\n newpopulation = population[:]\r\n if len(population) < k:\r\n raise ValueError, \"sample larger than population\"\r\n\r\n retlist = []\r\n populationsize = len(population)-1\r\n\r\n for num in range(k):\r\n pos = random_randint(0,populationsize-num)\r\n retlist.append(newpopulation[pos])\r\n del newpopulation[pos]\r\n\r\n return retlist",
"def sample_noreplace(arr, n, k): \n # code from https://www.iditect.com/how-to/58566613.html\n idx = np.random.randint(len(arr) - np.arange(k), size=[n, k])\n for i in range(k-1, 0, -1):\n idx[:,i:] += idx[:,i:] >= idx[:,i-1,None]\n return np.array(arr)[idx]",
"def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta",
"def tensor_choose_k(boolean_mask, rng, k=1, random=False):\n\n mask = boolean_mask\n if mask.ndim > 2:\n raise Exception('Input tensor must be either 1d or 2d.')\n elif mask.ndim == 1:\n mask = mask.dimshuffle('x', 0)\n\n assert T.lt(k, mask.shape[1]), 'k must be < then # of possible choices'\n\n if random is True:\n noise = rng.uniform(mask.shape, low=0, high=mask.shape[1])\n else:\n noise = T.arange(mask.shape[1])[::-1] + 1 # Descending order\n noise = T.cast(noise, theano.config.floatX).dimshuffle('x', 0)\n\n if k == 1:\n return T.argmax(mask*noise, axis=1)\n return T.argsort(mask*noise, axis=1)[:, ::-1][:, :k]"
] |
[
"0.7244414",
"0.69032",
"0.65335214",
"0.6526351",
"0.6523111",
"0.6515291",
"0.6423375",
"0.63610154",
"0.6351249",
"0.6316154",
"0.6297875",
"0.62714523",
"0.6265273",
"0.6165894",
"0.61577964",
"0.6157002",
"0.6144533",
"0.6099416",
"0.6081216",
"0.6075068",
"0.60582954",
"0.6030557",
"0.5996686",
"0.59919345",
"0.59845597",
"0.5967022",
"0.59467113",
"0.5940505",
"0.5914105",
"0.5904092"
] |
0.74868983
|
0
|
Count records across batches.
|
def count_records(batches: List[Batch]) -> int:
return sum(b.current_size for b in batches)
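A minimal sketch of how this could be exercised; FakeBatch is a hypothetical stand-in for the real Batch interface, which is not shown in this record:

from dataclasses import dataclass

@dataclass
class FakeBatch:
    current_size: int  # stand-in for the number of records currently held by a batch

batches = [FakeBatch(100), FakeBatch(100), FakeBatch(37)]
assert count_records(batches) == 237  # simple sum of per-batch record counts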
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_num_batches(self, instances: Iterable[Instance]) -> int:\n n_docs = len(set([instance[\"metadata\"][\"doc_key\"] for instance in instances]))\n return n_docs",
"def get_num_batches(self,batch_size):\r\n \r\n return len(self) // batch_size",
"def batch_size(self) -> int:\n ...",
"def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)",
"def number_of_batches(self):\n return int(np.floor(len(self.file_paths_list) / self.batch_size))",
"def count(self):\n return self.query.count(with_limit_and_skip = True)",
"def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total",
"def count():",
"def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches",
"def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count",
"def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)",
"def count(self):\n\n raise NotImplementedError",
"def __len__(self):\n return len(self.indexes) // self.batch_size",
"def __len__(self):\n return len(self.indexes) // self.batch_size",
"def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)",
"def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)",
"def get_record_count(self):\n return os.path.getsize(self.path) / self._get_record_size()",
"def batch_size(self):\n return self.size",
"def __len__(self):\n return len(self.batches)",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def get_num_records(self):\n return self.__num_records",
"def num_sown_batches(self):\n self.calc_progress()\n return self._num_sown_batches",
"def _compute_log_count(self):\n log_line_obj = self.env['common.log.lines.ept']\n model_id = log_line_obj.get_model_id('amazon.vcs.tax.report.ept')\n records = log_line_obj.search_read([('model_id', '=', model_id), ('res_id', '=', self.id)],\n [\"id\"])\n self.log_count = len(records)",
"def run_count(make_query, update_counter, counter):\n # Get the next batch of entities.\n query = make_query()\n if counter.last_key:\n query = query.filter('__key__ >', db.Key(counter.last_key))\n entities = query.order('__key__').fetch(FETCH_LIMIT)\n if not entities:\n counter.last_key = ''\n return False\n\n # Pass the entities to the counting function.\n for entity in entities:\n update_counter(counter, entity)\n\n # Remember where we left off.\n counter.last_key = str(entities[-1].key())\n return True",
"def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes",
"def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len",
"def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))"
] |
[
"0.71313673",
"0.6932072",
"0.6870095",
"0.67976797",
"0.66032314",
"0.6554192",
"0.6484331",
"0.64692736",
"0.64613444",
"0.6396987",
"0.63178724",
"0.62802833",
"0.62721443",
"0.62721443",
"0.62378734",
"0.62235904",
"0.62096214",
"0.61888677",
"0.61625093",
"0.61575264",
"0.61575264",
"0.61575264",
"0.61575264",
"0.61523914",
"0.6151656",
"0.6147739",
"0.61379796",
"0.6129133",
"0.61274314",
"0.6113142"
] |
0.8504124
|
0
|
Crawl through the batches. Yields (r.header, r.seq) for each record r across batches. Records are read from each batch via its record_gen method if self.doSort == False, otherwise via its sorted method.
|
def do_records(self, batches: List[Batch]) -> Iterator[Tuple[str, str]]:
    if any(type(b) not in [Batch, BatchAppendable] for b in batches):
        raise AssertionError()
    if self.doSort:
        # one (header, seq) generator per non-None batch, iterating via the batch's sorted() method
        generators = [
            ((str(r.header), str(r.seq)) for r in b.sorted(self.doSmart))
            for b in batches
            if b is not None
        ]
    else:
        # same, but iterating via the batch's record_gen() method
        generators = [
            ((str(r.header), str(r.seq)) for r in b.record_gen(self.doSmart))
            for b in batches
            if b is not None
        ]
    # merge the per-batch generators, keyed on the sequence string
    yield from merge(*generators, key=lambda x: x[1])
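The final merge behaves like heapq.merge over per-batch generators that are each sorted by sequence; a standalone sketch of that idea, independent of the Batch classes:

from heapq import merge

batch_a = [("read1", "AAAA"), ("read3", "CCCC")]  # (header, seq) pairs, sorted by seq
batch_b = [("read2", "AAAA"), ("read4", "GGGG")]
merged = list(merge(iter(batch_a), iter(batch_b), key=lambda x: x[1]))
# merged interleaves both batches in sequence order:
# [('read1', 'AAAA'), ('read2', 'AAAA'), ('read3', 'CCCC'), ('read4', 'GGGG')]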
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def do_batch(self, batches: List[Batch]) -> Iterator[Tuple[List[str], str]]:\n crawler = self.do_records(batches)\n\n try:\n first_record = next(crawler)\n except StopIteration:\n logging.error(\"nothing to crawl\")\n return\n\n current_seq = first_record[1]\n current_headers = [first_record[0]]\n\n crawler = (\n tqdm(crawler, initial=1, desc=self.desc, total=self.count_records(batches))\n if self.verbose\n else crawler\n )\n\n for record in crawler:\n if current_seq == record[1]:\n current_headers.append(record[0])\n else:\n yield (current_headers, current_seq)\n current_seq = record[1]\n current_headers = [record[0]]\n\n yield (current_headers, current_seq)",
"def _record_iterator(self):\n\n # Alias these values so the code won't need to keep performing\n # attribute lookups on `self` (small optimization)\n stream = self.stream\n sync_marker = self._sync_marker\n writer_schema = self.writer_schema\n reader_schema = self.reader_schema\n\n # Get the block decoder\n if self.codec == 'deflate':\n read_block = deflate_read_block\n elif self.codec == 'snappy':\n read_block = snappy_read_block\n else:\n read_block = null_read_block\n\n block_buf = BytesIO()\n\n try:\n while True:\n block_count = read_long(stream)\n read_block(stream, block_buf)\n\n for i in xrange(block_count):\n yield read_data(block_buf, writer_schema, reader_schema)\n\n skip_sync(stream, sync_marker)\n\n except EOFError:\n pass\n finally:\n block_buf.close()",
"def gen_records(self, count=None):\n if not count:\n count = self.num_rec\n tt = time.localtime(time.time())\n addr = None\n for i in range(count):\n logdbg(\"reading record %d of %d\" % (i+1, count))\n addr, record = self.get_record(addr, tt.tm_year, tt.tm_mon)\n yield addr, record",
"def _read_recs(basedir):\n for borotag in boro_tags:\n datafile = \"%s/%s.csv\" % (basedir,borotag)\n print(\"slurp '%s' ..\" % datafile)\n recs = read_recs(datafile)\n yield from (pluto.parse.normalize(r) for r in recs)",
"def serialize_batches():\n # a set of variables for the state of current batch which will be converted to Arrow\n # RecordBatch.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n return_schema = None\n\n for data in iterator:\n # data represents the result of each call of user function\n packaged_result = data[0]\n\n # There are two results from the call of user function:\n # 1) iterator of pandas DataFrame (output)\n # 2) updated state instance\n pdf_iter = packaged_result[0][0]\n state = packaged_result[0][1]\n\n # This is static and won't change across batches.\n return_schema = packaged_result[1]\n\n for pdf in pdf_iter:\n # We ignore empty pandas DataFrame.\n if len(pdf) > 0:\n pdf_data_cnt += len(pdf)\n pdfs.append(pdf)\n\n # If the total number of records in current batch exceeds the configured\n # threshold, time to construct the Arrow RecordBatch from the batch.\n if pdf_data_cnt > self.arrow_max_records_per_batch:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n # Reset the variables to start with new batch for further data.\n pdfs = []\n state_pdfs = []\n pdf_data_cnt = 0\n state_data_cnt = 0\n\n yield batch\n\n # This has to be performed 'after' evaluating all elements in iterator, so that\n # the user function has been completed and the state is guaranteed to be updated.\n state_pdf = construct_state_pdf(state)\n\n state_pdfs.append(state_pdf)\n state_data_cnt += 1\n\n # processed all output, but current batch may not be flushed yet.\n if pdf_data_cnt > 0 or state_data_cnt > 0:\n batch = construct_record_batch(\n pdfs, pdf_data_cnt, return_schema, state_pdfs, state_data_cnt\n )\n\n yield batch",
"def iter_records(self):\n\n decomp_type = 'gzip'\n block_size = 16384\n\n self.reader = DecompressingBufferedReader(self.fh,\n block_size=block_size)\n self.offset = self.fh.tell()\n\n next_line = None\n\n while True:\n try:\n record = self._next_record(next_line)\n yield record\n except EOFError:\n break\n\n self.read_to_end(record)\n\n # for non-compressed, consume blank lines here\n if not self.reader.decompressor:\n next_line = self._consume_blanklines()\n if next_line is None:\n # at end of file\n break\n\n # reset reader for next member\n else:\n self.reader.read_next_member()",
"def tfrecord_iterator(\n data_path: str,\n index_path: typing.Optional[str] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[memoryview]:\n if compression_type == \"gzip\":\n file = gzip.open(data_path, \"rb\")\n elif compression_type is None:\n file = io.open(data_path, \"rb\")\n else:\n raise ValueError(\"compression_type should be either 'gzip' or None\")\n length_bytes = bytearray(8)\n crc_bytes = bytearray(4)\n datum_bytes = bytearray(1024 * 1024)\n\n def read_records(start_offset=None, end_offset=None):\n nonlocal length_bytes, crc_bytes, datum_bytes\n\n if start_offset is not None:\n file.seek(start_offset)\n if end_offset is None:\n end_offset = os.path.getsize(data_path)\n while file.tell() < end_offset:\n if file.readinto(length_bytes) != 8:\n raise RuntimeError(\"Failed to read the record size.\")\n if file.readinto(crc_bytes) != 4:\n raise RuntimeError(\"Failed to read the start token.\")\n length, = struct.unpack(\"<Q\", length_bytes)\n if length > len(datum_bytes):\n datum_bytes = datum_bytes.zfill(int(length * 1.5))\n datum_bytes_view = memoryview(datum_bytes)[:length]\n if file.readinto(datum_bytes_view) != length:\n raise RuntimeError(\"Failed to read the record.\")\n if file.readinto(crc_bytes) != 4:\n raise RuntimeError(\"Failed to read the end token.\")\n yield datum_bytes_view\n\n if index_path is None:\n yield from read_records()\n else:\n index = np.loadtxt(index_path, dtype=np.int64)[:, 0]\n if shard is None:\n offset = np.random.choice(index)\n yield from read_records(offset)\n yield from read_records(0, offset)\n else:\n num_records = len(index)\n shard_idx, shard_count = shard\n start_index = (num_records * shard_idx) // shard_count\n end_index = (num_records * (shard_idx + 1)) // shard_count\n start_byte = index[start_index]\n end_byte = index[end_index] if end_index < num_records else None\n yield from read_records(start_byte, end_byte)\n\n file.close()",
"def next_batch(self):\n for nb in xrange(self.num_batches):\n if self.batch_end < self.full_len:\n batch_X_raw = self.full_X[self.batch_start:self.batch_end]\n batch_y_raw = self.full_y[self.batch_start:self.batch_end]\n else:\n batch_X_raw = self.full_X[self.batch_start:]\n batch_y_raw = self.full_y[self.batch_start:]\n batch_X, batch_y = pad_sort_data(batch_X_raw, batch_y_raw)\n self.batch_start = self.batch_end\n self.batch_end += self.batch_size\n yield batch_X, batch_y",
"def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch",
"def __iter__(self):\n index = len(self._logrecords)\n while index > 0:\n index -= 1\n yield self._logrecords[index]",
"def process_records(self, shard_iterator: str, shard_id: str):\n response = self.client.get_records(\n ShardIterator=shard_iterator\n )\n\n logger.debug('Getting data from shard: {shard_id}', extra=response)\n records = response['Records']\n\n if len(records) == 0:\n logger.info(f'Nothing to process for shard: \"{shard_id}\"')\n self._empty_shards.append(shard_id)\n else:\n for item in records:\n if self.item_callback:\n data = json.loads(item.get('Data'))\n order = data.get('order')\n contents = data.get('contents')\n self.item_callback(order, contents)\n self._sequences[shard_id] = item['SequenceNumber']\n\n next_shard_iterator = response['NextShardIterator']\n date = response.get('ResponseMetadata').get('HTTPHeaders').get('date')\n self._iterators[shard_id] = next_shard_iterator, date",
"def parse_records(self, handle, do_features=...): # -> Generator[SeqRecord, None, None]:\n ...",
"def gen_rebatch(self, *args, **kwargs):\n _action = self._action_list[0]\n self._rest_batch = None\n while True:\n if self._rest_batch is None:\n cur_len = 0\n batches = []\n else:\n cur_len = len(self._rest_batch)\n batches = [self._rest_batch]\n self._rest_batch = None\n while cur_len < _action['batch_size']:\n try:\n new_batch = _action['pipeline'].next_batch(*args, **kwargs)\n except StopIteration:\n break\n else:\n batches.append(new_batch)\n cur_len += len(new_batch)\n if len(batches) == 0:\n break\n else:\n if _action['merge_fn'] is None:\n batch, self._rest_batch = batches[0].merge(batches, batch_size=_action['batch_size'])\n else:\n batch, self._rest_batch = _action['merge_fn'](batches, batch_size=_action['batch_size'])\n yield batch",
"def __iter__(self):\n counter = 0\n while True:\n if counter < len(self.all_records):\n yield self.all_records[counter]\n else:\n yield self.next()\n counter += 1",
"def _batcher(self, rows):\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in rows:\n if row_count > self.batch_size - 1:\n logger.debug(f\"row_count={row_count} batch_size={self.batch_size} and batch={len(batch)}\")\n # Yield the previous batch\n yield batch\n\n # Start the new batch\n batch = []\n batch.append(row)\n row_count = 1\n\n batch_count += 1\n # break # toggle to load one batch only\n else:\n row_count += 1\n batch.append(row)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if self.throttle_time and self.throttle_size and (throttle_count > self.throttle_size - 1):\n logger.info(f\"Sleeping for {self.throttle_time} seconds... row: {i}\")\n time.sleep(int(self.throttle_time))\n throttle_count = 0\n elif self.throttle_time and self.throttle_size:\n throttle_count += 1\n i += 1\n\n yield batch",
"def do(self, recordBatch: List[Batch]) -> None:\n batchList = [\n recordBatch[i : min(len(recordBatch), i + self.n_batches)]\n for i in range(0, len(recordBatch), self.n_batches)\n ]\n batches = Parallel(n_jobs=self.threads, verbose=11)(\n delayed(SeqCountBatcher.build_batch)(\n batchedRecords, self.type, self.tmp, self.doSort\n )\n for batchedRecords in batchList\n )\n self.feed_collection(batches, self.FEED_MODE.REPLACE)",
"def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch",
"def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)",
"def _create_record_iterator(self) -> Iterator[RDSModel]:\n for downstream_key in self.downstream_deps:\n record = RDSTableLineage(\n table_source_rk=self.table_key,\n table_target_rk=downstream_key\n )\n yield record",
"def _format_csv(self, records):\n # build a unique list of all records keys as CSV headers\n headers = set()\n for rec in records:\n headers.update(rec.keys())\n\n # write the CSV output in memory\n line = Line()\n writer = csv.DictWriter(line, fieldnames=sorted(headers))\n writer.writeheader()\n yield line.read()\n\n for record in records:\n writer.writerow(record)\n yield line.read()",
"def walk(self):\n data = open(self.data_file_path, 'rb')\n read_metric = globals()[\"ProtoDefinition\"].Payload()\n read_metric.ParseFromString(data.read())\n\n # One record for the whole file\n self.payload_metadata = read_metric.payloadMetadata\n self.device = read_metric.device\n\n # Get list of all *repeated* field types\n field_names = []\n for field_desc in read_metric.DESCRIPTOR.fields:\n field_name = field_desc.name\n\n if field_desc.label == field_desc.LABEL_REPEATED:\n field_names.append(field_name)\n\n # For each repeated field type, get the data and yield one item at a time\n for field_name in field_names:\n stream_samples = getattr(read_metric, field_name)\n for sample in stream_samples:\n yield self.device, sample",
"def next(self) -> Iterable[RecordBatch]:\n for batch in self._parent_operator.next():\n args = self._process_arguments(self._arguments, batch=batch)\n yield self._kernel(batch, args)",
"def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):\n yield from Visitor(fil, rec, ignore, bf, sort).gen(self)",
"def generate_results():\n for idx, examples in enumerate(generate_groups()):\n # converting iterators to list so resources\n # are not shared in concurrent workers\n yield write_tfrecord(\n examples=examples,\n encode_fn=encode_fn,\n file_name=tfrecord_name.format(idx))",
"def reBasedRecordGenerator(taskType,records,infile):\n recordIDRE=recordIDRE[taskType]\n currentRecord=None\n currentData=[]\n with open(infile,'rt') as f:\n for line in f:\n m=recordIDRE.match(line)\n if m:\n if currentRecord is not None:\n yield currentData\n record=m.group(1)\n if record in records:\n currentRecord=record\n del currentData[:]\n currentData.append(line)\n else:\n currentRecord=None\n elif currentRecord is not None:\n currentData.append(line)\n if currentRecord is not None:\n yield currentData",
"def __iter__(self):\n\n # collector will fetch chunksize array for each 'get' call\n collector = FIFOArray(self.chunksize, self.axis)\n\n # make tmp array to hold generated subarrs\n tmp = []\n tmp_size = 0\n for subarr in self.data(**self.kwargs):\n\n tmp.append(subarr)\n tmp_size += subarr.shape[self.axis]\n\n # if tmp exceeds chunksize put in collector\n if tmp_size >= self.chunksize:\n arr = np.concatenate(tmp, axis=self.axis)\n collector.put(arr)\n\n # fetch chunksize till not full\n while collector.full():\n yield collector.get()\n\n # place leftover back into tmp and empty collector\n tmp = [collector.queue]\n tmp_size = collector.qsize()\n collector.queue = np.array([])\n\n else:\n\n # append to tmp again\n continue\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n # yield whatever is left in tmp (its below chunksize)\n remaining = np.concatenate(tmp, axis=self.axis)\n if remaining.size > 0:\n yield remaining",
"def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame",
"def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch",
"def _create_record_iterator(self) -> Iterator[RDSModel]:\n for downstream_key in self.downstream_deps:\n record = RDSColumnLineage(\n column_source_rk=self.column_key,\n column_target_rk=downstream_key\n )\n yield record",
"def next(self):\n # Most batches will be equal to batch_size\n if self.cur < (self.n - self.batch_size):\n # Get positions of files in batch\n positions = self.order[self.cur:self.cur + self.batch_size]\n\n self.cur += self.batch_size\n\n # create Batches\n X_train, y_train, sample_weights = self.createBatches(positions)\n\n return X_train, y_train, sample_weights\n\n # Final batch is smaller than batch_size\n if self.cur < self.n:\n positions = self.order[self.cur::]\n\n # Step is maximum - next will return None\n self.cur = self.n\n\n # Create Batches\n X_train, y_train, sample_weights = self.createBatches(positions)\n\n return X_train, y_train, sample_weights\n\n else:\n # reshuffle order for next batch\n np.random.shuffle(self.order)\n\n # Reset cur\n self.cur = 0\n\n # Signal end of epoch\n return None"
] |
[
"0.71820664",
"0.57776165",
"0.5710268",
"0.57072127",
"0.5692253",
"0.5690749",
"0.56448466",
"0.56083345",
"0.56011665",
"0.55865467",
"0.55815995",
"0.5555864",
"0.5494762",
"0.54750973",
"0.5467062",
"0.54343045",
"0.54213136",
"0.5420634",
"0.5399811",
"0.5376439",
"0.53669107",
"0.5354579",
"0.5335132",
"0.5334778",
"0.53272206",
"0.5296501",
"0.5289816",
"0.52857244",
"0.52833664",
"0.52537817"
] |
0.74110866
|
0
|
Group records from batches based on sequence. Crawls over the records of the input batches and yields one (headers, sequence) group per run of records sharing the same sequence.
|
def do_batch(self, batches: List[Batch]) -> Iterator[Tuple[List[str], str]]:
crawler = self.do_records(batches)
try:
first_record = next(crawler)
except StopIteration:
logging.error("nothing to crawl")
return
current_seq = first_record[1]
current_headers = [first_record[0]]
crawler = (
tqdm(crawler, initial=1, desc=self.desc, total=self.count_records(batches))
if self.verbose
else crawler
)
for record in crawler:
if current_seq == record[1]:
current_headers.append(record[0])
else:
yield (current_headers, current_seq)
current_seq = record[1]
current_headers = [record[0]]
yield (current_headers, current_seq)
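The consecutive-grouping step above can also be pictured with itertools.groupby on an already merged stream; a minimal standalone sketch, not the author's implementation:

from itertools import groupby

records = [("read1", "AAAA"), ("read2", "AAAA"), ("read3", "CCCC")]  # sorted by seq
groups = [([h for h, _ in grp], seq) for seq, grp in groupby(records, key=lambda x: x[1])]
# groups == [(['read1', 'read2'], 'AAAA'), (['read3'], 'CCCC')]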
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv",
"def do_records(self, batches: List[Batch]) -> Iterator[Tuple[str, str]]:\n if any(type(b) not in [Batch, BatchAppendable] for b in batches):\n raise AssertionError()\n\n if self.doSort:\n generators = [\n ((str(r.header), str(r.seq)) for r in b.sorted(self.doSmart))\n for b in batches\n if type(None) != type(b)\n ]\n\n else:\n generators = [\n ((str(r.header), str(r.seq)) for r in b.record_gen(self.doSmart))\n for b in batches\n if type(None) is not type(b)\n ]\n\n yield from merge(*generators, key=lambda x: x[1])",
"def generate_groups():\n groups = group_elements(\n generate_examples(file_name),\n cfg.tfrecord_size)\n\n # pairing groups to unique numbers and \n # filtering nulls from zip_longest\n groups = (\n list(filter(is_not_none, group))\n for group in groups\n )\n\n yield from groups",
"def group(seq):\n pass # replace with your solution",
"def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return",
"def grouping(filename, outdir, minsog, maxsog):\n records = Records(Extractor.extract_records(filename))\n\n groups = records.group(minsog, maxsog)\n for key in groups:\n rw = RecordsWriter(groups[key])\n rw.write_to_dir(key + \".fasta\", outdir)",
"def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)",
"def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]",
"def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches",
"def get_batches(arr, batch_size, seq_length):\n\n ## TODO: Get the number of batches we can make\n n_batches = int(arr.size / (batch_size * seq_length))\n\n ## TODO: Keep only enough characters to make full batches\n arr = arr[:(n_batches * batch_size * seq_length)]\n\n ## TODO: Reshape into batch_size rows\n arr = arr.reshape((batch_size, -1))\n\n ## TODO: Iterate over the batches using a window of size seq_length\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:, n:(n + seq_length)]\n # The targets, shifted by one\n y = np.zeros_like(x)\n try:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n + seq_length]\n except IndexError:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]\n yield x, y",
"def group_batches_for_parallel_processing(batchnamesfile, batchgroupsfile, cores):\n l = [[] for i in range(cores)]\n counter = 0\n with open(batchnamesfile) as f:\n for line in f:\n counter += 1\n filename = line.rstrip('\\n')\n group = counter % cores\n l[group].append(filename)\n write_nested_string_list_to_file(l, batchgroupsfile)\n return l",
"def ign_group(paths=[], **kwargs):\n for path in paths:\n rows = helpers.load_csv(path)\n rows = list(rows)\n helpers.batch(process_group, rows, total=len(rows))",
"def RecordBatches(\n self, options: dataset_options.RecordBatchesOptions\n ) -> Iterator[pa.RecordBatch]:",
"def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)",
"def generateBatches(data, batch_size):\n random.shuffle(data)\n batches = []\n size = len(data)\n def loadBatches(data, total_size, batch_size_):\n for i in range(0, total_size, batch_size_):\n yield data[i:min(total_size, i + batch_size_)]\n\n for unprocessed_batch in loadBatches(data, size, batch_size):\n processed_batch = processBatch(unprocessed_batch)\n batches.append(processed_batch)\n return batches",
"def batch(self, lo=None, hi=None, max_recs=None, max_bytes=None,\n preserve=True, packer=None, txn=None, max_phys=None,\n grouper=None):\n assert max_bytes or max_recs, 'max_bytes and/or max_recs is required.'\n txn = txn or self.engine\n packer = packer or self.packer\n it = self._iter(txn, None, lo, hi, False, None, True, max_phys)\n groupval = None\n items = []\n\n for batch, key, data in it:\n if preserve and batch:\n self._write_batch(txn, items, packer)\n else:\n txn.delete(encode_keys(self.prefix, key))\n items.append((key, data))\n if max_bytes:\n _, encoded = self._prepare_batch(items, packer)\n if len(encoded) > max_bytes:\n items.pop()\n self._write_batch(txn, items, packer)\n items.append((key, data))\n done = max_recs and len(items) == max_recs\n if (not done) and grouper:\n val = grouper(self.encoder.unpack(data))\n done = val != groupval\n groupval = val\n if done:\n self._write_batch(txn, items, packer)\n self._write_batch(txn, items, packer)",
"def batch_chunks(exp_chunks):\n import numpy as np\n batch_idx = np.array([chunk[0]['batch_id'] for chunk in exp_chunks])\n unique_batch_idx = np.unique(batch_idx)\n ids_per_array = [np.where(batch_idx == array_bidx)[0] for array_bidx in unique_batch_idx]\n exp_arrays = [[exp_chunks[idx] for idx in chunk_ids] for chunk_ids in ids_per_array]\n return exp_arrays",
"def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):\n for batch_i in range(0, len(sources)//batch_size):\n start_i = batch_i * batch_size\n\n # Slice the right amount for the batch\n sources_batch = sources[start_i:start_i + batch_size]\n targets_batch = targets[start_i:start_i + batch_size]\n\n # Pad\n pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))\n pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))\n\n # Need the lengths for the _lengths parameters\n pad_targets_lengths = []\n for target in pad_targets_batch:\n pad_targets_lengths.append(len(target))\n\n pad_source_lengths = []\n for source in pad_sources_batch:\n pad_source_lengths.append(len(source))\n\n yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths",
"def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths",
"def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]",
"def get_batches(summaries, texts, batch_size):\r\n for batch_i in range(0, len(texts)//batch_size):\r\n start_i = batch_i * batch_size\r\n summaries_batch = summaries[start_i:start_i + batch_size]\r\n texts_batch = texts[start_i:start_i + batch_size]\r\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\r\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\r\n \r\n # Need the lengths for the _lengths parameters\r\n pad_summaries_lengths = []\r\n for summary in pad_summaries_batch:\r\n pad_summaries_lengths.append(len(summary))\r\n \r\n pad_texts_lengths = []\r\n for text in pad_texts_batch:\r\n pad_texts_lengths.append(len(text))\r\n \r\n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths",
"def get_batches(summaries, texts, batch_size):\n for batch_i in range(0, len(texts)//batch_size):\n start_i = batch_i * batch_size\n summaries_batch = summaries[start_i:start_i + batch_size]\n texts_batch = texts[start_i:start_i + batch_size]\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\n \n # Need the lengths for the _lengths parameters\n pad_summaries_lengths = []\n for summary in pad_summaries_batch:\n pad_summaries_lengths.append(len(summary))\n \n pad_texts_lengths = []\n for text in pad_texts_batch:\n pad_texts_lengths.append(len(text))\n \n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths",
"def split_batches(filenames):\n by_time = {}\n for path_name in filenames:\n file_name = path.basename(path_name)\n parsed_fn = parse_agdc_fn(file_name)\n dt = parsed_fn['datetime']\n by_time.setdefault(dt, []).append((path_name, parsed_fn))\n\n rv = list(by_time.values())\n\n for group in rv:\n # Will raise exception if group is non-homogeneous\n check_sane(parsed for _, parsed in group)\n\n return rv",
"def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)",
"def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group",
"def gen_data_and_state(batches):\n\n state_for_current_group = None\n\n for batch in batches:\n batch_schema = batch.schema\n data_schema = pa.schema([batch_schema[i] for i in range(0, len(batch_schema) - 1)])\n state_schema = pa.schema(\n [\n batch_schema[-1],\n ]\n )\n\n batch_columns = batch.columns\n data_columns = batch_columns[0:-1]\n state_column = batch_columns[-1]\n\n data_batch = pa.RecordBatch.from_arrays(data_columns, schema=data_schema)\n state_batch = pa.RecordBatch.from_arrays(\n [\n state_column,\n ],\n schema=state_schema,\n )\n\n state_arrow = pa.Table.from_batches([state_batch]).itercolumns()\n state_pandas = [self.arrow_to_pandas(c) for c in state_arrow][0]\n\n for state_idx in range(0, len(state_pandas)):\n state_info_col = state_pandas.iloc[state_idx]\n\n if not state_info_col:\n # no more data with grouping key + state\n break\n\n data_start_offset = state_info_col[\"startOffset\"]\n num_data_rows = state_info_col[\"numRows\"]\n is_last_chunk = state_info_col[\"isLastChunk\"]\n\n if state_for_current_group:\n # use the state, we already have state for same group and there should be\n # some data in same group being processed earlier\n state = state_for_current_group\n else:\n # there is no state being stored for same group, construct one\n state = construct_state(state_info_col)\n\n if is_last_chunk:\n # discard the state being cached for same group\n state_for_current_group = None\n elif not state_for_current_group:\n # there's no cached state but expected to have additional data in same group\n # cache the current state\n state_for_current_group = state\n\n data_batch_for_group = data_batch.slice(data_start_offset, num_data_rows)\n data_arrow = pa.Table.from_batches([data_batch_for_group]).itercolumns()\n\n data_pandas = [self.arrow_to_pandas(c) for c in data_arrow]\n\n # state info\n yield (\n data_pandas,\n state,\n )",
"def batches(self, batch_size, count):\n entries = self.entries()\n for _ in range(count):\n yield [next(entries) for _ in range(batch_size)]",
"def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch",
"def get_batches(int_text, batch_size, seq_length):\n n_batches = len(int_text) // (batch_size * seq_length)\n len_int_text = n_batches * (batch_size*seq_length)\n \n x = np.array(int_text[: len_int_text])\n y = np.hstack((np.array(int_text[1: len_int_text]) , np.array(int_text[0]))) #np.hstack()水平合并\n \n x_batches = np.split(x.reshape(batch_size, -1), n_batches, -1)\n y_batches = np.split(y.reshape(batch_size, -1), n_batches, -1)\n \n all_batches= np.array(list(zip(x_batches, y_batches)))\n return all_batches",
"def batch_split(self, batch_text, threads=8):\n pass"
] |
[
"0.6201766",
"0.61394286",
"0.60248244",
"0.5968052",
"0.59213907",
"0.5915099",
"0.58367103",
"0.5821988",
"0.581675",
"0.57865965",
"0.5782162",
"0.5763047",
"0.5718872",
"0.5709953",
"0.57008743",
"0.5692128",
"0.56830883",
"0.5660939",
"0.5658205",
"0.5650884",
"0.56485784",
"0.56435674",
"0.5631138",
"0.56253755",
"0.562012",
"0.56023365",
"0.56023204",
"0.55994594",
"0.55752903",
"0.55737144"
] |
0.6228171
|
0
|
Select the appropriate join function, based on the current mode.
|
def __set_join_function(self):
if self.mode == self.MODE.UNIQUE:
self.__join_function = self.join_unique
elif self.mode == self.MODE.SEQ_COUNT:
self.__join_function = self.join_sequence_count
elif self.mode == self.MODE.VEC_COUNT:
self.__join_function = self.join_vector_count
elif self.mode == self.MODE.VEC_COUNT_MASKED:
self.__join_function = self.join_vector_count_masked
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()",
"def join(self) -> \"cocotb.triggers.Join\":\n return cocotb.triggers.Join(self)",
"def join(self, mode: int) -> int:\n\n if mode < 0 or mode > 1:\n raise ValueError('Bad mode')\n\n cmd = b'\\x40\\x01' + bytes([mode])\n try:\n self._serial.transmit(cmd)\n response = self._get_reply(0x40, 1, 0.25)\n finally:\n self._gpio.sleep()\n\n return response[2]",
"def _determine_joins(self) -> None:\n if self.secondaryjoin is not None and self.secondary is None:\n raise sa_exc.ArgumentError(\n \"Property %s specified with secondary \"\n \"join condition but \"\n \"no secondary argument\" % self.prop\n )\n\n # find a join between the given mapper's mapped table and\n # the given table. will try the mapper's local table first\n # for more specificity, then if not found will try the more\n # general mapped table, which in the case of inheritance is\n # a join.\n try:\n consider_as_foreign_keys = self.consider_as_foreign_keys or None\n if self.secondary is not None:\n if self.secondaryjoin is None:\n self.secondaryjoin = join_condition(\n self.child_persist_selectable,\n self.secondary,\n a_subset=self.child_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys,\n )\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(\n self.parent_persist_selectable,\n self.secondary,\n a_subset=self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys,\n )\n else:\n self.primaryjoin = self.primaryjoin_initial\n else:\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(\n self.parent_persist_selectable,\n self.child_persist_selectable,\n a_subset=self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys,\n )\n else:\n self.primaryjoin = self.primaryjoin_initial\n except sa_exc.NoForeignKeysError as nfe:\n if self.secondary is not None:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are no foreign keys \"\n \"linking these tables via secondary table '%s'. \"\n \"Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or \"\n \"specify 'primaryjoin' and 'secondaryjoin' \"\n \"expressions.\" % (self.prop, self.secondary)\n ) from nfe\n else:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are no foreign keys \"\n \"linking these tables. \"\n \"Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or \"\n \"specify a 'primaryjoin' expression.\" % self.prop\n ) from nfe\n except sa_exc.AmbiguousForeignKeysError as afe:\n if self.secondary is not None:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are multiple foreign key \"\n \"paths linking the tables via secondary table '%s'. \"\n \"Specify the 'foreign_keys' \"\n \"argument, providing a list of those columns which \"\n \"should be counted as containing a foreign key \"\n \"reference from the secondary table to each of the \"\n \"parent and child tables.\" % (self.prop, self.secondary)\n ) from afe\n else:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are multiple foreign key \"\n \"paths linking the tables. Specify the \"\n \"'foreign_keys' argument, providing a list of those \"\n \"columns which should be counted as containing a \"\n \"foreign key reference to the parent table.\" % self.prop\n ) from afe",
"def _format_joining_functions(self):\n ## TODO: Extend to n possible neighs_info elements\n if self.staticneighs:\n if self.ifdistance:\n self.join_neighs_and = join_neighsinfo_AND_static_dist\n self.join_neighs_or = join_neighsinfo_OR_static_dist\n self.join_neighs_xor = join_neighsinfo_XOR_static_dist\n else:\n self.join_neighs_and = join_neighsinfo_AND_static_notdist\n self.join_neighs_or = join_neighsinfo_OR_static_notdist\n self.join_neighs_xor = join_neighsinfo_XOR_static_notdist\n else:\n if self.ifdistance:\n self.join_neighs_and = join_neighsinfo_AND_notstatic_dist\n self.join_neighs_or = join_neighsinfo_OR_notstatic_dist\n self.join_neighs_xor = join_neighsinfo_XOR_notstatic_dist\n else:\n self.join_neighs_and = join_neighsinfo_AND_notstatic_notdist\n self.join_neighs_or = join_neighsinfo_OR_notstatic_notdist\n self.join_neighs_xor = join_neighsinfo_XOR_notstatic_notdist",
"def mode(self):\n return self._mode_func",
"def dispatch_mode_for_channel(self, target, mode):\n channel = target[1:]\n assert channel in self.server.channels\n self.server.channels[channel].mode(self, mode)",
"def get_mode(self):\n self.read(\":FUNC?\")",
"def mode(self, target, *data):\n self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)",
"def select_calculation_mode(config: dict, light_model: LightModel) -> str:\n config_mode = config.get(CONF_MODE)\n if config_mode:\n return config_mode\n\n if config.get(CONF_LINEAR):\n return MODE_LINEAR\n\n if config.get(CONF_FIXED):\n return MODE_FIXED\n\n if light_model:\n return light_model.supported_modes[0]\n\n raise UnsupportedMode(\n \"Cannot select a mode (LINEAR, FIXED or LUT), supply it in the config\"\n )",
"def read_join(stream: Stream):\n join_bin = stream.read_uchar()\n if join_bin == 0:\n return 'miter'\n elif join_bin == 1:\n return 'round'\n elif join_bin == 2:\n return 'bevel'\n else:\n raise UnreadableSymbolException('unknown join style {}'.format(join_bin))",
"def set_mode(self, mode):\n self.write(\":FUNC {}\".format(mode))",
"def add_join(self, join_type=\"LEFT\", input1=0, input2=1):\n jp = self.obj_payload\n if not \"joins\" in jp:\n jp[\"joins\"] = []\n join = {\n \"conditionsMode\": \"AND\",\n \"on\": [],\n \"table1\": input1,\n \"table2\": input2,\n \"type\": join_type\n }\n jp[\"joins\"].append(join)\n return join",
"def join(self):\n pass",
"def mode(self) -> Mode:\n ...",
"def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]",
"def mode_to_network_class(mode):\n if mode in ['flat', 'signals']:\n return FlatPolicy\n elif 'skills' in mode:\n return SkillsPolicy\n elif 'film' in mode:\n return FilmPolicy\n elif mode == 'regression':\n return Regression\n else:\n raise NotImplementedError('mode {} is unknown'.format(mode))",
"def mode(self, channel, target, command=\"\"):\n time.sleep(1)\n self.s.send(\"MODE %s %s%s\\n\" % (channel, target, (command and (\" \" + command))))\n logger.log(\"MODE %s %s%s\" % (channel, target, (command and (\" \" + command)))).LogSend()",
"def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...",
"def join(ctx, network, force):\n return join_wrapper(ctx.obj['client'], network, force)",
"def DualMode(self) -> bool:",
"def setMode(self, newmode=None):\n if newmode==None and self.mode: return\n \n # find it in my dictionary\n for k,v in self.items():\n if k.lower() == \"mode\":\n if newmode:\n self.mode = newmode\n self[k] = str(self.mode)\n else:\n self.mode = int(v)\n \n # it wasn't in the dictionary\n if newmode and not self.mode:\n self.mode = newmode\n self[\"MODE\"] = str(self.mode)\n \n if not self.mode:\n raise NetworkException(\"Supplink mode not set: \" + str(self))",
"def _rewrite_join(self, node: saldag.Join):\n\n if node.is_mpc:\n out_rel = node.out_rel\n key_col_idx = 0\n # oversimplifying here. what if there are multiple singleton\n # coll_sets?\n singleton_coll_sets = filter(\n lambda s: len(s) == 1,\n out_rel.columns[key_col_idx].coll_sets)\n singleton_coll_sets = sorted(list(singleton_coll_sets))\n if singleton_coll_sets:\n trusted_party = next(iter(singleton_coll_sets[0]))\n hybrid_join_op = saldag.HybridJoin.from_join(node, trusted_party)\n parents = hybrid_join_op.parents\n for par in parents:\n par.replace_child(node, hybrid_join_op)",
"def _natural_join_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"NaturalJoinNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.NaturalJoinNode\"\n )\n inputs = [self._compose_polars_ops(s, data_map=data_map) for s in op.sources]\n assert len(inputs) == 2\n how = op.jointype.lower()\n if how == \"full\":\n how = \"outer\"\n if how != \"right\":\n coalesce_columns = (\n set(op.sources[0].columns_produced()).intersection(op.sources[1].columns_produced()) \n - set(op.on_a))\n orphan_keys = [c for c in op.on_b if c not in set(op.on_a)]\n input_right = inputs[1]\n if len(orphan_keys) > 0:\n input_right = input_right.with_columns([\n pl.col(c).alias(f\"{c}_da_join_tmp_key\") for c in orphan_keys\n ])\n res = inputs[0].join(\n input_right,\n left_on=op.on_a,\n right_on=op.on_b,\n how=how,\n suffix = \"_da_right_tmp\",\n )\n if len(coalesce_columns) > 0:\n res = res.with_columns([\n pl.when(pl.col(c).is_null())\n .then(pl.col(c + \"_da_right_tmp\"))\n .otherwise(pl.col(c))\n .alias(c)\n for c in coalesce_columns\n ])\n if len(orphan_keys) > 0:\n res = res.rename({f\"{c}_da_join_tmp_key\": c for c in orphan_keys})\n else:\n # simulate right join with left join\n coalesce_columns = (\n set(op.sources[0].columns_produced()).intersection(op.sources[1].columns_produced()) \n - set(op.on_b))\n orphan_keys = [c for c in op.on_a if c not in set(op.on_b)]\n input_right = inputs[0]\n if len(orphan_keys) > 0:\n input_right = input_right.with_columns([\n pl.col(c).alias(f\"{c}_da_join_tmp_key\") for c in orphan_keys\n ])\n res = inputs[1].join(\n input_right,\n left_on=op.on_b,\n right_on=op.on_a,\n how=\"left\",\n suffix = \"_da_left_tmp\",\n )\n if len(coalesce_columns) > 0:\n res = res.with_columns([\n pl.when(pl.col(c + \"_da_left_tmp\").is_null())\n .then(pl.col(c))\n .otherwise(pl.col(c + \"_da_left_tmp\"))\n .alias(c)\n for c in coalesce_columns\n ])\n if len(orphan_keys) > 0:\n res = res.rename({f\"{c}_da_join_tmp_key\": c for c in orphan_keys})\n res = res.select(op.columns_produced())\n return res",
"def mode(self, mode_type: str):\r\n self._mode = mode_type.lower()\r\n self.mode_hist.append(mode_type)\r\n\r\n if self.mode_hist[-2] != mode_type and self._daq:\r\n msg = Message(\"mode\", mode_type, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)",
"def merge_join(planNode):\n cond = planNode.get_attr(\"Merge Cond\")\n cond_msg = \"\"\n \n if (cond):\n cond_msg += \" on condition {}\".format(cond)\n node = planNode.get_attr(\"Node Type\")\n join_type = planNode.get_attr(\"Join Type\")\n\n if join_type != \"\":\n join_type += ' '\n description = \"{}{}{}\".format( join_type, node, cond_msg )\n return description",
"def on_join(data):\n logger.info(f\"Joining: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n join_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")",
"def _get_open_func(cls, filename: str, mode: str) -> Tuple[OpenFunc, str]:\n if filename.endswith('.gz'):\n return gzip.open, cls._map_disk_type('{}t', '{}').format(mode)\n else:\n return open, cls._map_disk_type('{}', '{}b').format(mode)",
"def join(self, model_or_queryset, *filter_q, **filter_kw):\n join_type = filter_kw.get('_join_type', INNER)\n queryset = super(With, self).join(model_or_queryset, *filter_q, **filter_kw)\n\n # the underlying Django code forces the join type into INNER or a LEFT OUTER join\n alias, _ = queryset.query.table_alias(self.name)\n join = queryset.query.alias_map[alias]\n if join.join_type != join_type:\n join.join_type = join_type\n return queryset",
"def on_join(self, raw_msg, source, **kwargs):"
] |
[
"0.54541767",
"0.5298494",
"0.5287994",
"0.5253134",
"0.5196433",
"0.51717496",
"0.5141866",
"0.50759524",
"0.50703716",
"0.50369006",
"0.49973783",
"0.49802926",
"0.4961307",
"0.4899268",
"0.4894296",
"0.48599124",
"0.48489833",
"0.47833675",
"0.47625113",
"0.47557896",
"0.4752377",
"0.4744475",
"0.47347644",
"0.47325534",
"0.46983612",
"0.4678385",
"0.46676645",
"0.466279",
"0.46559176",
"0.46473128"
] |
0.7746332
|
0
|
Perform sequence counting through joining. Counts sequence occurrences.
|
def join_sequence_count(
headers: List[str], seq: str, OH: IO, **kwargs
) -> Tuple[str, int]:
batch = (seq, len(headers))
OH.write("%s\t%d\n" % batch)
return batch
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def count(seq):\n\treturn sum(1 for x in seq)",
"def count():",
"def count_sequences(self, size):\n raise NotImplementedError",
"def Counting(seq):\n\n #Scan the sequence, looking for motifs\n\n counting = {k: 0 for k in MOT} # Initialize the counting dictionary.\n # Scan all the motifs and find them in the sequence\n for motif in MOT:\n if len(seq) > len(motif): # Check if the sequence is longer than the motif itself.\n for i in range(len(seq)-len(motif)+1):\n if i == 0: # In case the motif is in the beginning of the sequence\n # print(\"start: \" + seq[i:i+len(motif)] + \" next nuc: \" + seq[i+len(motif)])\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0]: # Check if the next nucleotide is in not part of the motif.\n counting[motif] += 1\n elif i == len(seq)-len(motif): # In case the motif is in the end of the sequence\n \n if seq[i:i+len(motif)] == motif and seq[i-1] != motif[0]: # Check if the previuos nucleotide is in not part of the motif.\n counting[motif] += 1\n elif len(seq) > len(motif)+1: # In case the motif is in the middle of the sequence.\n # Check if the motif is not part of another motif (e.g. TT is in TTT).\n\n if seq[i:i+len(motif)] == motif and seq[i+len(motif)] != motif[0] and seq[i-1] != motif[0]:\n counting[motif] += 1\n for nuc_nr in NUC_NR:\n counting[nuc_nr+\"_NR\"] = seq.count(nuc_nr)\n\n return counting",
"def get_number_seqs_for_primer(percent_match,\n seq_count):\n \n total_seq_use=int((1-percent_match)*seq_count)\n \n return total_seq_use",
"def count_all(self):\n return Counter(self._sequence)",
"def join_vector_count(\n headers: List[str], seq: str, OH: IO, vector: AbundanceVector, **kwargs\n ) -> None:\n hcount = len(headers)\n for header in headers:\n coords = SequenceCoords.from_str(header)\n vector.add_count(\n coords.ref, coords.strand.label, int(coords.start), hcount, len(seq)\n )",
"def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs",
"def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count",
"def increase(self):\n self.counter[0] += 1\n\n for x in range(len(self.sequences) -1):\n if self.counter[x] == len(self.sequences[x]) + 1:\n self.counter[x] = 0\n self.counter[x+1] += 1",
"def counts(self) -> dict:\n return Counter(self.sequence)",
"def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count",
"def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result",
"def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n",
"def count(self):\n\n raise NotImplementedError",
"def test_count_seqs(self):\r\n def seq_counter(filepath, parser=None):\r\n # Fake sequence counter to test count_seqs without\r\n # having to write files to disk (note don't need to\r\n # test actual sequence counters here as they're tested\r\n # elsewhere)\r\n if filepath.startswith('fake'):\r\n raise IOError\r\n else:\r\n return len(filepath), 0, 0\r\n\r\n in_fps = ['1.fasta', 'fake1.fasta', 'fake.fasta', '2.fa']\r\n expected = [((7, 0, 0), '1.fasta'),\r\n ((4, 0, 0), '2.fa')],\\\r\n 11, ['fake1.fasta', 'fake.fasta']\r\n self.assertEqual(count_seqs_in_filepaths(\r\n in_fps, seq_counter), expected)\r\n\r\n in_fps = ['fake1.fasta', 'fake.fasta']\r\n expected = [], 0, ['fake1.fasta', 'fake.fasta']\r\n self.assertEqual(count_seqs_in_filepaths(\r\n in_fps, seq_counter), expected)\r\n\r\n in_fps = ['1.fasta', '2.fa', '12.txt']\r\n expected = [((7, 0, 0), '1.fasta'),\r\n ((4, 0, 0), '2.fa'),\r\n ((6, 0, 0), '12.txt')], 17, []\r\n self.assertEqual(count_seqs_in_filepaths(\r\n in_fps, seq_counter), expected)",
"def _build_counter_sequence(events, bins):\n counter_sequence = []\n last_pos = 0\n for i in range(bins.shape[0]):\n timestamps = events[last_pos:]\n event_count = trace(timestamps, endtime=bins[i]).shape[0]\n if event_count > 0:\n event_count = int(ceilk(event_count, 10))\n counter_sequence.append(event_count)\n last_pos = last_pos + event_count\n\n return counter_sequence",
"def count(self, sub) -> int:\n pass",
"def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)",
"def count(seq):\n\n if not seq:\n return 0\n elif isinstance(seq[0], list):\n return count(seq[0]) + count(seq[1:])\n else:\n return 1 + count(seq[1:])",
"def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)",
"def counter(self) -> int:",
"def counter(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def get_num_sequence(self):\n return len(self.study_list)",
"def countby(iteratee, seq):\n return dict(Counter(map(iteratee, seq)))",
"def count(self):\n\n count = 0\n x = self.begin\n\n if self.begin == self.end == None:\n return 0\n\n elif self.begin == self.end:\n return 1\n\n else:\n while x:\n count += 1\n x = x.next\n\n return count"
] |
[
"0.69582546",
"0.62097913",
"0.605739",
"0.6015187",
"0.5959785",
"0.5924617",
"0.57749957",
"0.5665839",
"0.56234276",
"0.5620457",
"0.5549516",
"0.5546007",
"0.5545018",
"0.5525091",
"0.55207217",
"0.5505692",
"0.550143",
"0.54988235",
"0.5486474",
"0.5467682",
"0.5455928",
"0.5437132",
"0.5437132",
"0.5425294",
"0.5425294",
"0.5425294",
"0.5425294",
"0.54251486",
"0.53992534",
"0.53919446"
] |
0.64208555
|
1
|
Join sequenceCount batches in parallel.
|
def __parallel_join(self, recordBatches: List[Batch], outpath: str) -> None:
kwargs = self._pre_join(outpath)
batcher = SeqCountBatcher.from_parent(self, self.batch_size)
batcher.doSort = self.doSort
print("Intermediate batching...")
batcher.do(recordBatches)
print("Joining...")
batcher.join(self.join_function, **kwargs)
self._post_join(**kwargs)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def join_sequence_count(\n headers: List[str], seq: str, OH: IO, **kwargs\n ) -> Tuple[str, int]:\n batch = (seq, len(headers))\n OH.write(\"%s\\t%d\\n\" % batch)\n return batch",
"def join(self, batches: List[Batch], outpath: str) -> None:\n if self.threads == 1:\n super().join(batches, outpath)\n else:\n self.__parallel_join(batches, outpath)",
"def dummy_join_fastq(inputs, outputs, log_files, batch_size=10000, gap=20, **kwargs):\n if gap >= 0:\n gap_seq = \"N\" * gap\n gap_qual = [0] * gap\n\n counts = {\n \"join_count\": 0,\n \"fwd_count\": 0,\n \"rev_count\": 0,\n \"total_joined\": 0,\n \"total_written\": 0,\n }\n faked_joins = []\n\n fwd_records = SeqIO.parse(get_file_name(inputs.fwd), \"fastq\")\n rev_records = SeqIO.parse(get_file_name(inputs.rev), \"fastq\")\n with open(get_file_name(outputs), \"w\") as out_fastq_stream:\n for frec, rrec in merge_record_iters(fwd_records, rev_records, **kwargs):\n # join seqs\n new_records = []\n if frec is None and rrec is None:\n logger.warning(\"Both ends missing from input\") # this shouldn't\n continue\n if frec is None:\n logger.debug(\"Forward seq trimmed to oblivion\")\n new_records.append(rev_comp_rec(rrec, qual=True, suffix=\".rev\"))\n counts[\"rev_count\"] += 1\n elif rrec is None:\n logger.debug(\"Reverse seq trimmed to oblivion\")\n new_records.append(frec)\n counts[\"fwd_count\"] += 1\n elif gap >= 0:\n counts[\"join_count\"] += 1\n # join sequence\n new_seq = (\n frec.seq\n + Seq.Seq(gap_seq, frec.seq.alphabet)\n + rrec.seq.reverse_complement()\n )\n new_record = SeqRecord.SeqRecord(\n new_seq, id=frec.id, name=frec.name, description=\"Faked join\"\n )\n # join quality\n new_record.letter_annotations[\"phred_quality\"] = (\n frec.letter_annotations[\"phred_quality\"]\n + gap_qual\n + list(reversed(rrec.letter_annotations[\"phred_quality\"]))\n )\n new_records.append(new_record)\n else:\n # gap < 0 means don't join...add separately\n new_records.append(frec)\n new_records.append(rev_comp_rec(rrec, qual=True, suffix=\".rev\"))\n\n faked_joins.extend(new_records)\n if len(faked_joins) >= batch_size:\n n_written = SeqIO.write(faked_joins, out_fastq_stream, format=\"fastq\")\n if n_written != len(faked_joins):\n logger.warning(\n \"Only %d of %d faked joins written!\"\n % (n_written, len(faked_joins))\n )\n counts[\"total_joined\"] += len(faked_joins)\n counts[\"total_written\"] += n_written\n del faked_joins[:]\n\n # at end of loop, write remaining cached records\n n_written = SeqIO.write(faked_joins, out_fastq_stream, format=\"fastq\")\n if n_written != len(faked_joins):\n logger.warning(\n \"Only %d of %d faked joins written!\" % (n_written, len(faked_joins))\n )\n counts[\"total_joined\"] += len(faked_joins)\n counts[\"total_written\"] += n_written\n\n # Report some counts\n msg = \"\"\"\n#======================\n# Faked joins\n# Total written: {total_written} of {total_joined}\n# Dummy Joins: {join_count}\n# FwdOnly: {fwd_count}\n# RevOnly: {rev_count}\n#======================\n\"\"\".format(\n **counts\n )\n\n with open(log_files[0], \"a\") as log_out_stream:\n log_out_stream.write(msg)\n\n logger.debug(msg)",
"def join(self, *args, **kwargs):\n for process in self.process:\n process.join(*args, **kwargs)",
"def join_vector_count(\n headers: List[str], seq: str, OH: IO, vector: AbundanceVector, **kwargs\n ) -> None:\n hcount = len(headers)\n for header in headers:\n coords = SequenceCoords.from_str(header)\n vector.add_count(\n coords.ref, coords.strand.label, int(coords.start), hcount, len(seq)\n )",
"def join(self, batches: List[Batch], outpath: str) -> None:\n kwargs = self._pre_join(outpath)\n\n crawler = Crawler()\n print(\"Joining...\")\n for batch in crawler.do_batch(batches):\n self.join_function(*batch, **kwargs)\n\n self._post_join(**kwargs)",
"def _link_jobs(self):\n for i, j in enumerate(self.jobs):\n j.link(self, i)\n j.claim_artifacts()",
"def join(self):\n for t in self.created_list:\n t.join()",
"def _seqAcqCyclops(self):\n print('Cyclops running')\n\n pool = ThreadPool(processes=2)\n print('Pool initialized')\n\n ledSwitchingThread = pool.apply_async(self._metadataSaving,())\n sleep(0.001) ## WAIT FOR INITIALIZATION AND WAITFORSIGNAL FCT\n frameSavingThread = pool.apply_async(self._frameSaving,())\n imageCount = ledSwitchingThread.get()\n print('Saving process counter : ', frameSavingThread.get())\n print('LED process counter : ', imageCount)\n #close the pool and wait for the work to finish\n pool.close()\n pool.join()\n print('sequ acq done')\n return imageCount",
"def add_batch(batch_index, pCS, orphans, fasta_d, cpus, dun_use_partial):\n cur_file = \"batch{0}.fasta\".format(batch_index)\n seqids = set([r.id for r in SeqIO.parse(open(cur_file), 'fasta')])\n o = ar.run_minimap(cur_file, \"seed{0}.S.fasta\".format(batch_index), cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, remains = sp.process_align_to_pCS(o, seqids, pCS, MiniReader, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # write batch<i>.remains.fasta\n cur_file = \"batch{0}.remains.fasta\".format(batch_index)\n FileIO.write_seqids_to_fasta(remains, cur_file, fasta_d)\n o = ar.run_minimap(cur_file, \"seed{0}.orphans.fasta\".format(batch_index), cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, orphans, remains = sp.process_align_to_orphan(o, remains, orphans, pCS, MiniReader, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # write batch<i>.remains2.fasta and self align\n cur_file = \"batch{0}.remains2.fasta\".format(batch_index)\n FileIO.write_seqids_to_fasta(remains, cur_file, fasta_d)\n o = ar.run_minimap(cur_file, cur_file, cpus=cpus)\n print >> sys.stderr, \"processing\", o\n pCS, remains = sp.process_self_align_into_seed(o, remains, MiniReader, pCS, dun_use_partial=dun_use_partial)\n print >> sys.stderr, \"pCS: {0}, tucked: {1}, orphans: {2}, remains: {3}\".format( \\\n len(pCS.S), sum(v == 'T' for v in pCS.seq_stat.itervalues()), len(orphans), len(remains))\n # combine remains+orphans to new orphans\n orphans = orphans.union(remains)\n FileIO.write_preClusterSet_to_fasta(pCS, \"seed{0}.S.fasta\".format(batch_index+1), fasta_d)\n FileIO.write_seqids_to_fasta(orphans, \"seed{0}.orphans.fasta\".format(batch_index+1), fasta_d)\n\n return pCS, orphans",
"def combine(args, library_sizes):\n with open(args.counts, \"r\") as counts, open(args.results, \"r\") as results:\n with open(args.output_dir + \"counts_results.txt\", \"w+\") as file1, \\\n open(args.output_dir + \"counts_results_rpm.txt\",\"w+\") \\\n as file2, \\\n open(args.output_dir + \"counts_results_rpkm.txt\", \"w+\") \\\n as file3:\n head = True\n for count_line, results_line in zip(counts, results):\n count_line = count_line.strip()\n results_line = results_line.strip()\n\n if head: # Process column names into one header\n head = False\n count_head_parts = count_line.split(\"\\t\")\n results_head_parts = results_line.split(\"\\t\")\n results_head_parts = [\"Chromosome\", \"Start\", \"End\"] + \\\n results_head_parts[1:]\n\n new_head_parts = results_head_parts + \\\n count_head_parts[2:]\n new_head = \"\\t\".join(new_head_parts)\n new_head += \"\\n\"\n file1.write(new_head)\n file2.write(new_head)\n file3.write(new_head)\n\n else:\n process(count_line, results_line,\n file1, file2, file3, library_sizes)",
"def write_all_rawcombined():\n subject_id = []\n for patient_idx in np.arange(settings.num_patients):\n if patient_idx < 10:\n subject_id.append(str('00') + str(patient_idx))\n else:\n subject_id.append(str('0') + str(patient_idx))\n \n pool = multiprocessing.Pool()\n pool.map(write_patient_concat_ch, subject_id)",
"def pool_combine(program_path='.', seq='999'):\n from os.path import dirname\n from subprocess import call\n\n new_pool = None\n for pool in get_pools(program_path=program_path, latest=False):\n if new_pool is None:\n path = dirname(pool)\n new_pool = open('/'.join([path, make_poolname(pool, seq=seq)]), 'w')\n call(['head', '-n', '1', pool], stdout=new_pool)\n call(['tail', '-n', '+2', pool], stdout=new_pool)\n new_pool.close()",
"def thread_batch(l_threadFunc, outerLoop, innerLoop, offset):\n start = 0\n join = 0\n il = lambda f, i, o, l : f + i + o * l\n for t_o in range(0, outerLoop):\n for t_i in range(0, innerLoop):\n idx = il(offset, t_i, t_o, innerLoop)\n l_threadFunc[idx].start()\n start += 1\n # self.dp.qprint('Started thread %d' % start)\n\n for t_i in range(0, innerLoop):\n idx = il(offset, t_i, t_o, innerLoop)\n l_threadFunc[idx].join()\n join += 1\n # self.dp.qprint('Join set on thread %d' % join)\n\n return start",
"def join(self, other):\n # In fact we simply will join the counters of this bundler with the\n # counters of the specified bundler.\n pairs = zip(self.gauges, other.gauges)\n\n # Wrap the call into the list conversion, since the imap method returns\n # a generator.\n list(map(lambda ab: ab[0].join(ab[1]), pairs))\n\n # It is important to return the referece to ourselves,\n # as it will be used as an accumulator in the reduce call.\n return self",
"def _join_running_pipelines(self):\n still_running = list(self._running_pipelines)\n for pipeline in still_running:\n pipeline.join_all()",
"def join(self, seq):\n return join(self, seq)",
"def joincount(pntGraph, lineGraph, criterion='', threshold=0):\n matNumDict, _ = spatialjoin._spatialjoin(pntGraph, lineGraph, criterion, threshold)\n for edge in lineGraph.edges(data=True):\n edge[2]['joinCount'] = matNumDict[edge[2]['Ind']]\n print('The join count is added to the POLYLINE type graph.')",
"def batch_split(self, batch_text, threads=8):\n pass",
"def build_joint_counters(sentence_counters):\n joint_counter = collections.Counter()\n for counter in tqdm.tqdm(sentence_counters):\n for w1, w2 in itertools.combinations(counter.keys(), 2):\n joint_counter[frozenset({w1, w2})] += min(counter[w1], counter[w2])\n return joint_counter",
"def map_reduce_queue(mapper, reducer, seq, num_reducer_args,\n block, timeout, chunk_size):\n n_yields = 0\n queue, jobs, num_successes = mapf(mapper, seq, block, 5 * chunk_size)\n for _ in chunk(chunk_size, jobs): # starts jobs that write to queue\n for rv in group_reduce(reducer, queue, # consumes queue\n timeout=0, group_size=num_reducer_args):\n n_yields += 1\n yield rv\n gevent.sleep(0)\n for rv in group_reduce(reducer, queue,\n timeout=timeout, group_size=num_reducer_args):\n n_yields += 1\n yield rv\n if num_successes.get() == n_yields:\n msg = \"\"\"num_tagged_urls: %s ncrawled_urls:%s\"\"\" % (\n n_yields, num_successes.get())\n gevent.sleep(5)\n msg += ' ncrawed_urls a few secs later: %s' % num_successes.get()\n log.error(msg)",
"async def run_deduplication(joined_path: Path, output_path: Path):\n counts = defaultdict(int)\n\n with open(output_path, \"w\") as f:\n for record in parse_joined_fastq(joined_path, counts):\n SeqIO.write(record, f, format=\"fasta\")\n\n return dict(counts)",
"def joint_dataset(l1, l2):\n N = np.max(l1) + 1\n return l2 * N + l1",
"def num_joins(self, num_joins):\n self._num_joins = num_joins",
"def inner_loop(i: int, alive_seq: torch.LongTensor, alive_log_probs: torch.Tensor, finished_seq: torch.LongTensor, finished_scores: torch.Tensor, finished_flags: torch.ByteTensor, states: Optional[State]) ->Tuple[int, torch.LongTensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.ByteTensor, Optional[State]]:\n topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(i, alive_seq, alive_log_probs, states)\n alive_seq, alive_log_probs, _, states = grow_alive(topk_seq, topk_scores, topk_log_probs, topk_finished, states)\n finished_seq, finished_scores, finished_flags = grow_finished(finished_seq, finished_scores, finished_flags, topk_seq, topk_scores, topk_finished)\n return i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores, finished_flags, states",
"def test_results_workers(self, affiliate_items):\n success_count = 0\n updater = mock.Mock()\n\n few_workers = BatchJob(affiliate_items, updater, workers=1)\n for result in few_workers.run():\n success_count += int(not result.is_error)\n\n many_workers = BatchJob(affiliate_items, updater, workers=4)\n for result in many_workers.run():\n success_count += int(not result.is_error)\n\n assert success_count == 8\n assert updater.call_count == 8",
"def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):\n for batch_i in range(0, len(sources)//batch_size):\n start_i = batch_i * batch_size\n\n # Slice the right amount for the batch\n sources_batch = sources[start_i:start_i + batch_size]\n targets_batch = targets[start_i:start_i + batch_size]\n\n # Pad\n pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))\n pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))\n\n # Need the lengths for the _lengths parameters\n pad_targets_lengths = []\n for target in pad_targets_batch:\n pad_targets_lengths.append(len(target))\n\n pad_source_lengths = []\n for source in pad_sources_batch:\n pad_source_lengths.append(len(source))\n\n yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths",
"def sequences_to_kmer_counts(sequences, kmer_size):\n pfunc = functools.partial(sequence_to_kmer_freqs, kmer_size=kmer_size, normalize=False)\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n kmer_freqs = pool.map(pfunc, sequences, chunksize=250)\n pool.close()\n pool.join()\n mat = np.vstack(kmer_freqs).astype(int)\n colnames = ['' for _ in range(len(generate_all_kmers(kmer_size)))]\n for kmer, i in generate_all_kmers(kmer_size).items():\n colnames[i] = kmer\n retval = pd.DataFrame(mat, columns=colnames)\n return retval",
"def _build_counter_sequence(events, bins):\n counter_sequence = []\n last_pos = 0\n for i in range(bins.shape[0]):\n timestamps = events[last_pos:]\n event_count = trace(timestamps, endtime=bins[i]).shape[0]\n if event_count > 0:\n event_count = int(ceilk(event_count, 10))\n counter_sequence.append(event_count)\n last_pos = last_pos + event_count\n\n return counter_sequence",
"def _shuffle_bin_counts(\r\n MAX_N_POSITION_BINS, position_unsynced, position_synced,\r\n frames_to_include, im_period, behav_period, true_starts,\r\n true_ends, transient_responses, n_bootstraps, n_processes,\r\n initial_counts):\r\n nROIs = len(true_starts[0])\r\n if n_processes > 1:\r\n pool = Pool(processes=n_processes)\r\n inputs = (true_starts, true_ends, transient_responses, position_unsynced,\r\n behav_period, position_synced, im_period, frames_to_include,\r\n nROIs, MAX_N_POSITION_BINS, initial_counts)\r\n if n_processes > 1:\r\n # chunksize = min(1 + n_bootstraps / n_processes, 200)\r\n chunksize = 1 + n_bootstraps / n_processes\r\n map_generator = pool.imap_unordered(\r\n _shuffler, it.repeat(inputs, n_bootstraps), chunksize=chunksize)\r\n else:\r\n map_generator = map(_shuffler, it.repeat(inputs, n_bootstraps))\r\n bootstrap_values = np.empty((nROIs, MAX_N_POSITION_BINS, n_bootstraps))\r\n bootstrap_counts = np.empty((nROIs, MAX_N_POSITION_BINS, n_bootstraps))\r\n bootstrap_idx = 0\r\n for values, counts in map_generator:\r\n bootstrap_values[:, :, bootstrap_idx] = values\r\n bootstrap_counts[:, :, bootstrap_idx] = counts\r\n bootstrap_idx += 1\r\n if n_processes > 1:\r\n pool.close()\r\n pool.join()\r\n return bootstrap_values, bootstrap_counts"
] |
[
"0.6463623",
"0.55326104",
"0.54898655",
"0.53560454",
"0.52667665",
"0.5244737",
"0.5173805",
"0.50776064",
"0.50258076",
"0.50120574",
"0.49834517",
"0.49575722",
"0.49538717",
"0.49265176",
"0.4919364",
"0.48957646",
"0.48888114",
"0.4872933",
"0.48429677",
"0.48294276",
"0.48083946",
"0.47730145",
"0.47684774",
"0.47606125",
"0.47375792",
"0.47226828",
"0.47197673",
"0.47187376",
"0.46949688",
"0.4687224"
] |
0.69355303
|
0
|
Perform k-joining of batches.
|
def join(self, batches: List[Batch], outpath: str) -> None:
if self.threads == 1:
super().join(batches, outpath)
else:
self.__parallel_join(batches, outpath)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __parallel_join(self, recordBatches: List[Batch], outpath: str) -> None:\n kwargs = self._pre_join(outpath)\n\n batcher = SeqCountBatcher.from_parent(self, self.batch_size)\n batcher.doSort = self.doSort\n print(\"Intermediate batching...\")\n batcher.do(recordBatches)\n print(\"Joining...\")\n batcher.join(self.join_function, **kwargs)\n\n self._post_join(**kwargs)",
"def join(self, batches: List[Batch], outpath: str) -> None:\n kwargs = self._pre_join(outpath)\n\n crawler = Crawler()\n print(\"Joining...\")\n for batch in crawler.do_batch(batches):\n self.join_function(*batch, **kwargs)\n\n self._post_join(**kwargs)",
"def join_on_k_closest(\n coords_1_iterator,\n coords_2_iterator,\n k,\n result_queue=None\n):\n assert k >= 1\n coords_1_list = list(coords_1_iterator)\n coords_2_list = list(coords_2_iterator)\n # heapq.pop() returns the smallest item so we use the opposite of the\n # distance function\n def dfn(c1_, c2_): return -gm.gc_dist_coords_deg(c1_, c2_)\n c1i = None\n for c1i, c1 in enumerate(coords_1_list, 1):\n items = []\n coords = []\n for c2 in coords_2_list:\n d = dfn(c1, c2)\n # Sorting by this item effectively means sorting by the first element\n # and as a bonus we keep a reference to the coords to which \"d\"\n # applies.\n item = (d, c2) \n if len(items) < k:\n heapq.heappush(items, item)\n elif len(items) == k:\n if item > items[0]:\n heapq.heapreplace(items, item)\n assert len(items) <= k\n for item in items:\n yield (c1, item[1])\n if result_queue and c1i % 25 == 0:\n result_queue.put({'type': 'PROGRESS', 'payload': '%s/%s' % (c1i, len(coords_1_list))})\n if result_queue and c1i is not None:\n result_queue.put({'type': 'PROGRESS', 'payload': '%s/%s' % (c1i, len(coords_1_list))})",
"def create_k_context(self):\n if self.k == 0:\n self.contextdata = self.data\n\n if self.contextdata is None:\n print(\"Start creating k-context Parallel\")\n\n with mp.Pool(mp.cpu_count()) as p:\n result = p.map(self.create_k_context_trace, self.data.groupby([self.trace]))\n self.contextdata = pd.concat(result, ignore_index=True)",
"def batch_split(self, batch_text, threads=8):\n pass",
"def generate_k_triplet(embeddeds, targets, K=2, B=2):\n batch_len = embeddeds.size(0)\n\n dis_mat = pairwise_distances(embeddeds).cpu().data.numpy()\n\n anchor, positive, negative = [], [], []\n\n ts = targets.reshape(-1).cpu().data.numpy()\n\n for i in range(batch_len):\n incls_id = np.where(ts == ts[i])[0]\n incls = dis_mat[i][incls_id]\n\n outcls_id = np.where(ts != ts[i])[0]\n outcls = dis_mat[i][outcls_id]\n\n incls_closeK = np.argsort(incls)[1:1 + K]\n outcls_closeB = np.argsort(outcls)[0:B]\n\n if len(incls_closeK) == 0 or len(outcls_closeB) == 0:\n continue\n\n an = embeddeds[i].unsqueeze(0)\n for c in incls_closeK:\n for o in outcls_closeB:\n anchor.append(an)\n positive.append(embeddeds[incls_id[c]].unsqueeze(0))\n negative.append(embeddeds[outcls_id[o]].unsqueeze(0))\n try:\n anchor = torch.cat(anchor, 0)\n positive = torch.cat(positive, 0)\n negative = torch.cat(negative, 0)\n except RuntimeError:\n print(dis_mat)\n print(anchor)\n print(positive)\n print(negative)\n print(targets)\n return anchor, positive, negative",
"def train_k():\n\n for i in range(n_families):\n print('[training-k] family:', i, '...')\n f1_fam_name = 'f1_fam_' + str(i) + '_features'\n feature_mat_fam1 = mask_features(f1_fam_name)\n n_samples = len(feature_mat_fam1)\n # use 80% of samples as training set\n feature_mat_fam1 = feature_mat_fam1[int(n_samples / 5):, :]\n my_k, my_result = train_with_inner_outer(feature_mat_fam1, 0)\n\n save_name = '/f1_fam_' + str(i) + '_validation'\n with open(saved_cluster_path + save_name, 'wb') as fp:\n pickle.dump(my_result, fp)",
"def join(self):\n pass",
"def get_batches(self, k=5):\n indexes = [x for x in range(len(self))]\n np.random.shuffle(indexes)\n s = 0\n size = int(np.ceil(len(indexes) / k))\n batches = []\n while s < len(indexes):\n batches += [indexes[s:s + size]]\n s = s + size\n return batches",
"def run(self):\n self.cc_log(\"INFO\", \"Data Processing Join: Started\")\n if self.left_joinon and isinstance(self.left_joinon, str): self.left_joinon = [self.left_joinon]\n if self.right_joinon and isinstance(self.right_joinon, str): self.right_joinon = [self.right_joinon]\n\n # Create the B-Tree for quick and easy search\n b_tree = genBTree(self.joinwith, self.left_joinon)\n\n json_fr = json_file_reader(self.src)\n json_fw = json_file_writer(self.target)\n\n # Loop through all the left table\n failed_counter = 0\n while not json_fr.isEOF():\n data = json_fr.readRecord()\n key = keyGen(self.right_joinon, data)\n (data, b_tree, failed_counter) = self.join(b_tree, key, data, failed_counter)\n json_fw.writeRecord(data)\n\n json_fr.close()\n json_fw.close()\n self.cc_log(\"INFO\", \"%i (btree) & %i (keyerror) records could not be mached\" % (len(b_tree), failed_counter))\n self.cc_log(\"INFO\", \"Data Processing Join: Finished\")\n return True",
"def combine_batches(chosen_dict):\n\n batches = set(sorted(chosen_dict.keys())) - {'meta_data'}\n batches = sorted(list(batches))\n root_dict = dict()\n root_dict['data'] = chosen_dict[batches[0]]['data']\n root_dict['labels'] = chosen_dict[batches[0]]['labels']\n root_dict['filenames'] = chosen_dict[batches[0]]['filenames']\n root_dict['meta_data'] = chosen_dict['meta_data']\n root_dict['meta_data'].append(batches[0])\n\n for curr_batch in batches[1:]:\n temp_dict = chosen_dict[curr_batch]\n root_dict['data'] = np.concatenate((root_dict['data'],\n temp_dict['data']),\n axis=0)\n root_dict['labels'] = root_dict['labels'] + temp_dict['labels']\n root_dict['filenames'] = root_dict['filenames'] + temp_dict['filenames']\n root_dict['meta_data'].append(curr_batch)\n\n tot_rows = root_dict['data'].shape[0]\n new_order = range(tot_rows)\n for _ in range(5):\n shuffle(new_order)\n\n ub_dict = dict()\n ub_data = np.zeros((tot_rows, 3072), dtype=root_dict['data'].dtype)\n ub_labels = [0] * tot_rows\n ub_filenames = [\"\"] * tot_rows\n\n for ctr, idx in enumerate(new_order):\n ub_data[ctr, :] = root_dict['data'][idx, :]\n ub_labels[ctr] = root_dict['labels'][idx]\n ub_filenames[ctr] = root_dict['filenames'][idx]\n\n ub_dict['data'] = ub_data\n ub_dict['labels'] = ub_labels\n ub_dict['filenames'] = ub_filenames\n ub_dict['meta_data'] = root_dict['meta_data']\n\n return ub_dict",
"def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds",
"def join(self):\n for t in self.created_list:\n t.join()",
"def k_random_subsets(x, y, k):\n if k > len(y):\n raise Exception(\n \"Cannot split a dataset into more folds than it has rows.\")\n if k < 2:\n raise Exception(\"Cannot split a dataset into fewer than 2 fold.\")\n # Randomly shuffle dataset\n y = [[i] for i in y]\n z = np.append(x, y, axis=1)\n np.random.seed(0)\n np.random.shuffle(z)\n x = z[:, :-1]\n y = z[:, -1]\n # Create k equally sized subsets from the randomly sorted dataset\n subset_size = int(len(y) / k)\n remainder = len(y) - (subset_size * k)\n folds_x = list()\n folds_y = list()\n start = 0\n end = subset_size\n for i in range(k):\n fold_x = list(x[start:end])\n fold_y = list(y[start:end])\n folds_x.append(fold_x)\n folds_y.append(fold_y)\n start += subset_size\n end += subset_size\n\n for i in range(remainder):\n folds_x[i].append(x[-i])\n folds_y[i].append(y[-i])\n\n folds_x = np.array(folds_x).astype(np.int)\n folds_y = np.array(folds_y)\n return folds_x, folds_y",
"def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]",
"def create_k_context(self):\n print(\"Create k-context:\", self.k)\n\n if self.k == 0:\n self.contextdata = self.data\n\n if self.contextdata is None:\n with mp.Pool(mp.cpu_count()) as p:\n result = p.map(self.create_k_context_trace, self.data.groupby([self.trace]))\n self.contextdata = pd.concat(result, ignore_index=True)",
"def run_klds(window):\n Fk, nfreq = get_fft(window)\n for size in range(50):\n print size\n dom_fk, dom_fq = get_dom_freq(Fk, nfreq, size)\n entropy = kld(Fk, nfreq, dom_fk, dom_fq)\n print(entropy)",
"def join_kadaster_bag_info(kadaster_gdf, bag_gdf):\n return gpd.sjoin(bag_gdf, kadaster_gdf, op=\"within\")",
"def labelRDDs(targ_driv, path, sc, k=200):\n try:\n full_path = path + '/' + 'driver_' + targ_driv + '.csv'\n #print full_path\n target = sc.textFile(path + '/' + 'driver_' + targ_driv + '.csv') #load target driver's data\n target2 = target.map(lambda x: x.split(',')) #convert from string to list of strings\n positives = processRDD(target2, 1.0) #label target driver's RDD\n driv_lis = get_drivers(path) #get python list of all possible drivers to sample from\n #print driv_lis\n\n #generate random samples of drivers and tripIDs\n sampdriv, samptrip = random_samples(targ_driv, driv_lis, k)\n #generate RDD of random samples\n samples = sample_data(path, sampdriv, samptrip, sc)\n #print \"GETS HERE\"\n samplesRDD = ID_Data(targ_driv, samples, sc, k) #relabel samples to look like target driver's trips\n #print \"GETS HERE TOO\"\n negatives = processRDD(samplesRDD, 0.0) #label samples\n finalRDD = positives.union(negatives).cache() #join target driver and samples together\n return finalRDD\n except Exception as e:\n print e",
"def merge_images(sources, targets, opts, k=10):\n _, _, h, w = sources.shape\n row = int(np.sqrt(opts.batch_size))\n merged = np.zeros([3, row*h, row*w*2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n return merged.transpose(1, 2, 0)",
"def get_k_fold_data(ds, k=10):\n splits = ds.split(k)\n for i in range(k):\n yield (concatenate(splits[j] for j in range(k) if j != i), splits[i])",
"def LMBatchYielder(self,batch_size,d=\"train_train\"):\n data = None\n if d==\"train_train\":\n ending_cols = [\"tok_ending\"]\n data = self.train_train[[\"tok_context\"]+ending_cols].copy()\n elif d==\"train_val\":\n ending_cols = [\"tok_ending\"]\n data = self.train_val[[\"tok_context\"]+ending_cols].copy()\n elif d==\"val\":\n ending_cols = [\"tok_ending_1\",\"tok_ending_2\"]\n data = self.val[[\"rightending\",\"tok_context\"]+ending_cols].copy()\n elif d==\"test\":\n ending_cols = [\"tok_ending_1\",\"tok_ending_2\"]\n data = self.test[[\"rightending\",\"tok_context\"]+ending_cols].copy()\n \n n_yields = int(np.ceil(len(data)/batch_size))\n log(\"Yielding {} '{}' batches\".format(n_yields,d))\n\n cont_list = [[[self.bom()]+s+[self.eom()] for s in cs]\n for cs in data.tok_context]\n data[\"tok_context_cont\"] = [[t for s in cs for t in s]\n for cs in cont_list]\n\n if not hasattr(self,\"max_targ_len\"):\n self.max_targ_len = 0\n \n for c in ending_cols:\n data[\"tok_\"+c] = [[self.bom()]+s+[self.eom()]\n for s in data[c]]\n max_targ_len = max(data[\"tok_\"+c].apply(len))\n self.max_targ_len = max(self.max_targ_len,max_targ_len)\n\n if not hasattr(self,\"max_cont_len\"):\n self.max_cont_len = 0\n max_cont_len = max(data[\"tok_context_cont\"].apply(len))\n self.max_cont_len = max(max_cont_len,self.max_cont_len)\n \n # print(self.max_cont_len,self.max_targ_len)\n \n for i in range(n_yields):\n chunk = data.iloc[i*batch_size:(i+1)*batch_size]\n b_size = len(chunk)\n\n cont_len = np.array([len(c) for c in chunk[\"tok_context_cont\"]])\n assert cont_len.shape == (b_size,)\n \n cont_padded = [self.pad_sequence(c,self.max_cont_len)\n for c in chunk[\"tok_context_cont\"]]\n context = np.array(cont_padded)\n assert context.shape == (b_size,self.max_cont_len)\n \n if len(ending_cols) == 1: # train\n targ_len = np.array([len(s) for s in chunk[\"tok_\"+ending_cols[0]]])\n targ_padded = [self.pad_sequence(s,self.max_targ_len)\n for s in chunk[\"tok_\"+ending_cols[0]]]\n target = np.array(targ_padded)\n # if target.shape != (b_size,self.max_targ_len): embed()\n assert target.shape == (b_size,self.max_targ_len)\n \n b = Batch(\n context_seq=context,\n context_len=cont_len,\n target_seq=target,\n target_len=targ_len,\n ids=chunk.index,\n size=b_size)\n \n elif len(ending_cols) == 2:\n targ1_len = np.array([len(s) for s in chunk[\"tok_\"+ending_cols[0]]])\n targ1_padded = [self.pad_sequence(s,self.max_targ_len)\n for s in chunk[\"tok_\"+ending_cols[0]]]\n target1 = np.array(targ1_padded)\n # if target.shape != (b_size,self.max_targ_len): embed()\n assert target1.shape == (b_size,self.max_targ_len)\n\n targ2_len = np.array([len(s) for s in chunk[\"tok_\"+ending_cols[1]]])\n targ2_padded = [self.pad_sequence(s,self.max_targ_len)\n for s in chunk[\"tok_\"+ending_cols[1]]]\n target2 = np.array(targ2_padded)\n # if target.shape != (b_size,self.max_targ_len): embed()\n assert target2.shape == (b_size,self.max_targ_len)\n\n # Right ending\n right_ending = chunk[\"rightending\"]\n right_target = np.array(\n [t1 if i == 1 else t2\n for i,t1,t2 in zip(right_ending,target1,target2)])\n right_target_len = np.array(\n [l1 if i == 1 else l2\n for i,l1,l2 in zip(right_ending,targ1_len,targ2_len)])\n wrong_target = np.array(\n [t1 if i == 2 else t2\n for i,t1,t2 in zip(right_ending,target1,target2)])\n wrong_target_len = np.array(\n [l1 if i == 2 else l2\n for i,l1,l2 in zip(right_ending,targ1_len,targ2_len)])\n \n b = Batch(\n context_seq=context,\n context_len=cont_len,\n right_target_seq=right_target,\n 
right_target_len=right_target_len,\n wrong_target_seq=wrong_target,\n wrong_target_len=wrong_target_len,\n target1_seq=target1,\n target1_len=targ1_len,\n target2_seq=target2,\n target2_len=targ2_len,\n size=b_size,\n rightending=right_ending.as_matrix(),\n ids=right_ending.index\n )\n \n yield b",
"def dummy_join_fastq(inputs, outputs, log_files, batch_size=10000, gap=20, **kwargs):\n if gap >= 0:\n gap_seq = \"N\" * gap\n gap_qual = [0] * gap\n\n counts = {\n \"join_count\": 0,\n \"fwd_count\": 0,\n \"rev_count\": 0,\n \"total_joined\": 0,\n \"total_written\": 0,\n }\n faked_joins = []\n\n fwd_records = SeqIO.parse(get_file_name(inputs.fwd), \"fastq\")\n rev_records = SeqIO.parse(get_file_name(inputs.rev), \"fastq\")\n with open(get_file_name(outputs), \"w\") as out_fastq_stream:\n for frec, rrec in merge_record_iters(fwd_records, rev_records, **kwargs):\n # join seqs\n new_records = []\n if frec is None and rrec is None:\n logger.warning(\"Both ends missing from input\") # this shouldn't\n continue\n if frec is None:\n logger.debug(\"Forward seq trimmed to oblivion\")\n new_records.append(rev_comp_rec(rrec, qual=True, suffix=\".rev\"))\n counts[\"rev_count\"] += 1\n elif rrec is None:\n logger.debug(\"Reverse seq trimmed to oblivion\")\n new_records.append(frec)\n counts[\"fwd_count\"] += 1\n elif gap >= 0:\n counts[\"join_count\"] += 1\n # join sequence\n new_seq = (\n frec.seq\n + Seq.Seq(gap_seq, frec.seq.alphabet)\n + rrec.seq.reverse_complement()\n )\n new_record = SeqRecord.SeqRecord(\n new_seq, id=frec.id, name=frec.name, description=\"Faked join\"\n )\n # join quality\n new_record.letter_annotations[\"phred_quality\"] = (\n frec.letter_annotations[\"phred_quality\"]\n + gap_qual\n + list(reversed(rrec.letter_annotations[\"phred_quality\"]))\n )\n new_records.append(new_record)\n else:\n # gap < 0 means don't join...add separately\n new_records.append(frec)\n new_records.append(rev_comp_rec(rrec, qual=True, suffix=\".rev\"))\n\n faked_joins.extend(new_records)\n if len(faked_joins) >= batch_size:\n n_written = SeqIO.write(faked_joins, out_fastq_stream, format=\"fastq\")\n if n_written != len(faked_joins):\n logger.warning(\n \"Only %d of %d faked joins written!\"\n % (n_written, len(faked_joins))\n )\n counts[\"total_joined\"] += len(faked_joins)\n counts[\"total_written\"] += n_written\n del faked_joins[:]\n\n # at end of loop, write remaining cached records\n n_written = SeqIO.write(faked_joins, out_fastq_stream, format=\"fastq\")\n if n_written != len(faked_joins):\n logger.warning(\n \"Only %d of %d faked joins written!\" % (n_written, len(faked_joins))\n )\n counts[\"total_joined\"] += len(faked_joins)\n counts[\"total_written\"] += n_written\n\n # Report some counts\n msg = \"\"\"\n#======================\n# Faked joins\n# Total written: {total_written} of {total_joined}\n# Dummy Joins: {join_count}\n# FwdOnly: {fwd_count}\n# RevOnly: {rev_count}\n#======================\n\"\"\".format(\n **counts\n )\n\n with open(log_files[0], \"a\") as log_out_stream:\n log_out_stream.write(msg)\n\n logger.debug(msg)",
"def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()",
"def kmeans_006():\n n_centroids_vals = [1000, 2000, 2500, 3000]\n scores = []\n\n for n_centroids in n_centroids_vals:\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n logger.info(\"Training with n_centroids {}\".format(n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_006_centroids_{}'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_006_centroids_{}.npy'.format(n_centroids), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_centroids, wrapper.cv_scores)\n logger.info(\"Scores: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()",
"def _kshape(x, k, n_init=1, max_iter=100, n_jobs = 1, random_state=None,normalize=True ):\r\n #print \"n jobs run in parallel: \" + str(cpu_count() ) \r\n random_state = check_random_state(random_state)\r\n best_tot_dist,best_centroids,best_idx = None,None,None\r\n \r\n if n_jobs ==1:\r\n\r\n for i_init in range(n_init): \r\n # n_init is the number of random starting points\r\n # pdb.set_trace()\r\n \r\n idx, centroids,tot_dist = _kshape_single(x, k, max_iter=max_iter, random_state= random_state,normalize=normalize) \r\n if best_tot_dist is None or tot_dist < best_tot_dist:\r\n best_idx = idx.copy()\r\n best_centroids = centroids.copy()\r\n best_tot_dist = tot_dist\r\n else: # n_jobs not =1 # if -1, all CPUs are used\r\n # parallelisation of kshape runs\r\n seeds = random_state.randint(np.iinfo(np.int32).max,size=n_init)\r\n results = Parallel(n_jobs=n_jobs, verbose=0)(\r\n delayed(_kshape_single)(x,k,max_iter=max_iter, random_state=seed, normalize=normalize)\r\n for seed in seeds )\r\n # Get results with the lowest distances\r\n idx, centroids,tot_dist, iterations = zip(*results)\r\n best = np.argmin(tot_dist) \r\n best_idx = idx[best]\r\n best_centroids = centroids[best]\r\n best_tot_dist = tot_dist[best]\r\n sys.stdout.write(\"Done: k=\"+str(k)+\"\\n\")\r\n return {'centroids':best_centroids, 'labels':best_idx, 'distance':best_tot_dist,'centroids_all':centroids,'labels_all':idx,'distance_all':tot_dist,'iterations':iterations}",
"def run(self):\n first_index, last_index = \\\n self.get_initial_blocks()\n while last_index - first_index > self.block_size:\n first_index, last_index = \\\n self.join_blocks(first_index, last_index)\n self.merge_blocks(self.output_file_name, first_index, last_index)",
"def kmeans(bboxes, k, dist=np.median):\n rows = bboxes.shape[0]\n\n distances = np.empty((rows, k))\n last_clusters = np.zeros((rows,))\n\n np.random.seed()\n\n # the Forgy method will fail if the whole array contains the same rows\n clusters = bboxes[np.random.choice(rows, k, replace=False)]\n\n while True:\n for row in range(rows):\n distances[row] = 1 - iou(bboxes[row], clusters)\n\n nearest_clusters = np.argmin(distances, axis=1)\n\n if (last_clusters == nearest_clusters).all():\n break\n\n for cluster in range(k):\n clusters[cluster] = dist(bboxes[nearest_clusters == cluster], axis=0)\n\n last_clusters = nearest_clusters\n\n return clusters",
"def preprocess_multicluster(adj, parts, features, y_train, train_mask, num_clusters, block_size, diag_lambda=1):\n features_batches = []\n support_batches = []\n y_train_batches = []\n train_mask_batches = []\n total_nnz = 0\n np.random.shuffle(parts)\n\n for _, st in enumerate(range(0, num_clusters, block_size)):\n pt = parts[st]\n for pt_idx in range(st + 1, min(st + block_size, num_clusters)):\n pt = np.concatenate((pt, parts[pt_idx]), axis=0)\n features_batches.append(features[pt, :])\n y_train_batches.append(y_train[pt, :])\n support_now = adj[pt, :][:, pt]\n support_batches.append(sparse_to_tuple(normalize_adj_diag_enhance(support_now, diag_lambda=diag_lambda)))\n total_nnz += support_now.count_nonzero()\n\n train_pt = []\n for newidx, idx in enumerate(pt):\n if train_mask[idx]:\n train_pt.append(newidx)\n train_mask_batches.append(sample_mask(train_pt, len(pt)))\n\n return features_batches, support_batches, y_train_batches, train_mask_batches",
"def train_and_test_k_fold(\n ds, prd, k=10, comm=config.comm, online=False, classes=None, parallel_test=False,\n cycles_per_barrier=10):\n train_and_test = lambda tr, te: train_and_test_once(\n tr, te, prd, comm=comm, online=online, classes=classes, parallel_test=parallel_test,\n cycles_per_barrier=cycles_per_barrier)\n\n if k <= 0:\n raise ValueError(\"k must be positive\")\n elif k == 1:\n splits = ds.split(10)\n train = concatenate(splits[j] for j in range(9))\n test = splits[9]\n return train_and_test(train, test)\n else:\n r = null_training_result()\n for train, test in get_k_fold_data(ds, k=k):\n r += train_and_test(train, test)\n comm.barrier()\n\n return r"
] |
[
"0.6566977",
"0.60989094",
"0.57576424",
"0.540203",
"0.5401272",
"0.53988075",
"0.53314203",
"0.5316356",
"0.5256726",
"0.52000463",
"0.5192977",
"0.5165509",
"0.5111428",
"0.509478",
"0.5040657",
"0.5036986",
"0.50123674",
"0.50091857",
"0.5002618",
"0.49993095",
"0.49986637",
"0.49838126",
"0.49728113",
"0.49699464",
"0.49373904",
"0.49292436",
"0.49216002",
"0.49151888",
"0.49135882",
"0.4891239"
] |
0.6234955
|
1
|
Start batching the records: batch the batch.Batch records of seq.Sequence subclasses into batch.Batch instances of seq.SequenceCounts.
|
def do(self, recordBatch: List[Batch]) -> None:
batchList = [
recordBatch[i : min(len(recordBatch), i + self.n_batches)]
for i in range(0, len(recordBatch), self.n_batches)
]
batches = Parallel(n_jobs=self.threads, verbose=11)(
delayed(SeqCountBatcher.build_batch)(
batchedRecords, self.type, self.tmp, self.doSort
)
for batchedRecords in batchList
)
self.feed_collection(batches, self.FEED_MODE.REPLACE)
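
A minimal standalone sketch of the same chunk-and-dispatch pattern, assuming joblib is installed; build_counts, the (header, sequence) tuples, and the chunk size are illustrative stand-ins for SeqCountBatcher.build_batch, the Batch records, and self.n_batches, not the library's actual API.

from collections import Counter
from typing import List, Tuple

from joblib import Parallel, delayed


def build_counts(records: List[Tuple[str, str]]) -> Counter:
    # Count identical sequences within one chunk of (header, sequence) records.
    return Counter(seq for _, seq in records)


def batch_records(records: List[Tuple[str, str]], chunk_size: int, threads: int) -> List[Counter]:
    # Split the record list into fixed-size chunks and hand each chunk to a worker.
    chunks = [records[i:i + chunk_size] for i in range(0, len(records), chunk_size)]
    return Parallel(n_jobs=threads)(delayed(build_counts)(chunk) for chunk in chunks)


if __name__ == "__main__":
    data = [("r1", "ACGT"), ("r2", "ACGT"), ("r3", "TTTT"), ("r4", "ACGT")]
    print(batch_records(data, chunk_size=2, threads=2))

As in the method above, the slicing step only groups records; all real work happens inside the per-chunk worker, which keeps the parallel payload coarse-grained.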
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def batch_start(self, batch_idx, batch_data):\n self.batch = batch_idx",
"def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return",
"def on_batch_begin(self, batch, logs=None):",
"def RecordBatches(\n self, options: dataset_options.RecordBatchesOptions\n ) -> Iterator[pa.RecordBatch]:",
"def on_batch_start(self):\n for callback in self.callbacks:\n callback.on_batch_start(self, self.get_model())",
"def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result",
"def batch(self, batch):\n\n self._batch = batch",
"def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch",
"def _defineBatches(self):\n # extract all ids\n all_keys = list(self.data_dict.unique_ids)\n\n # randomly shuffle keys\n if self.random_shuffle_batches:\n random.shuffle(all_keys)\n\n # create batches based on number of batches\n if self.n_big_batches is not None:\n self.n_big_batches += 1\n # define cuts for batches\n cuts = np.linspace(0, self.n_observations,\n self.n_big_batches).round()\n # create batches based on batch size\n elif self.batch_size is not None:\n cuts = [x for x in range(0, self.n_observations,\n int(self.batch_size))]\n if cuts[-1] < self.n_observations:\n cuts.append(self.n_observations)\n\n # convert batch sizes to integers\n cuts = [int(x) for x in cuts]\n\n # save batches into dictionary\n batches = dict()\n for i in range(0, (len(cuts) - 1)):\n # create DataBatch object\n current_batch = DataBatch(ids=all_keys[cuts[i]:cuts[i+1]],\n batch_id=i)\n current_batch.setDiskStoragePath(self.disk_scratch)\n batches[i] = current_batch\n\n # save batches\n self.n_batches = len(batches.keys())\n self.batches = batches",
"def init_batch(self):\n pass",
"def on_test_batch_begin(self, batch, logs=None):",
"def run(self, batch_size=20):\n logging.info('%s: Starting.'% (self.__class__.__name__))\n deferred.defer(self._continue, None, batch_size, _queue=self.QUEUE)",
"def start_of_batch_hook(self, progress, logging_epoch):\n pass",
"def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n for callback in self.callbacks:\n callback.on_train_batch_start(self, self.get_model(), batch, batch_idx, dataloader_idx)",
"def on_train_batch_begin(self, batch, logs=None):\n # For backwards compatibility\n self.on_batch_begin(batch, logs=logs)",
"def do_records(self, batches: List[Batch]) -> Iterator[Tuple[str, str]]:\n if any(type(b) not in [Batch, BatchAppendable] for b in batches):\n raise AssertionError()\n\n if self.doSort:\n generators = [\n ((str(r.header), str(r.seq)) for r in b.sorted(self.doSmart))\n for b in batches\n if type(None) != type(b)\n ]\n\n else:\n generators = [\n ((str(r.header), str(r.seq)) for r in b.record_gen(self.doSmart))\n for b in batches\n if type(None) is not type(b)\n ]\n\n yield from merge(*generators, key=lambda x: x[1])",
"def do_batch(self, batches: List[Batch]) -> Iterator[Tuple[List[str], str]]:\n crawler = self.do_records(batches)\n\n try:\n first_record = next(crawler)\n except StopIteration:\n logging.error(\"nothing to crawl\")\n return\n\n current_seq = first_record[1]\n current_headers = [first_record[0]]\n\n crawler = (\n tqdm(crawler, initial=1, desc=self.desc, total=self.count_records(batches))\n if self.verbose\n else crawler\n )\n\n for record in crawler:\n if current_seq == record[1]:\n current_headers.append(record[0])\n else:\n yield (current_headers, current_seq)\n current_seq = record[1]\n current_headers = [record[0]]\n\n yield (current_headers, current_seq)",
"def _process(self, start_key, batch_size):\n\n query = self.MODEL.all()\n if start_key:\n query.filter('__key__ > ', start_key)\n\n try:\n entities = query.fetch(batch_size)\n\n if not entities:\n # all entities has already been processed\n return\n\n for entity in entities:\n try:\n self._processEntity(entity)\n except db.Error, e:\n import logging\n logging.exception(e)\n logging.error(\"Broke on %s: %s\" % (entity.key().name(), self.MODEL))\n\n # process the next batch of entities\n start_key = entities[-1].key()\n deferred.defer(self._process, start_key, batch_size)\n except DeadlineExceededError:\n # here we should probably be more careful\n deferred.defer(self._process, start_key, batch_size)",
"def _process(self, start_key, batch_size):\n\n query = StudentProject.all()\n if start_key:\n query.filter('__key__ > ', start_key)\n\n try:\n entities = query.fetch(batch_size)\n\n if not entities:\n # all entities has already been processed\n return\n\n for entity in entities:\n try:\n self._processEntity(entity)\n except db.Error, e:\n import logging\n logging.exception(e)\n logging.error(\"Broke on %s: %s\" % (entity.key().name(), self.MODEL))\n\n # process the next batch of entities\n start_key = entities[-1].key()\n deferred.defer(self._process, start_key, batch_size)\n except DeadlineExceededError:\n # here we should probably be more careful\n deferred.defer(self._process, start_key, batch_size)",
"def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch",
"def val_batch_start(self, batch_idx, batch_data):\n self.val_batch = batch_idx",
"def init_stream_yield_batches(batches):\n should_write_start_length = True\n\n for batch in batches:\n if should_write_start_length:\n write_int(SpecialLengths.START_ARROW_STREAM, stream)\n should_write_start_length = False\n\n yield batch",
"def on_test_batch_start(self, batch, batch_idx, dataloader_idx):\n for callback in self.callbacks:\n callback.on_test_batch_start(self, self.get_model(), batch, batch_idx, dataloader_idx)",
"def produce_query_batches(self):\n pass",
"def _batcher(self, rows):\n row_count = 0\n batch = []\n batch_count = 1\n\n total_rows_modified = 0\n throttle_count = 0\n\n i = 0\n for row in rows:\n if row_count > self.batch_size - 1:\n logger.debug(f\"row_count={row_count} batch_size={self.batch_size} and batch={len(batch)}\")\n # Yield the previous batch\n yield batch\n\n # Start the new batch\n batch = []\n batch.append(row)\n row_count = 1\n\n batch_count += 1\n # break # toggle to load one batch only\n else:\n row_count += 1\n batch.append(row)\n\n # Put in a sleep timer to throttle how hard we hit the database\n if self.throttle_time and self.throttle_size and (throttle_count > self.throttle_size - 1):\n logger.info(f\"Sleeping for {self.throttle_time} seconds... row: {i}\")\n time.sleep(int(self.throttle_time))\n throttle_count = 0\n elif self.throttle_time and self.throttle_size:\n throttle_count += 1\n i += 1\n\n yield batch",
"def on_train_batch_begin(self, step, logs=None):",
"def create_batches(self, batch_size: int, repeat: bool, drop_last: bool, device: Device) -> None:\n self.repeat = repeat\n\n # Work out how cleanly we can divide the dataset into batch-sized parts\n num_batched_steps = self.indexed_corpus.shape[0] // batch_size\n\n # Trim off any extra elements that wouldn't cleanly fit (remainders)\n self.indexed_corpus = self.indexed_corpus.narrow(0, 0, num_batched_steps * batch_size)\n\n # Evenly divide the data across the bsz batches.\n raw_batches = self.indexed_corpus.view(batch_size, -1).t().contiguous().to(device)\n\n # If the last batch would be too short and drop_last is true, remove it\n if num_batched_steps % self.seq_len > 0 and drop_last:\n num_batched_steps -= num_batched_steps % self.seq_len\n\n self.num_batches = math.ceil(num_batched_steps / self.seq_len)\n\n self.batches = [raw_batches[n * self.seq_len: (n + 1) * self.seq_len + 1, :] for n in range(self.num_batches)]",
"def load_batch(self):\r\n\r\n #if we've seen all the data, start again with them in a new random order\r\n if self.batchcounter+self.batchsize > self.num_data:\r\n self.batchcounter = 0\r\n self.epochs += 1\r\n self._permutation = np.random.permutation(self.num_data)\r\n\r\n this_perm = self._permutation[self.batchcounter:self.batchcounter+self.batchsize]\r\n\r\n self.X_batch = self.X[this_perm]\r\n self.likelihood.set_data(self.Y[this_perm])\r\n if self.has_uncertain_inputs:\r\n self.X_variance_batch = self.X_variance[this_perm]\r\n\r\n self.batchcounter += self.batchsize\r\n\r\n self.data_prop = float(self.batchsize)/self.num_data\r\n\r\n self._compute_kernel_matrices()\r\n self._computations()",
"def on_train_batch_begin(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass",
"def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)"
] |
[
"0.70635146",
"0.672973",
"0.6591983",
"0.65474814",
"0.6227351",
"0.6181943",
"0.616198",
"0.61579263",
"0.6150017",
"0.61381686",
"0.60892963",
"0.605244",
"0.60366535",
"0.60299593",
"0.60185724",
"0.60083634",
"0.5928345",
"0.58852",
"0.5877978",
"0.58668387",
"0.5854245",
"0.58458245",
"0.5801181",
"0.5775594",
"0.57524276",
"0.5751992",
"0.57339656",
"0.5722982",
"0.57152283",
"0.5701539"
] |
0.67457694
|
1
|
In this part, you need to try different distance functions you implemented in part 1.1 and different values of k (among 1, 3, 5, ..., 29), and find the best model with the highest F1 score on the given validation set.
|
def tuning_without_scaling(self, distance_funcs, x_train, y_train, x_val, y_val):
best_f1 = 0
for name, func in distance_funcs.items():
for k in range(1, 30, 2):
model = KNN(k, func)
model.train(x_train, y_train)
valid_f1 = f1_score(y_val, model.predict(x_val))
if valid_f1 > best_f1:
self.best_distance_function = name
self.best_k = k
best_f1 = valid_f1
self.best_model = model
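
A self-contained sketch of the same validation-set grid search, assuming scikit-learn as a stand-in for the course scaffolding: KNeighborsClassifier and its metric argument replace the custom KNN class and distance_funcs, and f1_score comes from sklearn.metrics.

from sklearn.metrics import f1_score
from sklearn.neighbors import KNeighborsClassifier


def tune_knn(x_train, y_train, x_val, y_val, metrics=("euclidean", "manhattan")):
    # Try every metric with every odd k in [1, 29]; keep whichever model
    # scores the highest F1 on the held-out validation set.
    best = {"f1": -1.0, "k": None, "metric": None, "model": None}
    for metric in metrics:
        for k in range(1, 30, 2):
            model = KNeighborsClassifier(n_neighbors=k, metric=metric)
            model.fit(x_train, y_train)
            f1 = f1_score(y_val, model.predict(x_val))
            if f1 > best["f1"]:
                best = {"f1": f1, "k": k, "metric": metric, "model": model}
    return best

Because the comparison is a strict greater-than, ties keep the earlier distance function and the smaller k, mirroring the loop order in the method above.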
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n best_k = 0\n best_score = 0.0\n for k in range(min_k, max_k+1, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n preds = knn.predict(X_test)\n f1 = f1_score(y_test, preds)\n if f1 > best_score:\n best_k = k\n best_score = f1\n print(\"Best Value for k: {}\".format(best_k))\n print(\"F1-Score: {}\".format(best_score))",
"def tuning_without_scaling(self, distance_funcs, x_train, y_train, x_val, y_val):\n self.best_k = None\n self.best_distance_function = None\n self.best_model = None\n bestf = float(\"-inf\")\n \n for d in distance_funcs:\n for k in range(1,min(len(x_train),30),2):\n knnmodel = KNN(k, distance_funcs[d])\n knnmodel.train(x_train, y_train)\n pred = knnmodel.predict(x_val)\n f1 = f1_score(y_val,pred)\n if f1 > bestf:\n bestk = k\n bestf = f1\n bestd = d\n bestmodel = knnmodel\n \n # You need to assign the final values to these variables\n self.best_k = bestk\n self.best_distance_function = bestd\n self.best_model = bestmodel\n #raise NotImplementedError",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n \"*** YOUR CODE HERE ***\"\n\t#create dictionary of all features for each label\n dict = {}\n for feature in self.features:\n\t\tfor label in self.legalLabels:\n\t\t\tdict[feature, label] = util.Counter()\n\t\t\tfor i in [0,1]: #values of a counter from datum\n\t\t\t\tdict[(feature, label)][i] = 0\n\t\t\t\t#print str(feature) + str(label) + ' ' + str(dict[(feature, label)])\n labelCount = util.Counter()\n for i in range(len(trainingData)):\n\t\t#increment occurrences of each label found in the training data\n\t\tlabel = trainingLabels[i]\n\t\tlabelCount[label] += 1\n\t\tfor feature in trainingData[i]:\n\t\t\t#increment dictionary value by 1 when a feature label combination with a value is found\n\t\t\tdict[(feature, label)][trainingData[i][feature]] += 1\n #normalize labelCount to get P(y) for each label y, or the prior probability \n self.prior = util.normalize(labelCount)\n\t\n bestk = 0\n bestcond = {}\n topguesses = 0\n\t#iterate through each k to find the best k\n for k in kgrid:\n\t\t#empty cond probs\n\t\tself.condprobs = {} \n\t\t#smooth data\n\t\tfor feature_label in dict:\n\t\t\ttmpcounter = dict[feature_label] \n\t\t\t#print feature_label\n\t\t\ttmpcounter.incrementAll(tmpcounter.keys(), k)\n\t\t\t#set condprobs to cond probs with current k value\n\t\t\tself.condprobs[feature_label] = util.normalize(tmpcounter)\n\t\tguesses = self.classify(validationData)\n\t\tguesscorrect = 0\n\t\t#print[guesses]\n\t\tfor i in range(len(guesses)):\n\t\t\tif guesses[i] == validationLabels[i]:\n\t\t\t\tguesscorrect += 1\n\t\tif guesscorrect > topguesses:\n\t\t\tprint \"Guess \",k ,\" is better than \",bestk\n\t\t\ttopguesses = guesscorrect\n\t\t\tbestcond = self.condprobs\n\t\t\tbestk = k\n self.condprobs = bestcond\n self.k = bestk",
"def find_best_k(x_train, y_train, ks):\n params = {'n_neighbors': ks}\n knn = neighbors.KNeighborsRegressor()\n model = GridSearchCV(knn, params, cv=5)\n model.fit(x_train, y_train)\n best_k = model.best_params_\n return best_k",
"def try_latent_topics_intro_model(k):\n highest_f1 = 0\n print \"start time: {}\".format(datetime.now())\n print \"using {} latent topics\".format(k)\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = topic_features\n X_train, y_train = prep.subset(features)\n print \"regular data prep complete\"\n print topic_features\n\n\n rf = RandomForestClassifier()\n gb = GradientBoostingClassifier()\n\n mc = ModelChooser([rf, gb])\n mc.fit_predict(X_train, y_train)\n mc.print_results()\n\n for i, score in enumerate(mc.f1_scores):\n if score > highest_f1:\n highest_f1 = score\n best_n_latent_features = k\n if i == 0:\n best_model_type = \"Random Forest\"\n else:\n best_model_type = \"Gradient Booster\"\n\n\n print \"end time: {}\".format(datetime.now())\n print \"-\"*10\n results = \"f1 score was {} with {} latent features on {} model\".format(highest_f1, best_n_latent_features, best_model_type)\n print results\n return results",
"def crossValidation(training, k, performance):\n\n predictions = []\n accuracy = []\n\n for index in range(1, 6):\n # print index\n temp = list(range(1, 6))\n temp.remove(index)\n # print 'index: ' + str(index) + ', temp: ' + str(temp)\n\n for x in range(len(training.get_group(index))):\n if x % 100 != 0:\n continue\n target = training.get_group(index).values.tolist()[x][-1]\n # if x % 500 == 0:\n # print 'index: ' + str(index) + ', x: ' + str(x)\n neighbors = []\n distances = []\n for validationSet in temp:\n getDistances(training.get_group(validationSet).values.tolist(), training.get_group(index).values.tolist()[x], distances)\n # Sort the distances list by the distance\n distances.sort(key = lambda item: item[1])\n # print distances\n # Select first k closest elements to return as the neighbors\n for x in range(k):\n neighbors.append(distances[x][0])\n\n result=getResponse(neighbors)\n\n # print distances\n # print neighbors\n # print result\n predictions.append(result)\n # print 'result: ' + str(result)\n # print 'target: ' + str(target)\n # print 'result == target: ' + str(result == target)\n if result == target:\n accuracy.append((True, target))\n else:\n accuracy.append((False, target))\n\n count = 0\n for item in accuracy:\n if item[0] == True:\n count += 1\n\n # print 'number of instances: ' + str(len(accuracy)) + ' number correct: ' + str(count)\n\n count = 0\n for item in accuracy:\n if item[0] == True:\n count += 1\n\n # Add the current k-value and its accuracy for this run to dictionary\n performance[k] = count / len(accuracy)\n\n print performance\n return performance",
"def tuning_with_scaling(self, distance_funcs, scaling_classes, x_train, y_train, x_val, y_val):\n \n # You need to assign the final values to these variables\n best_f1 = 0\n for scaling_name, scaling_func in scaling_classes.items():\n scaler = scaling_func()\n x_train_scaled = scaler(x_train)\n x_val_scaled = scaler(x_val)\n for name, func in distance_funcs.items():\n for k in range(1, 30, 2):\n model = KNN(k, func)\n model.train(x_train_scaled, y_train)\n valid_f1 = f1_score(y_val, model.predict(x_val_scaled))\n if valid_f1 > best_f1:\n self.best_distance_function = name\n self.best_k = k\n best_f1 = valid_f1\n self.best_model = model\n self.best_scaler = scaling_name",
"def kNN_train(self, x_train, y_train, x_test, k = 5, processing = None, distMethod = \"Manhattan\"):\n y_test = list()\n\n if processing == \"Scalar\":\n # print(\"Preprocessing = Scalar\")\n stdScalar = preprocessing.StandardScaler().fit(x_train)\n x_train = stdScalar.transform(x_train)\n x_test = stdScalar.transform(x_test)\n\n elif processing == \"MinMax\":\n\n # print(\"Preprocessing = MinMax\")\n mmScalar = preprocessing.MinMaxScaler()\n x_train = mmScalar.fit_transform(x_train)\n x_test = mmScalar.fit_transform(x_test)\n\n elif processing == \"None\":\n self.true = True\n # print(\"No Preprocessing\")\n\n else:\n print(\"wrong processing\")\n exit()\n\n for i in range(0, len(x_test)):\n y_test_temp = list()\n zeroCount = 0\n oneCount = 0\n\n # find distance of a instance in test test to all instances in training set\n for j in range(0, len(x_train)):\n if distMethod == \"Manhattan\":\n y_test_temp.append(self.manhattan(x_train[j], x_test[i]))\n elif distMethod == \"Euclidean\":\n y_test_temp.append(self.euclidean(x_train[j], x_test[i]))\n else:\n print \"something wrong with distance calculation\"\n exit()\n\n # take indices of k nearest points\n # print y_test_temp\n temp = np.asarray(y_test_temp).argsort()[:k]\n # check class of each of k nearest points\n for tmp in temp:\n if y_train[tmp] == 0:\n zeroCount += 1\n elif y_train[tmp] == 1:\n oneCount += 1\n else:\n print(\"something wrong in counting\")\n\n # classify\n if zeroCount >= oneCount:\n y_test.append(int(0))\n elif oneCount > zeroCount:\n y_test.append(int(1))\n else:\n print(\"somethign wrong\")\n\n # print y_test\n return y_test",
"def find_best_k_and_metric(data):\n metrics_and_scores = []\n possible_metrics = [euclidean_distance, manhattan_distance, hamming_distance, cosine_distance]\n for k in range(1, len(data)):\n for metric in possible_metrics:\n cross_validation_score = cross_validate(data, k, metric)\n metrics_and_scores.append([k, metric, cross_validation_score])\n sorted_metrics = sorted(metrics_and_scores, key = lambda item:item[2])\n return (sorted_metrics[-1][0], sorted_metrics[-1][1])",
"def tuning_with_scaling(self, distance_funcs, scaling_classes, x_train, y_train, x_val, y_val):\n self.best_k = None\n self.best_distance_function = None\n self.best_model = None\n bestf = float(\"-inf\")\n self.best_scaler = None\n \n for s in scaling_classes.keys():\n if s == \"min_max_scale\":\n minmax_scaler = MinMaxScaler()\n x_t = minmax_scaler(x_train)\n x_v = minmax_scaler(x_val)\n else:\n normal_scaler = NormalizationScaler()\n x_t = normal_scaler(x_train)\n x_v = normal_scaler(x_val)\n for d in distance_funcs.keys():\n for k in range(1,min(len(x_train),30),2):\n knnmodel = KNN(k, distance_funcs[d])\n knnmodel.train(x_t, y_train)\n pred = knnmodel.predict(x_v)\n f1 = f1_score(y_val,pred)\n if f1>bestf:\n bestk = k\n bestf = f1\n bestd = d\n bests = s\n bestmodel = knnmodel\n \n \n # You need to assign the final values to these variables\n self.best_k = bestk\n self.best_distance_function = bestd\n self.best_scaler = bests\n self.best_model = bestmodel\n #raise NotImplementedError",
"def get_k_best(data_dict, features_list, k):\r\n data = featureFormat(data_dict, features_list)\r\n labels, features = targetFeatureSplit(data)\r\n k_best = SelectKBest(k=k)\r\n k_best.fit(features, labels)\r\n scores = k_best.scores_\r\n print(scores)\r\n unsorted_pairs = zip(features_list[1:], scores)\r\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\r\n k_best_features = dict(sorted_pairs[:k])\r\n print (\"{0} best features: {1}\\n\".format(k, k_best_features.keys(), scores))\r\n return k_best_features",
"def get_k_best(data_dict, features_list, k):\n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data)\n\n k_best = SelectKBest(k=k)\n k_best.fit(features, labels)\n scores = k_best.scores_\n print(scores)\n unsorted_pairs = zip(features_list[1:], scores)\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\n k_best_features = dict(sorted_pairs[:k])\n print (\"{0} best features: {1}\\n\".format(k, k_best_features.keys(), scores))\n return k_best_features",
"def select_knn_model(trainset, valset, trainlabel, vallabel, metric='minkowski'):\r\n train_accuracy, train_error, val_accuracy, val_error = [], [], [], []\r\n best_acc = 0\r\n\r\n for i in range(1, 21):\r\n neigh = KNeighborsClassifier(metric=metric, n_neighbors=i)\r\n neigh.fit(trainset, trainlabel)\r\n\r\n # compute the mean training errors and accuracy\r\n train_acc = neigh.score(trainset, trainlabel)\r\n train_accuracy.append(train_acc)\r\n train_error.append(1-train_acc)\r\n\r\n # compute the mean validation errors and accuracy\r\n val_acc = neigh.score(valset, vallabel)\r\n val_accuracy.append(val_acc)\r\n val_error.append(1 - val_acc)\r\n\r\n # find the model with the highest validation accuracy to tune the hyperparameter k\r\n if best_acc <= val_acc:\r\n best_acc, best_k = val_acc, i\r\n\r\n # plot the training and validation accuracy for each k\r\n plt.plot(range(1, 21), train_accuracy, label='training with metric=' + metric)\r\n plt.plot(range(1, 21), val_accuracy, label='validation with metric=' + metric)\r\n plt.xlabel('k - number of nearest neighbour')\r\n plt.ylabel('accuracy')\r\n plt.legend()\r\n plt.show()\r\n\r\n return best_k, train_error, val_error",
"def main():\n # Read in trainingSet and testSet as a DataFrame\n trainingOriginal = pd.read_csv(\n filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.train.5fold.csv\")\n testOriginal = pd.read_csv(filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.test.csv\")\n\n # Select only the numeric data\n training = pd.DataFrame(trainingOriginal.select_dtypes(['number']))\n training = pd.concat([training.reset_index(drop=True),\n trainingOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Select only the numeric data\n test = pd.DataFrame(testOriginal.select_dtypes(['number']))\n test = pd.concat([test.reset_index(drop=True),\n testOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Normalize the columns for training and test\n # print training['age'].min()\n # print training['age'].max()\n # print training.head()\n\n # Run max-min normalization on numerical columns for testing and training data\n for i in range(6):\n training.iloc[:, i] = (training.iloc[:, i]- training.iloc[:, i].min())/(training.iloc[:, i].max() - training.iloc[:, i].min())\n test.iloc[:, i] = (test.iloc[:, i]- test.iloc[:, i].min())/(test.iloc[:, i].max() - test.iloc[:, i].min())\n\n # Convert the 'earns' column to boolean as follows\n training['earns'] = training['earns'] == '>50K'\n test['earns'] = test['earns'] == ' >50K'\n\n # Group the training set by the fold attribute as given by the dataset\n trainingForFinal = training\n training = training.groupby('fold')\n\n # Since we want to consider odd k-values from 1 to 39, construct a list with these values\n kList = []\n for i in range(40):\n if i % 2 == 1:\n kList.append(i)\n\n # Empty dictionary to hold performance of each k-values and its accuracy\n performance = {}\n\n # Compute the performance for each k-value\n for k in kList:\n performance = crossValidation(training, k, performance)\n\n # Sort the performance dictionary by its accuracy (value)\n performance = sorted(performance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Open file to write results\n file = open('grid.results.txt', 'w')\n # Write the results to file\n file.write(\"K | Accuracy\\n\")\n for item in performance:\n if item[0] < 10:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n else:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n # Close file\n file.close()\n\n # The best K is the one at the top of the list after the sorting\n bestK = performance[0][0]\n\n print 'Running Test Set with K = ' + str(bestK)\n\n applyModel(test,trainingForFinal,bestK)",
"def get_best_k_cv(air_quality_model):\n\n locations = air_quality_model.air_quality_locations\n time_series = air_quality_model.air_quality_time_series\n\n for each_location in locations:\n\n other_locations = [i for i in locations if i != each_location]\n training_time_series = time_series[other_locations]\n scaled_training_time_series = air_quality_model.scaler.transform(training_time_series)\n training_time_series_dropna = scaled_training_time_series.dropna().T\n\n # k means determine k\n distortions = []\n K = range(1, len(other_locations) + 1, 1)\n for k in K:\n kmeans = KMeans(n_clusters=k, max_iter=300).fit(training_time_series_dropna)\n # err = sum(np.min(cdist(training_time_series_dropna, kmeans.cluster_centers_, 'euclidean'), axis=1)) \\\n # / training_time_series_dropna.shape[0]\n\n # Sum of squared distances of samples to their closest cluster center\n err = kmeans.inertia_\n distortions.append(err)\n print(k, dict(zip(other_locations, kmeans.labels_)))\n print(each_location, k, 'err=', err)\n\n # Plot the elbow\n plt.figure(figsize=(15, 20))\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title(str(each_location) + ' The Elbow Method showing the optimal k')\n plt.show()",
"def get_k_best(data_dict, features_list, k):\n \n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data) \n\n k_best = SelectKBest(k=k).fit(features, labels)\n scores = k_best.scores_\n pairs = zip(scores, features_list[1:])\n pairs.sort(reverse = True)\n pairs_sorted = [(v2,v1) for v1,v2 in pairs]\n k_best_features = dict(pairs_sorted[:k])\n pprint(pairs_sorted)\n return k_best_features",
"def forward(self, kp1: Tensor, kp2: Tensor, weights: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:\n self.validate_inputs(kp1, kp2, weights)\n best_score_total: float = float(self.minimal_sample_size)\n num_tc: int = len(kp1)\n best_model_total = zeros(3, 3, dtype=kp1.dtype, device=kp1.device)\n inliers_best_total: Tensor = zeros(num_tc, 1, device=kp1.device, dtype=torch.bool)\n for i in range(self.max_iter):\n # Sample minimal samples in batch to estimate models\n idxs = self.sample(self.minimal_sample_size, num_tc, self.batch_size, kp1.device)\n kp1_sampled = kp1[idxs]\n kp2_sampled = kp2[idxs]\n\n kp1_sampled, kp2_sampled = self.remove_bad_samples(kp1_sampled, kp2_sampled)\n if len(kp1_sampled) == 0:\n continue\n # Estimate models\n models = self.estimate_model_from_minsample(kp1_sampled, kp2_sampled)\n models = self.remove_bad_models(models)\n if (models is None) or (len(models) == 0):\n continue\n # Score the models and select the best one\n model, inliers, model_score = self.verify(kp1, kp2, models, self.inl_th)\n # Store far-the-best model and (optionally) do a local optimization\n if model_score > best_score_total:\n # Local optimization\n for lo_step in range(self.max_lo_iters):\n model_lo = self.polish_model(kp1, kp2, inliers)\n if (model_lo is None) or (len(model_lo) == 0):\n continue\n _, inliers_lo, score_lo = self.verify(kp1, kp2, model_lo, self.inl_th)\n # print (f\"Orig score = {best_model_score}, LO score = {score_lo} TC={num_tc}\")\n if score_lo > model_score:\n model = model_lo.clone()[0]\n inliers = inliers_lo.clone()\n model_score = score_lo\n else:\n break\n # Now storing the best model\n best_model_total = model.clone()\n inliers_best_total = inliers.clone()\n best_score_total = model_score\n\n # Should we already stop?\n new_max_iter = int(\n self.max_samples_by_conf(int(best_score_total), num_tc, self.minimal_sample_size, self.confidence)\n )\n # print (f\"New max_iter = {new_max_iter}\")\n # Stop estimation, if the model is very good\n if (i + 1) * self.batch_size >= new_max_iter:\n break\n # local optimization with all inliers for better precision\n return best_model_total, inliers_best_total",
"def get_k_best(data_dict, features_list, k):\n\n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data)\n\n k_best = SelectKBest(k='all')\n k_best.fit(features, labels)\n scores = k_best.scores_\n unsorted_pairs = zip(features_list[1:], scores)\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\n k_best_features = dict(sorted_pairs[:k])\n return k_best_features",
"def test_k_models(param_dict, features, classes, cross_val=4):\r\n assert type(param_dict) == dict\r\n model = GridSearchCV(KNeighborsClassifier(), param_dict, cv=cross_val)\r\n model.fit(features, classes)\r\n return list(model.best_params_.values())[0]",
"def getBestFittedModel( models, features ):\r\n\r\n\tvalidModels = []\r\n\tclusteringScores = []\r\n\tfor model in models:\r\n\t\t#Skip mono cluster models\r\n\t\tif st.getNbClusters( model ) < 2: continue\r\n\t\tvalidModels.append( model )\r\n\t\tlabels = model.labels_\r\n\t\tclusteringScore = evaluateClusters(features, labels)\r\n\t\tclusteringScores.append( clusteringScore)\r\n\tif len(clusteringScores) == 0: return False, -1\r\n\tbestScoreIndex = np.argmax(clusteringScores)\r\n\treturn validModels[bestScoreIndex], clusteringScores[bestScoreIndex]",
"def kbest_matches(self, k=1):\n self.align(k=k)\n # if k is None:\n # return [SSMatch(best_idx, self) for best_idx in range(len(self.distances))]\n # if self.keep_all_distances:\n # best_idxs = np.argpartition(self.distances, k)\n # return [SSMatch(best_idx, self) for best_idx in best_idxs[:k]]\n # distances = reversed(sorted(self.h))\n # return [SSMatch(best_idx, self) for dist, best_idx in distances]\n return SSMatches(self)",
"def run_knn(k, train_data, train_labels, valid_data):\n\n dist = l2_distance(valid_data.T, train_data.T)\n nearest = np.argsort(dist, axis=1)[:,:k]\n\n train_labels = train_labels.reshape(-1)\n valid_labels = train_labels[nearest]\n\n # note this only works for binary labels\n valid_labels = (np.mean(valid_labels, axis=1) >= 0.5).astype(np.int)\n valid_labels = valid_labels.reshape(-1,1)\n\n return valid_labels",
"def find_best_rmse(name, x_train, y_train, x_test, y_test, k_max=23, metric='euclidean', plot=True, debug=False):\n \"\"\"\n :param k_max: maximum number of k to test for\n :param plot: True if matplotlib plot should be created\n :param debug: print additional info about accuracy test results\n :return: best k and best accuracy for given training and test set with features (k < k_max)\n \"\"\"\n\n # Go through all k between k=1 and k=k_max-1 and find best_k and best_a\n # rsmes = np.zeros(k_max) # Write rsmes for each k into here for plot to work...\n\n rmse_val = [] # to store rmse values for different k\n for k in range(1, k_max):\n model = neighbors.KNeighborsRegressor(n_neighbors=k, metric=metric)\n model.fit(x_train, y_train) # fit the model\n pred = model.predict(x_test) # make prediction on test set\n rsme = sqrt(mean_squared_error(y_test, pred)) # calculate rmse\n\n if k == 1:\n best_rmse = rsme\n best_k = k\n elif rsme < best_rmse:\n best_rmse = rsme\n best_k = k\n\n rmse_val.append(rsme) # store rmse values\n if debug:\n print('RMSE value for k=', k, 'is:', rsme)\n\n if plot:\n t = range(1, k_max)\n plt.plot(t, rmse_val[0:k_max - 1], '--', label=name)\n plt.xticks(t)\n plt.xlabel('# neighbours (k)')\n plt.ylabel('Root Mean Squared Error')\n plt.scatter(best_k, best_rmse)\n plt.legend()\n return best_rmse, best_k",
"def knn(k, Xtrain, Ytrain, Xtest):\n d = euclidean_distances(Xtest, Xtrain, squared=True)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtest.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)",
"def crossValidate(x_training_data, y_training_data, test_size_percentage, k_values):\n data_length = len(x_training_data)\n foldSize = int(round(data_length * test_size_percentage)) # size of each temporary test data\n groups = int(data_length/foldSize + 1) # # of groups\n\n best_score = 0\n best_k = 0\n\n for k in k_values: # Test different values of k\n score = 0\n for i in range(0, data_length, foldSize): # Switch section of test data\n \n x_temp_test = x_training_data[i:i+foldSize] # get temporary data to test\n known_y_test = y_training_data[i:i+foldSize] # we already know their labels\n x_temp_training = np.append(x_training_data[0:i], x_training_data[i+foldSize:], axis=0) # the rest is our temporary training data\n y_temp_training = np.append(y_training_data[0:i], y_training_data[i+foldSize:], axis=0)\n\n y_temp_test = knnForAll(x_temp_training, y_temp_training, x_temp_test, k) # labels determined for a current k value\n\n for i in range(len(known_y_test)): # Score how well this value of k did based \n if y_temp_test[i] == known_y_test[i]: # on how well it matches the known labels\n score += 1\n\n print(\"k:\",k,\"-- % correct: \",\"{:0.2f}\".format(score/data_length)) # print accuracy for training data \n if score > best_score: # Choose the best k value up to this point\n best_score = score\n best_k = k\n\n return best_k",
"def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class",
"def K_Nearest_Neighbours_Model(train_features, train_labels, k_value=5, algorithm_auto=\"auto\"):\n # create an instance of the KNN SciKit learn class\n model = KNeighborsClassifier(n_neighbors=k_value, algorithm=algorithm_auto)\n # fit the model to the training data and labels\n model.fit(train_features, train_labels.values.ravel())\n # return the .fit() model\n return model",
"def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions",
"def kNN(self, u_eval, v_compares):\n max_sim = [] # [(tag, sim) ... ]\n\n for v_comp in v_compares:\n cosine_sim = self.cosine_sim(u_eval, v_comp)\n\n if cosine_sim > MIN_COS_SINE:\n # add vector tag and cos sim: (tag, sim)\n max_sim.append((self.get_vector(v_comp).tag, cosine_sim))\n\n # sort cosine similarity\n # [('SPORT', 0.2), ('ART', 0.60), ('ART', 0.13)]\n # [('ART', 0.13), ('SPORT', 0.2), ('ART', 0.60)]\n max_sim.sort(key=lambda tag_nb: tag_nb[1]) \n\n # return the k-nearest neighbor only\n # [('ART', 0.13), ('SPORT', 0.2), ('ART', 0.60)]\n # if K_ITEM = 2 \n # [('SPORT', 0.2), ('ART', 0.60)]\n return max_sim[-K_ITEM:]",
"def fit(self, X, y, k_features):\n self._X = X\n self._y = y\n self._best_features = []\n\n self._k_features = k_features\n argmax = [0] * (self._k_features + 1)\n k = 0\n\n while k < self._k_features:\n\n significance_score, i = self._most_significant(self._best_features) # calls SFS\n self._best_features.append(i)\n\n if k < 2:\n k += 1\n argmax[k] = significance_score\n else:\n significance_score_r, r = self._least_significant(self._best_features) # calls SBS\n if r == i:\n k += 1\n argmax[k] = significance_score\n else:\n features = self._best_features.copy()\n features.remove(r)\n if significance_score_r > argmax[k]:\n if k == 2:\n argmax[k] = significance_score_r\n k += 1\n else:\n stop = False\n while not stop:\n significance_score_s, s = self._least_significant(features)\n if significance_score_s <= argmax[k - 1]:\n self._best_features = features.copy()\n argmax[k] = significance_score_r\n stop = True\n else:\n features.remove(s)\n k -= 1\n if k == 2:\n self._best_features = features.copy()\n stop = True\n else:\n k += 1\n argmax[k] = significance_score\n\n print(\"Iteration: \", k, \"Best Features: \", self._best_features)"
] |
[
"0.7222877",
"0.6900722",
"0.6710727",
"0.66608876",
"0.6550627",
"0.65291595",
"0.65116113",
"0.64901376",
"0.64568204",
"0.644303",
"0.6424459",
"0.6413164",
"0.63255215",
"0.62936574",
"0.6276326",
"0.6232303",
"0.62189966",
"0.62153566",
"0.6194645",
"0.61713016",
"0.6168225",
"0.61413145",
"0.6129585",
"0.6124124",
"0.6115218",
"0.61079615",
"0.6057559",
"0.60378844",
"0.60341066",
"0.6029705"
] |
0.7077753
|
1
|
This part is the same as "tuning_without_scaling", except that you also need to try two different scalers implemented in Part 1.3. More specifically, before passing the training and validation data to the KNN model, apply the scalers in scaling_classes to both of them.
|
def tuning_with_scaling(self, distance_funcs, scaling_classes, x_train, y_train, x_val, y_val):
# You need to assign the final values to these variables
best_f1 = 0
for scaling_name, scaling_func in scaling_classes.items():
scaler = scaling_func()
x_train_scaled = scaler(x_train)
x_val_scaled = scaler(x_val)
for name, func in distance_funcs.items():
for k in range(1, 30, 2):
model = KNN(k, func)
model.train(x_train_scaled, y_train)
valid_f1 = f1_score(y_val, model.predict(x_val_scaled))
if valid_f1 > best_f1:
self.best_distance_function = name
self.best_k = k
best_f1 = valid_f1
self.best_model = model
self.best_scaler = scaling_name
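
A sketch of the scaled variant under the same scikit-learn assumptions, with MinMaxScaler and Normalizer standing in for the scalers of scaling_classes; note that, unlike the callable course scalers, these stand-ins are fitted on the training split only and then applied to both splits.

from sklearn.metrics import f1_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, Normalizer


def tune_knn_scaled(x_train, y_train, x_val, y_val,
                    scalers=(("min_max_scale", MinMaxScaler), ("normalize", Normalizer)),
                    metrics=("euclidean", "manhattan")):
    # Scale first, then rerun the k/metric grid search on the transformed features.
    best = {"f1": -1.0, "k": None, "metric": None, "scaler": None, "model": None}
    for scaler_name, scaler_cls in scalers:
        scaler = scaler_cls().fit(x_train)          # fit on training data only
        xt, xv = scaler.transform(x_train), scaler.transform(x_val)
        for metric in metrics:
            for k in range(1, 30, 2):
                model = KNeighborsClassifier(n_neighbors=k, metric=metric)
                model.fit(xt, y_train)
                f1 = f1_score(y_val, model.predict(xv))
                if f1 > best["f1"]:
                    best = {"f1": f1, "k": k, "metric": metric,
                            "scaler": scaler_name, "model": model}
    return best
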
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tuning_with_scaling(self, distance_funcs, scaling_classes, x_train, y_train, x_val, y_val):\n self.best_k = None\n self.best_distance_function = None\n self.best_model = None\n bestf = float(\"-inf\")\n self.best_scaler = None\n \n for s in scaling_classes.keys():\n if s == \"min_max_scale\":\n minmax_scaler = MinMaxScaler()\n x_t = minmax_scaler(x_train)\n x_v = minmax_scaler(x_val)\n else:\n normal_scaler = NormalizationScaler()\n x_t = normal_scaler(x_train)\n x_v = normal_scaler(x_val)\n for d in distance_funcs.keys():\n for k in range(1,min(len(x_train),30),2):\n knnmodel = KNN(k, distance_funcs[d])\n knnmodel.train(x_t, y_train)\n pred = knnmodel.predict(x_v)\n f1 = f1_score(y_val,pred)\n if f1>bestf:\n bestk = k\n bestf = f1\n bestd = d\n bests = s\n bestmodel = knnmodel\n \n \n # You need to assign the final values to these variables\n self.best_k = bestk\n self.best_distance_function = bestd\n self.best_scaler = bests\n self.best_model = bestmodel\n #raise NotImplementedError",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n \"*** YOUR CODE HERE ***\"\n\t#create dictionary of all features for each label\n dict = {}\n for feature in self.features:\n\t\tfor label in self.legalLabels:\n\t\t\tdict[feature, label] = util.Counter()\n\t\t\tfor i in [0,1]: #values of a counter from datum\n\t\t\t\tdict[(feature, label)][i] = 0\n\t\t\t\t#print str(feature) + str(label) + ' ' + str(dict[(feature, label)])\n labelCount = util.Counter()\n for i in range(len(trainingData)):\n\t\t#increment occurrences of each label found in the training data\n\t\tlabel = trainingLabels[i]\n\t\tlabelCount[label] += 1\n\t\tfor feature in trainingData[i]:\n\t\t\t#increment dictionary value by 1 when a feature label combination with a value is found\n\t\t\tdict[(feature, label)][trainingData[i][feature]] += 1\n #normalize labelCount to get P(y) for each label y, or the prior probability \n self.prior = util.normalize(labelCount)\n\t\n bestk = 0\n bestcond = {}\n topguesses = 0\n\t#iterate through each k to find the best k\n for k in kgrid:\n\t\t#empty cond probs\n\t\tself.condprobs = {} \n\t\t#smooth data\n\t\tfor feature_label in dict:\n\t\t\ttmpcounter = dict[feature_label] \n\t\t\t#print feature_label\n\t\t\ttmpcounter.incrementAll(tmpcounter.keys(), k)\n\t\t\t#set condprobs to cond probs with current k value\n\t\t\tself.condprobs[feature_label] = util.normalize(tmpcounter)\n\t\tguesses = self.classify(validationData)\n\t\tguesscorrect = 0\n\t\t#print[guesses]\n\t\tfor i in range(len(guesses)):\n\t\t\tif guesses[i] == validationLabels[i]:\n\t\t\t\tguesscorrect += 1\n\t\tif guesscorrect > topguesses:\n\t\t\tprint \"Guess \",k ,\" is better than \",bestk\n\t\t\ttopguesses = guesscorrect\n\t\t\tbestcond = self.condprobs\n\t\t\tbestk = k\n self.condprobs = bestcond\n self.k = bestk",
"def set_scalers(self, df):\n print_info('Setting scalers with training data...')\n\n column_definitions = self.getcolumn_definition()\n id_column = get_single_col_by_input_type(InputTypes.ID, column_definitions)\n target_column = get_single_col_by_input_type(InputTypes.TARGET, column_definitions)\n\n # Format real scalers\n real_inputs = extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n # Initialise scaler caches\n self.real_scalers = {}\n self.target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n\n if len(sliced) >= self._time_steps:\n\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self.real_scalers[identifier] = sk_preprocessing.StandardScaler().fit(data)\n self.target_scaler[identifier] = sk_preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n\n # Format categorical scalers\n categorical_inputs = extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sk_preprocessing.LabelEncoder().fit(srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n\n # Extract identifiers in case required\n self.identifiers = identifiers",
"def scale(train, test):\n # fit scaler\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaler = scaler.fit(train)\n # transform train\n train = train.reshape(train.shape[0], train.shape[1])\n train_scaled = scaler.transform(train)\n # transform test\n test = test.reshape(test.shape[0], test.shape[1])\n test_scaled = scaler.transform(test)\n return scaler, train_scaled, test_scaled",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, Cgrid):\n \"*** YOUR CODE HERE ***\"\n\n #if predicted label is not equal to actual label\n num_errors = 0 \n \n #weights will be changed when checking if labels are equal to each other\n \n\n \n #traversing across the Cgrid to train each set across each value of c in Cgrid \n for c in Cgrid:\n updatedWeights = self.weights.copy()\n for iteration in range(self.max_iterations):\n \n print(\"Starting iteration \", iteration, \"..\")\n if iteration > 0:\n num_errors = 0\n\n for i in range(len(trainingData)):\n trainingUnit = trainingData[i].copy() #trainingUnit is one instance of training data at i\n #predLabel = self.classify(trainingUnit) #classifies data in order list of predicted label values\n #predictedLabel = predLabel[0] #extract predicted label where max is at first index\n realLabel = trainingLabels[i] #extract real label from training label in order to compare\n\n\n\n predY = 0\n predictedLabel = -1;\n for label in self.legalLabels:\n predLabel = trainingUnit * updatedWeights[label]\n if predictedLabel < predLabel or predictedLabel == -1:\n predictedLabel = predLabel\n predY = label\n\n tau = 0 \n \n #if predicted label is not equal to real label\n if predY != realLabel: \n feature = trainingUnit.copy() #extract feature of current training unit\n num_errors += 1 \n #t = ((wpred - wactual)*feature + 1.0)/(2 * feature * feature) = num/div \n num = updatedWeights[predY] - updatedWeights[realLabel]\n num = num * feature\n num += 1.0 \n \n\n div = (feature*feature)\n \n div += 2.0\n t = num/div\n \n tau = min(c,t)\n \n \n \n #for j in range(feature):\n for j in range(len(trainingData[i])):\n feature[j] = feature[j] * tau\n updatedWeights[realLabel] = updatedWeights[realLabel] + feature #wactual = wactual + tau*feature\n updatedWeights[predY] = updatedWeights[predY] - feature #wpred = wpred + tau*feature\n \n\n print(\"finished updating weights\")\n\n #determine guesses by classifying validation data\n guesses = self.classify(validationData)\n correct = 0\n bestAccuracy = None #no best accuracy rate yet\n\n #traverse over guesses, determine how many \n #answers were correct \n for i in range(len(guesses)):\n if guesses[i] == validationLabels[i]: #guess matches validation label\n correct += 1\n\n accuracy = correct / len(guesses) #determine percentage\n if(accuracy > bestAccuracy):\n bestAccuracy = accuracy\n\n self.weights = updatedWeights",
"def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def tuning_without_scaling(self, distance_funcs, x_train, y_train, x_val, y_val):\n \n best_f1 = 0\n for name, func in distance_funcs.items():\n for k in range(1, 30, 2):\n model = KNN(k, func)\n model.train(x_train, y_train)\n valid_f1 = f1_score(y_val, model.predict(x_val))\n if valid_f1 > best_f1:\n self.best_distance_function = name\n self.best_k = k\n best_f1 = valid_f1\n self.best_model = model",
"def set_scalers(self, df):\n print('Setting scalers with training data...')\n\n column_definitions = self.get_column_definition()\n id_column = utils.get_single_col_by_input_type(InputTypes.ID,\n column_definitions)\n target_column = utils.get_single_col_by_input_type(InputTypes.TARGET,\n column_definitions)\n\n # Extract identifiers in case required\n self.identifiers = list(df[id_column].unique())\n\n # Format real scalers\n real_inputs = utils.extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n data = df[real_inputs].values\n self._real_scalers = sklearn.preprocessing.StandardScaler().fit(data)\n self._target_scaler = sklearn.preprocessing.StandardScaler().fit(\n df[[target_column]].values) # used for predictions\n\n # Format categorical scalers\n categorical_inputs = utils.extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(\n srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes",
"def supervised_cost_scale(\n scale, loss_supervised, output_noise_labelled, labelled_target\n):\n cost_supervised = loss_supervised.forward(output_noise_labelled, labelled_target)\n\n cost_supervised *= scale\n return cost_supervised",
"def test_lr_scalers():\n # We include a cost other than SumOfParams so that data is actually\n # queried from the training set, and the expected number of updates\n # are applied.\n cost = SumOfCosts([SumOfParams(), (0., DummyCost())])\n\n scales = [.01, .02, .05, 1., 5.]\n shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]\n\n learning_rate = .001\n\n class ModelWithScalers(Model):\n def __init__(self):\n super(ModelWithScalers, self).__init__()\n self._params = [sharedX(np.zeros(shape)) for shape in shapes]\n self.input_space = VectorSpace(1)\n\n def __call__(self, X):\n # Implemented only so that DummyCost would work\n return X\n\n def get_lr_scalers(self):\n return dict(zip(self._params, scales))\n\n model = ModelWithScalers()\n\n dataset = ArangeDataset(1)\n\n sgd = SGD(cost=cost,\n learning_rate=learning_rate,\n learning_rule=Momentum(.0),\n batch_size=1)\n\n sgd.setup(model=model, dataset=dataset)\n\n manual = [param.get_value() for param in model.get_params()]\n manual = [param - learning_rate * scale for param, scale in\n zip(manual, scales)]\n\n sgd.train(dataset=dataset)\n\n assert all(np.allclose(manual_param, sgd_param.get_value())\n for manual_param, sgd_param\n in zip(manual, model.get_params()))\n\n manual = [param - learning_rate * scale\n for param, scale\n in zip(manual, scales)]\n\n sgd.train(dataset=dataset)\n\n assert all(np.allclose(manual_param, sgd_param.get_value())\n for manual_param, sgd_param\n in zip(manual, model.get_params()))",
"def standardization (x_train,x_test):\n scaler = StandardScaler()\n ## reshape training data to 2D, fit and transform scaler\n scaler.fit(np.reshape(x_train, [x_train.shape[0], x_train.shape[1]*x_train.shape[2]*x_train.shape[3]]))\n x_train = scaler.transform(np.reshape(x_train, [x_train.shape[0], x_train.shape[1]*x_train.shape[2]*x_train.shape[3]]))\n ## reshape training data to 3D (n * frequencyrate * number of channels)\n x_train = np.reshape(x_train, [x_train.shape[0],x_test.shape[1],x_test.shape[2],x_test.shape[3]])\n x_test = scaler.transform(np.reshape(x_test, [x_test.shape[0], x_test.shape[1]*x_test.shape[2]*x_test.shape[3]]))\n x_test = np.reshape(x_test,[x_test.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3]])\n return x_train, x_test, scaler",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n no_of_examples = len(trainingLabels)\n prior_prob = dict(Counter(trainingLabels))\n for key in prior_prob.keys():\n prior_prob[key] = prior_prob[key] / float(no_of_examples)\n\n self.prior_prob = prior_prob\n\n likelihoods = dict()\n for cls, prob in prior_prob.items():\n # initializing the dictionary\n likelihoods[cls] = defaultdict(list)\n\n for cls, prob in prior_prob.items():\n # taking samples of only 1 class at a time\n row_indices = list()\n for index, value in enumerate(trainingLabels):\n if value == cls:\n row_indices.append(index)\n\n subset = list()\n for index in row_indices:\n subset.append(trainingData[index])\n\n for r in range(len(subset)):\n for key, value in subset[r].items():\n likelihoods[cls][key].append(value)\n\n classes = [key for key in prior_prob]\n self.classes = classes\n _like = likelihoods\n for cls in classes:\n for key, value in likelihoods[cls].items():\n likelihoods[cls][key] = self._occurrences(likelihoods[cls][key])\n\n self.likelihoods = likelihoods\n\n # results = {}\n # correct = 0\n # for itr in range(len(validationData)):\n # for cls in classes:\n # class_probability = prior_prob[cls]\n # for key, value in validationData[itr].items():\n # relative_feature_values = likelihoods[cls][key]\n # class_probability *= relative_feature_values.get(validationData[itr][key], 0.01)\n #\n # results[cls] = class_probability\n #\n # norm_factor = 0.0\n #\n # for key, value in results.items():\n # norm_factor += value\n #\n # for key in results:\n # try:\n # results[key] = results[key]/norm_factor\n # except ZeroDivisionError:\n # pass\n #\n # if (list(results.keys())[list(results.values()).index(max([value for key, value in results.items()]))]) == validationLabels[itr]:\n # correct += 1\n #\n # print \"validation accuracy: {}%\".format((correct/float(len(validationLabels))) * 100)",
"def tune_classifier(trainX, trainy, devX, devy, cls):\n \n train_C = cls.C_[0]\n num_train_Cs = len(cls.Cs_)\n C_step_size = num_train_Cs / 5.0\n lower_C = 5.0\n upper_C = train_C\n dev_Cs = np.linspace(lower_C, upper_C, num=50, endpoint=False)\n train_accuracy = evaluate(devX, devy, cls, name='dev data')\n \n opt_cls = None\n opt_C = train_C\n max_accuracy = train_accuracy\n \n for C in dev_Cs:\n current_cls = train_classifier(trainX, trainy, Cs=[C])\n current_C = current_cls.C_[0] # sanity check\n current_accuracy = evaluate(devX, devy, current_cls, name='%f' % current_C)\n \n if current_accuracy >= max_accuracy:\n opt_cls = copy.deepcopy(current_cls)\n max_accuracy = current_accuracy\n opt_C = current_C\n \n cls = copy.deepcopy(opt_cls)\n \n return cls",
"def scale(options):\n\n # ONLY GCE is supported for scaling at this time\n cluster = gce_cluster_control(options)\n if options.test_k8s:\n k8s = k8s_control_test(options)\n else:\n k8s = k8s_control(options)\n\n slack_logger.addHandler(slack_handler(options.slack_token))\n if not options.slack_token:\n scale_logger.info(\n \"No message will be sent to slack, since there is no token provided\")\n\n scale_logger.info(\"Scaling on cluster %s\", k8s.get_cluster_name())\n\n nodes = [] # a list of nodes that are NOT critical\n for node in k8s.nodes:\n if node.metadata.name not in k8s.critical_node_names:\n nodes.append(node)\n\n # Shuffle the node list so that when there are multiple nodes\n # with same number of pods, they will be randomly picked to\n # be made unschedulable\n random.shuffle(nodes)\n\n # goal is the total number of nodes we want in the cluster\n goal = schedule_goal(k8s, options)\n\n scale_logger.info(\"Total nodes in the cluster: %i\", len(k8s.nodes))\n scale_logger.info(\n \"%i nodes are unschedulable at this time\", k8s.get_num_schedulable())\n scale_logger.info(\"Found %i critical nodes\",\n len(k8s.nodes) - len(nodes))\n scale_logger.info(\"Recommending total %i nodes for service\", goal)\n\n if confirm((\"Updating unschedulable flags to ensure %i nodes are unschedulable\" % max(len(k8s.nodes) - goal, 0))):\n update_unschedulable(max(len(k8s.nodes) - goal, 0), nodes, k8s)\n\n if goal > len(k8s.nodes):\n scale_logger.info(\n \"Resize the cluster to %i nodes to satisfy the demand\", goal)\n if options.test_cloud:\n resize_for_new_nodes_test(goal, k8s, cluster)\n else:\n slack_logger.info(\n \"Cluster resized to %i nodes to satisfy the demand\", goal)\n resize_for_new_nodes(goal, k8s, cluster)\n if options.test_cloud:\n shutdown_empty_nodes_test(nodes, k8s, cluster)\n else:\n # CRITICAL NODES SHOULD NOT BE SHUTDOWN\n shutdown_empty_nodes(nodes, k8s, cluster)",
"def tuning_without_scaling(self, distance_funcs, x_train, y_train, x_val, y_val):\n self.best_k = None\n self.best_distance_function = None\n self.best_model = None\n bestf = float(\"-inf\")\n \n for d in distance_funcs:\n for k in range(1,min(len(x_train),30),2):\n knnmodel = KNN(k, distance_funcs[d])\n knnmodel.train(x_train, y_train)\n pred = knnmodel.predict(x_val)\n f1 = f1_score(y_val,pred)\n if f1 > bestf:\n bestk = k\n bestf = f1\n bestd = d\n bestmodel = knnmodel\n \n # You need to assign the final values to these variables\n self.best_k = bestk\n self.best_distance_function = bestd\n self.best_model = bestmodel\n #raise NotImplementedError",
"def train(self, trainingData, trainingLabels, validationData, validationLabels):\n\n self.features = trainingData[0].keys() # this could be useful for your code later...\n\n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n\n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def scale_test_train(train, test=None, scale=QuantileTransformer):\n\n scaler = scale()\n train = Data(scaler.fit_transform(train.X), train.y)\n test = None if test is None else Data(scaler.transform(test.X), test.y)\n\n return train, test",
"def fit_model(x, y, model, data_configs, preprocessing_configs,\n time_sampling_configs, model_configs, training_configs,\n model_type='tuning'):\n\n err_msg = 'Error: model_type must be either tuning or final'\n assert any(x in model_type for x in ['tuning', 'final']), err_msg\n\n x = x.copy()\n\n ## REMOVE NON-MODELING COLS\n ##############################\n logging.info(' Removing non-modeling cols...')\n try:\n x.drop(labels=data_configs['remove_cols'], axis=1, inplace=True)\n except Exception as e:\n logging.exception('Failure dropping non-modeling cols')\n raise e\n\n ## CLASS IMBALANCE\n ##############################\n ## We test to see if the dataset even needs to be downsampled\n cur_ratio = sum(y) / sum(y == 0)\n ds_ratio = preprocessing_configs['downsample_ratio']\n\n if preprocessing_configs['do_downsampling'] and cur_ratio < ds_ratio:\n try:\n logging.info(' Downsampling to balance classes...')\n msg = ' Shape of data {} downsampling: {}'\n logging.info(msg.format('before', x.shape))\n\n rus = RandomUnderSampler(\n sampling_strategy=ds_ratio,\n random_state=model_configs['random_seed'])\n x_res, y_res = rus.fit_sample(x, y)\n\n ## Convert x_res back to DataFrame\n ## so that we can filter it using column names\n x_res = pd.DataFrame(x_res, columns=x.columns)\n logging.info(msg.format('after', x_res.shape))\n\n except Exception as e:\n logging.exception('Failure downsampling training data')\n raise e\n\n else:\n x_res, y_res = x, y\n\n ## FEATURE SELECTION\n ##############################\n if preprocessing_configs['do_feature_selection']:\n logging.info(' Selecting features with Lasso...')\n try:\n clf = Lasso(alpha=0.0005)\n clf.fit(x_res, y_res)\n\n model_cols = list(x.columns[clf.coef_ > 0])\n logging.info('\\tSelected {} columns'.format(len(model_cols)))\n\n ## If LightGBM is one of the models, then we need to update\n ## the list of categorical features\n if 'LightGBM' in model.keys():\n lightgbm_fit_params = training_configs['LightGBM']['fit_params']\n lightgbm_fit_params['categorical_feature'] = [\n col for col in lightgbm_fit_params['categorical_feature']\n if col in model_cols\n ]\n\n except Exception as e:\n logging.exception('Failure executing Lasso feature selection')\n raise e\n else:\n logging.info(' Skipping feature selection...')\n model_cols = x.columns\n\n ## TUNING MODEL\n ##############################\n if model_type == 'tuning':\n\n err_msg = \"If model_type='tuning' then model must be a dictionary\"\n assert isinstance(model, dict), err_msg\n\n ## OUT-OF-TIME SAMPLING\n ##############################\n ## If the model is for tuning hyperparameters\n ## then we do cross-validation using out-of-time\n ## sample. 
Otherwise we fit using either\n ## the provided or default hyperparameters\n logging.info(' Creating out-of-time sampling indices...')\n try:\n week_nums = x[data_configs['weeknum']].unique()\n\n ## Get the week numbers for each of the time slices\n time_slices = create_time_slices(\n weeks=week_nums,\n lookback=time_sampling_configs['max_lookback'],\n horizon=time_sampling_configs['pred_horizon'],\n gap=time_sampling_configs['pred_gap'],\n step_size=time_sampling_configs['step_size'],\n holdout_window=time_sampling_configs['holdout_window'],\n num_steps=time_sampling_configs['num_steps']\n )\n\n ## Get the row indices for each of the time slices\n cv_indices = get_cv_indices(\n x_res, data_configs['weeknum'], time_slices)\n except Exception as e:\n logging.exception('Failure creating time slices')\n raise e\n\n ## CROSS-VALIDATION\n ##############################\n results = {}\n logging.info(' Tuning hyper-parameters using out-of-time sampling...')\n for model_name in model.keys():\n try:\n train_params = training_configs[model_name]\n tuning_model = model[model_name]\n except KeyError as e:\n err_msg = 'Model {} not found in training configs'.format(model_name)\n logging.exception(err_msg)\n\n try:\n logging.info(' Tuning model {}...'.format(model_name))\n oots = outOfTimeTuner(tuning_model, train_params, 'auc')\n oots.tune_model(x_res[model_cols], y_res, cv_indices)\n # print(oots.get_results())\n best_results = oots.get_best_hyperparams()\n\n results[model_name] = best_results\n\n except Exception as e:\n logging.exception('Failure tuning hyper-parameters')\n raise e\n\n ## Here we select the best model\n best_results = select_best_results(results)\n logging.info(' Best model is {} using {} and has {} of {}'.format(\n best_results['model'],\n best_results['hyper_params'],\n 'mean_auc',\n best_results['mean_auc']))\n\n ## FIT FINAL TUNING MODEL\n ##############################\n logging.info(' Fitting final tuning model...')\n try:\n final_model = model[best_results['model']](**best_results['hyper_params'])\n final_model.fit(x[model_cols], y)\n except Exception as e:\n logging.exception('Failure fitting final model')\n raise e\n\n\n ## SINGLE MODEL\n ##############################\n elif model_type == 'final':\n try:\n tuning_results = model_configs['tuning_results']\n model_name = tuning_results['model']\n training_params = training_configs[model_name]\n model = model[model_name]\n\n final_model = model(\n **training_params['model_params'],\n **tuning_results['hyper_params'])\n final_model.fit(x[model_cols], y, **training_params['fit_params'])\n\n best_results = tuning_results.copy()\n best_results['tuning_auc'] = best_results.pop('mean_auc')\n\n except Exception as e:\n logging.exception('Failure fitting final model')\n raise e\n\n return final_model, best_results, list(model_cols),",
"def scale(self, X_train, X_test):\n\n #X_train, X_test, y_train, y_test = self.split_X_y_sets()\n self.scaler.fit(X_train)\n X_train_sc = self.scaler.transform(X_train)\n X_test_sc = self.scaler.transform(X_test)\n\n return X_train_sc, X_test_sc #, y_train, y_test",
"def train(self, trainingData, trainingLabels, validationData, validationLabels):\t \n\t \n\t# might be useful in your code later...\n\t# this is a list of all features in the training set.\n\tself.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\t\n\tif (self.automaticTuning):\n\t\tkgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n\telse:\n\t\tkgrid = [self.k]\n\t\t\n\tself.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def __init__(self, scales, downscale, weights=None, train_loss = 'smoothL1', test_loss='L1', mask=False):\n super(MultiScaleLoss, self).__init__()\n self.downscale = downscale\n self.mask = mask\n self.weights = torch.Tensor(scales).fill_(1).cuda() if weights is None else torch.Tensor(weights).cuda()\n assert(len(self.weights) == scales)\n if train_loss == 'smoothL1':\n self.train_loss = smoothL1\n elif train_loss == 'L1':\n self.train_loss = EPE\n else:\n raise NotImplementedError\n if type(test_loss) is str:\n\n if test_loss == 'L1':\n self.test_loss = nn.L1Loss()\n else:\n raise NotImplementedError\n else:\n self.test_loss = test_loss\n self.multiScales = [nn.AvgPool2d(self.downscale*(2**i), self.downscale*(2**i)) for i in range(scales)]\n\n print('self.multiScales: ', self.multiScales, ' self.downscale: ', self.downscale)",
"def training(train_data, dev_data, param):\n text_to_vec = TextToVec(**param)\n\n # Fit with both train and dev data\n text_to_vec.fit(train_data['data'] + dev_data['data'])\n word_vec_map = text_to_vec.vectorizer.get_feature_names()\n train_vec = text_to_vec.transform(train_data['data'])\n dev_vec = text_to_vec.transform(dev_data['data'])\n logger.info(f\"train vec size:{train_vec.shape}, dev vec size:{dev_vec.shape}\")\n\n # # apply weights on tfidf based on whether the word appear in multiple classes\n # tt_occ = Counter(train_data['encoded_label'])\n # weight_list = []\n # for i in range(train_vec.shape[1]): # For every feature\n # occ = Counter(train_data['encoded_label'][train_vec[:, i] > 0.0])\n # for key, value in occ.items():\n # occ[key] = value/tt_occ[key]\n # weight_list.append(np.std(list(occ.values()))/0.35)\n # weight = np.array(weight_list).reshape(1, -1)\n # weight = weight/np.max(weight)\n # train_vec = np.multiply(train_vec, weight)\n\n # Perform oversampling on training data\n if param['balanced'] not in ['Bootstrap', 'Handsample']:\n logger.info(f\"class info before resampling: {sorted(Counter(train_data['encoded_label']).items())}\")\n train_vec, train_data['encoded_label'] = resample(X_train=train_vec, y_train=train_data['encoded_label'], balance=param['balanced'])\n logger.info(f\"class info after resampling:{sorted(Counter(train_data['encoded_label']).items())}\")\n\n # Fit model\n if param['classifier'] == 'MultinomialNB':\n clf = MultinomialNB()\n elif param['classifier'] == 'LDA':\n clf = LinearDiscriminantAnalysis()\n else:\n clf = svm.LinearSVC()\n\n if param['multiclass'] == 'OnevsOne':\n model = OneVsOneClassifier(clf)\n else:\n model = OneVsRestClassifier(clf)\n\n if param['classifier'] == 'LinearSVM' or param['multiclass'] == 'OnevsOne':\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['encoded_label'])\n train_prediction = model.predict(train_vec)\n dev_prediction = model.predict(dev_vec)\n else:\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['binary_label'])\n train_prediction = np.argmax(model.predict(train_vec), axis=1)\n dev_prediction = np.argmax(model.predict(dev_vec), axis=1)\n\n\n return train_prediction, dev_prediction, train_vec.shape, dev_vec.shape, model, word_vec_map",
"def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, Cgrid):\n\t\tweights = {}\n\t\tscore = util.Counter()\n\t\tfor c in Cgrid:\n\t\t\tself.initializeWeightsToZero()\n\t\t\tfor j in range(self.max_iterations):\n\t\t\t\tfor i,l in zip(trainingData, trainingLabels):\n\t\t\t\t\toutputLabel = self.classify([i])[0]\n\t\t\t\t \ttrainLabel= l #trainingLabels[i]\n\t\t\t\t\t#print \"training here----------------------\"\n\t\t\t\t\t#print i\n\t\t\t\t\tif trainLabel != outputLabel:\n\t\t\t\t\t\tt = min([c, (((self.weights[outputLabel] - self.weights[trainLabel]) * i + 1.0) / (2 * (i * i)))])\n\t\t\t\t\t\tupdate = util.Counter()\n\t\t\t\t\t\tfor key, val in i.iteritems():\n\t\t\t\t\t\t\tupdate[key] = t * val\n\t\t\t\t\t\tself.weights[trainLabel] += update\n\t\t\t\t\t\tself.weights[outputLabel] -= update\n\t\t\tweights[c] = self.weights\n\t\t\tscore[c] = set(list(self.classify(validationData))) & set(list(validationLabels))\n\n\t\tmaxc = Cgrid[0]\n\t\tmaxscore = score[0]\n\t\tfor c, cscore in zip(Cgrid, score):\n\t\t\tif cscore > maxscore:\n\t\t\t\tmaxscore = cscore\n\t\t\t\tmaxc = c\n\t\t\telif cscore == maxscore and c < maxc:\n\t\t\t\tmaxscore = cscore\n\t\t\t\tmaxc = c\n\t\tself.weights = weights[maxc]\n\t\t#self.C = maxc\n\n\t\t#util.raiseNotDefined()",
"def _fold_to_scale(conv_wrapper: QcQuantizeWrapper, bn_wrapper: QcQuantizeWrapper):\n # pylint: disable=protected-access, too-many-locals, too-many-branches, bad-whitespace, too-many-statements\n conv = conv_wrapper._module_to_wrap\n bn = bn_wrapper._module_to_wrap\n\n weight_quantizer = conv_wrapper.param_quantizers[\"weight\"]\n\n if not isinstance(weight_quantizer, LearnedGridTensorQuantizer):\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm folding to scale supports LearnedGridTensorQuantizer only; \"\n f\"got {type(weight_quantizer)}.\"\n )\n\n output_quantizer = conv_wrapper.output_quantizers[0]\n\n if output_quantizer.enabled:\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm should belong to the same supergroup with the layer to be folded to.\"\n )\n\n if \"bias\" in conv_wrapper.param_quantizers:\n bias_quantizer = conv_wrapper.param_quantizers[\"bias\"]\n if bias_quantizer.enabled:\n raise _BatchNormFoldingNotSupported(\n \"Can't fold BatchNorm to scale if bias quantizer is enabled.\"\n )\n\n encodings = weight_quantizer.encoding\n\n if encodings is None:\n raise RuntimeError\n\n if isinstance(encodings, libpymo.TfEncoding):\n encodings = [encodings]\n\n if isinstance(conv, _ConvTransposeNd) and conv.groups != 1:\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm folding to scale is not supported for grouped ConvTransposeNd.\"\n )\n\n # Add quantization noise to the BN params (bn weight & bn bias) before folding.\n # NOTE: Quantization of foldable batchnorms is automatically disabled when\n # initializing quantsim. However, it is still safer to call _quantize_params here\n # as we can't guarantee this is always the case.\n # For example, the user can manually enable quantization of batchnorms, etc...\n # (FYI: _quantize_params takes effect only when the parameter quantizers are enabled)\n with bn_wrapper._quantize_params():\n _fold_to_weight(conv, bn, fold_backward=True)\n\n gamma = bn.weight\n sigma = torch.sqrt(bn.running_var + bn.eps)\n\n new_encodings = []\n for old_encoding, c in zip(encodings, gamma/sigma):\n new_encoding = libpymo.TfEncoding()\n new_encoding.delta = old_encoding.delta * abs(c)\n if c >= 0:\n new_encoding.max = old_encoding.max * c\n new_encoding.min = old_encoding.min * c\n else:\n new_encoding.max = old_encoding.min * c\n new_encoding.min = old_encoding.max * c\n new_encoding.offset = old_encoding.offset\n new_encoding.bw = old_encoding.bw\n new_encodings.append(new_encoding)\n\n weight_quantizer.encoding = new_encodings\n\n # Copy batchnorm's output quantizers to conv output quantizers\n for conv_output_quantizer, bn_output_quantizer in\\\n zip(conv_wrapper.output_quantizers, bn_wrapper.output_quantizers):\n conv_output_quantizer.enabled = bn_output_quantizer.enabled\n\n if bn_output_quantizer.encoding is not None:\n encoding = libpymo.TfEncoding()\n encoding.delta = bn_output_quantizer.encoding.delta\n encoding.max = bn_output_quantizer.encoding.max\n encoding.min = bn_output_quantizer.encoding.min\n encoding.offset = bn_output_quantizer.encoding.offset\n encoding.bw = bn_output_quantizer.encoding.bw\n conv_output_quantizer.encoding = encoding\n\n bn_output_quantizer.enabled = False\n\n if \"bias\" not in conv_wrapper.param_quantizers:\n bias_quantizer = LearnedGridTensorQuantizer(weight_quantizer.bitwidth,\n weight_quantizer.round_mode,\n weight_quantizer.quant_scheme,\n weight_quantizer.use_symmetric_encodings,\n enabled_by_default=False,\n data_type=weight_quantizer.data_type)\n bias_quantizer._ch_axis = weight_quantizer._ch_axis\n 
conv_wrapper.param_quantizers[\"bias\"] = bias_quantizer",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n\tself.conditional_prob_table = util.Counter()\n\tself.prior_distribution_prob_table = util.Counter()\n\tself.conditional_prob = {}\n\tself.prior_distribution_prob = {}\n\titeration = 0 # use to symbolize which k value we are for storage in dict, since k is a float\n\t\n\t#iterate through all k values to obtain best probabilities prior/cond\n\tfor k in kgrid:\n\t\tprint \"K value: \", k, \" Iteration: \", iteration\n\t\t# helper function to calculate prior dist prob and conditional prob\n\t\tself.trainAndTuneHelper(trainingData, trainingLabels, k) \n\t\t# Set weights in tables before next iteration, save all data to choose highest probabilities at the end\n\t\tself.conditional_prob_table[iteration] = self.conditional_prob \n\t\tself.prior_distribution_prob_table[iteration] = self.prior_distribution_prob\n\t\titeration += 1 #represents k value since k is a float \n\t\t\n\t# Assign highest conditional probability and prior distribution probability, pull from dictionaries\n\tself.conditional_prob = self.conditional_prob_table[max(self.conditional_prob_table, key=self.conditional_prob_table.get)]\n\tself.prior_distribution_prob = self.prior_distribution_prob_table[max(self.prior_distribution_prob_table, key=self.prior_distribution_prob_table.get)]",
"def tune_params(self, X_train, Y_train):\n return self.model # No hyper-parameter tuning",
"def tune(alpha, filter_scales, K, L, D):\n opts = get_opts()\n n_cpu = util.get_num_CPU()\n OUT_DIR = opts.out_dir\n \n # Tests\n test = 0\n total_tests = len(L) * len(K) * len(filter_scales) \\\n * len(alpha) * len(D)\n \n for d in D:\n opts.D = d\n for fs in filter_scales:\n opts.filter_scales = fs\n for a in alpha:\n opts.alpha = a\n for k in K:\n opts.K = k\n for l in L:\n opts.L = l\n test += 1\n \n # Create test directory \n opts.out_dir = OUT_DIR + f\"/test_l-{l}_k-{k}_fs-{len(fs)}_a-{a}_d-{d}\"\n if not os.path.exists(opts.out_dir):\n os.mkdir(opts.out_dir)\n \n print(f\"TEST [{test}/{total_tests}]: L-{l}_K-{k}_fs-{len(fs)}_a-{a}_d-{d}\")\n \n # Dictionary \n if not os.path.exists(join(opts.out_dir, \"dictionary.npy\")):\n print(\"\\tBuilding Dictionary\")\n start = time()\n visual_words.compute_dictionary(opts, n_worker=n_cpu)\n print(f\"Time {(time() - start) / 60.0}\")\n else: \n print(\"\\tDictionary exists\")\n \n # Train\n if not os.path.exists(join(opts.out_dir, \"trained_system.npz\")):\n print(\"\\tBuilding Recognition System\")\n start = time()\n visual_recog.build_recognition_system(opts, n_worker=n_cpu)\n print(f\"Time {(time() - start) / 60.0}\")\n else:\n print(\"\\tRecognition system exists\")\n \n # Test\n if not os.path.exists(join(opts.out_dir, \"accuracy.txt\")) or \\\n not os.path.exists(join(opts.out_dir, \"confmat.csv\")) or \\\n not os.path.exists(join(opts.out_dir, \"model.npz\")): \n # Test\n print(\"\\tEvaluation\")\n start = time()\n conf, acc = visual_recog.evaluate_recognition_system(opts, \n n_worker=n_cpu)\n print(f\"Time {(time() - start) / 60.0}\")\n \n # Results\n print(f\"Confusion Matrix\\n{conf}\\n Accuracy: {acc}\")\n np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, \n fmt='%d', delimiter=',')\n np.savetxt(join(opts.out_dir, 'accuracy.txt'), [acc], fmt='%g')\n np.savez_compressed(join(opts.out_dir, \"model.npz\"), \n filter_scales=fs, K=k, L=l, alpha=a, acc=acc, conf_mat=conf)\n else:\n print(\"\\tEvaluation exists\")",
"def train_label_weigthed(self, train_dataset, validation_dataset, label, lr = 0.02, epochs_num = 100, batch_size = 40, alpha = 0, momentum = 0.9):\n \n def get_proportions(dataset):\n \n positive_label = dataset.labels_tensor[:,label].sum()\n \n negative_label = (1 - dataset.labels_tensor[:,label]).sum()\n \n total_examples = positive_label + negative_label\n \n imbalance = abs(positive_label - 0.5) > 0.4\n \n if imbalance:\n \n if positive_label < negative_label:\n \n w_p = 1\n \n w_n = positive_label / negative_label\n \n else:\n \n w_p = negative_label / positive_label\n \n w_n = 1\n \n else:\n \n w_p = w_n = 1\n \n return w_p, w_n\n \n def get_w(labels, w_p, w_n):\n \n positives = labels\n \n negatives = 1 - labels\n \n w = w_p * positives + w_n * negatives\n \n return w\n \n# positive_label = train_dataset.labels_tensor[:,label].sum()\n \n# negative_label = (1 - train_dataset.labels_tensor[:,label]).sum()\n \n# total_examples = positive_label + negative_label\n \n# print('num examples {}'.format(positive_label + negative_label))\n \n# print('% positive labels: {}'.format(positive_label/total_examples))\n \n# print('% negative labels: {}'.format(negative_label/total_examples))\n \n# imbalance = abs(positive_label - 0.5) > 0.4\n \n# if imbalance:\n \n# if positive_label < negative_label:\n \n# w_p = 1\n \n# w_n = positive_label / negative_label\n \n# else:\n \n# w_p = negative_label / positive_label\n \n# w_n = 1\n \n# else:\n \n# w_p = w_n = 1\n \n# print('w_p: {}'.format(w_p))\n \n# print('w_n: {}'.format(w_n))\n\n w_p, w_n = get_proportions(train_dataset) \n \n label_name = train_dataset.labels.items()[label][0]\n \n print(\"Training label {} ... \".format(label_name))\n \n optimizer = SGD(self.parameters(), lr = lr, weight_decay = alpha, momentum = momentum)\n\n train_losses = []\n\n validation_losses = []\n\n epochs = []\n\n start = time.time()\n\n remaining_time = 0\n\n train_dataloader = DataLoader(train_dataset, batch_size = batch_size, collate_fn = PPD.collate_data)\n \n validation_segments, validation_labels = PPD.collate_data(validation_dataset)\n \n weight_matrix_v = get_w(validation_labels[:,label].unsqueeze(1), w_p, w_n)\n \n criterion_v = nn.BCELoss(weight=weight_matrix_v.float())\n \n print('w_p: {} and w_n: {}'.format(w_p, w_n))\n\n for epoch in range(epochs_num):\n\n for i_batch, sample_batched in enumerate(train_dataloader):\n\n input = sample_batched[0]\n\n target = sample_batched[1][:,label].unsqueeze(1)\n \n weight_matrix = w_p * target + w_n * (1 - target)\n \n criterion = nn.BCELoss(weight=weight_matrix.float())\n\n self.zero_grad()\n\n output = self(input)\n\n train_loss = criterion(output, target.float())\n\n train_loss.backward()\n\n optimizer.step()\n\n validation_loss = criterion_v(self(validation_segments.long()), validation_labels[:,label].unsqueeze(1).float())\n\n end = time.time()\n\n remaining_time = remaining_time * 0.90 + ((end - start) * (epochs_num - epoch + 1) / (epoch + 1)) * 0.1\n\n remaining_time_corrected = remaining_time / (1 - (0.9 ** (epoch + 1)))\n\n epoch_str = \"last epoch finished: \" + str(epoch)\n\n progress_str = \"progress: \" + str((epoch + 1) * 100 / epochs_num) + \"%\"\n\n time_str = \"time: \" + str(remaining_time_corrected / 60) + \" mins\"\n\n sys.stdout.write(\"\\r\" + epoch_str + \" -- \" + progress_str + \" -- \" + time_str)\n\n sys.stdout.flush()\n\n train_losses.append(train_loss.item())\n\n validation_losses.append(validation_loss.item())\n\n epochs.append(epoch)\n\n print(\"\\n\" + \"Training completed. 
Total training time: \" + str(round((end - start) / 60, 2)) + \" mins\")\n\n return epochs, train_losses, validation_losses",
"def __init__(self, input_dim=600+9, output_dim=1*3, dropout_prob=0., scale=3):\n super(F0_RNN_Scaled, self).__init__(input_dim=input_dim, output_dim=output_dim, dropout_prob=dropout_prob)\n self.scale = scale"
] |
[
"0.68935436",
"0.61498564",
"0.6074106",
"0.60027444",
"0.5959983",
"0.5950137",
"0.5849057",
"0.5829975",
"0.5805299",
"0.5801477",
"0.57977337",
"0.57452184",
"0.57352483",
"0.5732406",
"0.5709",
"0.57076544",
"0.5691176",
"0.56769305",
"0.5666044",
"0.5643244",
"0.56248814",
"0.55777824",
"0.55756515",
"0.5559259",
"0.55446994",
"0.5498356",
"0.54864097",
"0.54843545",
"0.5441028",
"0.54060537"
] |
0.68166316
|
1
|
Function for displaying all the available accounts at the home page
|
def all_accounts(request):
    # Fetch every account and render the home page listing them.
    accounts = Account.objects.all()
    return render(request, 'app/home.html', {'accounts': accounts})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def display_accounts(cls):\n return cls.account_list",
"def list_accounts(self):\n pass",
"def accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)",
"def accounts():",
"def list_account_request(request):\n account_list = Account.objects.all()\n context = {'account_list': account_list}\n return render(request, \"accounts/account_list.html\", context)",
"def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return",
"def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"",
"def accounts():\n pass",
"def fetch_accounts(self):\n return self.fetch('/accounts')",
"def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )",
"def display_accounts_details():\n return Records.display_records()",
"def all_users():\n\n users = crud.get_users()\n\n return render_template('all_users.html', users=users)",
"def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)",
"def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)",
"def show_user_account_home():\n\n user = User.query.filter_by(user_id=int(session['user_id'])).one()\n print user\n\n return render_template(\"base.html\")\n # return render_template(\"user_account.html\", user_id=user.user_id, name=user.first_name)\n #, user_id=user.user_id, email=email, name=first_name)",
"def show_users():\n\n user = User(connection=connection, cursor=cursor)\n\n all_users = user.get_all_users()\n\n context = {\n 'all_users': all_users\n }\n\n return render_template('pages/tables/users.html', **context)",
"def show_all_accounts(self, account_name=None, account_id=None, search=False,\n print_table=True):\n pt = PrettyTable(['ACCOUNT_NAME', 'ACCOUNT_ID'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_accounts(account_name=account_name,\n account_id=account_id,\n search=search)\n for account in list:\n pt.add_row([account['account_name'], account['account_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt",
"def account():\n\n return render_template('account_page.html', title='Account')",
"def get_accounts(self):\n return self.accounts.all()",
"def display_accounts_details():\n return Credentials.display_credentials()",
"def main_page():\n pages=get_accounts()\n return render_template('disp.html',pages=pages)",
"def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts",
"def gameaccount_list(request, page):\n gameaccounts = GameAccount.query.filter_by(user=request.user).limit(PER_PAGE).offset(PER_PAGE * (page - 1)).all()\n pagination = AdminPagination('account/gameaccounts', page, PER_PAGE,\n GameAccount.query.filter_by(user=request.user).count())\n if not gameaccounts and page != 1:\n raise NotFound()\n\n return render_account_response('account/gameaccount_list.html', 'gameaccounts',\n gameaccounts=gameaccounts, pagination=pagination)",
"def index(page):\r\n per_page = 24\r\n count = cached_users.get_total_users()\r\n accounts = cached_users.get_users_page(page, per_page)\r\n if not accounts and page != 1:\r\n abort(404)\r\n pagination = Pagination(page, per_page, count)\r\n if current_user.is_authenticated():\r\n user_id = current_user.id\r\n else:\r\n user_id = 'anonymous'\r\n top_users = cached_users.get_leaderboard(current_app.config['LEADERBOARD'],\r\n user_id)\r\n return render_template('account/index.html', accounts=accounts,\r\n total=count,\r\n top_users=top_users,\r\n title=\"Community\", pagination=pagination)",
"def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)",
"def list_all_users():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n users_list = get_users_list()\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('admin_area.html', user=user_id, session_id=session_id, users_list=users_list)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def users(accountable, query):\n users = accountable.users(query)\n headers = ['display_name', 'key']\n if users:\n rows = [[v for k, v in sorted(u.items()) if k in headers]\n for u in users]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho('No users found for query {}'.format(\n query\n ), fg='red')",
"def account():\n return render_template('user/account.html')",
"def all_users(request):\n # order users by last name\n users = UserProfile.objects.all().order_by('last_name')\n return render(request, \"allusers.html\", {'users': users})",
"def show_all_users(self, account_name=None, account_id=None, path=None, user_name=None,\n user_id=None, search=False, print_table=True ):\n pt = PrettyTable(['ACCOUNT:', 'USERNAME:', 'USER_ID', 'ACCT_ID'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_users(account_name=account_name, account_id=account_id, path=path,\n user_name=user_name, user_id=user_id, search=search)\n for user in list:\n pt.add_row([user['account_name'], user['user_name'],\n user['user_id'], user['account_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt"
] |
[
"0.79639024",
"0.78714097",
"0.7488144",
"0.7481553",
"0.7406604",
"0.73886055",
"0.7223013",
"0.71588624",
"0.7116961",
"0.70824236",
"0.6985502",
"0.6905734",
"0.687565",
"0.68730795",
"0.6851584",
"0.67743534",
"0.6767352",
"0.6742166",
"0.6719278",
"0.66504353",
"0.6633801",
"0.6614835",
"0.66001314",
"0.6578742",
"0.6563812",
"0.6548971",
"0.6547408",
"0.65320045",
"0.6519822",
"0.65102816"
] |
0.8378931
|
0
|
Function for selecting a source account, and sending the application to the next page where a destination account should be chosen
|
def select_account(request, account_id):
    # The chosen account becomes the transfer source; all remaining accounts
    # are offered as possible destinations on the next page.
    account = Account.objects.get(pk=account_id)
    other_accounts = Account.objects.exclude(pk=account_id)
    context = {
        'source': account,
        'destinations': other_accounts
    }
    return render(request, 'app/destination.html', context)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def prepare_transfer(request):\n source = Account.objects.get(pk=int(request.POST.get('source-id', False)))\n destination = Account.objects.get(pk=int(request.POST.get('destination-id', False)))\n context = {\n 'source': source,\n 'destination': destination\n }\n return render(request, 'app/transfer.html', context)",
"def change_account_action(self, page: ChangeAccountPage):\n log.info(f\"Selecting an account with {self._configuration.utility_account_id}\")\n page.select_account(self._configuration.utility_account_id)",
"def owner_account_chooser():\n owner_accounts = fetch_owner_accounts()\n return render_template('export.html', owner_accounts=owner_accounts)",
"def set_source_to_add_destination(self, source_name):\n self.single_selection_from_static_kendo_dropdown(self.source_kendo_dropdown_arrow_locator, source_name)",
"def select_account(self, account_id: str):\n account_number_box = self.driver.find_or_raise(self.AccountNumberBoxSelector)\n account_number_box.send_keys(account_id)\n account_number_box.send_keys(Keys.ENTER)\n self.driver.sleep(2)\n self.driver.find_or_raise('//button[.=\"View\"]', xpath=True).click()\n self.driver.sleep(2)",
"def accounts():",
"def return_to_source(origin,parent_object_id,target_username):\n\tif origin in ('home','home_reply'):\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"home_loc_pk\",pk=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'history':\n\t\tif target_username:\n\t\t\treturn redirect(\"user_activity\",slug=target_username)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'public':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"public_group\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'search':\n\t\treturn redirect(\"search_username\")\n\telif origin == 'profile':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"user_profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'profile_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'best_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"best_photo_loc_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'photo_comments':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"comment\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'fresh_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"see_photo_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"photo\")\n\telse:\n\t\treturn redirect(\"home\")",
"def click_account(self):\n try:\n account_link = self.driver.find_element(\n By.XPATH,\n f\"//td[contains(., '{self.account_id}')]/following-sibling::td/a\",\n )\n except NoSuchElementException:\n raise BillingScraperAccountUnavailable(\n f\"Account {self.account_id} not available from account page.\"\n )\n scroll_to(self.driver, account_link)\n self.driver.sleep(0.5)\n account_link.click()",
"def what_next(request):\n user = request.user\n account = Account._default_manager.get(user=request.user)\n accounttype = account.accounttype\n if user.is_superuser:\n return HttpResponseRedirect(\"../../admin\")\n elif accounttype==\"patient\":\n \treturn HttpResponseRedirect(\"../../newsfeed\")\n elif accounttype==\"doctor\":\n return HttpResponseRedirect(\"../../patient_management\")\n elif accounttype==\"family\":\n return HttpResponseRedirect(\"../../broadcast/familypage\")\n else:\n return HttpResponse(\"accounttype=\"+accounttype+\"lala\")",
"def set_rates_destination(self):\n self.set_value_into_input_field(self.rates_tab_destination_textbox_locator, self.buy_page_excel_data_dictionary[\"Destination\"])",
"def accounts():\n pass",
"def open_account():\n print(\"\\n\")\n print(messages.open_account)\n u_id = pyip.inputInt(\"Id: \", greaterThan=0)\n name = pyip.inputCustom(raiseNameError, prompt=\"Name: \")\n address = pyip.inputCustom(raiseAddressError, prompt=\"Address: \")\n email = pyip.inputEmail(\"Email: \")\n balance = pyip.inputInt(\"Balance: \", min=0)\n password = pyip.inputPassword(\"Password: \")\n\n user_data = [u_id, name, address, balance, email, password]\n result = BankOperationsBackend.open_account(user_data)\n\n start_again() if result else BankOperationsUi.open_account()",
"def open_accounts_page(self):\n log.info(\"In landing page: click bill view button\")\n bills_page_for_meters_link = self.driver.find_element(\n *self.link_to_accs_locator\n )\n bills_page_for_meters_link.click()\n self.driver.sleep(5)\n self.driver.switch_to.window(self.driver.window_handles[-1])",
"def prepare_transfer(self, transfer_amount, from_account, to_account):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).click()\n from_account_dropdown = BaseElement(self.driver, locators.FROM_ACCOUNT_DROP_DOWN)\n to_account_dropdown = BaseElement(self.driver, locators.TO_ACCOUNT_DROP_DOWN)\n from_account_dropdown.wait_until_displayed()\n to_account_dropdown.wait_until_displayed()\n from_account_dropdown.select_dropdown_value(from_account)\n to_account_dropdown.select_dropdown_value(to_account)\n TextElement(self.driver, locators.AMOUNT_INPUT).set_text(str(transfer_amount))",
"def change_account_details(main_page):\n\n header = \"What do you want to change?\"\n change_choices =(\n ('Name',change_name),\n ('Surname',change_surname),\n ('Password',change_password),\n ('To exit',log.exit)\n )\n\n change_account = Screen( header, change_choices, main_page.login,\n main_page.password)\n\n change_account.activate()",
"def choose_account (self):\n\t\traise ae.AccountError(\"Fixing the config file hasn't been overloaded\")",
"def set_destination_to_add_destination(self, destination):\n self.multiple_items_selection_from_kendo_dropdown(self.destination_multiple_kendo_dropdown_locator, destination)\n self.click_element(self.new_destination_header_locator)",
"def _forward_page_select(self, current_page):\r\n\r\n if current_page == 0:\r\n Widgets.select_source_file()\r\n else:\r\n self.assistant.set_current_page(current_page + 1)",
"def departure_chooser(dep):\n\n # Find the input box of departure\n departure_from = browser.find_element_by_xpath(\n \"//input[@id='flight-origin-hp-flight']\")\n time.sleep(1.5)\n\n # Clear the text in the box\n departure_from.clear()\n\n # Specify the departure country\n departure_from.send_keys(dep)\n time.sleep(2)\n\n # Select the first result in the list when specify departure country\n try:\n first_item = browser.find_element_by_xpath(\"//a[@id='aria-option-0']\")\n except Exception as e:\n print(\"Fail to click the departure country\")\n\n first_item.click()",
"def set_bill_account_from_single_selection_kendo_dropdown(self, bill_account):\n self.single_selection_from_kendo_dropdown(self.bill_account_single_selection_kendo_dropdown_locator, bill_account)",
"def __call__(self, account_from: Optional[str] = None, account_to: Optional[str] = None, accounts: Optional[str] = None, amount: Optional[Union[int, float, Decimal]] = None, currency: Optional[str] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):\n\n data = {\n \"transfer_between_accounts\": int(1)\n }\n\n if account_from:\n data['account_from'] = str(account_from)\n\n if account_to:\n data['account_to'] = str(account_to)\n\n if accounts:\n data['accounts'] = str(accounts)\n\n if amount:\n data['amount'] = amount\n\n if currency:\n data['currency'] = str(currency)\n\n return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)",
"def goto_login(self):\n self.driver.find_element(*BasePageLocators.MY_ACCOUNT_DROPDOWN).click()\n self.driver.find_element(*BasePageLocators.GO_LOGIN).click()\n return LoginPage(self.driver)",
"def activate_account(self):\n self.driver.execute_script(\"window.scrollTo(0, 1000)\")\n self.click_on_element_by_css(tep.ACTIVATION_LINK)\n self.click_on_element_by_css(tep.ACTIVATION_BUTTON)",
"def go_to(session: Session) -> Response:\n headers = {\"Referer\": get_absolute_url(\"ui-buyer:company-profile\")}\n response = make_request(Method.GET, URL, session=session, headers=headers)\n\n should_be_here(response)\n return response",
"def on_UseDefaultA1Account_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def arrival_chooser(arrive):\n # Find the input box of arrival country\n arrive_to = browser.find_element_by_xpath(\n \"//input[@id='flight-destination-hp-flight']\")\n time.sleep(1.5)\n\n # Clear the text in the box\n arrive_to.clear()\n\n # Specify the arrival country\n arrive_to.send_keys(arrive)\n time.sleep(2)\n\n # Select first result in the list\n try:\n first_item = browser.find_element_by_xpath(\"//a[@id='aria-option-0']\")\n except Exception as e:\n print(\"Fail to click the arrival country\")\n\n first_item.click()",
"def do_destination(self, args):\n self.destination = int(args)",
"def select_destinations_tab(self):\n self.select_static_tab(self.destinations_tab_locator, True)",
"def select_fedcm_account(self, account_index):\n pass",
"def go_to_tranfer_market_and_input_parameters(self, cardname, fullname, cardoverall):\n try:\n cardname = cardname.lower()\n fullname = fullname.lower()\n\n # Go to transfer market\n self.driver.find_element(By.CLASS_NAME, 'icon-transfer').click()\n self.sleep_approx(1)\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located(\n (By.CLASS_NAME, 'ut-tile-transfer-market'))\n )\n self.sleep_approx(1)\n self.driver.find_element(\n By.CLASS_NAME, 'ut-tile-transfer-market').click()\n\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located(\n (By.CLASS_NAME, 'ut-player-search-control'))\n )\n wait_for_shield_invisibility(self.driver)\n\n # Insert player name into search\n self.driver.find_element(\n By.XPATH, '//div[contains(@class, \"ut-player-search-control\")]//input').click()\n self.sleep_approx(2)\n self.driver.find_element(\n By.XPATH, '//div[contains(@class, \"ut-player-search-control\")]//input').send_keys(cardname)\n\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (By.XPATH, '//ul[contains(@class, \"playerResultsList\")]/button'))\n )\n\n # Player list dropdown is visible now, so we must /html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[1]/div[1]/div/div[2]/ul\n results_list = self.driver.find_elements_by_xpath(\n '//ul[contains(@class, \"playerResultsList\")]/button')\n num_results = len(results_list)\n\n result_to_click = 1\n for x in range(num_results):\n x += 1\n playername = self.driver.find_element_by_xpath(\n \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[1]/div[1]/div/div[2]/ul/button[\" + str(x) + \"]/span[1]\").text\n playeroverall = self.driver.find_element_by_xpath(\n \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[1]/div[1]/div/div[2]/ul/button[\" + str(x) + \"]/span[2]\").text\n\n playername = str(playername)\n playername = playername.lower()\n\n playeroverall = int(playeroverall)\n target_overall = int(cardoverall)\n\n diff = playeroverall - target_overall\n\n if (diff == 0):\n if (playername == cardname):\n result_to_click = x\n if (playername == fullname):\n result_to_click = x\n\n # log_event(self.queue, \"waiting a sec Should click result number: \" + str(result_to_click))\n self.sleep_approx(1)\n self.driver.find_element_by_xpath(\n \"/html/body/main/section/section/div[2]/div/div[2]/div/div[1]/div[1]/div[1]/div/div[2]/ul/button[\" + str(result_to_click) + \"]\").click()\n except:\n log_event(\n self.queue, \"Exception go_to_transfer_market_and_input_parameters\")\n return \"error\"\n # self.go_to_tranfer_market_and_input_parameters(cardname, fullname, cardoverall)"
] |
[
"0.6072919",
"0.5678098",
"0.559927",
"0.5527686",
"0.5519306",
"0.5226638",
"0.5185696",
"0.5179164",
"0.51324326",
"0.5109042",
"0.50586045",
"0.50556964",
"0.5055251",
"0.5048144",
"0.5034489",
"0.5032942",
"0.50032514",
"0.496251",
"0.4950748",
"0.49470022",
"0.49423796",
"0.4919826",
"0.4917955",
"0.49177158",
"0.49010226",
"0.48785847",
"0.48724225",
"0.4849526",
"0.4840577",
"0.48246574"
] |
0.74542475
|
0
|
Function that retrieves the source and destination account details and passes them to the transfer function
|
def prepare_transfer(request):
source = Account.objects.get(pk=int(request.POST.get('source-id', False)))
destination = Account.objects.get(pk=int(request.POST.get('destination-id', False)))
context = {
'source': source,
'destination': destination
}
return render(request, 'app/transfer.html', context)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def transfer(self, origin, dest, amount):\n or_elem = 0\n de_elem = 0\n for elem in self.account:\n if origin == elem.id or origin == elem.name:\n or_elem = elem\n if dest == elem.id or dest == elem.name:\n de_elem = elem\n if or_elem == 0 or de_elem == 0:\n print(\"Couldn't find account.\")\n return False\n if self.corrupted(or_elem) or self.corrupted(de_elem):\n print(\"Corrupted account.\")\n return False\n if amount <= 0 or or_elem.value < amount:\n print(\"Invalid amount.\")\n return False\n or_elem.transfer(-amount)\n de_elem.transfer(amount)\n print(\"Transfer successful.\")\n return True",
"def getDest(): #status: Done, Tested\r\n pass",
"def select_account(request, account_id):\n account = Account.objects.get(pk=account_id)\n other_accounts = Account.objects.exclude(pk=account_id)\n context = {\n 'source': account,\n 'destinations': other_accounts\n }\n return render(request, 'app/destination.html', context)",
"def transfer(self, request, *args, **kwargs):\n origin_account = self.get_object()\n destiny_account = request.data.get(\"id_conta\", None)\n amount = request.data.get(\"valor\", None)\n account_serializer = self.get_serializer()\n\n try:\n transfer = account_serializer.transfer(origin_account, destiny_account, amount)\n except ObjectDoesNotExist as obj:\n return Response({\"detail\": \"Could not transfer the amount: Destiny account does not exist.\",\n \"status_code\": status.HTTP_404_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n except ValueError as ve:\n return Response({\"detail\": \"Could not transfer the amount: {0}.\".format(ve),\n \"status_code\": status.HTTP_400_BAD_REQUEST}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(transfer)",
"def transfer(self,source,target):\n self.__transfer_dict[tuple([source,target])]()",
"def __call__(self, account_from: Optional[str] = None, account_to: Optional[str] = None, accounts: Optional[str] = None, amount: Optional[Union[int, float, Decimal]] = None, currency: Optional[str] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):\n\n data = {\n \"transfer_between_accounts\": int(1)\n }\n\n if account_from:\n data['account_from'] = str(account_from)\n\n if account_to:\n data['account_to'] = str(account_to)\n\n if accounts:\n data['accounts'] = str(accounts)\n\n if amount:\n data['amount'] = amount\n\n if currency:\n data['currency'] = str(currency)\n\n return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)",
"def accounts():",
"def transfer_money(request):\n source = Account.objects.get(pk=int(request.POST.get('source-id', False)))\n destination = Account.objects.get(pk=int(request.POST.get('destination-id', False)))\n amount = float(request.POST.get('amount', False))\n enough_cash = source.available_cash >= amount\n if enough_cash:\n source.available_cash -= amount\n source.save()\n destination.available_cash += amount\n destination.save()\n messages.success(request, 'OK 200: Transfer successfully executed.')\n else:\n messages.error(request, f'Error 400: Tried to transfer {amount} from {source.name}, but only had {source.available_cash} available.')\n \n transaction = Transaction(description=f\"Transfer from {source.name} to {destination.name}.\", success=enough_cash, cash_amount=amount, source_account=source, \n destination_account=destination)\n transaction.save()\n\n return redirect('overview')",
"async def transfer(self, code: str, amount, fromAccount, toAccount, params={}):\n # transferring between derivatives wallet and regular wallet is not documented in their API\n # however we support it in CCXT(from just looking at web inspector)\n await self.load_markets()\n accountsByType = self.safe_value(self.options, 'v2AccountsByType', {})\n fromId = self.safe_string(accountsByType, fromAccount)\n if fromId is None:\n keys = list(accountsByType.keys())\n raise ArgumentsRequired(self.id + ' transfer() fromAccount must be one of ' + ', '.join(keys))\n toId = self.safe_string(accountsByType, toAccount)\n if toId is None:\n keys = list(accountsByType.keys())\n raise ArgumentsRequired(self.id + ' transfer() toAccount must be one of ' + ', '.join(keys))\n currency = self.currency(code)\n fromCurrencyId = self.convert_derivatives_id(currency, fromAccount)\n toCurrencyId = self.convert_derivatives_id(currency, toAccount)\n requestedAmount = self.currency_to_precision(code, amount)\n # self request is slightly different from v1 fromAccount -> from\n request = {\n 'amount': requestedAmount,\n 'currency': fromCurrencyId,\n 'currency_to': toCurrencyId,\n 'from': fromId,\n 'to': toId,\n }\n response = await self.privatePostAuthWTransfer(self.extend(request, params))\n #\n # [\n # 1616451183763,\n # \"acc_tf\",\n # null,\n # null,\n # [\n # 1616451183763,\n # \"exchange\",\n # \"margin\",\n # null,\n # \"UST\",\n # \"UST\",\n # null,\n # 1\n # ],\n # null,\n # \"SUCCESS\",\n # \"1.0 Tether USDt transfered from Exchange to Margin\"\n # ]\n #\n error = self.safe_string(response, 0)\n if error == 'error':\n message = self.safe_string(response, 2, '')\n # same message v1\n self.throw_exactly_matched_exception(self.exceptions['exact'], message, self.id + ' ' + message)\n raise ExchangeError(self.id + ' ' + message)\n return self.parse_transfer(response, currency)",
"def transfer(request):\n if request.method == 'POST':\n form = TransferForm(request.user, data=request.POST)\n if form.is_valid():\n from_account = form.cleaned_data['from_account']\n to_account = form.cleaned_data['to_account']\n date = form.cleaned_data['date']\n amount = form.cleaned_data['amount']\n from_transaction = Transaction(\n user=request.user,\n account=from_account,\n date=date,\n summary=\"Transfer to {}\".format(to_account),\n amount=-amount,\n )\n from_transaction.save()\n to_transaction = Transaction(\n user=request.user,\n account=to_account,\n date=date,\n summary=\"Transfer from {}\".format(from_account),\n amount=amount,\n linked=from_transaction,\n )\n to_transaction.save()\n from_transaction.linked = to_transaction\n from_transaction.save()\n return redirect(to_transaction)\n else:\n form = TransferForm(request.user, initial={'date': now()})\n return render(request, 'pages/form.html', {\n 'title': \"Transfer\",\n 'form': form,\n })",
"def transfer(self, dest_id, resourceType, amount, logs=None) -> str:\n if logs is not None:\n logs.append((self.id, ValidActions.TRANSFER, dest_id))\n return \"t {} {} {} {}\".format(self.id, dest_id, resourceType, amount)",
"def transferfunds(self):",
"def transfer(self, request, *args, **kwargs):\n\t\tuser = request.user\n\t\ttransfer_from = self.get_object()\n\t\ttransfer_to = get_object_or_404(Container, pk=request.data['into'])\n\t\t\n\t\ttransfer_from.transfer_to(transfer_to, user=user)\n\t\n\t\treturn Response({\n\t\t\t'origin': self.get_serializer(transfer_from).data,\n\t\t\t'destination': self.get_serializer(transfer_to).data\n\t\t})",
"def transfer(self, amount, target):\n\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"select * from accounts where name=?\"\"\", (target, ))\n\n if len(cursor.fetchall()) > 0:\n\n self.withdraw(amount)\n\n cursor.execute(\"\"\"update accounts set amount=amount+? where name=?\"\"\", (amount, target))\n \n connection.commit()\n\n return cursor.fetchall()\n \n else:\n\n return None",
"def prepare(self,admin):\n\n res=admin.helper.getAccounts(admin.userName,self.dst)\n if res != None:\n if res == []:\n return False\n else:\n resdst = res[0]\n res=admin.helper.getAccounts(admin.userName,self.org)\n if res==[]:\n return False\n resorg=res[0]\n rate=admin.ServiceQuery(self.org,self.dst)\n if self.operation=='BUY':\n if resorg['amount']>=self.cnt*rate:\n return dict(status='OK',\n bal_dst=resdst['amount']+self.cnt,\n bal_org=(resorg['amount']-self.cnt*rate))\n else:\n return dict(status='NOT OK',\n bal_dst=resdst['amount']+self.cnt,\n bal_org=(resorg['amount']-self.cnt*rate))\n elif self.operation=='DEBIT':\n if resorg['amount']>=self.cnt:\n return dict(status='OK',\n bal_dst=resdst['amount']+self.cnt/rate,\n bal_org=(resorg['amount']-self.cnt))\n else:\n return dict(status='NOT OK',\n bal_dst=resdst['amount']+self.cnt/rate,\n bal_org=(resorg['amount']-self.cnt))\n else:\n return None#operacion invalida \n else:\n return False#No hay cuenta destino",
"def get_destination(self, request, format=None):\n user = request.user\n cn = request.DATA.get('center', \"\")\n imei = request.DATA.get('imei', \"\")\n center_dict = dict(get_center_cache('unicode_name'))\n\n if not cn:\n return Response({'error': 'center key is missing from request',\n 'error_code': 'ECDCMCN005','success': False},\n status=status.HTTP_200_OK)\n\n #Validate origin/destination center\n if cn not in center_dict.keys():\n return Response({'error': 'Invalid center passed {}'.format(cn),\n 'error_code' : 'ECDCICN006', 'success': False},\n status=status.HTTP_200_OK)\n\n station = connection.Station.find_one({'user':user.username , 'cn' : cn})\n\n if not station:\n return Response({'error':'station not found for the user',\n 'error_code':'ECDCNST007','success': False},\n status=status.HTTP_200_OK)\n\n #restrict user if he want to access insta\n # bagging from different device\n if imei != station.get('imei') and station.get('lock'):\n return Response({'error': 'Session already exist for user',\n 'error_code': 'ECDCSEU001', 'success': False})\n\n station['lock'] = True\n station['imei'] = imei\n data = dict()\n data['destinations'] = station.bag_cn\n data['station'] = station.get('name')\n data['success'] = True\n station.save()\n return Response(data, status=status.HTTP_200_OK)",
"def Destination(self) -> _n_0_t_1:",
"def _get_accounts_data(self, accounts, display_account,tables,where_clause,where_params):\n\n account_result = {}\n # Prepare sql query base on selected parameters from wizard\n tables, where_clause, where_params = tables,where_clause,where_params\n\n # print tables, where_clause, where_params\n # print \"tables data\",tables\n # print \"Table Type\",type(tables)\n # print \"where clause data\",where_clause\n # print \"where clause\",type(where_clause)\n # print \"where params data\",where_params\n # print \"where params\",type(where_params)\n\n tables = tables.replace('\"','')\n if not tables:\n tables = 'account_move_line'\n wheres = [\"\"]\n if where_clause.strip():\n wheres.append(where_clause.strip())\n filters = \" AND \".join(wheres)\n # compute the balance, debit and credit for the provided accounts\n request = (\"SELECT account_id AS id, SUM(debit) AS debit, SUM(credit) AS credit, (SUM(debit) - SUM(credit)) AS balance\" +\\\n \" FROM \" + tables + \" WHERE account_id IN %s \" + filters + \" GROUP BY account_id\")\n params = (tuple(accounts.ids),) + tuple(where_params)\n self.env.cr.execute(request, params)\n for row in self.env.cr.dictfetchall():\n account_result[row.pop('id')] = row\n account_res = []\n for account in accounts:\n res = dict((fn, 0.0) for fn in ['credit', 'debit', 'balance'])\n currency = account.currency_id and account.currency_id or account.company_id.currency_id\n res['code'] = account.code\n res['name'] = account.name\n if account.id in account_result.keys():\n res['debit'] = account_result[account.id].get('debit')\n res['credit'] = account_result[account.id].get('credit')\n res['balance'] = account_result[account.id].get('balance')\n if display_account == 'all':\n account_res.append(res)\n if display_account == 'not_zero' and not currency.is_zero(res['balance']):\n account_res.append(res)\n if display_account == 'movement' and (not currency.is_zero(res['debit']) or not currency.is_zero(res['credit'])):\n account_res.append(res)\n print \"data from core report model\",account_res\n return account_res",
"def transfer_ownership(args):\n xfer = TransferOwnershipApi(\n tsurl=args.ts_url,\n username=args.username,\n password=args.password,\n disable_ssl=args.disable_ssl\n )\n xfer.transfer_ownership(\n from_username=args.from_user, to_username=args.to_user\n )",
"def _extract_service_info(self, source, destination):\n self._validate_args(source, destination)\n src_tags = []\n dest_tags = []\n src_ranges = []\n dest_ranges = []\n if isinstance(source, Service):\n src_tags.append(\"%s-%s\" % (source.network.name, source.name))\n if isinstance(source, CidrBlock):\n src_ranges.append(str(source.cidr_block))\n if isinstance(destination, Service):\n dest_tags.append(\"%s-%s\" % (destination.network.name, destination.name))\n if isinstance(destination, CidrBlock):\n dest_ranges.append(str(destination.cidr_block))\n return src_tags, dest_tags, src_ranges, dest_ranges",
"def transfer(self):\n pass",
"def _fund(src_acc, accounts, amount, shard_index):\n if not accounts:\n return []\n hashes = []\n for account in accounts:\n from_address = cli.get_address(src_acc)\n to_address = cli.get_address(account)\n passphrase = get_passphrase(src_acc)\n h = send_transaction(from_address, to_address, shard_index, shard_index, amount,\n passphrase=passphrase, retry=True, wait=True)\n if h is None:\n raise RuntimeError(f\"Failed to send tx from {from_address} to {to_address}\")\n hashes.append(h)\n return hashes",
"def get_transfer_list(self,\n address: str,\n token_address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transfer, ...]:\n ...",
"def transferChainAssignments(chainA, chainB):\n\n mapping = getChainResidueMapping(chainA, chainB)\n for residueA, residueB in mapping:\n if residueB:\n resonancesB = getResidueResonances(residueB)\n if resonancesB:\n msg = 'Destination residue %s%d has assignments. Continue?.'\n data = (residueB.seqCode,residueB.ccpCode)\n if not showOkCancel('Warning', msg % data):\n return\n \n for residueA, residueB in mapping:\n if residueA:\n if residueB is None:\n msg = 'Residue %d%s has no equivalent in destination chain'\n data = (residueA.seqCode,residueA.ccpCode)\n showWarning('Warning', msg % data)\n else:\n transferResidueAssignments(residueA,residueB)",
"def prepare_transfer(self, transfer_amount, from_account, to_account):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).click()\n from_account_dropdown = BaseElement(self.driver, locators.FROM_ACCOUNT_DROP_DOWN)\n to_account_dropdown = BaseElement(self.driver, locators.TO_ACCOUNT_DROP_DOWN)\n from_account_dropdown.wait_until_displayed()\n to_account_dropdown.wait_until_displayed()\n from_account_dropdown.select_dropdown_value(from_account)\n to_account_dropdown.select_dropdown_value(to_account)\n TextElement(self.driver, locators.AMOUNT_INPUT).set_text(str(transfer_amount))",
"def send_command_success(self, sn: TransactionID, destination: tuple, source: tuple):\n pass",
"def fake_destination_data():\n\n for _ in range(0, 35):\n user_id = random.randint(1, 8)\n\n print (str(user_id) + '|' + fake.company() + '|' + fake.street_address()\n + '|' + fake.city() + '|' + fake.state_abbr() + '|' +\n fake.postalcode())",
"def accounts():\n pass",
"def do_ACCEPT_COMPONENT_TRANSFER(self):\n try:\n self.logger = SimpleLogger(conf=None).get_logger_object()\n self.conf = SimpleLogger(conf=None).get_conf()\n self.msg = GlobalVariables(self.logger)\n self.ll_port = int(self.conf.get('llport', 61014))\n self.logger.info(\"Account-updater received ACCEPT_COMPONENT_\" \\\n \"TRANSFER request\")\n length = int(self.headers['Content-Length'])\n self.logger.debug(\"Headers:%s\" %self.headers)\n #sending intemediate (connection created) acknowledgement \n #to source node\n self.send_response(HTTP_CONTINUE)\n self.end_headers()\n \n #receiving new ownership list\n pickled_string = self.rfile.read(length)\n add_comp_list = ast.literal_eval(pickled_string)\n self.logger.info(\"Accepting new component ownership: %s\"% add_comp_list)\n\n\n #updating global map for new onwership\n thread = threading.Thread(target = self.update_my_ownership, \\\n args=(add_comp_list,))\n thread.start()\n\n self.logger.info(\"Completed ACCEPT_COMPONENTS_TRANSFER request\")\n self.send_response(HTTP_OK)\n self.end_headers()\n return\n except Exception as err:\n self.logger.exception('Exception raised in' \\\n 'ACCEPT_COMPONENTS_TRANSFER error :%s' % err)\n self.send_response(HTTP_INTERNAL_SERVER_ERROR)\n self.end_headers()",
"def _transfer(self):\n copy_func = BIDSCopy(overwrite=self.force_override.get(),\n verify=self.verify.get(),\n file_name_tracker=self.curr_file,\n file_num_tracker=self.transferred_count,\n file_prog_tracker=self.curr_file_progress)\n self.curr_file.set('Mapping destination BIDS structure...')\n dst_folder = BIDSTree(self.dst)\n for src in self.srcs:\n dst_folder.add(src, copier=copy_func.copy_files)\n if self.set_copied:\n self._rename_complete(src)\n self.transferred_count.set(self.file_count)\n self.curr_file.set('Complete!')"
] |
[
"0.6129466",
"0.59486693",
"0.5880699",
"0.57925147",
"0.57384783",
"0.5718324",
"0.5640389",
"0.56080145",
"0.55327165",
"0.5453918",
"0.54478085",
"0.5402419",
"0.5387087",
"0.5384199",
"0.53738874",
"0.5289498",
"0.52878386",
"0.5281779",
"0.52817446",
"0.5277293",
"0.5259135",
"0.5239773",
"0.52158743",
"0.51763624",
"0.5160839",
"0.51557565",
"0.51429963",
"0.5129645",
"0.5106657",
"0.5100426"
] |
0.651096
|
0
|
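For the prepare_transfer record above, a minimal sketch of the same lookup step, assuming the Account model and app/transfer.html template shown in that snippet; the function name prepare_transfer_or_404, the module path app.models, and the use of get_object_or_404 are illustrative assumptions, not part of the record.

from django.shortcuts import get_object_or_404, render
from app.models import Account  # assumed import path for the Account model used above

def prepare_transfer_or_404(request):
    # Same lookup as the record above, but a missing or stale id produces a
    # 404 response instead of an uncaught Account.DoesNotExist exception.
    source = get_object_or_404(Account, pk=request.POST.get('source-id'))
    destination = get_object_or_404(Account, pk=request.POST.get('destination-id'))
    return render(request, 'app/transfer.html', {'source': source, 'destination': destination})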
Method for performing the transaction. If there is enough money in the source account, the transaction succeeds and the money is transferred; otherwise, the transaction fails and the money stays where it was.
|
def transfer_money(request):
source = Account.objects.get(pk=int(request.POST.get('source-id', False)))
destination = Account.objects.get(pk=int(request.POST.get('destination-id', False)))
amount = float(request.POST.get('amount', False))
enough_cash = source.available_cash >= amount
if enough_cash:
source.available_cash -= amount
source.save()
destination.available_cash += amount
destination.save()
messages.success(request, 'OK 200: Transfer successfully executed.')
else:
messages.error(request, f'Error 400: Tried to transfer {amount} from {source.name}, but only had {source.available_cash} available.')
transaction = Transaction(description=f"Transfer from {source.name} to {destination.name}.", success=enough_cash, cash_amount=amount, source_account=source,
destination_account=destination)
transaction.save()
return redirect('overview')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def MakeMoneyTransfer(self, user_id_origin, user_id_beneficiary, amount_money):\n result_operation = self.OperateAccount(user_id_origin, (int(amount_money) * -1))\n if \"Error:\" in result_operation:\n return result_operation\n else:\n self.OperateAccount(user_id_beneficiary, amount_money)\n return result_operation",
"def transfer(self, amount, target):\n\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"select * from accounts where name=?\"\"\", (target, ))\n\n if len(cursor.fetchall()) > 0:\n\n self.withdraw(amount)\n\n cursor.execute(\"\"\"update accounts set amount=amount+? where name=?\"\"\", (amount, target))\n \n connection.commit()\n\n return cursor.fetchall()\n \n else:\n\n return None",
"def transfer(self, origin, dest, amount):\n or_elem = 0\n de_elem = 0\n for elem in self.account:\n if origin == elem.id or origin == elem.name:\n or_elem = elem\n if dest == elem.id or dest == elem.name:\n de_elem = elem\n if or_elem == 0 or de_elem == 0:\n print(\"Couldn't find account.\")\n return False\n if self.corrupted(or_elem) or self.corrupted(de_elem):\n print(\"Corrupted account.\")\n return False\n if amount <= 0 or or_elem.value < amount:\n print(\"Invalid amount.\")\n return False\n or_elem.transfer(-amount)\n de_elem.transfer(amount)\n print(\"Transfer successful.\")\n return True",
"async def transfer(self, code: str, amount, fromAccount, toAccount, params={}):\n # transferring between derivatives wallet and regular wallet is not documented in their API\n # however we support it in CCXT(from just looking at web inspector)\n await self.load_markets()\n accountsByType = self.safe_value(self.options, 'v2AccountsByType', {})\n fromId = self.safe_string(accountsByType, fromAccount)\n if fromId is None:\n keys = list(accountsByType.keys())\n raise ArgumentsRequired(self.id + ' transfer() fromAccount must be one of ' + ', '.join(keys))\n toId = self.safe_string(accountsByType, toAccount)\n if toId is None:\n keys = list(accountsByType.keys())\n raise ArgumentsRequired(self.id + ' transfer() toAccount must be one of ' + ', '.join(keys))\n currency = self.currency(code)\n fromCurrencyId = self.convert_derivatives_id(currency, fromAccount)\n toCurrencyId = self.convert_derivatives_id(currency, toAccount)\n requestedAmount = self.currency_to_precision(code, amount)\n # self request is slightly different from v1 fromAccount -> from\n request = {\n 'amount': requestedAmount,\n 'currency': fromCurrencyId,\n 'currency_to': toCurrencyId,\n 'from': fromId,\n 'to': toId,\n }\n response = await self.privatePostAuthWTransfer(self.extend(request, params))\n #\n # [\n # 1616451183763,\n # \"acc_tf\",\n # null,\n # null,\n # [\n # 1616451183763,\n # \"exchange\",\n # \"margin\",\n # null,\n # \"UST\",\n # \"UST\",\n # null,\n # 1\n # ],\n # null,\n # \"SUCCESS\",\n # \"1.0 Tether USDt transfered from Exchange to Margin\"\n # ]\n #\n error = self.safe_string(response, 0)\n if error == 'error':\n message = self.safe_string(response, 2, '')\n # same message v1\n self.throw_exactly_matched_exception(self.exceptions['exact'], message, self.id + ' ' + message)\n raise ExchangeError(self.id + ' ' + message)\n return self.parse_transfer(response, currency)",
"def transfer_to(self, amount, another_user):\n if self.__balance >= amount and self.__is_logged_in:\n self.__balance = float(Decimal(str(self.__balance - amount)))\n another_user.deposit(amount)\n self.register_operation(self.ACTIONS['TRANSFERING'], amount, another_user)\n return True\n\n return False",
"def transfer(self, amount):\n if not self.destination.can_accept_credit(amount):\n raise ValueError(\"Destination account can not accept a credit of {0}\".format(amount))\n self.source.debit(amount)\n self.destination.credit(amount)",
"def transfer(self, transferee, transfer_amount):\n self.withdraw(transfer_amount)\n transferee.deposit(transfer_amount)\n return self.balance",
"def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)",
"async def transfer(self, ctx: commands.Context, to: discord.Member, amount: int):\r\n from_ = ctx.author\r\n currency = await bank.get_currency_name(ctx.guild)\r\n\r\n try:\r\n await bank.transfer_credits(from_, to, amount)\r\n except (ValueError, errors.BalanceTooHigh) as e:\r\n return await ctx.send(str(e))\r\n\r\n await ctx.send(\r\n _(\"{user} transferred {num} {currency} to {other_user}\").format(\r\n user=from_.display_name,\r\n num=humanize_number(amount),\r\n currency=currency,\r\n other_user=to.display_name,\r\n )\r\n )",
"def transfer(self, request, *args, **kwargs):\n origin_account = self.get_object()\n destiny_account = request.data.get(\"id_conta\", None)\n amount = request.data.get(\"valor\", None)\n account_serializer = self.get_serializer()\n\n try:\n transfer = account_serializer.transfer(origin_account, destiny_account, amount)\n except ObjectDoesNotExist as obj:\n return Response({\"detail\": \"Could not transfer the amount: Destiny account does not exist.\",\n \"status_code\": status.HTTP_404_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n except ValueError as ve:\n return Response({\"detail\": \"Could not transfer the amount: {0}.\".format(ve),\n \"status_code\": status.HTTP_400_BAD_REQUEST}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(transfer)",
"def transfer(self, giver, receiver, amount):\n \n # Try to withdraw the full amount from the giver.\n # This will be either the amount or the giver's full balance.\n # Whatever the result, deposit it to the receiver's account.\n receiver.account.deposit(\n giver.account.withdraw(amount)\n )",
"def deposit(self, amount, another_user=None):\n if another_user:\n another_user.deposit(amount)\n self.register_operation(self.ACTIONS['RECEIVING'], amount)\n self.register_operation(self.ACTIONS['TRANSFERING'], amount, another_user)\n else:\n self.__balance = float(Decimal(str(self.__balance + amount)))\n self.register_operation(self.ACTIONS['RECEIVING'], amount)\n\n return True # False is never reached",
"def transfer_funds_randomly():\n\n source_id = get_random_account_id()\n sink_id = get_random_account_id()\n\n source = Account.get(id=source_id)\n amount = floor(source.balance / 2)\n\n if source.balance < amount:\n raise \"Insufficient funds\"\n\n source.balance -= amount\n sink = Account.get(id=sink_id)\n sink.balance += amount",
"def _handle_icx_send_transaction(self,\n context: 'IconScoreContext',\n params: dict) -> 'TransactionResult':\n tx_result = TransactionResult(context.tx, context.block)\n\n try:\n to: Address = params['to']\n tx_result.to = to\n\n if context.get_revision() >= 3:\n # Check if from account can charge a tx fee\n self._icon_pre_validator.execute_to_check_out_of_balance(context, params,\n step_price=context.step_counter.step_price)\n else:\n # Check if from account can charge a tx fee\n self._icon_pre_validator.execute_to_check_out_of_balance(None, params,\n step_price=context.step_counter.step_price)\n\n # Every send_transaction are calculated DEFAULT STEP at first\n context.step_counter.apply_step(StepType.DEFAULT, 1)\n input_size = self._get_byte_length(params.get('data', None))\n\n context.step_counter.apply_step(StepType.INPUT, input_size)\n self._transfer_coin(context, params)\n\n if to.is_contract:\n tx_result.score_address = self._handle_score_invoke(context, to, params)\n\n tx_result.status = TransactionResult.SUCCESS\n except BaseException as e:\n tx_result.failure = self._get_failure_from_exception(e)\n trace = self._get_trace_from_exception(context.current_address, e)\n context.tx_batch.clear()\n context.traces.append(trace)\n context.event_logs.clear()\n finally:\n # Revert func_type to IconScoreFuncType.WRITABLE\n # to avoid DatabaseException in self._charge_transaction_fee()\n context.func_type = IconScoreFuncType.WRITABLE\n\n # Charge a fee to from account\n final_step_used, final_step_price = \\\n self._charge_transaction_fee(\n context,\n params,\n tx_result.status,\n context.step_counter.step_used)\n\n # Finalize tx_result\n context.cumulative_step_used += final_step_used\n tx_result.step_used = final_step_used\n tx_result.step_price = final_step_price\n tx_result.cumulative_step_used = context.cumulative_step_used\n tx_result.event_logs = context.event_logs\n tx_result.logs_bloom = self._generate_logs_bloom(context.event_logs)\n tx_result.traces = context.traces\n\n return tx_result",
"def transfer(self, amount, budget):\n if (self.get_funds(amount)):\n self.withdrawal(amount, \"Transfer to\" + budget.category)\n budget.deposit(amount, \"Transfer from\" + self.category)\n return True\n else:\n return False",
"def test_transfer(self):\n obj = NonInterceptedBankTransaction()\n obj.transfer(1000)\n self.assertEquals(sys.stdout.getvalue().strip(), \"Transferring Rs. 1000\")",
"def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 'create_invoice': False,\n # 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': 
datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = student_id.sudo().email\n mail_id = self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }",
"def prepare_transfer(self, transfer_amount, from_account, to_account):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).click()\n from_account_dropdown = BaseElement(self.driver, locators.FROM_ACCOUNT_DROP_DOWN)\n to_account_dropdown = BaseElement(self.driver, locators.TO_ACCOUNT_DROP_DOWN)\n from_account_dropdown.wait_until_displayed()\n to_account_dropdown.wait_until_displayed()\n from_account_dropdown.select_dropdown_value(from_account)\n to_account_dropdown.select_dropdown_value(to_account)\n TextElement(self.driver, locators.AMOUNT_INPUT).set_text(str(transfer_amount))",
"def __call__(self, account_from: Optional[str] = None, account_to: Optional[str] = None, accounts: Optional[str] = None, amount: Optional[Union[int, float, Decimal]] = None, currency: Optional[str] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):\n\n data = {\n \"transfer_between_accounts\": int(1)\n }\n\n if account_from:\n data['account_from'] = str(account_from)\n\n if account_to:\n data['account_to'] = str(account_to)\n\n if accounts:\n data['accounts'] = str(accounts)\n\n if amount:\n data['amount'] = amount\n\n if currency:\n data['currency'] = str(currency)\n\n return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)",
"async def atomic_transfer(\n self, amount: int, target_wallet: \"Wallet\", nonce: str\n ) -> None:\n\n if target_wallet == self:\n raise ValueError(\"Impossible to transfer funds to self\")\n\n transaction = Transaction(\n wallet_id=self.wallet_id,\n nonce=nonce,\n type=TransactionType.TRANSFER,\n data={\"amount\": amount, \"target_wallet\": target_wallet.wallet_id},\n )\n\n try:\n await self.storage.transaction_write_items(\n items=[\n self.storage.item_factory.put_idempotency_item(\n pk=transaction.unique_id, data=transaction.as_dict()\n ),\n self.storage.item_factory.update_atomic_decrement(\n pk=self.unique_id, update_key=self.BALANCE_KEY, amount=amount\n ),\n self.storage.item_factory.update_atomic_increment(\n pk=target_wallet.unique_id,\n update_key=self.BALANCE_KEY,\n amount=amount,\n ),\n ]\n )\n except storage.exceptions.TransactionMultipleError as e:\n if e.errors[0]:\n raise crud.exceptions.WalletTransactionAlreadyRegisteredError(\n f\"Transaction with nonce {nonce} already registered.\"\n )\n\n if e.errors[1]:\n raise crud.exceptions.WalletInsufficientFundsError(\n \"Wallet has insufficient funds to \"\n f\"complete operation: {str(e.errors[1])}\"\n )\n\n if e.errors[2]:\n raise crud.exceptions.WalletNotFoundError(\n f\"Wallet does not exists: {str(e.errors[2])}\"\n )\n\n raise crud.exceptions.BaseWalletError(str(e))",
"def fund_transaction(address, role):\n amount = 0\n message_text = ''\n error_text = ''\n if encoding.is_valid_address(address):\n if check_optin(address):\n if role == 'player':\n amount = 1500\n message_text = 'Your account has been funded with 1,500 Monopoly Money'\n\n elif role == 'banker':\n amount = 20000\n message_text = 'Your account has been funded with 20,000 Monopoly Money'\n\n asset_transfer(SENDER_ADDRESS, SENDER_PRIVATE_KEY, address, amount, ASSET_ID)\n else:\n error_text = \"Your account not opt-in to Monopoly Money asset\"\n else:\n error_text = \"Enter correct Algorand address\"\n return message_text, error_text",
"def fallback(self):\n if self._crowdsale_closed.get():\n revert('Crowdsale is closed.')\n\n # Accepts the contribution\n amount = self.msg.value\n self._balances[self.msg.sender] = self._balances[self.msg.sender] + amount\n self._amount_raised.set(self._amount_raised.get() + amount)\n value = int(amount / self._price.get())\n data = b'called from Crowdsale'\n\n # Gives tokens to the contributor as a reward\n token_score = self.create_interface_score(self._addr_token_score.get(), TokenInterface)\n token_score.transfer(self.msg.sender, value, data)\n\n if self.msg.sender not in self._joiner_list:\n self._joiner_list.put(self.msg.sender)\n\n self.FundTransfer(self.msg.sender, amount, True)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, True)', TAG)",
"def deposit(account, amount):\n pass",
"def transfer(self, amount, category):\n amount = -abs(amount)\n if self.check_funds(amount):\n self.withdraw(amount, \"Transfer to %s\"%(category.name))\n category.deposit(amount, \"Transfer from %s\"%(self.name))\n return True\n return False",
"def transfer_amount(self, conn, data_subtract, data_add):\n sql_subtract = 'UPDATE card SET balance = balance - ? WHERE number = ?;'\n sql_add = 'UPDATE card SET balance = balance + ? WHERE number = ?;'\n\n c = conn.cursor()\n c.execute(sql_subtract, data_subtract)\n conn.commit()\n\n c = conn.cursor()\n c.execute(sql_add, data_add)\n conn.commit()\n\n # print(f\"amount {data_add[0]} was added to account {data_add[1]}\")\n print(\"Success!\")\n self.menus()",
"def OperateAccount(self, user_id, amount_money):\n user_data = self.db_manager.GetData(user_id)\n user_data = self._parsetouserDTO(user_data)\n old_balance = user_data.GetAmountMoney()\n new_balance = int(old_balance) + int(amount_money)\n if new_balance >= 0:\n user_data.SetAmountMoney(new_balance)\n self.db_manager.UpdateData(user_id, user_data.GetAmountMoney())\n return JsonSerializer.SerializeObject(user_data)\n else:\n return \"{\\\"ERROR\\\":\\\"Operation denied insufficient money\\\"}\"",
"def test_transfer(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=True)\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)\n assert token.call().balanceOf(shareholder1) == 0\n assert token.call().balanceOf(boogieman) == 4000",
"def make_transaction():\n account_id = request.json['account_id']\n aux_account = [account for account in accounts if account['id'] == account_id]\n if len(aux_account) == 0:\n abort(404)\n account_balance = Decimal(aux_account[0].get('balance')).quantize(Decimal('0.00'))\n transaction = request.json['transaction']\n transaction_amount = Decimal(abs(request.json['amount'])).quantize(Decimal('0.00'))\n\n if not request.json:\n abort(400)\n if transaction not in ['withdrawal', 'deposit']:\n abort(400, f'Invalid transaction name: {transaction}')\n if transaction == 'withdrawal':\n transaction_amount = transaction_amount*-1\n\n # the user can't withdraw more than the account has\n validation_sum = (account_balance + transaction_amount).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n if validation_sum >= 0:\n for real_account in accounts:\n if real_account.get('id') == account_id:\n real_account['balance'] = round(float(validation_sum),2)\n else:\n abort(400, {'error':'Not enough funds for this transaction'})\n\n return json.dumps({f'{transaction.capitalize()} Done. New balance': str(validation_sum)}, ensure_ascii=False), 200",
"def deposit_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.deposit_money(credentials)\n start_again() if result else BankOperationsUi.deposit_money()",
"def transact(self, transaction_type, digicoins_No):\n\n #Raise an exception of digicoins_No is not multiple of 10.\n try:\n if digicoins_No % 10 != 0:\n raise MyError.MyError(digicoins_No)\n except Exception as inst:\n print \"\\nYou can only transact multiples of 10 of digicoins.\\nTransaction Failed!\"\n return\n\n lowest_price = 0\n digicoins_remain = digicoins_No\n while digicoins_remain > 0:\n if digicoins_remain > 100:\n digicoins_No_to_be_transacted = 100\n else:\n digicoins_No_to_be_transacted = digicoins_remain\n\n A_price = self.Broker1.offered_price(digicoins_No_to_be_transacted)\n B_price = self.Broker2.offered_price(digicoins_No_to_be_transacted)\n\n if A_price < B_price:\n self.Broker1.execute_transaction(digicoins_No_to_be_transacted)\n lowest_price += A_price\n else:\n self.Broker2.execute_transaction(digicoins_No_to_be_transacted)\n lowest_price += B_price\n digicoins_remain -= 100\n\n if transaction_type == \"BUY\":\n print self.name, \"buys\", digicoins_No_to_be_transacted, \"at\", lowest_price\n #update the clients list with a pair [price, digicoins]\n self.transactions.append([lowest_price, digicoins_No])\n else:\n print self.name, \"sells\", digicoins_No_to_be_transacted, \"at\", lowest_price\n self.transactions.append([lowest_price, -digicoins_No])"
] |
[
"0.66635925",
"0.66135406",
"0.64340895",
"0.63323474",
"0.63201946",
"0.6227278",
"0.61842847",
"0.61507404",
"0.61466444",
"0.6121741",
"0.610226",
"0.6071248",
"0.6053082",
"0.6041055",
"0.6032653",
"0.6019647",
"0.59909546",
"0.59867406",
"0.5973221",
"0.59658855",
"0.59468544",
"0.5904332",
"0.59024656",
"0.5900103",
"0.5898219",
"0.5885929",
"0.5871103",
"0.5860732",
"0.58468246",
"0.58284533"
] |
0.7401874
|
0
|
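The transfer_money record above checks the balance and moves the cash in separate statements; a minimal sketch, assuming the same Account model and available_cash field, of how the two updates could be wrapped in a single database transaction with row locks so that concurrent requests cannot both pass the balance check. The function name transfer_money_atomic, its signature, and the app.models import path are hypothetical.

from django.db import transaction as db_transaction
from app.models import Account  # assumed import path, as in the record above

def transfer_money_atomic(source_id, destination_id, amount):
    # Lock both account rows for the duration of the transaction, then apply
    # the same check-and-move logic as the view above.
    with db_transaction.atomic():
        source = Account.objects.select_for_update().get(pk=source_id)
        destination = Account.objects.select_for_update().get(pk=destination_id)
        if source.available_cash < amount:
            return False  # insufficient funds; nothing is changed
        source.available_cash -= amount
        destination.available_cash += amount
        source.save()
        destination.save()
    return True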
Plot a similarity matrix between a list of clusterings, using the provided scoring function.
|
def plot_sim_matrix(
clusterings: list, scoring: Callable[[object, object], object]
) -> object:
forDF = []
for c in clusterings:
cID = c.get_description()
for c2 in clusterings:
c2ID = c2.get_description()
forDF.append([cID, c2ID, scoring(c, c2).score])
df = pd.DataFrame(columns=["com1", "com2", "score"], data=forDF)
df = df.pivot(index="com1", columns="com2", values="score")
return sns.clustermap(df)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _plot_results(results, labels, score):\n \n labels_dict = {k: i for i, k in enumerate(labels)}\n rmin, rmax = min(results)[0], max(results)[0]\n to_color = lambda x : 0.8 * (x - rmax) / (rmin - rmax) + 0.5\n\n result_matrix = np.zeros((8, len(labels_dict)))\n labels_dict = {k: i for i, k in enumerate(labels)}\n for i, (v, r_labels) in enumerate(results):\n for k in r_labels:\n result_matrix[i, labels_dict[k]] = to_color(v)\n \n plt.figure(figsize=(11, 6))\n plt.imshow(result_matrix, cmap='Greys', aspect='auto')\n plt.xticks(np.arange(0, len(labels)), labels, rotation=90)\n plt.yticks(np.arange(0, len(results)), [round(x[0]) for x in results])\n plt.ylabel(score)\n plt.show()",
"def plot_composite_matrix(D, labeltext, show_labels=True, show_indices=True,\n vmax=1.0, vmin=0.0, force=False):\n if D.max() > 1.0 or D.min() < 0.0:\n print('This matrix doesn\\'t look like a distance matrix - min value {}, max value {}'.format(D.min(), D.max()))\n if not force:\n raise ValueError(\"not a distance matrix\")\n else:\n print('force is set; scaling to [0, 1]')\n D -= D.min()\n D /= D.max()\n\n if show_labels:\n show_indices = True\n\n fig = pylab.figure(figsize=(11, 8))\n ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])\n\n # plot dendrogram\n Y = sch.linkage(D, method='complete')\n\n dendrolabels = labeltext\n if not show_labels:\n dendrolabels = [str(i) for i in range(len(labeltext))]\n\n Z1 = sch.dendrogram(Y, orientation='left', labels=dendrolabels,\n no_labels=not show_indices)\n ax1.set_xticks([])\n\n xstart = 0.45\n width = 0.45\n if not show_labels:\n xstart = 0.315\n scale_xstart = xstart + width + 0.01\n\n # plot matrix\n axmatrix = fig.add_axes([xstart, 0.1, width, 0.6])\n\n # (this reorders D by the clustering in Z1)\n idx1 = Z1['leaves']\n D = D[idx1, :]\n D = D[:, idx1]\n\n # show matrix\n im = axmatrix.matshow(D, aspect='auto', origin='lower',\n cmap=pylab.cm.YlGnBu, vmin=vmin, vmax=vmax)\n axmatrix.set_xticks([])\n axmatrix.set_yticks([])\n\n # Plot colorbar.\n axcolor = fig.add_axes([scale_xstart, 0.1, 0.02, 0.6])\n pylab.colorbar(im, cax=axcolor)\n\n return fig",
"def partition_clusters(clustering_matrix, args, nr_clusters=5, method='complete', metric='euclidean', plotting=False):\n # clustering with linkage\n fig = plt.figure(figsize=(8,8))\n ax1 = fig.add_axes([0.09,0.1,0.2,0.6])\n # gives back linkage matrix after hierarchical clustering\n Y = sch.linkage(clustering_matrix, method=method,metric=metric)\n # creates dendogram for plotting and flattening\n Z = sch.dendrogram(Y, orientation='left')\n ax1.set_xticks([])\n ax1.set_yticks([])\n # calculate cluster membership\n # fcluster flattens out dendograms to the specified nr_clusters\n cluster_memberships = sch.fcluster(Y, t=nr_clusters, criterion='maxclust') # ith element in this array is the cluster for i\n idx = np.array(Z['leaves']) # idx ordered in cluster\n \n ax2 = fig.add_axes([0.3,0.71,0.6,0.2])\n Z2 = sch.dendrogram(Y)\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])\n\n clustering_matrix = clustering_matrix[idx,:]\n clustering_matrix = clustering_matrix[:,idx]\n im = axmatrix.matshow(clustering_matrix, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)\n axmatrix.set_xticks([])\n axmatrix.set_yticks([])\n\n # Plot colorbar.\n axcolor = fig.add_axes([0.91,0.1,0.02,0.6])\n plt.colorbar(im, cax=axcolor)\n if plotting:\n fig.savefig(f'{args.results_root_dir}/clust_{args.clustering_method}_nr_users-{args.num_users}_nr_of_partition_clusters_{nr_clusters}_method_{method}_reconstructed.png')\n\n # Plot filtered\n canvas = np.zeros_like(clustering_matrix)\n for i in range(1,nr_clusters+1):\n mask = np.ones_like(clustering_matrix)\n mask[cluster_memberships[idx]!=i,:] = 0\n mask[:,cluster_memberships[idx]!=i] = 0\n canvas+=clustering_matrix*mask\n fig = plt.figure()\n plt.matshow(canvas,origin='lower')\n if plotting:\n fig.savefig(f'{args.results_root_dir}/clust_{args.clustering_method}_nr_users-{args.num_users}_nr_of_partition_clusters_{nr_clusters}_method_{method}_filtered.png')\n\n d_error = np.sum(clustering_matrix-canvas)\n print(f'Decompostion error: {d_error}, {d_error/np.sum(clustering_matrix)}')\n\n # build cluster id to client id user dict\n cluster_user_dict = { i : idx[cluster_memberships==i] for i in range(1,nr_clusters+1)}\n\n # Test overlaps within clusters\n collected = []\n for i, cluster_members_a in cluster_user_dict.items():\n for j, cluster_members_b in cluster_user_dict.items():\n assert np.all(cluster_members_a != cluster_members_b) or set(cluster_members_a).intersection(set(cluster_members_b)) != {}, f'clusters {i} and {j} are not disjoint'\n collected.extend(cluster_members_a)\n assert np.all(np.arange(0,len(clustering_matrix),1) == np.sort(np.array(collected)))\n\n return cluster_user_dict",
"def visualize_clustering_results(cluster_points: list, labels: list) -> None:\n\n # First, split out the point tuples by label.\n points_by_label = defaultdict(list)\n for idx, point in enumerate(cluster_points):\n points_by_label[labels[idx]].append(point)\n\n # Next, stack the points for each label into a single array.\n big_xy_list_by_label = {}\n for label, points_for_that_label in points_by_label.items():\n big_xy_list_by_label[label] = np.stack(tuple(points_for_that_label))\n\n # Compute the centroids of each point cloud for labeling.\n centroids_by_label = {}\n for label, arr in big_xy_list_by_label.items():\n length = arr.shape[0]\n sum_x = np.sum(arr[:, 0])\n sum_y = np.sum(arr[:, 1])\n centroid = sum_x / length, sum_y / length\n centroids_by_label[label] = centroid\n\n # Initialize a counter to iterate through the color map\n i = 0\n plt.rcParams.update({\"font.size\": 22, \"font.weight\": \"bold\"})\n fig, ax = plt.subplots(figsize=(20, 20))\n for label, coords in centroids_by_label.items():\n ax.scatter(\n big_xy_list_by_label[label][:, 0],\n big_xy_list_by_label[label][:, 1],\n c=COLOR_DICT[i],\n s=50,\n alpha=0.5,\n label=label,\n )\n # plt.scatter(coords[0], coords[1], c=color_dict[i], label=label, s=100, alpha=0)\n ax.annotate(label, xy=coords, textcoords=\"data\", color=\"black\")\n i += 1\n ax.legend(loc=\"best\")\n plt.show()",
"def plot_scoring(\n graphs: list,\n ref_partitions: object,\n graph_names: list,\n methods: list,\n scoring: Callable[\n [object, object], object\n ] = cdlib.evaluation.adjusted_mutual_information,\n nbRuns: int = 5,\n) -> object:\n forDF = []\n for i, g in enumerate(graphs):\n for m in methods:\n for r in range(nbRuns):\n partition = m(g)\n\n score = scoring(partition, ref_partitions[i]).score\n forDF.append([graph_names[i], score, partition.get_description()])\n df = pd.DataFrame(columns=[\"graph\", \"score\", \"method\"], data=forDF)\n ax = sns.lineplot(x=\"graph\", y=\"score\", hue=\"method\", data=df, legend=\"brief\")\n ax.legend(loc=\"best\")\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n plt.tight_layout()\n\n return ax",
"def plot_similarity(self) -> None:\n if isinstance(self.model, FastTextWrapper):\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x), sentence_level=True))\n else:\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x))[0])\n messages = list(self.valid_data[\"label\"])\n vectors = list(self.valid_data[\"vector\"])\n similarity_matrix(messages=messages, vectors=vectors, name=self.folder, save_path=self.base_path)",
"def _plot_clusters(estimator, fdata, *, chart=None, fig=None, axes=None,\n n_rows=None, n_cols=None,\n labels, sample_labels, cluster_colors, cluster_labels,\n center_colors, center_labels, center_width, colormap):\n fig, axes = _get_figure_and_axes(chart, fig, axes)\n fig, axes = _set_figure_layout_for_fdata(fdata, fig, axes, n_rows, n_cols)\n\n _plot_clustering_checks(estimator, fdata, None, sample_labels,\n cluster_colors, cluster_labels, center_colors,\n center_labels)\n\n if sample_labels is None:\n sample_labels = [f'$SAMPLE: {i}$' for i in range(fdata.n_samples)]\n\n if cluster_colors is None:\n cluster_colors = colormap(\n np.arange(estimator.n_clusters) / (estimator.n_clusters - 1))\n\n if cluster_labels is None:\n cluster_labels = [\n f'$CLUSTER: {i}$' for i in range(estimator.n_clusters)]\n\n if center_colors is None:\n center_colors = [_darken(c, 0.5) for c in cluster_colors]\n\n if center_labels is None:\n center_labels = [\n f'$CENTER: {i}$' for i in range(estimator.n_clusters)]\n\n colors_by_cluster = cluster_colors[labels]\n\n patches = []\n for i in range(estimator.n_clusters):\n patches.append(\n mpatches.Patch(color=cluster_colors[i],\n label=cluster_labels[i]))\n\n for j in range(fdata.dim_codomain):\n for i in range(fdata.n_samples):\n axes[j].plot(fdata.sample_points[0],\n fdata.data_matrix[i, :, j],\n c=colors_by_cluster[i],\n label=sample_labels[i])\n for i in range(estimator.n_clusters):\n axes[j].plot(fdata.sample_points[0],\n estimator.cluster_centers_.data_matrix[i, :, j],\n c=center_colors[i],\n label=center_labels[i],\n linewidth=center_width)\n axes[j].legend(handles=patches)\n datacursor(formatter='{label}'.format)\n\n _set_labels(fdata, fig, axes)\n\n return fig",
"def plot_scores(scores: np.ndarray, gt_divisor=10, vecs_names=None, labels=None, colours=None, linestyles=None,\n title=None, type='plot', alphas=None, xtick_labels=None, ax=None, show=True, swapaxes=False):\n scs = deepcopy(scores)\n if 'ground_truth' in scores.dtype.names:\n scs['ground_truth'] /= gt_divisor\n\n if vecs_names is None:\n vecs_names = scs.dtype.names\n if labels is None:\n labs = [None for i in range(len(vecs_names))]\n else:\n labs = labels\n if colours is None:\n colours = [None for i in range(len(vecs_names))]\n if linestyles is None:\n linestyles = [None for i in range(len(vecs_names))]\n\n for nm, c, l, ls, al in zip(vecs_names, colours, labs, linestyles, alphas):\n mask = scs[nm] > MISSING # Leave out the pairs which aren't covered\n x = np.arange(scs[nm].shape[0])[mask]\n y = scs[nm][mask]\n if swapaxes:\n buf = deepcopy(x)\n x = y\n y = buf\n if type == 'scatter':\n ax.scatter(x, y, label=l, alpha=al, color=c)\n elif type == 'plot':\n ax.plot(x, y, label=l, alpha=al, color=c, linestyle=ls, lw=linewidth)\n if labels is not None:\n ax.legend(fontsize='small', loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0.2)\n if title:\n ax.set_title(title)\n if show:\n plt.show()\n\n return ax",
"def plot_clusters(data_table, cluster_list, draw_centers = False):\n\tfips_to_line = {}\n\tfor line_idx in range(len(data_table)):\n\t\tfips_to_line[data_table[line_idx][0]] = line_idx\n\n\t# Load map image\n\tmap_file = urllib2.urlopen(MAP_URL)\n\tmap_img = plt.imread(map_file)\n\n\t# Scale plot to get size similar to CodeSkulptor version\n\typixels, xpixels, bands = map_img.shape\n\tDPI = 60.0 # adjust this constant to resize your plot\n\txinch = xpixels / DPI\n\tyinch = ypixels / DPI\n\tplt.figure(figsize=(xinch,yinch))\n\timplot = plt.imshow(map_img)\n\n\t# draw the counties colored by cluster on the map\n\tif not draw_centers:\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tfor fips_code in cluster.fips_codes():\n\t\t\t\tline = data_table[fips_to_line[fips_code]]\n\t\t\t\tplt.scatter(x = [line[1]], y = [line[2]], s = circle_area(line[3]), lw = 1,\n\t\t\t\t\t\t\tfacecolors = cluster_color, edgecolors = cluster_color)\n\n\t# add cluster centers and lines from center to counties\n\telse:\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tfor fips_code in cluster.fips_codes():\n\t\t\t\tline = data_table[fips_to_line[fips_code]]\n\t\t\t\tplt.scatter(x = [line[1]], y = [line[2]], s = circle_area(line[3]), lw = 1,\n\t\t\t\t\t\t\tfacecolors = cluster_color, edgecolors = cluster_color, zorder = 1)\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tcluster_center = (cluster.horiz_center(), cluster.vert_center())\n\t\t\tfor fips_code in cluster.fips_codes():\n\t\t\t\tline = data_table[fips_to_line[fips_code]]\n\t\t\t\tplt.plot( [cluster_center[0], line[1]],[cluster_center[1], line[2]], cluster_color, lw=1, zorder = 2)\n\t\tfor cluster_idx in range(len(cluster_list)):\n\t\t\tcluster = cluster_list[cluster_idx]\n\t\t\tcluster_color = COLORS[cluster_idx % len(COLORS)]\n\t\t\tcluster_center = (cluster.horiz_center(), cluster.vert_center())\n\t\t\tcluster_pop = cluster.total_population()\n\t\t\tplt.scatter(x = [cluster_center[0]], y = [cluster_center[1]], s = circle_area(cluster_pop), lw = 2,\n\t\t\t\t\t\tfacecolors = \"none\", edgecolors = \"black\", zorder = 3)\n\n\tplt.show()",
"def overlapped_score_distribution(data, log_scale=False, save_figure_path=None):\n # set figure size\n plt.figure(figsize=(20, 10))\n\n # set color scheme and font size\n att_to_color = {\n \"AM\": \"blue\",\n \"AF\": \"orange\",\n \"IM\": \"green\",\n \"IF\": \"red\",\n \"BM\": \"Purple\",\n \"BF\": \"brown\",\n \"WM\": \"hotpink\",\n \"WF\": \"black\",\n }\n fontsize = 14\n\n # plot distribution for each ethnicity-gender attribute\n for att in [f\"{e}{g}\" for e in [\"A\", \"I\", \"B\", \"W\"] for g in [\"M\", \"F\"]]:\n data_att = data.loc[data[\"a1\"] == att]\n\n # plot intra score\n sns.distplot(\n data_att.loc[data_att[\"label\"] == 1][\"score\"],\n hist=False,\n label=att,\n color=att_to_color[att],\n )\n # plot inter score\n sns.distplot(\n data_att.loc[data_att[\"label\"] == 0][\"score\"],\n hist=False,\n color=att_to_color[att],\n kde_kws={\"linestyle\": \"--\"},\n )\n\n # set label and font sizes\n plt.xlabel(\"Cosine Similarity Score\", fontsize=fontsize)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n\n # create legend\n color_legend = plt.legend(fontsize=fontsize)\n solid_line = Line2D([0], [0], color=\"black\", linestyle=\"-\")\n dash_line = Line2D([0], [0], color=\"black\", linestyle=\"--\")\n plt.legend([solid_line, dash_line], [\"intra\", \"inter\"], fontsize=fontsize, loc=2)\n plt.gca().add_artist(color_legend)\n\n # handle log scale\n if log_scale:\n title = \"Score Distribution Log Scale\"\n plt.semilogy()\n plt.ylim([10 ** (-5), 10])\n else:\n title = \"Score Distribution\"\n\n # set title\n plt.title(title, fontsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)",
"def compute_cluster_similarities(emb_clusters1, emb_clusters2, compare, order, clmethod, plot):\n def compute_sim(e, e1, cls, cls1):\n sims = np.empty((20, 20))\n xticks, yticks = [], []\n for i, c in enumerate(cls):\n yticks.append(', '.join(c[1]) + (f' {round(c[3], 5)}' if order == 'avgfreq' else ''))\n for j, c1 in enumerate(cls1):\n if len(xticks) < 20:\n xticks.append(', '.join(c1[1]) + (f' {round(c1[3], 5)}' if order == 'avgfreq' else ''))\n sims[i, j] = jaccard_similarity_score(c[2], c1[2])\n jaccard_similarities[f'{e}-{e1}'] = sims\n\n if plot:\n if order == 'clustermap':\n similarity_clustermap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}')\n elif order == 'default' or order == 'avgfreq':\n similarity_heatmap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}', order)\n else:\n pass\n\n jaccard_similarities = {}\n if compare == 'cross':\n for ie, (e, cls) in enumerate(emb_clusters1.items()):\n for ie1, (e1, cls1) in enumerate(emb_clusters2.items()):\n if ie < ie1:\n compute_sim(e, e1, cls, cls1)\n elif compare == 'dot':\n for (e, cls), (e1, cls1) in zip(emb_clusters1.items(), emb_clusters2.items()):\n compute_sim(e, e1, cls, cls1)\n\n return jaccard_similarities",
"def plot_confusion(cv,\n labels,\n fn=None,\n figsize=(9, 9),\n vmax=None,\n cmap='gist_heat_r',\n ACC=None):\n\n import seaborn as sns\n import matplotlib.pyplot as plt\n origlabels = cv.ca.stats.labels\n origlabels_indexes = dict([(x, i) for i, x in enumerate(origlabels)])\n reorder = [origlabels_indexes.get(labels[i]) for i in range(len(labels))]\n matrix = cv.ca.stats.matrix[reorder][:, reorder].T\n # Plot matrix with color scaled to 90th percentile\n fig, ax = plt.subplots(figsize=figsize)\n im = sns.heatmap(100*matrix.astype(float)/np.sum(matrix, axis=1)[:, None],\n cmap=cmap,\n annot=matrix,\n annot_kws={'size': 8},\n fmt=',',\n square=True,\n ax=ax,\n vmin=0,\n vmax=vmax or np.percentile(matrix, 90),\n xticklabels=labels,\n yticklabels=labels)\n ax.xaxis.tick_top()\n if ACC:\n plt.suptitle('Mean accuracy of classification: {}'.format(ACC))\n plt.xticks(rotation=90)\n plt.xlabel('Predicted labels')\n plt.ylabel('Actual labels')\n ax.xaxis.set_label_position('top')\n plt.tight_layout()\n if fn:\n plt.savefig(fn)\n else:\n # if matrix isn't saved, just show it\n plt.show()",
"def clustering(clusters, dend_matrix, labels, linkagefun):\n Z = linkagefun(dend_matrix)\n color_threshold = Z[-1*clusters][2]+0.0000000001 #Cut slightly above the tree node\n \n # Defining to which cluster belongs to each simulation\n T = fcluster(Z, t=clusters, criterion='maxclust')\n clustdict = { \"cluster\" + str(clust) : [] for clust in T }\n for sim,clust in zip(labels,T):\n clustdict[\"cluster\" + str(clust)].append(sim)\n\n return(color_threshold, clustdict)",
"def plot_clusters(self):\n pass",
"def plot_silhouette_scores(frame, range_n_clusters, diversity_matrix, merges):\n for n_clusters in range_n_clusters:\n # Create a subplot with 1 row and 2 columns\n fig, ax1 = plt.subplots(1, 1)\n fig.set_size_inches(18, 7)\n\n # The 1st subplot is the silhouette plot\n # The silhouette coefficient can range from -1, 1 but in this example all\n # lie within [-0.1, 1]\n ax1.set_xlim([-0.5, 1])\n # The (n_clusters+1)*10 is for inserting blank space between silhouette\n # plots of individual clusters, to demarcate them clearly.\n ax1.set_ylim([0, len(frame) + (n_clusters + 1) * 10])\n\n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n\n # The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n labels = fcluster(merges, n_clusters, \"maxclust\")\n silhouette_avg = silhouette_score(diversity_matrix, labels, metric=\"precomputed\")\n\n # Compute the silhouette scores for each sample\n sample_silhouette_values = silhouette_samples(diversity_matrix, labels, metric=\"precomputed\")\n\n y_lower = 10\n for i in range(1, n_clusters + 1):\n # Aggregate the silhouette scores for samples belonging to\n # cluster i, and sort them\n ith_cluster_silhouette_values = \\\n sample_silhouette_values[labels == i]\n\n ith_cluster_silhouette_values.sort()\n\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n\n color = cm.nipy_spectral(float(i) / n_clusters)\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\n 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.7)\n\n # Label the silhouette plots with their cluster numbers at the middle\n ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n\n # Compute the new y_lower for next plot\n y_lower = y_upper + 10 # 10 for the 0 samples\n\n ax1.set_xlabel(\"Silhouette Coefficient\")\n ax1.set_ylabel(\"Cluster Number\")\n\n # The vertical line for average silhouette score of all the values\n ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\n\n ax1.set_yticks([]) # Clear the yaxis labels / ticks\n ax1.set_xticks([-0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])\n\n plt.title(\"Silhouette Analysis For DDTW H. Clustering\")\n\n plt.show()",
"def print_clusters(vectors, labels, nclusters, show=False):\n plt.figure(1)\n plt.clf()\n\n vecs2D = TSNE(n_components=2).fit_transform(vectors)\n\n colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n for k, col in zip(range(nclusters), colors):\n my_members = labels == k\n\n cluster_vecs2D = vecs2D[my_members, :]\n\n print(cluster_vecs2D)\n print(cluster_vecs2D[:,0])\n print(cluster_vecs2D[:,1])\n\n plt.scatter(cluster_vecs2D[:,0], \n cluster_vecs2D[:,1], \n c=col, \n label='cluster {}'.format(k))\n\n plt.title('Estimated clusters')\n plt.legend()\n\n if show:\n plt.show()\n\n cwd = os.getcwd()\n if not os.path.exists(cwd+\"/plots\"):\n os.makedirs(cwd+\"/plots\")\n plt.savefig(cwd+'/plots/clusters.png')",
"def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers",
"def score_heatmap(scores: np.ndarray):\n\n # plot the heatmap\n ax = sns.heatmap(scores.astype(int),\n annot=True,\n fmt='d',\n linewidths=0.5,\n square=True,\n cbar=False,\n cmap=plt.cm.Blues\n )\n\n # set the ticks for the labels\n ax.set_yticklabels(range(1,6))\n ax.set_xticklabels(range(1,11))",
"def display_clusters(assign):\n for c in assign:\n plt.plot(c[0], c[1], \"r*\")\n plt.plot(\n [p[0] for p in assign[c]],\n [p[1] for p in assign[c]],\n \"o\"\n )\n plt.show()\n plt.close()",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def calculate_similarity(self, cluster0, cluster1):\n\n def compare_spectrum(spectrum0, spectrum1): \n \"\"\"\n Compare a pair of spectra to decide the\n order. \n :param: pair of spectra\n :return: 0 equal, -1 spectrum0 is less,\n 1, spectrum0 is bigger.\n \"\"\"\n title0 = spectrum0.get_title() \n title1 = spectrum1.get_title() \n if(title0 < title1): \n return -1\n elif(title0 > title1): \n return 1\n else:\n return 0\n # end of compare_spectrum function\n\n spectra0 = self.sorted_spectra_dict[cluster0.id]\n spectra1 = self.sorted_spectra_dict[cluster1.id]\n\n (n,i,j) = (0,0,0)\n while(i<len(spectra0) and j<len(spectra1)):\n comp_score = compare_spectrum(spectra0[i], spectra1[j])\n if(comp_score < 0):\n i += 1\n elif(comp_score > 0):\n j += 1\n else: \n n += 1\n i += 1\n j += 1\n similarity_score = 0.5 * (n/len(spectra0) + n/len(spectra1))\n return (n,similarity_score)",
"def plot_results(self, predictions: list):\n fig, ax = plt.subplots()\n cm = confusion_matrix(self.test[1], predictions)\n conf = confusion_matrix(self.test[1], predictions).ravel()\n nbr_labels = len(set(self.test[1]))\n cm = conf.reshape(nbr_labels, nbr_labels)\n sns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Spectral\")\n ax.set_xlabel(\"predicted label\")\n ax.set_ylabel(\"true label\")\n fig.savefig(\"confusion_matrix\")\n\n fig, ax = plt.subplots()\n x = self.train[0] + self.test[0]\n y = self.train[1] + self.test[1]\n x = [i[0] for i in x]\n y = [i for i in y]\n results = pd.DataFrame({\"polarity strength\": x, \"true label\": y})\n sns.boxplot(data=results, x=\"true label\", y=\"polarity strength\")\n fig.savefig(\"boxplot\")",
"def _plot_clustering_checks(estimator, fdata, sample_colors, sample_labels,\n cluster_colors, cluster_labels,\n center_colors, center_labels):\n\n if sample_colors is not None and len(\n sample_colors) != fdata.n_samples:\n raise ValueError(\n \"sample_colors must contain a color for each sample.\")\n\n if sample_labels is not None and len(\n sample_labels) != fdata.n_samples:\n raise ValueError(\n \"sample_labels must contain a label for each sample.\")\n\n if cluster_colors is not None and len(\n cluster_colors) != estimator.n_clusters:\n raise ValueError(\n \"cluster_colors must contain a color for each cluster.\")\n\n if cluster_labels is not None and len(\n cluster_labels) != estimator.n_clusters:\n raise ValueError(\n \"cluster_labels must contain a label for each cluster.\")\n\n if center_colors is not None and len(\n center_colors) != estimator.n_clusters:\n raise ValueError(\n \"center_colors must contain a color for each center.\")\n\n if center_labels is not None and len(\n center_labels) != estimator.n_clusters:\n raise ValueError(\n \"centers_labels must contain a label for each center.\")",
"def cluster_and_plot_correlation_matrix(C, column_names, how_to_sort):\n C = copy.deepcopy(C)\n if np.abs(C).max() - 1 > 1e-6:\n print(\"Warning: maximum absolute value in C is %2.3f, which is larger than 1; this will be truncated in the visualization.\" % np.abs(C).max()) \n for i in range(len(C)):\n if(np.abs(C[i, i] - 1) > 1e-6):\n print(\"Warning: correlation matrix diagonal entry is not one (%2.8f); setting to one for visualization purposes.\" % C[i, i].mean())\n C[i, i] = 1 # make it exactly one so hierarchical clustering doesn't complain. \n C[C > 1] = 1\n C[C < -1] = -1\n assert how_to_sort in ['alphabetically', 'hierarchical']\n assert(len(C) == len(column_names))\n \n if how_to_sort == 'hierarchical':\n y = squareform(1 - np.abs(C))\n Z = linkage(y, method = 'average')\n clusters = fcluster(Z, t = 0)\n # print(clusters)\n reordered_idxs = np.argsort(clusters)\n else:\n reordered_idxs = np.argsort(column_names)\n \n C = C[:, reordered_idxs]\n C = C[reordered_idxs, :]\n plt.figure(figsize=[50, 50])\n plt.set_cmap('bwr')\n plt.imshow(C, vmin = -1, vmax = 1)\n reordered_colnames = np.array(column_names)[reordered_idxs]\n plt.yticks(range(len(column_names)), \n reordered_colnames, \n fontsize = 24)\n plt.xticks(range(len(column_names)), \n reordered_colnames,\n rotation = 90, \n fontsize = 24)\n plt.colorbar()\n for i in range(len(C)):\n for j in range(len(C)):\n if np.abs(C[i][j]) > .1:\n plt.scatter([i], [j], color = 'black', s = 1)\n plt.show()",
"def clustering(rating_matrix):\r\n\r\n #Fitting k-means to the dataset\r\n kmeans = KMeans(n_clusters=5, init='k-means++')\r\n kmeans.fit_predict(rating_matrix)\r\n\r\n #Centre - Item rating matrix\r\n\r\n centres = np.array(kmeans.cluster_centers_)\r\n\r\n #Appending labels to the user-item rating matrix\r\n labels = np.array(kmeans.labels_)\r\n A_matrix = np.c_[rating_matrix, labels]\r\n\r\n return A_matrix , centres",
"def plot_cluster_lines(estimator, X, chart=None, fig=None, axes=None,\n sample_colors=None, sample_labels=None,\n cluster_labels=None,\n colormap=plt.cm.get_cmap('rainbow'),\n x_label=None, y_label=None, title=None):\n fdata = X\n _check_if_estimator(estimator)\n\n if not isinstance(estimator, FuzzyKMeans):\n raise ValueError(\"The estimator must be a FuzzyKMeans object.\")\n\n try:\n estimator._check_is_fitted()\n estimator._check_test_data(X)\n except NotFittedError:\n estimator.fit(X)\n\n fig, axes = _get_figure_and_axes(chart, fig, axes)\n fig, axes = _set_figure_layout(fig, axes)\n\n _plot_clustering_checks(estimator, fdata, sample_colors, sample_labels,\n None, cluster_labels, None, None)\n\n x_label, y_label, title = _get_labels(x_label, y_label, title, \"Cluster\")\n\n if sample_colors is None:\n cluster_colors = colormap(np.arange(estimator.n_clusters) /\n (estimator.n_clusters - 1))\n labels_by_cluster = np.argmax(estimator.labels_, axis=1)\n sample_colors = cluster_colors[labels_by_cluster]\n\n if sample_labels is None:\n sample_labels = ['$SAMPLE: {}$'.format(i) for i in\n range(fdata.n_samples)]\n\n if cluster_labels is None:\n cluster_labels = ['${}$'.format(i) for i in\n range(estimator.n_clusters)]\n\n axes[0].get_xaxis().set_major_locator(MaxNLocator(integer=True))\n for i in range(fdata.n_samples):\n axes[0].plot(np.arange(estimator.n_clusters),\n estimator.labels_[i],\n label=sample_labels[i],\n color=sample_colors[i])\n axes[0].set_xticks(np.arange(estimator.n_clusters))\n axes[0].set_xticklabels(cluster_labels)\n axes[0].set_xlabel(x_label)\n axes[0].set_ylabel(y_label)\n datacursor(formatter='{label}'.format)\n\n fig.suptitle(title)\n return fig",
"def plot_clusters(true_data, preds, cluster_center, cluster_name, savefig=\"\", title=\"\"):\n\n colors = plt.cm.get_cmap('hsv', len(cluster_name)+1) # get colors for each cluster using get_cmap. This will give us len(cluster_name) colors in a object form.\n \n for i, c in enumerate(cluster_name): # iterate through each cluster name\n if c == -1: # -1 is given by DBScan for noise\n clrs = 'grey' # make it grey\n label = 'Noise' # label it 'Noise'\n else:\n clrs = colors(c) # get color for it\n label=f'Cluster {c}' # label it by its name\n df = true_data[preds == c] # get the points from dataset whose prediction was cluster `c`\n x, y = df.iloc[:, 0], df.iloc[:, 1] # x and y axis\n plt.scatter( # plotting the x and y axis\n x, y,\n label=label,\n color=clrs\n )\n if c != -1:\n plt.text(\n cluster_center[i][0] + 0.03, cluster_center[i][1] + 0.1,\n f\"Cluster {i}\",\n weight='bold',\n fontsize=9,\n )\n \n plt.scatter(\n cluster_center[:, 0], cluster_center[:, 1], # plotting the cluster centers\n s=250, marker='*',\n c='red', edgecolor='black',\n label='Centroids'\n )\n \n plt.title(title)\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.tight_layout()\n if savefig != \"\" : plt.savefig(f\"{savefig}.png\")\n plt.show()\n plt.close()",
"def cluster_linkage_seaborn(features, \n metadata, \n groupby='gene_name', \n saveDir=None, \n method='average', \n metric='euclidean'):\n \n # Normalise data\n featZ = features.apply(zscore, axis=0)\n featZ = dropNaN(featZ) # drop NaN values after normalising\n\n plt.close('all')\n cg = plot_clustermap(featZ, \n metadata,\n group_by=groupby,\n col_linkage=None,\n method=method,\n metric=metric,\n saveto=(saveDir / \"heatmap_{}.pdf\".format(method + '_' + metric) if \n saveDir is not None else None),\n figsize=[20,40],\n sns_colour_palette=\"Pastel1\",\n sub_adj={'top':0.98,'bottom':0.02,'left':0.02,'right':0.9})\n plt.close()\n\n # extract distances from clustermap dendrogram\n Z = cg.dendrogram_row.linkage\n \n # extract mean df (one sample per row)\n mean_featZ = cg.data\n\n # extract row labels from clustermap heatmap\n labels = sorted(metadata[groupby].unique())\n mean_featZ.index = labels # strain names as index \n \n return Z, mean_featZ",
"def cluster_graph(dist_df, graph):\n # change and modify as per your data and needs (although remember\n # you are using a precomputed distance matrix and most sklearn assume\n # you are providing raw data)\n model = sklearn.cluster.SpectralClustering(affinity='precomputed',\n n_clusters=5)\n\n model.fit(dist_df.values)\n\n labels = {node: label for node, label in zip(dist_df.columns,\n model.labels_)}\n\n nx.set_node_attributes(graph, labels, \"cluster\")\n\n return graph",
"def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))"
] |
[
"0.6115021",
"0.59878033",
"0.59782255",
"0.5956138",
"0.59467465",
"0.5874676",
"0.586053",
"0.5834884",
"0.57424116",
"0.573193",
"0.5709266",
"0.56929475",
"0.5670776",
"0.5646054",
"0.5634125",
"0.5631307",
"0.5604918",
"0.5588165",
"0.55874056",
"0.5565134",
"0.5547118",
"0.5533668",
"0.5530083",
"0.55251884",
"0.54840803",
"0.54678035",
"0.54632586",
"0.5456084",
"0.54504824",
"0.54502416"
] |
0.8091768
|
0
|
Plot the distribution of a property among all communities for a clustering, or a list of clusterings (violinplots)
|
def plot_com_stat(
    com_clusters: list, com_fitness: Callable[[object, object, bool], object]
) -> object:
    # Accept either a single Clustering object or a list of Clusterings
    if isinstance(com_clusters, cdlib.classes.clustering.Clustering):
        com_clusters = [com_clusters]
    allVals = []
    allNames = []
    for c in com_clusters:
        # summary=False yields one fitness value per community in the clustering
        prop = com_fitness(c.graph, c, summary=False)
        allVals += prop
        allNames += [c.get_description()] * len(prop)
    # One violin per clustering, showing how the property is distributed across its communities
    ax = sns.violinplot(x=allNames, y=allVals, cut=0, saturation=0.5, palette="Set3")
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
    plt.ylabel("%s" % com_fitness.__name__)
    plt.xlabel("Algorithm")
    plt.tight_layout()
    return ax
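# --- Usage sketch (illustrative addition, not part of the original snippet) ---
# A minimal example of how plot_com_stat might be called, assuming it is
# available together with the cdlib and networkx packages. The particular
# community detection algorithms (Louvain, Leiden) and the fitness function
# (evaluation.size) are assumptions chosen for illustration only.
import networkx as nx
from cdlib import algorithms, evaluation

g = nx.karate_club_graph()
coms_louvain = algorithms.louvain(g)    # one Clustering object
coms_leiden = algorithms.leiden(g)      # a second clustering to compare against
ax = plot_com_stat([coms_louvain, coms_leiden], evaluation.size)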
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_com_properties_relation(\n com_clusters: object,\n com_fitness_x: Callable[[object, object, bool], object],\n com_fitness_y: Callable[[object, object, bool], object],\n **kwargs: dict\n) -> object:\n if isinstance(com_clusters, cdlib.classes.clustering.Clustering):\n com_clusters = [com_clusters]\n\n for_df = []\n\n for c in com_clusters:\n x = com_fitness_x(c.graph, c, summary=False)\n y = com_fitness_y(c.graph, c, summary=False)\n for i, vx in enumerate(x):\n for_df.append([c.get_description(), vx, y[i]])\n\n df = pd.DataFrame(\n columns=[\"Method\", com_fitness_x.__name__, com_fitness_y.__name__], data=for_df\n )\n ax = sns.lmplot(\n x=com_fitness_x.__name__,\n y=com_fitness_y.__name__,\n data=df,\n hue=\"Method\",\n fit_reg=False,\n legend=False,\n x_bins=100,\n **kwargs\n )\n plt.legend(loc=\"best\")\n plt.tight_layout()\n\n return ax",
"def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))",
"def display_clusters(assign):\n for c in assign:\n plt.plot(c[0], c[1], \"r*\")\n plt.plot(\n [p[0] for p in assign[c]],\n [p[1] for p in assign[c]],\n \"o\"\n )\n plt.show()\n plt.close()",
"def plot_clusters(self):\n pass",
"def plot_many(self, property_list):\n\t\tself.property_existence(property_list)\n\n\t\tsns.pairplot(self.df[property_list])\n\t\tplt.tight_layout()\n\t\tplt.show()",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers",
"def plot_pipeline_diagrams(phenotype):\n # Read data\n n = 10\n replication_results = pd.read_table(f'{phenotype}_replication_scores.txt', sep=\"\\t\").head(n)\n\n # Wider ranksep for the more convoluted result\n if phenotype == \"p_cd4difw48w4\":\n ranksep = 4\n else:\n ranksep = 2\n\n # Set up graph\n graph = pydot.Dot(graph_type='digraph', rankdir=\"LR\", ranksep=ranksep, nodesep=0.02,\n label=f\"{VARIABLE_DESCRIPTIONS[phenotype]}\", labelloc=\"t\", fontsize=TITLE_FONTSIZE)\n\n # Set up clusters\n cluster_fss = pydot.Cluster('fss', label='Feature Set Selector', rank=\"same\", penwidth=0)\n cluster_transformer = pydot.Cluster('transformer', label='Transformer', rank=\"same\", penwidth=0)\n cluster_regressor = pydot.Cluster('regressor', label='Regressor', rank=\"same\", penwidth=0)\n cluster_score = pydot.Cluster('score', label='R^2 Score', rank=\"same\", penwidth=0)\n\n # Add clusters\n graph.add_subgraph(cluster_fss)\n graph.add_subgraph(cluster_transformer)\n graph.add_subgraph(cluster_regressor)\n graph.add_subgraph(cluster_score)\n\n # Setup representative nodes and add them to their clusters\n cluster_fss_node = pydot.Node('cluster_fss', style='invis', shape='point')\n cluster_fss.add_node(cluster_fss_node)\n cluster_transformer_node = pydot.Node('cluster_transformer', style='invis', shape='point')\n cluster_transformer.add_node(cluster_transformer_node)\n cluster_regressor_node = pydot.Node('cluster_regressor', style='invis', shape='point')\n cluster_regressor.add_node(cluster_regressor_node)\n cluster_score_node = pydot.Node('cluster_score', style='invis', shape='point')\n cluster_score.add_node(cluster_score_node)\n\n # Link Clusters via their representative nodes\n graph.add_edge(pydot.Edge(cluster_fss_node, cluster_transformer_node, style=\"invisible\", arrowhead=\"none\", weight=1000))\n graph.add_edge(pydot.Edge(cluster_transformer_node, cluster_regressor_node, style=\"invisible\", arrowhead=\"none\", weight=1000))\n graph.add_edge(pydot.Edge(cluster_regressor_node, cluster_score_node, style=\"invisible\", arrowhead=\"none\", weight=1000))\n\n # Create Nodes\n fss_nodes = []\n for fss in replication_results['FSS Name'].unique():\n node = pydot.Node(fss, label=format_fss_name(fss), shape='box', style='rounded', fontsize=TEXT_FONTSIZE)\n cluster_fss.add_node(node)\n fss_nodes.append(node)\n transformer_nodes = []\n for transformer in replication_results['Transformer'].unique():\n node = pydot.Node(transformer, fontsize=TEXT_FONTSIZE)\n cluster_transformer.add_node(node)\n transformer_nodes.append(node)\n regressor_nodes = []\n for regressor in replication_results['Regressor'].unique():\n node = pydot.Node(regressor, fontsize=TEXT_FONTSIZE)\n cluster_regressor.add_node(node)\n regressor_nodes.append(node)\n\n # Create score nodes from min score to max score, marking every 0.001\n max_score = math.ceil(replication_results['R^2 Score'].max() * 100) / 100\n min_score = math.floor(replication_results['R^2 Score'].min() * 100) / 100\n last = None\n\n # Iterate through a range of scores using integers\n i = max_score * 1000\n while i >= (min_score * 1000): \n score = format_score(i/1000)\n if i % 10 == 0:\n node = pydot.Node(score, shape=\"plain\", label=score, fontsize=TEXT_FONTSIZE)\n else:\n node = pydot.Node(score, shape=\"point\")\n cluster_score.add_node(node)\n # Decrement\n i -= 1\n # Add edge\n if last is not None:\n cluster_score.add_edge(pydot.Edge(last, node, penwidth=0.5, constraint=\"false\", arrowhead=\"none\", len=0.01))\n last = node\n\n # Add each pipeline\n for 
idx, row in replication_results.iterrows():\n fss = row['FSS Name']\n transformer = row['Transformer']\n regressor = row['Regressor']\n score = format_score(row['R^2 Score'])\n color = COLORS[idx]\n penwidth = PATH_WIDTH[idx]\n graph.add_edge(pydot.Edge(fss, transformer, color=color, label=str(idx+1), penwidth=penwidth, constraint=\"false\", fontsize=TEXT_FONTSIZE))\n graph.add_edge(pydot.Edge(transformer, regressor, color=color, label=str(idx+1), penwidth=penwidth, constraint=\"false\", fontsize=TEXT_FONTSIZE))\n graph.add_edge(pydot.Edge(regressor, score, color=color, label=str(idx+1), penwidth=penwidth, constraint=\"false\", fontsize=TEXT_FONTSIZE))\n\n graph.write_png(f\"plots/{phenotype}_pipeline_diagram.png\")\n graph.write_svg(f\"plots/{phenotype}_pipeline_diagram.svg\")",
"def plot(self, p: int):\n self.compute_clusters(p)\n self.plot_clusters()",
"def print_clusters(vectors, labels, nclusters, show=False):\n plt.figure(1)\n plt.clf()\n\n vecs2D = TSNE(n_components=2).fit_transform(vectors)\n\n colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n for k, col in zip(range(nclusters), colors):\n my_members = labels == k\n\n cluster_vecs2D = vecs2D[my_members, :]\n\n print(cluster_vecs2D)\n print(cluster_vecs2D[:,0])\n print(cluster_vecs2D[:,1])\n\n plt.scatter(cluster_vecs2D[:,0], \n cluster_vecs2D[:,1], \n c=col, \n label='cluster {}'.format(k))\n\n plt.title('Estimated clusters')\n plt.legend()\n\n if show:\n plt.show()\n\n cwd = os.getcwd()\n if not os.path.exists(cwd+\"/plots\"):\n os.makedirs(cwd+\"/plots\")\n plt.savefig(cwd+'/plots/clusters.png')",
"def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def plotProp(pdict, title=None, sameax=True, showmean=True, \n bounds=[None,None]):\n try:\n pdict.pop('all stats')\n except:\n pass\n spk, groups = [], list(pdict.keys())\n fig = plt.figure()\n c_colors = {}\n \n if sameax:\n ax = fig.add_subplot(111)\n for g in range(len(groups)):\n sofar = []\n for cell in pdict[groups[g]].keys():\n if cell not in c_colors.keys():\n c_colors[cell] = np.random.random(3)\n this = [u for u in pdict[groups[g]][cell][0]]\n if len(pdict[groups[g]][cell]) > 1:\n for sp in pdict[groups[g]][cell][1]:\n this.append(sp)\n ax.plot([i for i in np.random.normal(loc=g, scale=0.1, size=len(this))], this, 'o',\n color=c_colors[cell], label=groups[g], alpha=0.3,\n markeredgecolor='none', markersize=1)\n for t in this:\n sofar.append(t)\n if showmean:\n ax.plot([g-.5,g+.5], [np.mean(sofar), np.mean(sofar)],\n '--', color='black', lw=2)\n # Cosmetics\n plt.xticks(range(len(groups)), groups, rotation=30)\n plt.ylim([bounds[0], bounds[1]])\n \n else:\n plots = [fig.add_subplot(1, len(groups)+1, p) for p in range(len(groups))]\n for g in range(len(groups)):\n for cell in pdict[groups[g]].keys():\n if cell not in c_colors.keys():\n c_colors[cell] = np.random.random(3)\n this = [u for u in pdict[groups[g]][cell][0]]\n if len(pdict[groups[g]][cell]) > 1:\n for sp in pdict[groups[g]][cell][1]:\n this.append(sp)\n plots[g].plot([i+g for i in np.random.random(len(this))], this, 'o',\n color=c_colors[cell], label=groups[g], alpha=0.3,\n markeredgecolor='none')\n \n if title:\n plt.title(title)\n plt.show()\n return",
"def plt_gm_clusters(df_all, model):\n\n # color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])\n\n color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])\n\n df = df_all[featureSet_dic[clus_params['feat_list']]].copy()\n\n XX = df.values\n Y_ = model.predict(XX) # predict labels for each model\n\n plt.figure(figsize=(8, 6))\n splot = plt.subplot(1, 1, 1)\n\n for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):\n\n if \"MEAN\" in clus_params['feat_list']:\n v, w = linalg.eigh(cov)\n else:\n\n subset = [0, 5] # mean torque L & R\n v, w = linalg.eigh(cov[np.ix_(subset, subset)])\n mean = np.array([mean[0], mean[5]])\n\n if not np.any(Y_ == i):\n continue\n\n if \"MEAN\" in clus_params['feat_list']:\n plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)\n else:\n plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan2(w[0][1], w[0][0])\n angle = 180. * angle / np.pi # convert to degrees\n v = 2. * np.sqrt(2.) * np.sqrt(v)\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)\n ell.set_clip_box(splot.bbox)\n ell.set_alpha(.5)\n splot.add_artist(ell)\n\n plt.xticks(())\n plt.yticks(())\n\n plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))\n plt.subplots_adjust(hspace=.35, bottom=.02)\n plt.show()",
"def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()",
"def plot_and_spearman_task4(infection_times_median, clustering_coefficient_net, degree_net, strength_net,\n betweenness_centrality_net, n_nodes):\n # ordered list of values, the index represent the node\n infection_times_median_list = []\n clustering_coefficient_net_list = []\n degree_net_list = []\n strength_net_list = []\n betweenness_centrality_net_list = []\n\n for i in range(n_nodes):\n infection_times_median_list.append(infection_times_median[str(i)])\n clustering_coefficient_net_list.append(clustering_coefficient_net[str(i)])\n degree_net_list.append(degree_net[str(i)])\n strength_net_list.append(strength_net[str(i)])\n betweenness_centrality_net_list.append(betweenness_centrality_net[str(i)])\n\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(clustering_coefficient_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the unweighted clustering coefficient')\n ax.set_xlabel(r'clustering coefficient $c$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_clustering_coefficient.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(degree_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the degree')\n ax.set_xlabel(r'degree $k$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_degree_net.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(strength_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the strength')\n ax.set_xlabel(r'strength $s$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_strength_net.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(betweenness_centrality_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the unweighted betweenness centrality')\n ax.set_xlabel(r'betweenness centrality')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_betweenness_centrality_net.pdf\")\n\n # Spearman rank-correlation coefficient\n print(\"Spearman rank-correlation coefficient between median infection time and: \")\n print(\"- clustering coefficient: \" + str(\n spearmanr(infection_times_median_list, clustering_coefficient_net_list).correlation))\n print(\"- degree: \" + str(spearmanr(infection_times_median_list, degree_net_list).correlation))\n print(\"- strength: \" + str(spearmanr(infection_times_median_list, strength_net_list).correlation))\n print(\"- betweenness centrality: \" + str(\n spearmanr(infection_times_median_list, betweenness_centrality_net_list).correlation))",
"def plot_cluster_distributions(name, dist_arr):\n plt.clf()\n\n # create subplots with 1 row and len(dist_arr) columns\n fig, axs = plt.subplots(1, len(dist_arr), sharey=True)\n fig.suptitle(\"Cluster Distributions\")\n fig.text(0.04, 0.5, 'Relative Cluster Size',\n va='center', rotation='vertical')\n\n for i, vals in enumerate(dist_arr):\n dist, title, xticks = vals[\"dist\"], vals[\"title\"], vals[\"xticks\"]\n\n # go from [0...n] to [1...n+1]\n dist = dist + 1\n\n # set weights so sum of all bins adds to 100\n weights = 100 * np.ones_like(dist) / dist.size\n\n # plot the histogram\n axs[i].hist(dist, weights=weights, edgecolor=\"black\")\n axs[i].set_xticks(xticks)\n axs[i].set_title(title, y=-0.01, pad=-26)\n\n plt.savefig(f\"{name}/cluster_distributions.jpeg\")",
"def plot_priors(params):\n prior_dicts = {'ic' : params['ic_prior'], 'ii' : params['ii_prior']}\n pidxs = (pidx for pidx in onp.arange(1,12))\n f = plt.figure(figsize=(12,8))\n for k in prior_dicts:\n for j in prior_dicts[k]:\n plt.subplot(2,3,next(pidxs));\n data = prior_dicts[k][j]\n if \"log\" in j:\n data = onp.exp(data)\n j_title = j.strip('log')\n else:\n j_title = j\n plt.stem(data)\n plt.title(k + ' ' + j_title)\n return f",
"def plot_collective(xdict, ydict, xprop, yprop, documents):\n x_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n y_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n for item in documents:\n if item[\"path_id\"][-3:] == \"001\":\n x_ion[item[\"cation_type\"]].append(xdict[item[\"path_id\"]])\n y_ion[item[\"cation_type\"]].append(ydict[item[\"path_id\"]])\n fig = plt.figure(figsize=(6,6), dpi=plotting_dpi)\n ax = fig.add_subplot(111)\n for ion in [\"Mg\", \"Ca\", \"Zn\", \"Li\", \"Na\"]:\n ax.scatter(x_ion[ion], y_ion[ion], s=70, zorder=2, color=color_dict[ion], linewidths=2.5, edgecolors='black',\n label=ion)\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n \n # # Plot lines for fitting, if useful\n # x2 = np.arange(-700, 3300, 1)\n # ax.plot(x2, x2)\n \n # # For setting axis boundaries\n # ax.set_xlim([-700, 3500])\n # ax.set_ylim([0,100])\n \n # Plot display settings\n ax.set_xlabel(xlabel, fontsize=24)\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size':10})\n # plt.legend(loc='best')\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.show()",
"def plot(self, \n\t\t\t voronoi: bool = False):\n\t\tif (voronoi):\n\t\t\tif (len(self.X) == 2):\n\t\t\t\tfrom verticapy.learn.plot import voronoi_plot\n\t\t\t\tquery = \"SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'centers')\".format(self.name)\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tclusters = self.cursor.fetchall()\n\t\t\t\tvoronoi_plot(clusters = clusters, columns = self.X)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Voronoi Plots are only available in 2D\")\n\t\telse:\n\t\t\tvdf = vDataFrame(self.input_relation, self.cursor)\n\t\t\tself.predict(vdf, \"kmeans_cluster\")\n\t\t\tif (len(self.X) <= 3):\n\t\t\t\tvdf.scatter(columns = self.X, catcol = \"kmeans_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def plot_clustered_data(points, c_means, covs, test_name, image_num, gaussians):\n\t#colors = cm.rainbow(np.linspace(0, 1, gaussians))\n\tcolors = ['b', 'g', 'm', 'y', 'c', 'k']\n\n\tax = plt.gca()\n\t#for i in range(points.shape[1]):\n\t\t#plt.plot(points[:, i][0], points[:, i][1], \".\", color=\"r\", zorder=0)\n\tplt.plot(points[0], points[1], \".\", color=\"r\", zorder=0)\n\t\n\tfor i in range(gaussians):\n\t\tplt.plot(c_means[i][0], c_means[i][1], \".\", color=colors[i], zorder=1)\n\n\t\twidth, height, theta = cov_ellipse(points, covs[i], nstd=2)\n\t\tellipse = Ellipse(xy=(c_means[i][0], c_means[i][1]), width=width, \\\n\t\t\t\theight=height, angle=theta, edgecolor=colors[i], fc='None', lw=2,\n\t\t\t\t\t\tzorder=4)\n\n\t\tax.add_patch(ellipse)\n\t\n\tplt.savefig(\"./images/{0}/{1:08d}.png\".format(test_name, image_num))\n\tplt.close()",
"def network_schematic(clusters, primaries, edges):\n # Define the node positions (primaries on the left, secondaries on the right)\n n = len(clusters)\n pos = np.array([[1.-float(i in primaries), i] for i in range(n)])\n\n # Define the node features (label, size, color)\n node_labels = [str(i) for i in range(n)]\n \n logn = np.array([np.log(len(c)) for c in clusters])\n node_sizes = np.interp(logn, (logn.min(), logn.max()), (5, 50))\n \n node_colors = ['#ff7f0e' if i in primaries else '#1f77b4' for i in range(n)]\n\n # Define the nodes as sphere of radius proportional to the log of the cluster voxel content\n graph_data = []\n graph_data.append(go.Scatter(\n x = pos[:,0],\n y = pos[:,1],\n mode = 'markers',\n name = 'clusters',\n marker = dict(\n color = node_colors,\n size = node_sizes,\n ),\n text = node_labels,\n hoverinfo = 'text'\n ))\n\n # Initialize the edges\n edge_vertices = np.concatenate([[pos[i], pos[j], [None, None]] for i, j in zip(edges[0], edges[1])])\n graph_data.append(go.Scatter(x = edge_vertices[:,0], y = edge_vertices[:,1],\n mode = 'lines',\n name = 'edges',\n line = dict(\n color = 'rgba(50, 50, 50, 0.5)',\n width = 1\n ),\n hoverinfo = 'none'\n ))\n\n return graph_data",
"def plot_clusters(ax, component1, component2, df, name):\n\n # Plot input data onto first two components at given axes\n y_colors = sns.color_palette('hls', len(np.unique(df['y'])))\n c_colors = sns.color_palette('hls', len(np.unique(df['c'])))\n sns.scatterplot(x=component1, y=component2, hue='y', palette=y_colors, data=df, legend='full', alpha=0.3, ax=ax[0])\n sns.scatterplot(x=component1, y=component2, hue='c', palette=c_colors, data=df, legend='full', alpha=0.3, ax=ax[1])\n\n # Set titles\n ax[0].set_title('True Clusters represented with {}'.format(component1[:-1].upper()))\n ax[1].set_title('{} Clusters represented with {}'.format(name.upper(), component1[:-1].upper()))\n\n # Set axes limits\n xlim = 1.1 * np.max(np.abs(df[component1]))\n ylim = 1.1 * np.max(np.abs(df[component2]))\n ax[0].set_xlim(-xlim, xlim)\n ax[0].set_ylim(-ylim, ylim)\n ax[1].set_xlim(-xlim, xlim)\n ax[1].set_ylim(-ylim, ylim)",
"def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()",
"def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')",
"def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def DisplayCentroids(Centroids,outputs,ax,N=1,sections=1):\r\n\r\n SliceValues = np.linspace(float(min(Centroids[:,0])),float(max(Centroids[:,0])),sections+1) # Create boundaries in x for each slice.\r\n idx1 = np.asarray((Centroids[:,0]>=SliceValues[N-1]))*np.asarray((Centroids[:,0]<=SliceValues[N]))\r\n\r\n idx1 = idx1.flatten() \r\n\r\n CentroidSlice = Centroids[idx1,:]\r\n \r\n outputSlice = outputs[idx1,:]\r\n\r\n # Plot Data-------------------------------------------------------------------------------------------------------\r\n ax.scatter(CentroidSlice[:,0],CentroidSlice[:,1],CentroidSlice[:,2],c = [float(N) for N in outputSlice],cmap = 'bwr')\r\n ax.set_zlabel('z')\r\n ax.set_ylabel('y')\r\n ax.set_xlabel('x')",
"def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()",
"def pca_visual(X_data, Y_data, dict_CLnames, comp=False, clusters=None,):\n pca = PCA(2) # project from 72 to 2 dimensions\n X_pca = pca.fit_transform(X_data)\n\n #encode class labels into numeric values\n le = preprocessing.LabelEncoder()\n label_encoder = le.fit(Y_data)\n y = label_encoder.transform(Y_data)\n\n Xax=X_pca[:,0] #First Principal Component\n Yax=X_pca[:,1] #Second Principal Component\n labels= y\n cdict={0:'red',1:'green'} #dict with colors\n labl=dict_CLnames\n labl_cl = {0:'cluster 1',1:'cluster 2'}\n if comp == False:\n fig,ax=plt.subplots(figsize=(7,5))\n fig.patch.set_facecolor('white')\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n plt.xlabel(\"First Principal Component\",fontsize=14)\n plt.ylabel(\"Second Principal Component\",fontsize=14)\n plt.legend()\n plt.show()\n \n if comp == True:\n fig,axs =plt.subplots(nrows=1, ncols=2, figsize=(15,5))\n fig.patch.set_facecolor('white')\n ax = axs[0]\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Original data')\n ax.legend()\n\n \n ax = axs[1]\n for l in np.unique(clusters):\n ix=np.where(clusters==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl_cl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Clustered data')\n ax.legend()\n plt.show()",
"def plot_closeness_heatmap(seqids,ali,delimiter=None,rename=None,pout=None,ddout=None,clustering=None,subtypes=None,log=False):\n if clustering==\"clustered\":\n pats,seq_dict=get_clustered_bins(seqids,ali,delimiter=delimiter,rename=rename)\n elif clustering==\"individual\":\n pats,seq_dict=get_individual_bins(seqids,ali,delimiter=delimiter,rename=rename)\n else:\n f=open(clustering,\"r\")\n seq_dict=pickle.load(f)\n pats=seq_dict.keys()\n dfDists, dfCount = get_closeness(pats,seq_dict,log=log)\n if subtypes==None:\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock')\n else:\n subtypes_pal = sns.color_palette(\"Set1\", n_colors=len(subtypes), desat=.99)\n subtypes_lut = dict(zip(map(str, subtypes), subtypes_pal))\n columnsNames = dfDists.columns.values\n rowsNames = dfDists.index.values\n colscolor=[]\n rowscolor=[]\n for i,name in enumerate(zip(columnsNames,rowsNames)):\n colsubtype=name[0].split(\"-\")[-1]\n rowsubtype=name[1].split(\"-\")[-1]\n try:\n colscolor.append(subtypes_lut[colsubtype])\n rowscolor.append(subtypes_lut[rowsubtype])\n except:\n print subtypes_lut.keys()\n raise KeyError(\"Query sequence title %s doesn't have one of the specified subtypes at the end followed by a '-'. Rename option can be used to add it\"%(name[0]))\n dfcolcolors=pd.DataFrame({'subtype':colscolor},index=columnsNames)\n dfrowcolors=pd.DataFrame({'subtype':rowscolor},index=rowsNames)\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock',col_colors=dfcolcolors, row_colors=dfrowcolors)\n for label in subtypes:\n cg.ax_col_dendrogram.bar(0, 0, color=subtypes_lut[label],label=label, linewidth=0)\n cg.ax_col_dendrogram.legend(loc=\"best\", bbox_to_anchor=(0, 1.2) ,ncol=1)\n if log:\n if pout!=None:\n cg.savefig(pout+\".log.png\")\n if ddout!=None:\n with open(\"%s.dendro.log.pkl\"%ddout,\"w\") as f:\n pickle.dump(cg,f)\n with open(\"%s.dataframe.log.pkl\"%out,\"w\") as f:\n pickle.dump(dfDists,f)\n idxr=cg.dendrogram_row.reordered_ind\n idxc=cg.dendrogram_col.reordered_ind\n dfDists, dfCount = get_closeness(pats,seq_dict)\n columnsNames = dfDists.columns.values\n rowsNames = dfDists.index.values\n columnsNames = [columnsNames[i] for i in idxc]\n rowsNames = [rowsNames[i] for i in idxr]\n dfDists=dfDists.reindex(columns=columnsNames,index=rowsNames)\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock',col_colors=dfcolcolors, row_colors=dfrowcolors, row_cluster=False,col_cluster=False)\n else:\n if pout!=None:\n cg.savefig(pout+\".png\")\n if ddout!=None:\n with open(\"%s.dendro.pkl\"%ddout,\"w\") as f:\n pickle.dump(cg,f)\n with open(\"%s.data.pkl\"%ddout,\"w\") as f:\n pickle.dump(dfDists,f)\n return dfDists, dfCount"
] |
[
"0.69917107",
"0.66400284",
"0.6561251",
"0.6510797",
"0.63470906",
"0.5974687",
"0.5937347",
"0.5919538",
"0.59160763",
"0.5881239",
"0.5858356",
"0.58553314",
"0.5830493",
"0.58162296",
"0.5800447",
"0.5793978",
"0.57730776",
"0.57651776",
"0.5757485",
"0.5740799",
"0.5726858",
"0.56854475",
"0.5673409",
"0.5671979",
"0.56717974",
"0.5654767",
"0.5636174",
"0.5629948",
"0.56175315",
"0.5608028"
] |
0.7029017
|
0
|
Plot the relation between two properties/fitness function of a clustering
|
def plot_com_properties_relation(
com_clusters: object,
com_fitness_x: Callable[[object, object, bool], object],
com_fitness_y: Callable[[object, object, bool], object],
**kwargs: dict
) -> object:
if isinstance(com_clusters, cdlib.classes.clustering.Clustering):
com_clusters = [com_clusters]
for_df = []
for c in com_clusters:
x = com_fitness_x(c.graph, c, summary=False)
y = com_fitness_y(c.graph, c, summary=False)
for i, vx in enumerate(x):
for_df.append([c.get_description(), vx, y[i]])
df = pd.DataFrame(
columns=["Method", com_fitness_x.__name__, com_fitness_y.__name__], data=for_df
)
ax = sns.lmplot(
x=com_fitness_x.__name__,
y=com_fitness_y.__name__,
data=df,
hue="Method",
fit_reg=False,
legend=False,
x_bins=100,
**kwargs
)
plt.legend(loc="best")
plt.tight_layout()
return ax
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_com_stat(\n com_clusters: list, com_fitness: Callable[[object, object, bool], object]\n) -> object:\n if isinstance(com_clusters, cdlib.classes.clustering.Clustering):\n com_clusters = [com_clusters]\n\n allVals = []\n allNames = []\n for c in com_clusters:\n prop = com_fitness(c.graph, c, summary=False)\n allVals += prop\n allNames += [c.get_description()] * len(prop)\n\n ax = sns.violinplot(x=allNames, y=allVals, cut=0, saturation=0.5, palette=\"Set3\")\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n\n plt.ylabel(\"%s\" % com_fitness.__name__)\n plt.xlabel(\"Algorithm\")\n plt.tight_layout()\n\n return ax",
"def plot_clusters(self):\n pass",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def showVs(df, feat1, feat2):\n colors = ['blue', 'red', 'green', 'coral']\n for u in range(len(cBouts)):\n plt.plot(f[f['clust_ind'] == u][feat1],\n f[f['clust_ind'] == u][feat2], 'o', color=colors[u],\n alpha=0.6, markeredgecolor='none')\n plt.xlabel(feat1)\n plt.ylabel(feat2)\n plt.show()\n return",
"def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))",
"def plot_and_spearman_task4(infection_times_median, clustering_coefficient_net, degree_net, strength_net,\n betweenness_centrality_net, n_nodes):\n # ordered list of values, the index represent the node\n infection_times_median_list = []\n clustering_coefficient_net_list = []\n degree_net_list = []\n strength_net_list = []\n betweenness_centrality_net_list = []\n\n for i in range(n_nodes):\n infection_times_median_list.append(infection_times_median[str(i)])\n clustering_coefficient_net_list.append(clustering_coefficient_net[str(i)])\n degree_net_list.append(degree_net[str(i)])\n strength_net_list.append(strength_net[str(i)])\n betweenness_centrality_net_list.append(betweenness_centrality_net[str(i)])\n\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(clustering_coefficient_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the unweighted clustering coefficient')\n ax.set_xlabel(r'clustering coefficient $c$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_clustering_coefficient.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(degree_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the degree')\n ax.set_xlabel(r'degree $k$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_degree_net.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(strength_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the strength')\n ax.set_xlabel(r'strength $s$')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_strength_net.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(betweenness_centrality_net_list, infection_times_median_list, alpha=0.5)\n plt.suptitle(r'Median infection times as a function of the unweighted betweenness centrality')\n ax.set_xlabel(r'betweenness centrality')\n ax.set_ylabel(r'median infection time')\n fig.set_figwidth(6.7)\n fig.savefig(\"./plots/t4_betweenness_centrality_net.pdf\")\n\n # Spearman rank-correlation coefficient\n print(\"Spearman rank-correlation coefficient between median infection time and: \")\n print(\"- clustering coefficient: \" + str(\n spearmanr(infection_times_median_list, clustering_coefficient_net_list).correlation))\n print(\"- degree: \" + str(spearmanr(infection_times_median_list, degree_net_list).correlation))\n print(\"- strength: \" + str(spearmanr(infection_times_median_list, strength_net_list).correlation))\n print(\"- betweenness centrality: \" + str(\n spearmanr(infection_times_median_list, betweenness_centrality_net_list).correlation))",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def partial_visualize_in_2d(self, cluster_index=[5,12,35,44,64,75,81]):\n for i in cluster_index:\n list_x = []\n list_y = []\n for j in self.cluster[i]:\n list_x.append(self.code[0][j,0])\n list_y.append(self.code[0][j,1])\n plt.scatter(list_x,list_y, label=self.skill[self.present_skill[i]])\n plt.legend()\n plt.show()\n return",
"def plot_cc(graph):\n\tclustering_coeffs = []\n\tfor node in graph.nodes():\n\t\tclustering_coeffs.append(nx.clustering(graph, node))\n\t\n\tplt.axvline(x=np.mean(clustering_coeffs), color='r', linestyle='-')\n\tplt.hist(clustering_coeffs, bins=100)",
"def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()",
"def hc_analysis(x, samples, feature=\"Pathway\"):\n\n size_label = 18\n colors = {idx:\"gray\" for idx in range(50000)}\n print(\"feature: %s\"%feature)\n\n fig = plt.figure(figsize=(10, 8))\n\n #ylabel\n ax1 = fig.add_axes([0.09,0.1,0.01,0.55])\n Y = linkage(x, method=\"ward\")\n Z1 = dendrogram(Y, orientation=\"left\", link_color_func=lambda k: colors[k], no_plot=True)\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.axis(\"off\")\n\n # xlabel\n # Compute and plot the dendrogram.\n ax2 = fig.add_axes([0.1,0.71,0.6,0.1])\n Y = linkage(x.T, method=\"ward\")\n Z2 = dendrogram(Y, link_color_func=lambda k: colors[k])\n ax2.set_xticks([])\n ax2.set_yticks([])\n ax2.axis(\"off\")\n\n # Plot distance matrix.\n axmatrix = fig.add_axes([0.1,0.1,0.6,0.55])\n idx1 = Z1[\"leaves\"]\n idx2 = Z2[\"leaves\"]\n tmp = x[idx1,:]\n tmp = tmp[:,idx2]\n im = axmatrix.matshow(1-tmp, aspect=\"auto\", origin=\"lower\", cmap=plt.cm.get_cmap(\"YlGnBu\"))#cmap=pylab.cm.YlGnBu)#bwr\n\n samples = [samples[idx] for idx in Z2[\"leaves\"]]\n plt.xticks([i+0.0 for i in range(len(samples))], samples, rotation=90)\n\n plt.ylabel(feature, fontsize=size_label)\n axmatrix.yaxis.set_label_position(\"right\")\n axmatrix.xaxis.set_ticks_position(\"bottom\")\n axmatrix.set_yticks([])\n\n # Plot the sample types\n axmatrix = fig.add_axes([0.1,0.66,0.6,0.04])\n\n list_pm = np.zeros((1,44),dtype=float)\n tmp = [(idx+1)%2 for idx in Z2[\"leaves\"]] #1:primary, 0:metastatic\n list_pm[0] = tmp\n\n im = axmatrix.matshow(list_pm, aspect=\"auto\", origin=\"lower\", cmap=plt.cm.get_cmap(\"autumn\"))\n\n for idx in range(44-1):\n axmatrix.plot([0.5+idx, 0.5+idx], [-0.5, 0.5], \"gray\")\n axmatrix.set_xticks([])\n axmatrix.set_yticks([])\n\n plt.show()\n\n #fig.savefig(\"figures/fig10hcpathway.pdf\", bbox_inches=\"tight\")\n #fig.savefig(\"figures/fig11hcgenes.pdf\", bbox_inches=\"tight\")\n\n # Statistical test.\n list_a = Y[:,0]\n list_b = Y[:,1]\n list_c = np.array([idx+x.shape[1] for idx in range(Y.shape[0])])\n\n n_nodes = 2*x.shape[1]-1\n\n G=nx.Graph()\n\n G.add_nodes_from([idx for idx in range(n_nodes)])\n\n edges = [(int(a), int(c)) for a, c in zip(list_a, list_c)]\n G.add_edges_from(edges)\n\n edges = [(int(b), int(c)) for b, c in zip(list_b, list_c)]\n G.add_edges_from(edges)\n\n distance = nx.floyd_warshall(G)\n\n idx_p = [idx for idx in range(44) if idx % 2 == 0]\n idx_m = [idx for idx in range(44) if idx % 2 == 1]\n\n set_idx_p = set(idx_p)\n set_idx_m = set(idx_m)\n\n msd, rmsd = calculate_msd(distance, set_idx_p, set_idx_m, show=True)\n\n list_rand_msd, list_rand_rmsd = [], []\n\n for _ in range(1000):\n list_pm = range(44)\n shuffle(list_pm)\n\n idx_p = list_pm[0:22]\n idx_m = list_pm[22:44]\n\n set_idx_p = set(idx_p)\n set_idx_m = set(idx_m)\n\n rand_msd, rand_rmsd = calculate_msd(distance, set_idx_p, set_idx_m)\n list_rand_msd.append(rand_msd)\n list_rand_rmsd.append(rand_rmsd)\n\n zmsd = (msd-np.mean(list_rand_msd))/np.std(list_rand_msd)\n zrmsd = (rmsd-np.mean(list_rand_rmsd))/np.std(list_rand_rmsd)\n\n #p_values = scipy.stats.norm.sf(16.1004606)\n print(\"Z_MSD=%.2f, Z_rMSD=%.2f\"%(zmsd, zrmsd))",
"def draw(pre, features, poi_, mark_poi=False, f1_name=\"feature 1\", f2_name=\"feature 2\"):\n\n # plot each cluster with a different color--add more colors for\n # drawing more than five clusters\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\n for ii, pp in enumerate(pre):\n plt.scatter(features[ii][0], features[ii][1], color=colors[pre[ii]])\n\n # if you like, place red stars over points that are POIs (just for funsies)\n if mark_poi:\n for ii, pp in enumerate(pre):\n if poi_[ii]:\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\n plt.xlabel(f1_name)\n plt.ylabel(f2_name)\n plt.show()",
"def display_clusters(assign):\n for c in assign:\n plt.plot(c[0], c[1], \"r*\")\n plt.plot(\n [p[0] for p in assign[c]],\n [p[1] for p in assign[c]],\n \"o\"\n )\n plt.show()\n plt.close()",
"def plot_closeness_heatmap(seqids,ali,delimiter=None,rename=None,pout=None,ddout=None,clustering=None,subtypes=None,log=False):\n if clustering==\"clustered\":\n pats,seq_dict=get_clustered_bins(seqids,ali,delimiter=delimiter,rename=rename)\n elif clustering==\"individual\":\n pats,seq_dict=get_individual_bins(seqids,ali,delimiter=delimiter,rename=rename)\n else:\n f=open(clustering,\"r\")\n seq_dict=pickle.load(f)\n pats=seq_dict.keys()\n dfDists, dfCount = get_closeness(pats,seq_dict,log=log)\n if subtypes==None:\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock')\n else:\n subtypes_pal = sns.color_palette(\"Set1\", n_colors=len(subtypes), desat=.99)\n subtypes_lut = dict(zip(map(str, subtypes), subtypes_pal))\n columnsNames = dfDists.columns.values\n rowsNames = dfDists.index.values\n colscolor=[]\n rowscolor=[]\n for i,name in enumerate(zip(columnsNames,rowsNames)):\n colsubtype=name[0].split(\"-\")[-1]\n rowsubtype=name[1].split(\"-\")[-1]\n try:\n colscolor.append(subtypes_lut[colsubtype])\n rowscolor.append(subtypes_lut[rowsubtype])\n except:\n print subtypes_lut.keys()\n raise KeyError(\"Query sequence title %s doesn't have one of the specified subtypes at the end followed by a '-'. Rename option can be used to add it\"%(name[0]))\n dfcolcolors=pd.DataFrame({'subtype':colscolor},index=columnsNames)\n dfrowcolors=pd.DataFrame({'subtype':rowscolor},index=rowsNames)\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock',col_colors=dfcolcolors, row_colors=dfrowcolors)\n for label in subtypes:\n cg.ax_col_dendrogram.bar(0, 0, color=subtypes_lut[label],label=label, linewidth=0)\n cg.ax_col_dendrogram.legend(loc=\"best\", bbox_to_anchor=(0, 1.2) ,ncol=1)\n if log:\n if pout!=None:\n cg.savefig(pout+\".log.png\")\n if ddout!=None:\n with open(\"%s.dendro.log.pkl\"%ddout,\"w\") as f:\n pickle.dump(cg,f)\n with open(\"%s.dataframe.log.pkl\"%out,\"w\") as f:\n pickle.dump(dfDists,f)\n idxr=cg.dendrogram_row.reordered_ind\n idxc=cg.dendrogram_col.reordered_ind\n dfDists, dfCount = get_closeness(pats,seq_dict)\n columnsNames = dfDists.columns.values\n rowsNames = dfDists.index.values\n columnsNames = [columnsNames[i] for i in idxc]\n rowsNames = [rowsNames[i] for i in idxr]\n dfDists=dfDists.reindex(columns=columnsNames,index=rowsNames)\n cg = sns.clustermap(dfDists,vmin=0,vmax=1,cmap=\"RdBu_r\",linewidths = 0.30,metric='cityblock',col_colors=dfcolcolors, row_colors=dfrowcolors, row_cluster=False,col_cluster=False)\n else:\n if pout!=None:\n cg.savefig(pout+\".png\")\n if ddout!=None:\n with open(\"%s.dendro.pkl\"%ddout,\"w\") as f:\n pickle.dump(cg,f)\n with open(\"%s.data.pkl\"%ddout,\"w\") as f:\n pickle.dump(dfDists,f)\n return dfDists, dfCount",
"def plot_clusters(ax, component1, component2, df, name):\n\n # Plot input data onto first two components at given axes\n y_colors = sns.color_palette('hls', len(np.unique(df['y'])))\n c_colors = sns.color_palette('hls', len(np.unique(df['c'])))\n sns.scatterplot(x=component1, y=component2, hue='y', palette=y_colors, data=df, legend='full', alpha=0.3, ax=ax[0])\n sns.scatterplot(x=component1, y=component2, hue='c', palette=c_colors, data=df, legend='full', alpha=0.3, ax=ax[1])\n\n # Set titles\n ax[0].set_title('True Clusters represented with {}'.format(component1[:-1].upper()))\n ax[1].set_title('{} Clusters represented with {}'.format(name.upper(), component1[:-1].upper()))\n\n # Set axes limits\n xlim = 1.1 * np.max(np.abs(df[component1]))\n ylim = 1.1 * np.max(np.abs(df[component2]))\n ax[0].set_xlim(-xlim, xlim)\n ax[0].set_ylim(-ylim, ylim)\n ax[1].set_xlim(-xlim, xlim)\n ax[1].set_ylim(-ylim, ylim)",
"def plotHClustProportions(figh, Z, resDf, alpha_col='pvalue', alpha=0.05, colors=None, ann='N', xLim=None, maxY=None, min_count=20):\n\n nCategories = len(resDf['observed'].iloc[0])\n if colors is None:\n colors = sns.color_palette('Set1', n_colors=nCategories)\n labels = resDf['labels'].iloc[0]\n \n dend = sch.dendrogram(Z, no_plot=True,\n color_threshold=None,\n link_color_func=lambda lid: hex(lid),\n above_threshold_color='FFFFF')\n figh.clf()\n axh = plt.axes((0.05, 0.07, 0.8, 0.8), facecolor='w')\n\n lowestY = None\n annotateCount = 0\n for xx, yy, hex_cid in zip(dend['icoord'], dend['dcoord'], dend['color_list']):\n cid = int(hex_cid, 16)\n xx = np.array(xx) / 10\n axh.plot(xx, yy, zorder=1, lw=0.5, color='k', alpha=1)\n\n N = np.sum(resDf.loc[cid, 'observed'])\n if alpha is None or resDf.loc[cid, alpha_col] <= alpha and N > min_count:\n obs = np.asarray(resDf.loc[cid, 'observed_prop'])\n obs = obs / np.sum(obs)\n L = (xx[2] - xx[1])\n xvec = L * np.concatenate(([0.], obs, [1.]))\n curX = xx[1]\n for i in range(len(obs)):\n c = colors[i]\n axh.plot([curX, curX + L*obs[i]],\n yy[1:3],\n color=c,\n lw=10,\n solid_capstyle='butt')\n curX += L*obs[i]\n if ann == 'N':\n s = '%1.0f' % N\n elif ann == 'CID':\n s = cid\n elif ann == 'alpha':\n if resDf.loc[cid, alpha_col] < 0.001:\n s = '< 0.001'\n else:\n s = '%1.3f' % resDf.loc[cid, alpha_col]\n if not ann == '':# and annotateCount < annC:\n xy = (xx[1] + L/2, yy[1])\n # print(s,np.round(xy[0]), np.round(xy[1]))\n annotateCount += 1\n axh.annotate(s,\n xy=xy,\n size='x-small',\n horizontalalignment='center',\n verticalalignment='center')\n if lowestY is None or yy[1] < lowestY:\n lowestY = yy[1]\n yl = axh.get_ylim()\n if not lowestY is None:\n yl0 = 0.9*lowestY\n else:\n yl0 = yl[0]\n if not maxY is None:\n yl1 = maxY\n else:\n yl1 = yl[1]\n axh.set_ylim((yl0, yl1))\n \n axh.set_yticks(())\n if not xLim is None:\n if xLim[1] is None:\n xl1 = axh.get_xlim()[1]\n xLim = (xLim[0], xl1)\n axh.set_xlim(xLim)\n else:\n xLim = axh.get_xlim()\n\n xt = [x for x in range(0, Z.shape[0]) if x <= xLim[1] and x>= xLim[0]]\n xt = xt[::len(xt) // 10]\n # xtl = [x//10 for x in xt]\n axh.set_xticks(xt)\n # axh.set_xticklabels(xtl)\n legh = axh.legend([plt.Rectangle((0,0), 1, 1, color=c) for c in colors],\n labels,\n loc='upper left', bbox_to_anchor=(1, 1))",
"def Draw(pred, features, poi, mark_poi=False, name=\"image.png\", f1_name=\"feature 1\", f2_name=\"feature 2\"):\n\n ### plot each cluster with a different color--add more colors for\n ### drawing more than five clusters\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\n for ii, pp in enumerate(pred):\n plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])\n\n ### if you like, place red stars over points that are POIs (just for funsies)\n if mark_poi:\n for ii, pp in enumerate(pred):\n if poi[ii]:\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\n plt.xlabel(f1_name)\n plt.ylabel(f2_name)\n plt.savefig(name)\n plt.show()",
"def Draw(pred, features, poi, mark_poi=False, name=\"image.png\", f1_name=\"feature 1\", f2_name=\"feature 2\"):\n\n ### plot each cluster with a different color--add more colors for\n ### drawing more than five clusters\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\n for ii, pp in enumerate(pred):\n plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])\n\n ### if you like, place red stars over points that are POIs (just for funsies)\n if mark_poi:\n for ii, pp in enumerate(pred):\n if poi[ii]:\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\n plt.xlabel(f1_name)\n plt.ylabel(f2_name)\n plt.savefig(name)\n plt.show()",
"def Draw(pred, features, poi, mark_poi=False, name=\"image.png\", f1_name=\"feature 1\", f2_name=\"feature 2\"):\n\n ### plot each cluster with a different color--add more colors for\n ### drawing more than five clusters\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\n for ii, pp in enumerate(pred):\n plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])\n\n ### if you like, place red stars over points that are POIs (just for funsies)\n if mark_poi:\n for ii, pp in enumerate(pred):\n if poi[ii]:\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\n plt.xlabel(f1_name)\n plt.ylabel(f2_name)\n plt.savefig(name)\n plt.show()",
"def visualize(vis, features, label):\n if vis == 'PCA':\n #n_components = st.sidebar.slider(\"n_components\", 2, 10)\n #alpha = st.sidebar.slider(\"alpha\", 0.8, 2.0)\n #pca = PCA(n_components)\n pca = PCA(2)\n\n X_projected = pca.fit_transform(features)\n \n x1 = X_projected[:, 0]\n x2 = X_projected[:, 1]\n\n\n fig = plt.figure()\n plt.scatter(x1, x2, c=label, alpha=0.8, cmap='viridis')\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\")\n plt.colorbar()\n\n st.pyplot()",
"def plot(self):\n cs = plt.contour(self.X, self.Y, self.fitness_function)\n plt.clabel(cs, inline=1, fontsize=6)\n plt.imshow(self.fitness_function, extent=self.limits, origin=\"lower\", alpha=0.3)",
"def Draw(pred, features, poi, mark_poi=False, name=\"image.png\", f1_name=\"feature 1\", f2_name=\"feature 2\"):\n\n ### plot each cluster with a different color--add more colors for\n ### drawing more than five clusters\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\n for ii, pp in enumerate(pred):\n plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])\n\n ### if you like, place red stars over points that are POIs (just for funsies)\n if mark_poi:\n for ii, pp in enumerate(pred):\n if poi[ii]:\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\n plt.xlabel(f1_name)\n plt.ylabel(f2_name)\n plt.savefig(name)\n #plt.show()\n #EG: Save to file instead of displaying it.\n plt.savefig('./theFile1.png')",
"def visualize(self):\n\t\tplt.figure(1)\n\t\tax1 = plt.add_suplot(1,2,1)\n\t\t# Plot free energy error\n\t\tax1.plot(self.FE_errors_GMM_CV_, linewidth=4, label='GMM with cross-validation')\n\t\tax1.plot(self.FE_errors_GMM_mix_models_, linewidth=4, label='GMM with mixture of models')\n\t\tplt.legend()\n\n\t\t# Plot density error\n\n\t\t# Plot log-likelihood of test set\n\n\t\t# Plot clustering score\n\n\t\tplt.show()\n\n\t\treturn",
"def Draw(pred, features, poi, mark_poi=False, name=\"image.png\", f1_name=\"feature 1\", f2_name=\"feature 2\"):\r\n\r\n ### plot each cluster with a different color--add more colors for\r\n ### drawing more than five clusters\r\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\r\n for ii, pp in enumerate(pred):\r\n plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])\r\n\r\n ### if you like, place red stars over points that are POIs (just for funsies)\r\n if mark_poi:\r\n for ii, pp in enumerate(pred):\r\n if poi[ii]:\r\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\r\n plt.xlabel(f1_name)\r\n plt.ylabel(f2_name)\r\n plt.savefig(name)\r\n plt.show()",
"def plot_clusters(cluster_1, cluster_2):\r\n plt.figure(figsize=(14, 7))\r\n plt.bar([i - 0.1 for i in cluster_1.keys()], cluster_1.values(), width=0.2, align='center', color='b',\r\n label='German Population')\r\n plt.bar([i + 0.1 for i in cluster_2.keys()], cluster_2.values(), width=0.2, align='center', color='g',\r\n label='Customer Population')\r\n plt.title('German Population versus Customers')\r\n plt.xlabel('Cluster No.')\r\n plt.ylabel('Cluster %')\r\n plt.xticks(range(1, len(cluster_1) + 1))\r\n plt.legend()\r\n plt.savefig('cluster_map.png')\r\n plt.show()\r\n\r\n return",
"def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel(\"Fitness\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def exercise_4(self):\n student_data = self.student_data\n # Change the legend order in the scatter plot\n sns.scatterplot(x=\"absences\", y=\"G3\", \n data=student_data, \n hue=\"location\",\n hue_order = [\"Rural\"\n ,\"Urban\"])\n\n # Show plot\n plt.show()",
"def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()",
"def plot(self, p: int):\n self.compute_clusters(p)\n self.plot_clusters()"
] |
[
"0.6681499",
"0.6602194",
"0.6204443",
"0.6165348",
"0.61467725",
"0.609327",
"0.60596406",
"0.60276634",
"0.60208607",
"0.6020466",
"0.60039204",
"0.591291",
"0.59104687",
"0.5889127",
"0.587141",
"0.5845387",
"0.5842687",
"0.5842687",
"0.5842687",
"0.5832414",
"0.5796505",
"0.5795001",
"0.5790656",
"0.57870543",
"0.577803",
"0.5754649",
"0.57355213",
"0.57236403",
"0.57170784",
"0.5672356"
] |
0.6935457
|
0
|