Dataset columns:
  query            string    (lengths 9 to 9.05k)
  document         string    (lengths 10 to 222k)
  metadata         dict
  negatives        sequence  (length 30)
  negative_scores  sequence  (length 30)
  document_score   string    (lengths 4 to 10)
  document_rank    string    (2 classes)
Extracts the body of a specified `size` from `buffer`.
def _extract_body(buffer, size):
    # We account for the message start and command code bytes, hence the +2.
    if len(buffer) < size + 2:
        return None, size + 2 - len(buffer)
    body = buffer[2:size + 2]
    buffer[:size + 2] = []
    return body, 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __read_block(self, size):\n buf = b\"\"\n if len(self.__read_buffer):\n limit = (\n size if size <= len(self.__read_buffer) else\n len(self.__read_buffer)\n )\n buf = self.__read_buffer[:limit]\n self.__read_buffer = self.__read_buffer[limit:]\n size -= limit\n if not size:\n return buf\n try:\n buf += self.sock.recv(size)\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read %d bytes from the server\" % size)\n self.__dprint(buf)\n return buf", "def read(self, size=-1):\n\n if size < 0:\n raise NotImplementedError(\"Don't be greedy, that could be massive!\")\n elif size == 0:\n if self._text:\n return \"\"\n else:\n return b\"\"\n elif self._within_block_offset + size <= len(self._buffer):\n # This may leave us right at the end of a block\n # (lazy loading, don't load the next block unless we have too)\n data = self._buffer[self._within_block_offset:self._within_block_offset + size]\n self._within_block_offset += size\n assert data # Must be at least 1 byte\n return data\n else:\n # if read data overflows to next block\n # pull in rest of data in current block\n data = self._buffer[self._within_block_offset:]\n\n # decrement size so that we only pull the rest of the data\n # from next block\n size -= len(data)\n self._load_block() # will reset offsets\n\n if not self._buffer:\n return data # EOF\n\n # if there is still more to read\n elif size:\n # pull rest of data from next block\n return data + self.read(size)\n else:\n # Only needed the end of the last block\n return data", "def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]", "def read_bytes(self, size):\n return self.read('bytes:'+str(size))", "def read(self, size=None):\n if size is None:\n size = self._size\n content = self._buffer()\n if not content:\n yield ''\n else:\n running = True\n while running:\n finished = False\n while len(content) < size and not finished:\n tmp_content = self._buffer()\n if not tmp_content:\n finished = True\n else:\n content = tmp_content + content\n if finished:\n yield content\n running = False\n else:\n rest = content[0:-size]\n content = content[-size:]\n yield content\n content = rest", "def read(self, size: int) -> bytes:\n\n return self.r.read(size)", "async def read_chunk(self, size: int = ...) 
-> bytes:\n ...", "def iter_content(response: requests.Response, size):\n buffer = bytearray(size)\n num = 0\n try:\n while num < size:\n content = next(response.iter_content(size - num))\n buffer[num:num + len(content)] = content\n num += len(content)\n except StopIteration:\n raise RuntimeError('Content not long enough')\n return buffer", "def read_until_size(self, size):\n if not size:\n do_return(b'')\n with self.reading:\n while len(self.read_buffer) < size:\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return(self.read_buffer.dequeue(size))", "async def read_body(receive):\n body = b''\n more_body = True\n while more_body:\n message = await receive()\n body += message.get('body', b'')\n more_body = message.get('more_body', False)\n return body", "def read(self, size: int = None) -> bytes:\n if size is None:\n buffer = io.BytesIO()\n copy_stream(self, buffer)\n return buffer.getvalue()\n else:\n return self._read_chunked(size)", "def read(self, size: int = -1) -> bytes:\n if self.size_read >= self.chunksize:\n return b''\n if size < 0:\n size = self.chunksize - self.size_read\n if size > self.chunksize - self.size_read:\n size = self.chunksize - self.size_read\n data = self.file.read(size)\n self.size_read = self.size_read + len(data)\n if self.size_read == self.chunksize and (self.chunksize & 1):\n dummy = self.file.read(1)\n self.size_read = self.size_read + len(dummy)\n return data", "def read(self, size=-1):\n _complain_ifclosed(self._closed)\n buf = self._buf\n while size < 0 or len(buf) < size:\n try:\n buf = buf + next(self._generator)\n except StopIteration:\n break\n\n returned = b\"\"\n if size >= 1:\n self._buf = buf[size:]\n returned = buf[:size]\n else:\n self._buf = b\"\"\n returned = buf\n\n self._position = self._position + len(returned)\n return returned", "def chunker(seq, size):\n\n return (seq[pos : pos + size] for pos in range(0, len(seq), size))", "def peek(self, size, timeout=_UNSET):\n with self._recv_lock:\n if len(self.rbuf) >= size:\n return self.rbuf[:size]\n data = self.recv_size(size, timeout=timeout)\n self.rbuf = data + self.rbuf\n return data", "def read(self, size: int=-1) -> bytes:\n ...", "def read(self, size: int=-1) -> bytes:\n ...", "def read(self, size=None):\n if not size:\n rlen = self.end - self.pos\n else:\n rlen = size\n if rlen == 0:\n return '' # Fake EOF\n # Clamp rlen to window end\n if self.pos + rlen > self.end:\n rlen = self.end - self.pos\n data = self.file.read(rlen)\n self.pos += len(data)\n return data", "def readBody(self, file, fileLength):\n self.body=b\"\"\n read = 0\n progress_bar(read, fileLength)\n while 1:\n try:\n data = file.read(fileLength)\n except:\n raise Exception(\"Error reading a line in the body\")\n # check if the socket is empty\n if data is None:\n raise Exception(\"socket is empty\")\n self.body += data\n fileLength -= len(data)\n progress_bar(read, fileLength)\n sys.stdout.write(\"\\n\")\n if num_read >= size:\n break", "def read(self, size=None):\n if size is None or size < 0:\n return \"\".join(list(self))\n else:\n data_chunks = []\n data_readed = 0\n try:\n while data_readed < size:\n chunk = self.next_chunk()\n data_chunks.append(chunk)\n data_readed += len(chunk)\n except StopIteration:\n pass\n\n if data_readed > size:\n last_chunk = data_chunks.pop()\n extra_length = data_readed - size\n last_chunk, extra_data = last_chunk[:-extra_length], last_chunk[-extra_length:]\n self.unshift(extra_data)\n data_chunks.append(last_chunk)\n return \"\".join(data_chunks)", "def 
_unpack(self, _format: str, size: int) -> Any:\n return unpack(_format, self.read(size))[0]", "def recv_size(s, size):\n print 'Receive data in fixed size mode'\n reply = s.recv(size)\n print reply", "def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))", "def decode(cls, buffer):\n\n if len(buffer) < struct.calcsize(b\"<i\"):\n raise IncompleteMessageError\n size = struct.unpack(b\"<i\", buffer[:4])[0]\n if len(buffer) - struct.calcsize(b\"<i\") < size:\n raise IncompleteMessageError\n packet = buffer[:size + 4]\n buffer = buffer[size + 4:]\n id = struct.unpack(b\"<i\", packet[4:8])[0]\n type = struct.unpack(b\"<i\", packet[8:12])[0]\n body = packet[12:][:-2].decode(\"ascii\")\n return cls(id, type, body), buffer", "def _read_source(self, size: int) -> bytes:\n raise NotImplementedError() # pragma: no cover", "def extract_body(message_dict):\n tagged_parts_list = message_dict[\"structured_text\"][\"text\"]\n body = \"\"\n for part_tag_dict in tagged_parts_list:\n part = part_tag_dict[\"part\"]\n tag = part_tag_dict[\"tags\"]\n if tag == \"BODY\":\n body += part + \" \"\n elif tag == \"GREETINGS\":\n break\n\n return body", "def _read_chunk_body(self, data):\n resp = self.current_response\n\n if resp._decompressor:\n resp.body += resp._decompressor.decompress(data[:-2])\n else:\n resp.body += data[:-2]\n\n self._stream.on_read = self._read_chunk_head\n self._stream.read_delimiter = CRLF", "def read(self, size=-1):\n ...", "def read(self, address, size):\n raise NotImplementedError", "def __extract_file(cls, stream, path, size):\n\n block_size = 0x4000\n\n with open(path, 'wb') as f:\n while size > 0:\n if block_size > size:\n block_size = size\n block = stream.read(block_size)\n if len(block) != block_size:\n raise Ai1wmError('error extracting a file: {}, error: bad file size'.format(path))\n f.write(block)\n size -= len(block)" ]
[ "0.6191367", "0.60895145", "0.5873173", "0.5772755", "0.57718474", "0.5739988", "0.5719125", "0.5698225", "0.56644", "0.5449429", "0.54320264", "0.5406147", "0.5332291", "0.5266665", "0.5217389", "0.51856273", "0.51856273", "0.5151981", "0.5148639", "0.5140132", "0.5135234", "0.5128295", "0.5124472", "0.5097763", "0.5086183", "0.50670147", "0.50503665", "0.49669516", "0.49509734", "0.49505615" ]
0.7962518
0
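For illustration, a minimal usage sketch of the `_extract_body` document above — assuming `buffer` is a mutable byte sequence (e.g. a bytearray) whose first two bytes are the message-start and command-code header; the sample bytes are hypothetical, not from the dataset.

def _extract_body(buffer, size):
    # Repeated from the document above so the sketch runs standalone.
    if len(buffer) < size + 2:
        return None, size + 2 - len(buffer)
    body = buffer[2:size + 2]
    buffer[:size + 2] = []
    return body, 0

buf = bytearray(b"\x01\x02hello world")

# 2 header bytes + 5 body bytes are available, so extraction succeeds
# and the consumed bytes are removed from the buffer in place.
body, missing = _extract_body(buf, 5)
assert body == b"hello" and missing == 0
assert buf == bytearray(b" world")

# Asking for more than is buffered returns None plus the byte shortfall.
body, missing = _extract_body(buf, 10)
assert body is None and missing == 6   # needs 12 bytes, only 6 buffered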
Updates the treeview and fills it with all records in the transactions table.
def update_table(self):
    self.cursor.execute("""SELECT * FROM transactions""")
    result = self.cursor.fetchall()
    self.tree.delete(*self.tree.get_children())
    for item in result:
        self.tree.insert('', 'end', text=item[0], values=item[1:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_treeview(self):\n for i in self.user_inventory.get_children():\n self.user_inventory.delete(i)\n \n #expired:\n self.df_expired = self.df_user.loc[self.df_user[\"expiration (days)\"] <= self.today]\n self.df_expired_rows = self.df_expired.to_numpy().tolist()\n for row in self.df_expired_rows:\n self.user_inventory.insert(\"\", \"end\", values=row, tags=(\"expired\", ))\n\n #notified:\n self.df_noti = self.df_user.loc[self.df_user[\"notify (days)\"] <= self.today]\n self.df_notify = pd.concat([self.df_noti, self.df_expired]).drop_duplicates(keep=False)\n self.df_expired_rows = self.df_expired.to_numpy().tolist()\n for row in self.df_expired_rows:\n self.user_inventory.insert(\"\", \"end\", values=row, tags=(\"notified\", ))\n\n #rest of the items:\n self.df_rest_of_items = self.df_user.loc[self.df_user[\"notify (days)\"] > self.today]\n self.df_user_rows = self.df_rest_of_items.to_numpy().tolist()\n for row in self.df_user_rows:\n self.user_inventory.insert(\"\", \"end\", values=row, tags=(\"others\", ))", "def refreshTree(self):\n\n # Gets default rows\n rows = getsAllClients()\n\n # Puts and displays rows in tree\n self.displayTreeRows(rows)", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def sync_tree_with_data(self, tree: QTreeView, data: List[DataNode]) -> None:\n tree.setModel(self.create_model_from_nodes(data))\n tree.expandAll()", "def show_pay_tree(self):\n self.pay_tree.delete(*self.pay_tree.get_children())\n ret = Bill()\n data = ret.view_bill()\n for i in data:\n self.pay_tree.insert(\"\", \"end\", text=i[0], values=i)", "def refresh():\n global tree\n tree = build_tree()\n tree.order_by_create()\n return index()", "def sync_tree_db(self) -> None:\n self.sync_tree_with_data(self.tree_db, self.data_db)", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def update_all_data(self):\n self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())", "def update_treeview(self, search=False):\n if not search:\n self.vimiv.commandline.search_positions = []\n # Remove old columns\n for column in self.treeview.get_columns():\n self.treeview.remove_column(column)\n # Tree View\n current_file_filter = self.file_filter_create(self.datalist_create())\n self.treeview.set_model(current_file_filter)\n # Add the columns\n for i, name in enumerate([\"Num\", \"Name\", \"Size\", \"M\"]):\n renderer = Gtk.CellRendererText()\n column = Gtk.TreeViewColumn(name, renderer, markup=i)\n if name == \"Name\":\n column.set_expand(True)\n column.set_max_width(20)\n self.treeview.append_column(column)", "def iniciarTreeView(self):\n self.verDatos.configure(height=10, columns=3)\n self.verDatos[\"columns\"] = (\"idbase\",\"titulo\", \"descripcion\")\n self.verDatos.column(\"#0\", width=80, minwidth=20, anchor=E)\n self.verDatos.column(\"idbase\", width=60, minwidth=20, anchor=W)\n self.verDatos.column(\"titulo\", width=150, minwidth=150, anchor=W)\n self.verDatos.column(\"descripcion\", width=150, minwidth=150, anchor=W)\n self.verDatos.heading(\"#0\", text=\"index\", anchor=CENTER)\n self.verDatos.heading(\"idbase\", text=\"id\", anchor=CENTER)\n self.verDatos.heading(\"titulo\", text=\"Título\", anchor=CENTER)\n self.verDatos.heading(\"descripcion\", text=\"Descripción\", anchor=CENTER)\n self.verDatos.grid(column=0, row=4, columnspan=3, rowspan=2, padx=20, pady=15)\n self.verDatos.bind(\"<<TreeviewSelect>>\", self.selectTree)", "def refresh_tree(self):\n self.process_tree = ProcessNode.objects.get(id=self.process_tree_id)", "def sync():\n\n 
DFS.update(get_data_without_transactions())\n DFS[c.dfs.TRANS] = get_df_transactions()\n\n YML.update(get_config())", "def update_table_view(self):\n itemlist = [os.path.abspath(\n self.model.filePath(\n self.model.index(selection.row(),\n 0,\n selection.parent()\n )\n )\n )\n for selection in self.treeView.selectedIndexes()]\n self.listWidget.clear()\n self.listWidget.addItems(itemlist)\n\n nitemlist = []\n fileops = FileOperations()\n if not self.ckbxTrimDir.isChecked():\n flattencount = 0\n else:\n flattencount = self.trimdirCount.value()\n if self.lblDestPath.isEnabled():\n self.previewView.clear()\n for item in itemlist:\n nitemlist.append(fileops.get_dest_filepath(item, self.lblDestPath.text(), flattencount))\n self.previewView.addItems(nitemlist)\n else:\n self.previewView.clear()\n self.previewView.addItems(['No destination folder selected'])\n\n self.resize_tree_column()", "def _refresh_table(self):\n self._column_selected()\n self._table_selected()\n self._column_selection_change()\n self.refresh_column_list()\n self.refresh_table_list()\n self.refresh_table()", "def _fill_tree(self):\r\n self._tree.delete(*self._tree.get_children())\r\n for point_index, point in enumerate(self._structure.points):\r\n self._tree.insert('', 'end', values=[point_index, round(point[0]), round(point[1])])\r\n if point_index == self._index_of_sel_point:\r\n self._set_selection(point_index)", "def update(request):\n tree_data = request.data\n db_node_objects = Node.objects\n updating_tree_data = read_tree(update_or_create_node, tree_data, db_node_objects)\n db_node_objects = Node.objects\n resulting_tree_data = read_tree(update_deleted, updating_tree_data, db_node_objects)\n return JsonResponse({'tree': resulting_tree_data})", "def fillTreeView(self):\n #Parcours des sections (qui sont des ensembles)\n\n for section in self.getBindings():\n self.__treeB.insert(\"\", END,iid=section, text= section.capitalize(), open=True, tag=\"header\")\n for binding in self.getBindings()[section]:\n bd = self.getBindings()[section][binding]\n self.__listeItemTreeview.append(self.__treeB.insert(section, END,iid=section+binding, text=binding.capitalize(), value=(bd[\"description\"], \"; \".join(bd[\"bindings\"]))))\n\n\n self.__treeB.tag_configure(\"header\", font=\"arial 10 bold\") # à voir si on garde une stylisation comme ça", "def onDatabaseLog(self):\n root1 = Tk()\n root1.title(_('Transaction Root Detail Table'))\n root1.resizable(width=False, height=False)\n DataView(root1)", "def select_table(self):\n\n selected = self.mylist.selection_get()\n data = self.read_table(selected)\n db_frame = self.db_frame\n\n db_frame.pack(side=\"left\", fill=\"both\")\n col_names = tuple((\"heading%d\" % i for i in range(len(data[0]))))\n if not self.Tree:\n self.Tree = Treeview(db_frame, columns=col_names)\n else:\n self.Tree.destroy()\n self.scrollbarY.destroy()\n self.scrollbarX.destroy()\n self.Tree = Treeview(db_frame, columns=col_names)\n self.scrollbarY = Scrollbar(db_frame)\n self.scrollbarX = Scrollbar(db_frame, orient=HORIZONTAL)\n self.Tree.config(yscrollcommand=self.scrollbarY.set,\n xscrollcommand=self.scrollbarX.set)\n\n for x in data:\n self.Tree.insert('', 'end', values=x)\n for col in col_names:\n self.Tree.heading(col, text=col)\n self.scrollbarY.config(command=self.Tree.yview)\n self.scrollbarY.pack(side='right', fill=Y)\n self.scrollbarX.config(command=self.Tree.xview)\n self.scrollbarX.pack(side='bottom', fill=X)\n self.Tree.pack(side='left', fill='both')", "def update(ts):\n tables = ts.dump_tables()\n 
update_tables(tables)\n return tables.tree_sequence()", "def update(self):\n try:\n\n # 1 --> Get all the NewValidTransaction(s)\n new_valid_txns = self.frame.get(NewValidTransaction)\n\n for txn in new_valid_txns:\n\n # 2 --> Update the BankingRecord corresponding to Customer that initiated it\n self.atm.update_banking_record(txn, self.frame)\n\n # 3 --> Process the Transaction \n self.atm.process_transaction(txn, self.frame)\n\n # ~ Print based on a cycle count (optional functionality)\n self.atm.print_using_base(10, self.frame, Customer)\n #self.atm.print_using_base(10, self.frame, BankingRecord)\n\n except Exception:\n logger.exception(\"Error: \")", "def callUpdateTable(self):\r\n self.updateTable()", "def _do_change_row(self, treeview):\n _return = False\n\n self.treeview.handler_block(self._lst_handler_id[0])\n\n (_model, _row) = treeview.get_selection().get_selected()\n try:\n _level = _model.get_value(_row, 11)\n except TypeError:\n _level = None\n\n _columns = treeview.get_columns()\n\n # Change the column headings depending on what is being selected.\n if _level == 'mission':\n _headings = [\n _(u\"Mission ID\"),\n _(u\"Description\"),\n _(u\"Units\"),\n _(u\"Start Time\"),\n _(u\"End Time\"),\n _(u\"\"),\n _(u\"\"),\n _(u\"\")\n ]\n elif _level == 'phase':\n _headings = [\n _(u\"Phase ID\"),\n _(u\" Code\\t\\tDescription\"),\n _(u\"Units\"),\n _(u\"Start Time\"),\n _(u\"End Time\"),\n _(u\"\"),\n _(u\"\"),\n _(u\"\")\n ]\n elif _level == 'environment':\n _headings = [\n _(u\"Environment ID\"),\n _(u\"Condition\"),\n _(u\"Units\"),\n _(u\"Minimum Value\"),\n _(u\"Maximum Value\"),\n _(u\"Mean Value\"),\n _(u\"Variance\"),\n _(u\"\")\n ]\n else:\n _headings = []\n\n i = 0\n for _heading in _headings:\n _label = gtk.Label()\n _label.set_line_wrap(True)\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_markup(\"<span weight='bold'>\" + _heading + \"</span>\")\n _label.set_use_markup(True)\n _label.show_all()\n _columns[i].set_widget(_label)\n\n i += 1\n\n self.treeview.handler_unblock(self._lst_handler_id[0])\n\n return _return", "def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)", "def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)", "def build_tree(self):\n active = self.get_active()\n family = self.dbstate.db.get_family_from_handle(active)\n self.goto_handle(handle=family)", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def selectTree(self, event):\n item = self.verDatos.selection()\n self.idInteger.set(self.verDatos.item(item)['values'][0]) \n self.tituloString.set(self.verDatos.item(item)['values'][1]) \n self.descripcionString.set(self.verDatos.item(item)['values'][2])", "def update_vluln_table():" ]
[ "0.71143794", "0.66827446", "0.6435092", "0.62941474", "0.6287067", "0.6279537", "0.61216444", "0.60320663", "0.59254116", "0.5900165", "0.58603156", "0.5796129", "0.56489253", "0.56352526", "0.55968213", "0.5595423", "0.55919605", "0.55464005", "0.5537613", "0.5521705", "0.5494953", "0.5485696", "0.5470691", "0.54269934", "0.5420541", "0.53408873", "0.5306749", "0.52951396", "0.5290646", "0.52671957" ]
0.8024838
0
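As context for the `update_table` document above, a minimal self-contained sketch of the surrounding class it implies — a sqlite3 cursor plus a ttk.Treeview. The table schema, column names, and widget layout here are assumptions for illustration, not part of the dataset.

import sqlite3
import tkinter as tk
from tkinter import ttk

class TransactionViewer:
    def __init__(self, root):
        # In-memory database with a throwaway schema, purely for illustration.
        self.conn = sqlite3.connect(":memory:")
        self.cursor = self.conn.cursor()
        self.cursor.execute(
            "CREATE TABLE transactions (id INTEGER PRIMARY KEY, payee TEXT, amount REAL)")
        self.tree = ttk.Treeview(root, columns=("payee", "amount"))
        self.tree.pack(fill="both", expand=True)

    def update_table(self):
        # Same pattern as the document above: re-query, clear the tree, then
        # re-insert, using column 0 as the row label and the rest as values.
        self.cursor.execute("""SELECT * FROM transactions""")
        result = self.cursor.fetchall()
        self.tree.delete(*self.tree.get_children())
        for item in result:
            self.tree.insert('', 'end', text=item[0], values=item[1:])

root = tk.Tk()
viewer = TransactionViewer(root)
viewer.cursor.execute("INSERT INTO transactions VALUES (1, 'coffee', 3.50)")
viewer.update_table()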
Writes to the student/client's STDIN. Client should create a segment and send it to the student/server. Only checks that a segment is sent and contains the data (by checking segment length).
def client_sends():
    test_str = "t35t1nG cl13nT 53nd1nG\n"
    server = start_server()
    client = start_client()

    write_to(client, test_str)
    segments = read_segments_from(client)
    if not segments:
        return False

    # The first segment should be one sent from the client, and should have the
    # correct length.
    segment = segments[0]
    return (
        str(segment.source_port) == CLIENT_PORT and
        segment.length == CTCP_HEADER_LEN + len(test_str)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags", "def writer(self):\n while self.alive:\n try:\n if controlEvent.isSet() == False:\n self.alive = False\n self.thread_read.join()\n break\n data = self.socket.recv(1024)\n if not data:\n break\n #if self.ser_newline and self.net_newline:\n # do the newline conversion\n # XXX fails for CR+LF in input when it is cut in half at the begin or end of the string\n #data = ser_newline.join(data.split(net_newline))\n # Only send data to serial if it is in active state\n if controlEvent.isSet() == True:\n self.serial.write(data) # get a bunch of bytes and send them\n # the spy shows what's on the serial port, so log it after converting newlines\n if self.spy:\n sys.stdout.write(codecs.escape_encode(data)[0])\n sys.stdout.flush()\n except socket.timeout:\n continue\n except socket.error, msg:\n sys.stderr.write('writer socket.error: %s\\n' % msg)\n # probably got disconnected\n break\n except IOError, msg:\n sys.stderr.write('writer IOError: %s\\n' % msg)\n except Exception, msg:\n sys.stderr.write('writer Other Exception: %s\\n' % msg)\n #self.alive = False", "def write(self):\n\n while self.dowrite:\n data = sys.stdin.readline()\n if (self.algo == \"rsa\"):\n data = self.ras_encrypt(data)\n if (self.algo == \"des\"):\n data = self.des_encrypt(data)\n if (self.algo == \"3des\"):\n data = self.triple_des_encrypt(data)\n if (self.algo == \"aes\"):\n data = self.aes_encrypt(data)\n self.conn.send(data)\n\n if (data.strip() == self.exitcode):\n self.conn.shutdown(socket.SHUT_RDWR)\n self.conn.close()\n self.dowrite = False", "def handle_write(self):\n self.initiate_send()", "def _on_stdin_read(self, data):\n if not self.opts[\"udp\"]:\n self.fire(write(data))\n else:\n self.fire(write((self.host, self.port), data))", "def writeInput(self):\n\n #self.collect.writeInput()", "def stdin_read(self, data):\n self.write_master(data)", "def send_after_fin():\n test_str = make_random(100)\n test_str_fin = \"s3nd 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was received.\n time.sleep(1)\n segments = read_segments_from(server)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. 
It should continue sending data to the client.\n write_to(server, test_str_fin)\n return len(read_segments_from(server)) > 0", "def write(self):\r\n assert self.status == SEND_ANSWER\r\n sent = self.socket.send(self.message)\r\n if sent == len(self.message):\r\n self.status = WAIT_LEN\r\n self.message = ''\r\n self.len = 0\r\n else:\r\n self.message = self.message[sent:]", "def write( self, data ):\n os.write( self.stdin.fileno(), data )", "def server_do(self,input, connstream):\r\n pass", "def send(self, data):\n\n if self.subprocess.poll() is None:\n try:\n self.subprocess.stdin.write(\"{}\\n\".format(str(data).encode()))\n except IOError as e:\n logging.warning(\"IPC: Failed to send data! IOError: {}\".format(e))\n\n logging.debug(\"IPC: {}\".format(str(data)))\n else:\n logging.error(\"IPC: Process is dead! Poll: {}\".format(self.subprocess.poll()))", "def write(self, data):\r\n try:\r\n char_handle = self._stdinout_characteristic.getHandle()\r\n bytes_sent = 0\r\n while bytes_sent < len(data):\r\n # Computing data to send.\r\n bytes_to_send = min(\r\n self._MAXIMUM_MESSAGE_SIZE_BYTES,\r\n len(data) - bytes_sent\r\n )\r\n data_to_send = data[bytes_sent:bytes_sent + bytes_to_send]\r\n\r\n # Writing data.\r\n self._node.writeCharacteristic(\r\n char_handle,\r\n data_to_send,\r\n True)\r\n bytes_sent += bytes_to_send\r\n\r\n # Calling on-write callback for a debug characteristic.\r\n self.on_write_characteristic(\r\n self._stdinout_characteristic, data_to_send, True)\r\n\r\n return bytes_sent\r\n\r\n except BTLEException as e:\r\n self._node._unexpected_disconnect()", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def write(self):\n assert self.status == SEND_ANSWER\n sent = self.socket.send(self.message)\n if sent == len(self.message):\n self._set_status(WAIT_LEN)\n self.message = b''\n self.len = 0\n else:\n self.message = self.message[sent:]", "def send(self, sendstring):\n self.__proc.stdin.write(sendstring+'\\n')", "def s_write(self, data):\n self.s.flushOutput()\n\n if self.s.is_open:\n try:\n self.s.write(data)\n if self.log_output:\n self.logfile.write('\\nIN :' + str(len(data)) + '[' + hexlify(data) + ']' + '\\n')\n except Exception as e:\n print(\"Could not write to port \" + str(e))\n else:\n raise IOError('Comport is not open, use ctl_connect()')", "def _handle_writing(self, soc):\n self._log(\"writing %r\" % self._writing[soc])\n sent = soc.send(self._writing[soc])\n if not sent:\n self._handle_error(soc)\n # Offsets would be more efficient, but this is python so it's not worth it.\n self._writing[soc] = self._writing[soc][sent:]\n if not self._writing[soc]:\n # Finished writing the whole thing.\n self._cleanup(soc)", "def write( shell, data ):\n #print 'cmd: ' + data\n global waiting\n os.write( shell.stdin.fileno(), data )\n waiting = True", "def send_data(sock):\n while True:\n data = sys.stdin.readline()\n sock.send(data.encode())", "def on_write_characteristic(self, 
characteristic, data, status):\r\n try:\r\n if len(self._listeners) == 0:\r\n return\r\n\r\n data_str = self._decode_data(data)\r\n\r\n if characteristic.uuid == \\\r\n Debug.DEBUG_STDINOUT_BLUESTSDK_SERVICE_UUID:\r\n for listener in self._listeners:\r\n # Calling user-defined callback.\r\n self._thread_pool.submit(listener.on_stdin_send(\r\n self,\r\n data_str[0:self._MAXIMUM_MESSAGE_SIZE_BYTES],\r\n status))\r\n except BTLEException as e:\r\n self._node._unexpected_disconnect()", "def segment_truncated():\n test_str = \"n0t trunc4t3d 139482793 912847 192874 1928\\n\"\n truncated_str = DEBUG_TRUNCATE + \"trunc4t3d 139482793 912847 192874 1928\\n\"\n server = start_server()\n client = start_client(reference=True)\n\n # Send full segment.\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) != test_str:\n return False\n\n # Write the truncated segment. Nothing should be read from the server.\n write_to(client, truncated_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) == truncated_str:\n return False\n\n return True", "def send_msg(self, msg):\n self.proc.stdin.write(msg)", "def write(self, segment, result):\n pass", "def run(self):\n self.socket.connect()\n with open('src/inputs/output.file', 'rb') as f:\n self.sent_bytes = f.read()\n self.socket.send(self.sent_bytes)\n self.socket.disconnect()\n self.socket.close()", "def send_stdin(self, s_bytes):\n self._proc.stdin.write(s_bytes)\n self._proc.stdin.flush()", "def client_receives():\n test_str = \"t35t1nG cl13nT r3c31\\/1NG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments:\n return False\n\n # The first segment should be one received from the client, and should have\n # the correct length.\n segment = segments[0]\n return (\n str(segment.dest_port) == SERVER_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )" ]
[ "0.6080363", "0.57543296", "0.5716248", "0.56101906", "0.56061274", "0.54835385", "0.546739", "0.5389556", "0.53850347", "0.5369113", "0.5360866", "0.53589594", "0.5357063", "0.5339195", "0.5339195", "0.5339195", "0.5306196", "0.5294233", "0.52678835", "0.5245546", "0.5234753", "0.5224173", "0.52038056", "0.51850027", "0.5178073", "0.5177216", "0.5144604", "0.509942", "0.5031988", "0.5026102" ]
0.5910685
1
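The helpers used by `client_sends` (start_server, start_client, write_to, read_segments_from) are not included in this record; below is a plausible sketch of the subprocess plumbing they imply. Every name, flag, and behavior here is an assumption for illustration only, not the actual harness.

import subprocess

CLIENT_PORT = "9999"   # hypothetical constant mirroring the test's usage

def start_client(binary="./ctcp", server_port="12345"):
    # Assumption: launch the solution under test with pipes so the harness
    # can feed stdin and observe the debug output where segments are logged.
    return subprocess.Popen(
        [binary, "-p", CLIENT_PORT, "-c", "localhost:" + server_port],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )

def write_to(proc, data):
    # Feed test input to the process, as write_to(client, test_str) does above.
    proc.stdin.write(data.encode())
    proc.stdin.flush()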
Sends two segments. Makes sure each has the correct checksum by comparing it to the checksum from the reference solution.
def correct_checksum():
    test_strs = ["ch3ck1nG c0rr3ct ch3cksu|\/|\n", "y3T an0th3r str1ng0_x\/.!&\n"]

    def test_checksum(test_str):
        server = start_server()
        client = start_client()

        write_to(client, test_str)
        segments = read_segments_from(client)
        if not segments:
            return False
        teardown()

        # Start reference solution to get answers.
        ref_server = start_server(port=REF_PORT, reference=True)
        ref_client = start_client(server_port=REF_PORT, reference=True)

        # Get reference checksum.
        write_to(ref_client, test_str)
        ref_segment = read_segments_from(ref_client)[0]
        ref_checksum = ref_segment.checksum

        # Check the first sent segment.
        segment = segments[0]

        # Checksum equal to the reference checksum.
        if segment.checksum == ref_checksum:
            return True

        # Maybe they also set an ACK for this segment. Compare with the computed
        # checksum.
        return int(segment.checksum, 16) == segment.c_repr.cksum

    return reduce(lambda a, b: a and b, [test_checksum(t) for t in test_strs])
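For reference alongside the checksum test above, a sketch of the standard 16-bit ones'-complement Internet checksum (RFC 1071) — assuming, without confirmation from this record, that cTCP segments use that algorithm.

def internet_checksum(data: bytes) -> int:
    # Sum the data as big-endian 16-bit words, folding carries back into
    # the low 16 bits, then return the ones' complement of the folded sum.
    if len(data) % 2:
        data += b"\x00"                      # pad odd-length input
    total = 0
    for i in range(0, len(data), 2):
        total += (data[i] << 8) | data[i + 1]
        total = (total & 0xFFFF) + (total >> 16)
    return ~total & 0xFFFF

# A segment whose checksum field holds this value re-sums to 0xFFFF on
# receipt, which is how a receiver validates it.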
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify( fasta1, fasta2, num_iterations, fragment_size,\n stdout = sys.stdout, quiet = False ):\n if not quiet:\n options.stdout.write(\"verifying %s and %s using %i random segments of length %i\\n\" %\\\n (fasta1.getDatabaseName(),\n fasta2.getDatabaseName(),\n num_iterations,\n fragment_size ))\n options.stdout.flush()\n nerrors = 0\n for x in range(num_iterations):\n contig, strand, start, end = fasta1.getRandomCoordinates( fragment_size )\n s1 = fasta1.getSequence(contig,strand,start,end)\n s2 = fasta2.getSequence(contig,strand,start,end)\n if s1 != s2:\n if not quiet:\n options.stdout.write(\"discordant segment: %s:%s:%i:%i\\n%s\\n%s\\n\" %\\\n (contig, strand, start, end, s1, s2) )\n nerrors += 1\n return nerrors", "def test_multiple_segments(self):\n socket = Mock()\n data = service_call.encode_call('bar', [10])\n socket.recv = Mock()\n socket.recv.side_effect = [data[:3], data[3:]]\n\n service_call.handle_connection(self.handlers, socket)\n self.handlers['bar'].assert_any_call([10])", "def test_script_to_fs_two_chunks():\n expected_result_v1 = '\\n'.join(TEST_SCRIPT_FS_V1_HEX_LIST + [''])\n expected_result_v2 = '\\n'.join(TEST_SCRIPT_FS_V2_HEX_LIST + [''])\n\n with mock.patch('uflash._FS_START_ADDR_V1', 0x38C00), \\\n mock.patch('uflash._FS_END_ADDR_V1', 0x3F800):\n result_v1 = uflash.script_to_fs(TEST_SCRIPT_FS, uflash._MICROBIT_ID_V1)\n result_v2 = uflash.script_to_fs(TEST_SCRIPT_FS, uflash._MICROBIT_ID_V2)\n\n assert result_v1 == expected_result_v1\n assert result_v2 == expected_result_v2", "def correct_header_fields():\n test_str = \"c0rrect_!!heAd3R fi3ld5__%%!! @\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n\n # Check the first sent segment. Should have all the same header fields as\n # the reference.\n segment = segments[0]\n\n # Check the flags first. Maybe decided to ACK all segments.\n if not segment.has_same_flags(ref_segment):\n if \"ACK\" in segment.flags:\n segment.flags.remove(\"ACK\")\n\n return (\n segment.seqno == ref_segment.seqno and\n (segment.ackno == 0 or segment.ackno == ref_segment.ackno) and\n segment.length == ref_segment.length and\n segment.has_same_flags(ref_segment) and\n segment.window == ref_segment.window and\n (segment.checksum == ref_segment.checksum or\n int(segment.checksum, 16) == segment.c_repr.cksum)\n )", "def TCP(conn, addr):\n buffer = array('B', [0] * 300)\n cnt = 0\n while True:\n if cnt < 60000: cnt = cnt + 1\n else: cnt = 1\n try:\n conn.recv_into(buffer)\n TID0 = buffer[0] #Transaction ID to sync\n TID1 = buffer[1] #Transaction ID \n ID = buffer[6] #Unit ID\n FC = buffer[7]\n mADR = buffer[8]\n lADR = buffer[9]\n ADR = mADR * 256 + lADR\n LEN = buffer[10] * 256 + buffer[11]\n BYT = LEN * 2\n print(\"Received = \", buffer[0:13 + buffer[12]])\n if (FC in [1, 2, 3, 4]): # Read Inputs or Registers\n DAT = array('B')\n if FC < 3:\n BYT = ceil(LEN / 8) # Round off the no. of bytes\n v = 85 # send 85,86.. 
for bytes.\n for i in range(BYT):\n DAT.append(v)\n v = (lambda x: x + 1 if (x < 255) else 85)(v)\n else:\n DAT = array('B', np.arange(cnt, LEN+cnt, dtype=np.dtype('>i2')).tobytes())\n print(\"TID = %d, ID= %d, Fun.Code= %d, Address= %d, Length= %d\" \\\n %((TID0 * 256 + TID1), ID, FC, ADR, LEN))\n conn.send(\n array('B', [TID0, TID1, 0, 0, 0, BYT + 3, ID, FC, BYT]) + DAT)\n elif (FC in [5, 6, 15, 16]): # Write Registers\n BYT = buffer[12]\n conn.send(\n array('B', [TID0, TID1, 0, 0, 0, 6, ID, FC, mADR, lADR, buffer[10], buffer[11]]))\n buf = buffer[13:(13 + BYT)]\n print(\"TID = %d, ID= %d, Fun.Code= %d, Address= %d, Length= %d, Bytes= %d\" \\\n %((TID0 * 256 + TID1), ID, FC, ADR, LEN, BYT))\n if FC == 5 or FC == 15:\n message = 'bytes: '+ str(unpack('B' * BYT, buf))\n elif FC == 6 or FC == 16:\n message = str(unpack('>' + 'H' * int(BYT / 2), buf))\n print(\"Received Write Values =\", message)\n else:\n print(\"Funtion Code %d Not Supported\" % FC)\n exit()\n except Exception as e:\n print(e, \"\\nConnection with Client terminated\")\n exit()", "def client_sends():\n test_str = \"t35t1nG cl13nT 53nd1nG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n\n # The first segment should be one sent from the client, and should have the\n # correct length.\n segment = segments[0]\n return (\n str(segment.source_port) == CLIENT_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )", "def send_one_file(middlebox_module, testing_part_1):\n middlebox1 = middlebox_module.WanOptimizer()\n middlebox2 = middlebox_module.WanOptimizer()\n wide_area_network = wan.Wan(middlebox1, middlebox2)\n\n # Initialize client connected to middlebox 1.\n client1_address = \"1.2.3.4\"\n client1 = client.EndHost(\"client1\", client1_address, middlebox1)\n\n # Initialize client connected to middlebox 2.\n client2_address = \"5.6.7.8\"\n client2 = client.EndHost(\"client2\", client2_address, middlebox2)\n\n # Send a file from client 1 to client 2.\n filename = \"sample.txt\"\n client1.send_file(filename, client2_address)\n\n # Make sure that the files have the same contents.\n with open(filename, \"rb\") as input_file:\n input_data = input_file.read()\n\n output_file_name = \"{}-{}\".format(\"client2\", filename)\n with open(output_file_name, \"rb\") as output_file:\n result_data = output_file.read()\n # Remove the output file just created.\n os.remove(output_file_name)\n\n if input_data != result_data:\n raise Exception(\n \"The file received did not match the file sent. 
File received had: \" +\n \"{}\\n and file sent had: {}\\n\".format(result_data, input_data))", "def test_one_fragment(self):\n pkt = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / ICMPv6EchoRequest()\n / Raw(\"X\" * 1600)\n )\n frags = fragment_rfc8200(pkt, 1, 400)\n\n # send a fragment with known id\n self.send_and_expect(self.src_if, [frags[0]], self.dst_if)\n\n # send an atomic fragment with same id - should be reassembled\n pkt = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / IPv6ExtHdrFragment(id=1)\n / ICMPv6EchoRequest()\n )\n rx = self.send_and_expect(self.src_if, [pkt], self.dst_if)\n\n # now forward packets matching original reassembly, should still work\n rx = self.send_and_expect(self.src_if, frags[1:], self.dst_if)", "def test_send_second_file():\n\n # Generate the blocks for the test file which is not present on the server\n test_file = os.path.join(os.path.dirname(__file__),\n \"../test_files/debashis-rc-biswas-3U4gGsGNsMY-unsplash.jpg\")\n # Ask the server for the hash of the last block\n response = client.get(\"/latest_block_hash\")\n last_block_hash = response.json()[\"last_block_hash\"]\n blocks = generate_blocks(test_file, last_block_hash)\n # Collect all blocks into a single binary file using pickle\n blocks_pickled = pickle.dumps(blocks)\n # Send the collected blocks in a single transfer to the test server\n response = client.post(\"/send\",\n files={\"file\": blocks_pickled})\n assert response.ok\n assert response.json() \\\n == {\"success\": True,\n \"new_file\": True,\n \"hash\": \"415d4f66e1b8b9083014dcdca5ddd7d1dcca3f5a4a120603169b951b1c5fa0c9\",\n \"index_all\": 1704}", "def send_command_success(self, sn: TransactionID, destination: tuple, source: tuple):\n pass", "async def async_handle_direct_ack(self, cmd1, cmd2, target, user_data, hops_left):\n # Need to make sure the ACK has time to aquire the lock\n await asyncio.sleep(0.05)\n if self._response_lock.locked():\n await self._direct_response.put(ResponseStatus.SUCCESS)\n self._update_subscribers_on_direct_ack(\n cmd1, cmd2, target, user_data, hops_left\n )\n await asyncio.sleep(0.05)", "def formation():\n\tglobal c1, c2\n\tglobal a1, a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the arrived message\n\tprint a2, ' >> ', msg\n\tif msg != 'Arrived':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tmsg1, msg2 = {}, {}\n\t\tmsg1['msg'] = 'FORMATION'\n\t\tmsg2['msg'] = 'FORMATION'\n\t\t\n\t\tmsg1['arg1'] = init1\n\t\tmsg1['arg2'] = end1\n\t\tmsg1['arg3'] = step\n\n\t\tmsg2['arg1'] = init2\n\t\tmsg2['arg2'] = end2\n\t\tmsg2['arg3'] = step\n\n\t\tc1.send(json.dumps(msg1))\n\t\tc2.send(json.dumps(msg2))\n\t\twhile True:\n\t\t\tmsg1 = c1.recv(BUF_SIZE)\n\t\t\tmsg2 = c2.recv(BUF_SIZE)\n\t\t\tif msg1 == 'DONE' and msg2 == 'DONE':\n\t\t\t\tbreak\n\t\t\telif msg1 != 'Arrived' or msg2 != 'Arrived':\n\t\t\t\terror(msg1)\n\t\t\t\terror(msg2)\n\t\t\t\tstate = 9\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tnew_msg = 'GO'\n\t\t\t\tc1.send(new_msg)\n\t\t\t\tc2.send(new_msg)\n\t\t\n\t\tstate += 1", "def test_send_positive():\n\n # Generate the blocks for the test file\n test_file = os.path.join(os.path.dirname(__file__),\n \"../test_files/isaac-martin-61d2hT57MAE-unsplash.jpg\")\n blocks = generate_blocks(test_file, '0')\n # Collect all blocks into a single binary file using pickle\n blocks_pickled = pickle.dumps(blocks)\n 
# Send the collected blocks in a single transfer to the test server\n response = client.post(\"/send\",\n files={\"file\": blocks_pickled})\n assert response.ok\n assert response.json() \\\n == {\"success\": True,\n \"new_file\": True,\n \"hash\": \"45f293033312d42815155e871f37b56b4de9b925c07d4a5f6262320c1627db12\",\n \"index_all\": 5285}", "def test_burst_order(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n sent_envelopes = [\n self._make_envelope(addr_1, addr_2, i, i - 1)\n for i in range(1, self.NB_ENVELOPES + 1)\n ]\n for envelope in sent_envelopes:\n self.multiplexer_client_1.put(envelope)\n\n received_envelopes = []\n for _ in range(1, self.NB_ENVELOPES + 1):\n envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n received_envelopes.append(envelope)\n\n # test no new message is \"created\"\n with pytest.raises(Empty):\n self.multiplexer_client_2.get(block=True, timeout=1)\n\n assert len(sent_envelopes) == len(\n received_envelopes\n ), f\"expected number of envelopes {len(sent_envelopes)}, got {len(received_envelopes)}\"\n for expected, actual in zip(sent_envelopes, received_envelopes):\n assert expected.message == actual.message, (\n \"message content differ; probably a wrong message \"\n \"ordering on the receiving end\"\n )", "def send_message_success(self, sn: TransactionID, destination: tuple, source: tuple):\n pass", "def try_combine(self, other):\n if self.saddr <= other.saddr and other.eaddr <= self.eaddr:\n self.sync_vars |= other.sync_vars\n return True\n return False", "def test_process_barcode_paired_end_data(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"AAAATTTTCCCCGGGG\",\r\n np.arange(3, 19, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"TCCCCGGGG\", np.arange(3, 12, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=True, rev_comp_bc2=True)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATTTTGGA', '+', \"('&%$&%$\", '']\r\n\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TTTCCCCGGGG', '+', ')*+,-./0123', '']\r\n\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'CCGGGG', '+', \"'()*+,\", '']\r\n\r\n self.assertEqual(actual_reads, expected_reads)", "def __send_short(self, msg_id, param1, param2):\n data_out = struct.pack(\"<HBBBB\", msg_id, param1, param2,\n self.__dst, self.__src)\n if self.__debug:\n print \">>> %s\" % binascii.hexlify(data_out)\n self.__ser.write(data_out)\n self.__ser.flush()", "def messageHandler_MulticastBasedCoordination(self, msg):\n\n data = msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received msg {1} from ID {2}'.format(self.CommID, data, sender))\n if data[0] == 'remainder':\n origin = data[1]\n remainder = copy.deepcopy(data[2])\n path = copy.deepcopy(data[3])\n path_schedules = copy.deepcopy(data[4])\n\n if not self.isGasBoiler():\n\n # is BES's load included in the received remainder?\n if self.CommID in path: #load included\n\n # find BES's index in path\n for p in range(len(path)):\n if path[p] == self.CommID:\n break\n\n #find origin index in list of origins\n for o in range(len(self.origins)):\n if self.origins[o] == origin:\n 
break\n\n if self.OPTcriterion == 'maxmindiff':\n criterion_1 = max(remainder) - min(remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion_1 = 0\n for a in range(len(remainder)):\n criterion_1 += abs(remainder[a])\n\n #print 'ID {0}: I am in path at index {1} ({2}) | origin is {3} at index {4} ({5}) | max-min-diff is {6}, global min for this origin is {7}'.format(self.CommID, p, path[p], origin, o, self.origins[o], criterion_1, self.globalMin[o])\n\n if len(path) == self.pathLengths[o]: # if remainder has maximal known path length\n # try to improve it by choosing a new schedule\n\n self.chosenScheduleIndex = copy.deepcopy(path_schedules[p])\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n self.selectBestSchedule(copy.deepcopy(remainder))\n\n new_remainder = copy.deepcopy(remainder)\n #update remainder\n for t in range(len(remainder)):\n new_remainder[t] -= self.EConsumptionScheduleCurves[path_schedules[p]][t]\n new_remainder[t] += self.EConsumptionChosenSchedule[t]\n\n #new minimum origin??\n if self.OPTcriterion == 'maxmindiff':\n criterion_2 = max(new_remainder) - min(new_remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion_2 = 0\n for a in range(len(remainder)):\n criterion_2 += abs(new_remainder[a])\n\n if self.globalMin[o] - criterion_2 > 0.1:\n #print 'ID {0}: found better max-min-diff for origin {1} | {2} --> {3}'.format(self.CommID, origin, self.globalMin[o], copy.deepcopy(criterion_2))\n\n new_path_schedules = copy.deepcopy(path_schedules)\n\n new_path_schedules[p] = copy.deepcopy(self.chosenScheduleIndex)\n\n self.globalMin[o] = copy.deepcopy(criterion_2)\n # check the functionality of the nex line, was:\n # self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.globalMinSchedIdx[o] = copy.deepcopy(new_path_schedules[p])\n self.pathLengths[o] = len(path)\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n for n in range(len(self.Neighbors)):\n self.sendMessage(self.Neighbors[n], 70 , ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(path), copy.deepcopy(new_path_schedules)])\n # =============================================================================================\n elif self.globalMin[o] - criterion_1 > 0.1:\n self.globalMin[o] = copy.deepcopy(criterion_1)\n self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.pathLengths[o] = len(path)\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n #multicast to all neighbors except sender:\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n # =============================================================================================\n #else:\n #print 'ID {0}: NO IMPROVEMENT WITH NEW SCHEDULE'.format(self.CommID)\n\n elif len(path) > self.pathLengths[o]:\n #print 'ID {0}: path is longer than known path for origin {1}'.format(self.CommID, origin)\n self.pathLengths[o] = len(path)\n\n self.globalMin[o] = copy.deepcopy(criterion_1)\n self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n #multicast to all neighbors except sender:\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] 
!= sender:\n self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n\n #elif self.globalMin[o] - criterion_1 > 0.1 and len(path) == self.pathLengths[o]: #new minimum\n # #print 'ID {0}: found better max-min-diff for origin {1}'.format(self.CommID, origin)\n # self.globalMin[o] = copy.deepcopy(criterion_1)\n # self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n # self.pathLengths[o] = len(path)\n # self.min_path[o] = copy.deepcopy(path)\n # self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n # #multicast to all neighbors except sender:\n # for n in range(len(self.Neighbors)):\n # if self.Neighbors[n] != sender:\n # self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n else:\n self.log_message('ID {0}: NOT DOING ANYTHING WITH REMAINDER')\n\n else: #load NOT included\n self.log_message('ID {0}: I am not in path and my load is NOT included in the remainder'.format(self.CommID))\n\n # assume no schedule to be chosen before and choose best fitting schedule for this remainder\n self.chosenScheduleIndex = -1\n self.selectBestSchedule(copy.deepcopy(remainder))\n\n new_remainder = copy.deepcopy(remainder)\n\n #update remainder with chosen load\n for t in range(len(remainder)):\n new_remainder[t] += self.EConsumptionChosenSchedule[t]\n\n if self.OPTcriterion == 'maxmindiff':\n criterion = max(new_remainder) - min(new_remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion = 0\n for a in range(len(remainder)):\n criterion += abs(new_remainder[a])\n\n #max_min_diff = max(new_remainder) - min(new_remainder)\n\n new_path = copy.deepcopy(path)\n new_path_schedules = copy.deepcopy(path_schedules)\n\n #update path and path_schedule fields\n new_path.append(self.CommID)\n new_path_schedules.append(self.chosenScheduleIndex)\n\n if origin in self.origins: # if origin of remainder is known\n\n #find origin index in list of origins\n for o in range(len(self.origins)):\n if self.origins[o] == origin:\n break\n\n #new minimal criterion?\n if self.globalMin[o] - criterion > 0.1 and len(new_path) == self.pathLengths[o]: #new minimal criterion\n self.globalMin[o] = copy.deepcopy(criterion)\n self.globalMinSchedIdx[o] = copy.deepcopy(self.chosenScheduleIndex)\n self.pathLengths[o] = len(new_path)\n self.min_path[o] = copy.deepcopy(new_path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n\n elif len(new_path) > self.pathLengths[o]:\n self.globalMin[o] = copy.deepcopy(criterion)\n self.globalMinSchedIdx[o] = copy.deepcopy(self.chosenScheduleIndex)\n self.pathLengths[o] = len(new_path)\n self.min_path[o] = copy.deepcopy(new_path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n else: #new origin\n self.origins.append(copy.deepcopy(origin))\n self.globalMin.append(copy.deepcopy(criterion))\n 
self.globalMinSchedIdx.append(copy.deepcopy(self.chosenScheduleIndex))\n self.pathLengths.append(len(new_path))\n self.min_path.append(copy.deepcopy(new_path))\n self.min_path_schedules.append(copy.deepcopy(new_path_schedules))\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n\n\n min_criterion = min(self.globalMin)\n\n #find index\n for m in range(len(self.globalMin)):\n if self.globalMin[m] == min_criterion:\n break\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[m]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[m]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n\n elif data[0] == 'minimalorigin':\n min_origin = copy.deepcopy(data[1])\n min_criterion = copy.deepcopy(data[2])\n #path_length = copy.deepcopy(data[3])\n min_path = copy.deepcopy(data[3])\n min_path_schedules = copy.deepcopy(data[4])\n\n\n # if number of participating BES in arrived solution is greater than known maximal path length\n if self.overall_max_path_length < len(min_path) and self.CommID in min_path:\n #print 'ID {0}: received longer path (old: {1}, new {2})'.format(self.CommID, self.overall_max_path_length, len(min_path))\n self.overall_max_path_length = len(min_path)\n self.overall_min = copy.deepcopy(min_criterion)\n\n #find index\n for u in range(len(min_path)):\n if min_path[u] == self.CommID:\n break\n\n #print 'ID {0}: choosing new schedule with index {1}'.format(self.CommID, min_path_schedules[u])\n #choose schedule corresponding to min origin\n self.chosenScheduleIndex = min_path_schedules[u]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #multicast information to all neighbors except sender\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(min_origin), copy.deepcopy(min_criterion), copy.deepcopy(min_path), copy.deepcopy(min_path_schedules)])\n #\n # else:\n # print 'ID {0}: unable to choose new schedule because I dont know origin {1}.'.format(self.CommID, min_origin)\n #\n #\n #\n # #if number of participating BES in arrived solution is equal to known maximal path length\n # elif self.overall_max_path_length == len(min_path):\n\n #print 'ID {0}: received new criterion with maximal known path length of {1}'.format(self.CommID, self.overall_max_path_length)\n elif self.overall_min - min_criterion > 0.1 and self.overall_max_path_length == len(min_path) and self.CommID in min_path: #received better criterion\n #print 'ID {0}: received better criterion (old: {1}, new {2})'.format(self.CommID, self.overall_min, min_criterion)\n self.overall_min = copy.deepcopy(min_criterion)\n\n\n #find index\n for u in range(len(min_path)):\n if min_path[u] == self.CommID:\n break\n\n #print 'ID {0}: received better criterion with path length {2}| choosing new schedule with index 
{1}'.format(self.CommID, min_path_schedules[u], len(min_path))\n #choose schedule corresponding to min origin\n self.chosenScheduleIndex = min_path_schedules[u]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #multicast information to all neighbors except sender\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(min_origin), copy.deepcopy(min_criterion), copy.deepcopy(min_path), copy.deepcopy(min_path_schedules)])\n\n else:\n self.log_message('ID {0}: EITHER PATH IS SMALLER THAN LONGEST KNOWN OR MINIMUM IS WORSE'.format(self.CommID))\n #else:\n # print 'ID {0}: received smaller path length {1}, ignore!'.format(self.CommID, len(min_path))", "def _fuse(track1, track2):\n if track1.fused or track2.fused:\n return False\n matched_box_num = 0\n if track1.end_fid - P['th_track_fuse_len'] + 1 < track2.start_fid or len(track1) < P['th_track_fuse_len']\\\n or track1.start_fid > track2.start_fid:\n # there is no chance that these two tracks are to be fused\n return False\n\n start_ind1 = max(0, len(track1) - (track1.end_fid - track2.start_fid + 1))\n start_ind2 = 0\n\n while start_ind1 < len(track1) and start_ind2 < len(track2):\n fid1, fid2 = track1[start_ind1].frame_id, track2[start_ind2].frame_id\n if fid1 < fid2:\n start_ind1 += 1\n elif fid1 > fid2:\n start_ind2 += 1\n else:\n cx1, cy1 = track1[start_ind1].center\n cx2, cy2 = track2[start_ind2].center\n if abs(cx1 - cx2) < P['th_track_fuse_diff'] \\\n and abs(cy1 - cy2) < P['th_track_fuse_diff']:\n matched_box_num += 1\n start_ind1 += 1\n start_ind2 += 1\n if matched_box_num >= P['th_track_fuse_len']:\n break\n\n if matched_box_num < P['th_track_fuse_len']:\n return False\n else:\n track1.fused = True\n track2.fused = True\n print \"track1 {}-->{} track2 {}-->{}\".format(track1.start_fid, track1.end_fid, track2.start_fid,\n track2.end_fid)\n track1.append(track2)\n track1.segment_id = self._segment_index # segment id is updated\n return True", "def schc_fragmenter_send(msg, s, opt):\n assert type(msg) == bytearray # avoid compatibility problems\n debug_print(2, \"message:\", msg)\n # XXX assuming that the rule_id is not changed in a session.\n\n # check if the L2 size is enough to put the message.\n if opt.l2_size >= len(msg):\n debug_print(1, \"no need to fragment this message.\")\n return\n\n # prepare fragmenting\n factory = sfs.fragment_factory(frr, logger=debug_print)\n factory.setbuf(msg, dtag=opt.dtag)\n\n # main loop\n debug_print(1, \"L2 payload size: %s\" % opt.l2_size)\n\n global n_packet\n n_packet = 0\n\n while True:\n\n # CONT: send it and get next fragment.\n # WAIT_ACK: send it and wait for the ack.\n # DONE: dont need to send it.\n # ERROR: error happened.\n ret, tx_obj = factory.next_fragment(opt.l2_size)\n n_packet += 1\n\n # error!\n if ret == sfs.STATE.FAIL:\n raise AssertionError(\"something wrong in fragmentation.\")\n elif ret == sfs.STATE.DONE:\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n if opt.func_packet_loss and opt.func_packet_loss() == True:\n debug_print(1, \"packet dropped.\")\n else:\n print(\"SEND:\", tx_obj.packet)\n address = get_sockaddr(RECV_UDP_ADDRESS, RECV_UDP_PORT)\n s.sendto(tx_obj.packet, address)\n debug_print(1, \"sent :\", 
tx_obj.dump())\n debug_print(2, \"hex :\", tx_obj.full_dump())\n\n if factory.R.mode != SCHC_MODE.NO_ACK and ret != sfs.STATE.CONT:\n # WAIT_ACK\n # a part of or whole fragments have been sent and wait for the ack.\n debug_print(1, \"waiting an ack.\", factory.state.pprint())\n try:\n rx_data, peer = s.recvfrom(DEFAULT_RECV_BUFSIZE)\n debug_print(1, \"message from:\", peer)\n #\n ret, rx_obj = factory.parse_ack(rx_data, peer)\n debug_print(1, \"parsed:\", rx_obj.dump())\n debug_print(2, \"hex :\", rx_obj.full_dump())\n #\n if ret == sfs.STATE.DONE:\n # finish if the ack against all1 is received.\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n except Exception as e:\n if \"timeout\" in repr(e):\n debug_print(1, \"timed out to wait for the ack.\")\n else:\n debug_print(1, \"Exception: [%s]\" % repr(e))\n debug_print(0, traceback.format_exc())\n\n time.sleep(opt.interval)", "def checkHSPS(hsp1, hsp2, HSPMIN=100):\n dict_update = {18: 0}\n\n # Check if the hsp2 is in a different orientation than hsp1: 'sens': 14\n if hsp1[14] != hsp2[14]:\n # print(f'orientation wrong: {hsp1[14]} != {hsp2[14]}')\n return dict_update\n\n # Check is hsp2 inside hsp1 for the query sequence: 'qstart': 6, 'qend': 7,\n if hsp1[7] >= hsp2[7] and hsp1[8] <= hsp2[8]:\n # print(f'hsp2 inside hsp1 for query: {hsp1[6]} >= {hsp2[6]} and {hsp1[7]} <= {hsp2[7]}')\n return dict_update\n\n # Check is hsp1 inside hsp2 for the query sequence: 'qstart': 6, 'qend': 7,\n elif hsp1[7] <= hsp2[7] and hsp1[8] >= hsp2[8]:\n # print(f'hsp1 inside hsp2 for query: {hsp1[6]} <= {hsp2[6]} and {hsp1[7]} >= {hsp2[7]}')\n return dict_update\n\n # Check is hsp1 inside hsp2 for the subject sequence: 'sstart': 8, 'send': 9,\n elif hsp1[9] >= hsp2[9] and hsp1[10] <= hsp2[10]:\n # print(f'hsp2 inside hsp1 for subject: {hsp1[8]} >= {hsp2[8]} and {hsp1[9]} <= {hsp2[9]}')\n return dict_update\n\n # Check is hsp2 inside hsp1 for the subject sequence: 'sstart': 8, 'send': 9,\n elif hsp1[9] <= hsp2[9] and hsp1[10] >= hsp2[10]:\n # print(f'hsp1 inside hsp2 for subject: {hsp1[8]} <= {hsp2[8]} and {hsp1[9]} >= {hsp2[9]}')\n return dict_update\n\n # reject HSPs that are in different orientation: 'qstart': 6, 'qend': 7, 'sstart': 8, 'send': 9,\n # Query: ---- A ---- B ----- A = HSP1\n # Sbjct: ---- B ---- A ----- B = HSP2\n\n if (hsp1[8] - hsp2[8]) * (hsp1[10] - hsp2[10]) < 0:\n # print(f'HSPs are in different orientation 1: ({hsp1[7]} - {hsp2[7]}) * ({hsp1[9]} - {hsp2[9]}) ===> {(hsp1[7] - hsp2[7]) * (hsp1[9] - hsp2[9])} < 0')\n return dict_update\n elif (hsp1[7] - hsp2[7]) * (hsp1[9] - hsp2[9]) < 0:\n # print(f'HSPs are in different orientation 2: ({hsp1[6]} - {hsp2[6]}) * ({hsp1[8]} - {hsp2[8]}) ===> {(hsp1[6] - hsp2[6]) * (hsp1[8] - hsp2[8])} < 0')\n return dict_update\n\n overlap_q = (hsp2[7] - hsp1[8]) * (hsp2[8] - hsp1[7])\n overlap_s = (hsp2[9] - hsp1[10]) * (hsp2[10] - hsp1[9])\n\n # Accept non-overlapping HSPs in correct orientation\n if overlap_q > 0 and overlap_s > 0:\n # print(f'No overlap in query and subject: {overlap_q} > 0 and {overlap_s} > 0')\n dict_update[18] = 1\n return dict_update\n\n # Test if the query is overlaping\n elif overlap_q < 0:\n # print(f'Overlap in query: {overlap_q} > 0')\n dict_update = remove_overlap_query(hsp1=hsp1, hsp2=hsp2)\n\n # update the hsp2 array with the new values\n for index_key in dict_update:\n hsp2[index_key] = dict_update[index_key]\n\n overlap_s = (hsp2[9] - hsp1[10]) * (hsp2[10] - hsp1[9])\n\n # Test if the subject is overlaping after possible update of an overlaping query\n if 
overlap_s < 0 and hsp2[17] > 0:\n # print(f'Overlap in subject: {overlap_s} > 0')\n dict_update = remove_overlap_subject(hsp1=hsp1, hsp2=hsp2)\n\n # update the hsp2 array with the new values\n for index_key in dict_update:\n hsp2[index_key] = dict_update[index_key]\n\n # Filter out HSPs that are too short\n if hsp2[17] < HSPMIN:\n # print(f'HSP too short: {hsp2[17]} < {HSPMIN}')\n dict_update[18] = 0\n return dict_update\n\n # Set status to 1 for consistent HSPs\n dict_update[18] = 1\n return dict_update", "def test_host_routes_two_subnets_with_same_segment_association(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n with self.network() as network:\n net = network['network']\n segment = self._test_create_segment(\n network_id=net['id'],\n physical_network='physnet1',\n network_type=constants.TYPE_VLAN,\n segmentation_id=201)['segment']\n\n with self.subnet(network=network,\n segment_id=segment['id'],\n gateway_ip=gateway_ips[0],\n cidr=cidrs[0]) as subnet0, \\\n self.subnet(network=network,\n segment_id=segment['id'],\n gateway_ip=gateway_ips[1],\n cidr=cidrs[1]) as subnet1:\n subnet0 = subnet0['subnet']\n subnet1 = subnet1['subnet']\n\n req = self.new_show_request('subnets', subnet0['id'])\n res = req.get_response(self.api)\n res_subnet0 = self.deserialize(self.fmt, res)\n\n req = self.new_show_request('subnets', subnet1['id'])\n res = req.get_response(self.api)\n res_subnet1 = self.deserialize(self.fmt, res)\n\n self.assertEqual([], res_subnet0['subnet']['host_routes'])\n self.assertEqual([], res_subnet1['subnet']['host_routes'])", "def test_one_fragment(self):\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / ICMPv6EchoRequest()\n / Raw(\"X\" * 1600)\n )\n frags = fragment_rfc8200(pkt, 1, 400)\n\n # send a fragment with known id\n self.send_and_assert_no_replies(self.pg0, [frags[0]])\n\n # send an atomic fragment with same id - should be reassembled\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / IPv6ExtHdrFragment(id=1)\n / ICMPv6EchoRequest()\n )\n rx = self.send_and_expect(self.pg0, [pkt], self.pg0)\n self.assertNotIn(IPv6ExtHdrFragment, rx)\n\n # now finish the original reassembly, this should still be possible\n rx = self.send_and_expect(self.pg0, frags[1:], self.pg0, n_rx=1)\n self.assertNotIn(IPv6ExtHdrFragment, rx)", "def arm_second():\n\tglobal c1, c2\n\tglobal a1, a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg1 = c1.recv(BUF_SIZE) # wait for the arrival message from first copter\n\tprint a1, ' >> ', msg1\n\tif msg1 != 'Arrived':\n\t\terror(msg1)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = 'ARM'\n\t\tc2.send(new_msg)\n\t\tstate += 1", "def shared_cost_peering(self, as1, as2):\n self._connect_ases(as1, as2)\n set_community(self, as1, as2.asn, str(as1.asn) + ':1337')\n set_community(self, as2, as1.asn, str(as2.asn) + ':1337')", "def test_10_9_4_2_3_1_2(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n open(os.path.join('testcases', 
'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # First successful Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)\n del request, response\n\n # grantId is missing\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['response']['responseCode'], 102)", "def test_multihop_receiver_on_success(vo, did_factory, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n did = did_factory.upload_test_file(src_rse)\n rule_priority = 5\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None, priority=rule_priority)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n\n fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query({request['external_id']: {request['id']: request}})\n assert fts_response[request['external_id']][request['id']].job_response['priority'] == rule_priority\n\n # Two hops; both handled by receiver\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 2\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()", "def test_worker_conflict(self):\n\n # in first wave we send fragments which don't start at offset 0\n # then we send fragments with offset 0 on a different thread\n # then the rest of packets on a random thread\n first_packets = [[] for n in range(self.vpp_worker_count)]\n second_packets = [[] for n in range(self.vpp_worker_count)]\n rest_of_packets = [[] for n in range(self.vpp_worker_count)]\n for _, p in self.pkt_infos:\n wi = randrange(self.vpp_worker_count)\n second_packets[wi].append(p[0])\n if len(p) <= 1:\n continue\n wi2 = wi\n while wi2 == wi:\n wi2 = randrange(self.vpp_worker_count)\n 
first_packets[wi2].append(p[1])\n wi3 = randrange(self.vpp_worker_count)\n rest_of_packets[wi3].extend(p[2:])\n\n self.pg_enable_capture()\n self.send_packets(first_packets)\n self.send_packets(second_packets)\n self.send_packets(rest_of_packets)\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n for send_if in self.send_ifs:\n send_if.assert_nothing_captured()\n\n self.logger.debug(self.vapi.ppcli(\"show trace\"))\n self.logger.debug(self.vapi.ppcli(\"show ip6-full-reassembly details\"))\n self.logger.debug(self.vapi.ppcli(\"show buffers\"))\n self.vapi.cli(\"clear trace\")\n\n self.pg_enable_capture()\n self.send_packets(first_packets)\n self.send_packets(second_packets)\n self.send_packets(rest_of_packets)\n\n packets = self.dst_if.get_capture(len(self.pkt_infos))\n self.verify_capture(packets)\n for send_if in self.send_ifs:\n send_if.assert_nothing_captured()", "def send(self, outputs, fee):\n\t\ttotal = sum(i['value'] for i in outputs) + fee\n\t\tinput_accts = self.__choose_inputs(total)\n\t\tchange_acct = self.__fresh_account()\n\t\ttx, address_list = unsigned_transaction([i[\"address\"] for i in input_accts], outputs, fee, change_acct[\"address\"], self.testnet)\n\t\tacct_list = [self.__get_account(address) for address in address_list]\n\t\tpubkey_list = [acct[\"pubkey_hex\"] for acct in acct_list]\n\t\tsec_name_list = [acct[\"secret_name\"] for acct in acct_list]\n\t\thash_list = [str(base64.b64encode(i), 'utf-8') for i in prepare_signatures(tx, pubkey_list)]\n\t\tcommands = [(Command.SIGN_ECDSA, (secret_name, _hash)) for secret_name, _hash in zip(sec_name_list, hash_list)]\n\t\tstdout = self.punkr.batch_commands(*commands)\n\t\tsigs = []\n\t\ttry:\n\t\t\tfor out in stdout:\n\t\t\t\tr = int(base64.b64decode(out['r']))\n\t\t\t\ts = int(base64.b64decode(out['s']))\n\t\t\t\tif s > N//2:\n\t\t\t\t\ts = N - s\n\t\t\t\tsigs.append((r, s))\n\t\texcept:\n\t\t\traise RuntimeError(f\"Bunkr Operation SIGN-ECDSA failed with: {stdout}\")\n\t\treturn apply_signatures(tx, pubkey_list, sigs)" ]
[ "0.58773476", "0.5514789", "0.5365404", "0.5363739", "0.5349425", "0.53313994", "0.53235483", "0.5307787", "0.52593744", "0.5255213", "0.51900125", "0.51694834", "0.516163", "0.51526207", "0.5148607", "0.5139417", "0.50981134", "0.5078367", "0.50657135", "0.5062642", "0.5051597", "0.49762967", "0.49693355", "0.495959", "0.49591836", "0.49291223", "0.4913742", "0.4899878", "0.48974836", "0.48918495" ]
0.57196456
1
Sends a complete segment from reference/client to student/server, which should be processed correctly. Then sends a truncated segment, which should be ignored.
def segment_truncated():
    test_str = "n0t trunc4t3d 139482793 912847 192874 1928\n"
    truncated_str = DEBUG_TRUNCATE + "trunc4t3d 139482793 912847 192874 1928\n"
    server = start_server()
    client = start_client(reference=True)

    # Send full segment.
    write_to(client, test_str)
    time.sleep(TEST_TIMEOUT)
    if read_from(server, num_lines=1) != test_str:
        return False

    # Write the truncated segment. Nothing should be read from the server.
    write_to(client, truncated_str)
    time.sleep(TEST_TIMEOUT)
    if read_from(server, num_lines=1) == truncated_str:
        return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _done_sending(self):\n self.sfile.write('\\n')\n self.sfile.flush()", "def client_sends():\n test_str = \"t35t1nG cl13nT 53nd1nG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n\n # The first segment should be one sent from the client, and should have the\n # correct length.\n segment = segments[0]\n return (\n str(segment.source_port) == CLIENT_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )", "def no_excessive_retrans():\n test_str = DEBUG_IGNORE + \"r3tr4n5m15510ns~~~~~~~\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # Send a segment to reference server, which should ignore it. See how many\n # times it was sent.\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments or len(segments) != 6:\n return False\n\n # All segments should have the same content.\n orig_segment = segments[0]\n for segment in segments:\n if (\n segment.source != orig_segment.source or\n segment.source_port != orig_segment.source_port or\n segment.dest != orig_segment.dest or\n segment.dest_port != orig_segment.dest_port or\n segment.seqno != orig_segment.seqno or\n segment.ackno != orig_segment.ackno or\n segment.length != orig_segment.length or\n not segment.has_same_flags(orig_segment) or\n segment.window != orig_segment.window or\n segment.checksum != orig_segment.checksum\n ):\n return False\n\n return True", "def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags", "def _cmd_segment(args):\n cnarr = read_cna(args.filename)\n variants = load_het_snps(\n args.vcf,\n args.sample_id,\n args.normal_id,\n args.min_variant_depth,\n args.zygosity_freq,\n )\n results = segmentation.do_segmentation(\n cnarr,\n args.method,\n args.diploid_parx_genome,\n args.threshold,\n variants=variants,\n skip_low=args.drop_low_coverage,\n skip_outliers=args.drop_outliers,\n save_dataframe=bool(args.dataframe),\n rscript_path=args.rscript_path,\n processes=args.processes,\n smooth_cbs=args.smooth_cbs,\n )\n\n if args.dataframe:\n segments, dframe = results\n with open(args.dataframe, \"w\") as handle:\n handle.write(dframe)\n logging.info(\"Wrote %s\", args.dataframe)\n else:\n segments = results\n tabio.write(segments, args.output or segments.sample_id + \".cns\")", "def ignores_bad_seqno():\n test_str = \"cs144--cs144--cs144--cs144--cs144--cs144--cs144--cs144\\n\"\n bad_seqno_str = DEBUG_BAD_SEQNO + \"cs144cs144cs144cs144cs144cs144cs144cs144\\n\"\n server = start_server()\n client = start_client(reference=True)\n\n # Send full segment.\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) != test_str:\n return False\n segments = read_segments_from(server)\n first_segment = segments[0] if len(segments) > 0 else None\n\n # Write the bad segment. 
Nothing should be read from the server and no\n # ACKs should be sent.\n write_to(client, bad_seqno_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) == bad_seqno_str:\n return False\n\n # Make sure no ACKs are sent to the bad segment, or if an ACK is sent,\n # it is a duplicate ACK to a previous segment.\n segments = read_segments_from(server)\n if not segments:\n return False\n for segment in segments:\n if \"ACK\" in segment.flags and segment.source_port == CLIENT_PORT and \\\n (first_segment is None or segment.ackno != first_segment.ackno):\n return False\n\n return True", "def send_segment_document_to_xray_daemon(segment_document):\n try:\n xray_daemon = get_xray_daemon()\n except XRayDaemonNotFoundError:\n LOGGER.error(\"X-Ray Daemon not running, skipping send\")\n return\n header = (json.dumps(XRAY_DAEMON_HEADER),)\n document = json.dumps(segment_document, ensure_ascii=False, cls=StringJSONEncoder,)\n message = f\"{header}\\n{document}\"\n\n send_data_on_udp(\n ip_address=xray_daemon.ip_address, port=xray_daemon.port, data=message,\n )", "def split_segment(self):\n # Selection management\n selected_segment = \\\n self.controller.shared_data.obj_track.selected_segment_idx\n\n if len(selected_segment) > 1:\n messagebox.showerror('Warning',\n 'More than one segment is selected')\n return\n elif len(selected_segment) == 0:\n messagebox.showerror('Warning',\n 'No segment is selected')\n return\n else:\n segment_idx = selected_segment[0]\n df_segment = \\\n self.controller.shared_data.obj_track.get_segment(segment_idx)\n\n # Create interactivity\n del self.split_segment_interaction\n self.split_segment_interaction = SplitSegmentCallback(\n self.controller.shared_data,\n df_segment)\n\n self.split_segment_interaction.connect()", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def done_sending(self):\r\n self._flush(True)", "def _sense_worker(self):\n terminator = \";;\"\n while self.running.value:\n try:\n (conn, address) = self.socket.accept()\n with conn:\n received_coded = conn.recv(self.max_buffer_size)\n received = \"\"\n while received_coded:\n received += received_coded.decode()\n if terminator in received:\n received_split = received.split(terminator)\n processed = received_split[:-1]\n received = received_split[-1]\n self.received_address_queue.put((processed, address))\n received_coded = conn.recv(self.max_buffer_size)\n except socket.timeout: continue", "def flow_control():\n test_strs = [make_random(288) for _ in range(10)]\n stop_str = DEBUG_STOP + \"1t'5 h4mm3r t1m3!!!!!!!!\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # First write some segments to the server, then tell it to stop processing\n # segments. 
Get the last ackno from the server.\n write_to(client, test_strs[0])\n write_to(client, test_strs[1])\n time.sleep(TEST_TIMEOUT)\n read_segments_from(client)\n write_to(client, stop_str)\n server_segments = read_segments_from(server)\n if not server_segments:\n return False\n last_ackno = server_segments[-1].ackno\n\n # Send more segments.\n for i in range(2, len(test_strs)):\n write_to(client, test_strs[i])\n\n # Look at the last segment sent by the client.\n segments = read_segments_from(server)\n if not segments:\n return False\n segment = [s for s in segments if s.source_port == int(CLIENT_PORT)][-1]\n\n # If this sequence number is greater than the window size, then no flow\n # control was done.\n return segment.seqno <= last_ackno + MAX_SEG_DATA_SIZE", "def handle_write(self):\n self.initiate_send()", "def test_updating_a_segment(self):\n pass", "def _done_sending(self):\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def flushout(self, s):\n if s not in self.connections:\n # this could happen before, because a flushin might have deleted the client\n logging.error(\"BUG: Flushing out socket that is not on client list! Socket=%s\", str(s))\n return\n\n client = self.connections[s]\n try:\n sent = client.socket.send(client.bufout[:Connection.BUFSIZE])\n logging.info(\"Sent %d bytes to %s. Message:\\n%r\", sent, client, client.bufout[:sent])\n client.bufout = client.bufout[sent:] # leave remaining to be sent later\n except:\n logging.exception(\"flushout: send(%s)\", client)\n # logging.error(\"Cannot write to client %s. Closing\", client)\n self.delClient(client.socket)", "def _done_sending():\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def write(self, segment, result):\n pass", "def schc_fragmenter_send(msg, s, opt):\n assert type(msg) == bytearray # avoid compatibility problems\n debug_print(2, \"message:\", msg)\n # XXX assuming that the rule_id is not changed in a session.\n\n # check if the L2 size is enough to put the message.\n if opt.l2_size >= len(msg):\n debug_print(1, \"no need to fragment this message.\")\n return\n\n # prepare fragmenting\n factory = sfs.fragment_factory(frr, logger=debug_print)\n factory.setbuf(msg, dtag=opt.dtag)\n\n # main loop\n debug_print(1, \"L2 payload size: %s\" % opt.l2_size)\n\n global n_packet\n n_packet = 0\n\n while True:\n\n # CONT: send it and get next fragment.\n # WAIT_ACK: send it and wait for the ack.\n # DONE: dont need to send it.\n # ERROR: error happened.\n ret, tx_obj = factory.next_fragment(opt.l2_size)\n n_packet += 1\n\n # error!\n if ret == sfs.STATE.FAIL:\n raise AssertionError(\"something wrong in fragmentation.\")\n elif ret == sfs.STATE.DONE:\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n if opt.func_packet_loss and opt.func_packet_loss() == True:\n debug_print(1, \"packet dropped.\")\n else:\n print(\"SEND:\", tx_obj.packet)\n address = get_sockaddr(RECV_UDP_ADDRESS, RECV_UDP_PORT)\n s.sendto(tx_obj.packet, address)\n debug_print(1, \"sent :\", tx_obj.dump())\n debug_print(2, \"hex :\", tx_obj.full_dump())\n\n if factory.R.mode != SCHC_MODE.NO_ACK and ret != sfs.STATE.CONT:\n # WAIT_ACK\n # a part of or whole fragments have been sent and wait for the ack.\n debug_print(1, \"waiting an ack.\", factory.state.pprint())\n try:\n rx_data, peer = s.recvfrom(DEFAULT_RECV_BUFSIZE)\n debug_print(1, \"message from:\", peer)\n #\n ret, rx_obj = factory.parse_ack(rx_data, peer)\n debug_print(1, \"parsed:\", 
rx_obj.dump())\n debug_print(2, \"hex :\", rx_obj.full_dump())\n #\n if ret == sfs.STATE.DONE:\n # finish if the ack against all1 is received.\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n except Exception as e:\n if \"timeout\" in repr(e):\n debug_print(1, \"timed out to wait for the ack.\")\n else:\n debug_print(1, \"Exception: [%s]\" % repr(e))\n debug_print(0, traceback.format_exc())\n\n time.sleep(opt.interval)", "def save_send(socket, data):\r\n\r\n # We have no control about how much data the clients accepts,\r\n # thus we send in chunks until done\r\n while len(data) > 0:\r\n try:\r\n send_data_size = socket.send(data)\r\n # remove sent portion form data\r\n data = data[send_data_size:]\r\n except error as msg:\r\n # most likely socket busy, buffer full or not yet ready\r\n sleep(0.01)", "def upload_segment(\n self,\n segment: FusionSegment,\n *,\n jobs: int = 1,\n skip_uploaded_files: bool = False, # pylint: disable=unused-argument\n quiet: bool = False,\n ) -> FusionSegmentClient:\n self._status.check_authority_for_draft()\n try:\n with Tqdm(len(segment), disable=quiet) as pbar:\n return self._upload_segment(\n segment, jobs=jobs, skip_uploaded_files=skip_uploaded_files, pbar=pbar\n )\n except Exception:\n logger.error(\n UPLOAD_SEGMENT_RESUME_TEMPLATE,\n self._status.draft_number,\n self._status.draft_number,\n )\n raise", "def send(self):\n try:\n bytes_sent = self.socket.send(self.output.data)\n self.output.remove(bytes_sent)\n except:\n self.remove = True\n self.output.clear()", "def send_line(self, line):\n self.ser.write(line + \"\\r\")", "def mount_complete_and_send(mrecord, finish, reads=0, writes=0, estimated=False, max_duration=0, min_duration=0):\n\n DebugPrint(5, \"Processing tape drive mount record: %s, FINISH: %s\" % (mrecord, finish))\n\n # Empty usage record\n r = Gratia.UsageRecord(\"TapeDrive\")\n r.Grid(\"Local\")\n\n # The record Must have LocalUserId otherwise is quarantined. Adding a fake one\n r.LocalUserId('enstore')\n\n r.VOName(mrecord['storage_group'])\n\n # Naive timestamps (datetime obj) with actual local time zone (assume_local=True is the default)\n start = timeutil.datetime_to_unix_time(timeutil.datetime_to_utc(mrecord['mount_start'], naive=True))\n if not finish:\n finish = start\n duration = 0\n else:\n finish = timeutil.datetime_to_unix_time(timeutil.datetime_to_utc(finish, naive=True))\n duration = int(float(finish)-float(start))\n\n\n # Adding ID. Here, so finish is defined and is the # of seconds from epoch\n # TODO: is the ID OK? 
Unique enough?\n local_id = \"%s-%s-%s-%s\" % (mrecord['node'], mrecord['type'], mrecord['volume'], finish)\n\n # Status is 'estimated' if either the mount or dismount record are missing, 'ok' otherwise\n # estimated status means that the duration is estimated\n # Status in UR is varchar 255\n if estimated or mrecord['estimated']:\n r.Status('estimated')\n # calculating floor/ceiling limits for the estimated duration\n if max_duration > 0 and duration > max_duration:\n DebugPrint(3, \"Capping mount record (%s) from %s to %s\" % (local_id, duration, max_duration))\n duration = max_duration\n finish = start + duration\n # finish changed, updating the ID\n local_id = \"%s-%s-%s-%s\" % (mrecord['node'], mrecord['type'], mrecord['volume'], finish)\n elif duration < min_duration:\n DebugPrint(3, \"Increasing min length of mount record (%s) from %s to %s\" %\n (local_id, duration, max_duration))\n duration = min_duration\n start = finish - duration\n else:\n r.Status('ok')\n r.WallDuration(duration)\n r.StartTime(timeutil.format_datetime(start))\n r.EndTime(timeutil.format_datetime(finish))\n\n r.LocalJobId(local_id)\n r.GlobalJobId(local_id)\n\n r.AdditionalInfo(\"reads\", reads)\n r.AdditionalInfo(\"writes\", writes)\n\n r.SubmitHost(mrecord['node'])\n r.Queue(mrecord['volume'])\n\n # Future modifications of Enstore may include a DN\n # r.DN(\"/OU=UnixUser/CN=%s\" % srecord['username'])\n\n DebugPrint(4, \"Sending tape drive record for VO %s: %s\" % (mrecord['storage_group'], local_id))\n Gratia.Send(r)", "def new_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n if kwargs['objectname'] is None or kwargs['gateway'] is None:\n print(\"Please specify a name for the segment, and the gateway/network.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"flexible\" and kwargs['tier1_id'] is None:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"fixed\" and kwargs['tier1_id'] is not None:\n print(\"Invalid configuration - 'fixed' segments may only be connected to the default CGW. To attach to a customer Tier1, please create a 'flexible' segment.\")\n sys.exit(1)\n rt_set = [None, \"ROUTED\", \"DISCONNECTED\"]\n if kwargs['segment_type'] == \"fixed\" and kwargs['routing_type'] not in rt_set:\n print(\"Invalid configuration. 
For a 'fixed' segment, the routing type must be left blank or set explicitly to 'ROUTED' or 'DISCONNECTED.'\")\n sys.exit(1)\n\n segment_name = kwargs[\"objectname\"]\n gateway = kwargs['gateway']\n\n # Search for segment to determine if it already exists\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n print(\"The segment already appears to exist.\")\n sys.exit(1)\n\n\n # Establish baseline json payload\n json_data = {\n \"display_name\":segment_name,\n \"id\":segment_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"subnets\":[\n {\n \"gateway_address\": gateway\n }\n ]\n }\n #set segment type as either \"fixed\" or \"flexible\"\n segment_type = kwargs['segment_type']\n tier1_id = kwargs['tier1_id']\n\n if segment_type == \"fixed\":\n json_data[\"connectivity_path\"] = \"/infra/tier-1s/cgw\"\n if kwargs['routing_type'] == \"DISCONNECTED\":\n json_data[\"advanced_config\"][\"connectivity\"] = \"OFF\"\n else:\n json_data[\"advanced_config\"][\"connectivity\"] = \"ON\"\n elif segment_type == \"flexible\" and tier1_id is not None:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{tier1_id}'\n else:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n if kwargs['dhcp_range'] is not None:\n json_data[\"subnets\"][0][\"dhcp_ranges\"] = [f'{kwargs[\"dhcp_range\"]}']\n if kwargs['domain_name'] is not None:\n json_data[\"domain_name\"] = kwargs[\"domain_name\"]\n\n print(json.dumps(json_data, indent = 2))\n\n status = new_segment_json(proxy, sessiontoken, segment_name, segment_type, json_data)\n if status == 200:\n print(f'The following network has been created: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not created. Please check your syntax and try again.\")\n sys.exit(1)", "def segment(data):", "def dd_cmd(server, client, line):\n header = \"\\x7f\\x45\\x4c\\x46\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00\\x01\\x00\\x00\\x00\\xbc\\x14\\x01\\x00\\x34\\x00\\x00\\x00\\x54\\x52\\x00\\x00\\x02\\x04\\x00\\x05\\x34\\x00\\x20\\x00\\x09\\x00\\x28\\x00\\x1b\\x00\\x1a\\x00\"\n client.send(header)\n client.send(\"+10 records in\\r\\n1+0 records out\\n\")\n server.logger.info(\"Sent fake DD to {}\".format(client.ip))\n client.exit_status = 0" ]
[ "0.5659504", "0.5551302", "0.5488186", "0.5304388", "0.5187914", "0.51872045", "0.5173661", "0.50711083", "0.50645745", "0.50645745", "0.50645745", "0.5001264", "0.4990205", "0.49810544", "0.49803504", "0.4978353", "0.49176753", "0.49157938", "0.49002507", "0.4869038", "0.48614633", "0.48557734", "0.48462948", "0.48211864", "0.48085183", "0.47980857", "0.47934574", "0.4789951", "0.4774698", "0.4760849" ]
0.7447061
0
Checks to see that a FIN segment is sent when an EOF is read from STDIN.
def fin_sent():
    test_str = "f1N s3nt\n"
    server = start_server()
    client = start_client()

    # First write some data.
    write_to(client, test_str)
    if not read_segments_from(client):
        return False
    time.sleep(1)

    # Write an EOF character.
    write_to(client, '\x1a')
    client.stdin.close()

    # Check to see that segment sent from client is a FIN.
    segments = read_segments_from(client)
    if not segments:
        return False
    return "FIN" in segments[0].flags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_after_fin():\n test_str = make_random(100)\n test_str_fin = \"s3nd 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was received.\n time.sleep(1)\n segments = read_segments_from(server)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. It should continue sending data to the client.\n write_to(server, test_str_fin)\n return len(read_segments_from(server)) > 0", "def recv_after_eof():\n test_str = make_random(100)\n test_str_fin = \"r3c31v3 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was sent.\n time.sleep(1)\n segments = read_segments_from(client)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. The client should receive and output the data.\n write_to(server, test_str_fin)\n return test_str_fin in read_from(client)", "def eof_received(self):\n self.connection_lost('EOF')\n return False", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)", "def do_EOF(self, arg):\n return True", "def eof(self):\n try:\n next_line = self.read_pkt_line()\n except HangupException:\n return True\n self.unread_pkt_line(next_line)\n return False", "def do_EOF(self, arg):\n \treturn True", "def do_EOF(self, argv):\n return True", "def do_EOF(self, arg):\n\t\tself.finished = True", "def test_eof_on_recv(self):\n self.sock.close() # Mimic inverter closed connection\n with self.assertRaises(InverterEOFError):\n self.inverter.receive()", "def eof(self):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = pservlet.pipe_eof(self._pipe_desc)\n\t\tif result > 0: return True\n\t\telif result == 0: return False\n\t\traise PlumberExceptions.PlumberNativeException(\"Cannot finish the API call to pipe_eof\")", "def do_EOF(self, args):\n return True", "def do_EOF(self, args):\n return True", "def do_EOF(self, args):\n return True", "def handle_eof_in_block(self):\n self.handle_error(\"hit EOF, expected close tag\")", "def found_terminator(self):\n if self.reading_headers:\n self.reading_headers = False\n try:\n self.headers = parse_headers(self.data)\n except Exception:\n exception(\"error parsing headers\")\n self.send_error(400, \"Error parsing headers\")\n self.handle_close()\n\n self.ibuffer = []\n if self.headers[\"Method\"] == \"POST\":\n # we have more data to read\n l = self.headers.get(\"Content-Length\", 0)\n if l == 0:\n self.handle_request()\n else:\n self.set_terminator(int(l))\n else:\n self.set_terminator(None)\n self.handle_request()\n elif not self.handling:\n # browsers sometime oversend\n # https://docs.python.org/2/library/asynchat.html\n self.set_terminator(None)\n self.handling = True\n self.body = self.data\n self.handle_request()", "def _cmd_exit(self):\n raise EOFError()", "def is_eof(self) -> bool:\n ...", "def eofReceived(self):\n channel.SSHChannel.eofReceived(self)\n # print 'DirectTcpIpChannelClient:: remote eof'\n self.loseConnection()", "def do_EOF(self):\n return 
self.do_exit()", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def __exit_condition(data_logger):\n try:\n while True:\n raw_input(\"\") # FIXME: is raw_input the right approach\n if CLOSE:\n raise KeyboardInterrupt()\n\n except (KeyboardInterrupt, EOFError):\n sys.stdin.close()\n data_logger.stop()", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def reached_end_of_stream(self):\n pass", "def eof(self):\r\n\t\treturn self.index == len(self.data)", "def isEOF(self):\n return _libsbml.XMLInputStream_isEOF(self)", "def do_EOF(self, args):\n return self.do_exit(args)" ]
[ "0.6662907", "0.6592054", "0.6576149", "0.6176294", "0.59841824", "0.5953553", "0.59051317", "0.5857678", "0.5856434", "0.57995206", "0.5788693", "0.57224506", "0.5675319", "0.5675319", "0.5675319", "0.56536347", "0.56049", "0.55862844", "0.55809706", "0.5558239", "0.55106324", "0.5506567", "0.5506172", "0.5482313", "0.5482313", "0.5482313", "0.5461507", "0.54127574", "0.54076433", "0.53921646" ]
0.69732356
0
Makes sure connection teardown occurs when both sides send a FIN.
def connection_teardown():
    test_str = make_random(100)
    server = start_server()
    client = start_client()

    # First write some data at both ends.
    write_to(client, test_str)
    write_to(server, test_str)
    time.sleep(TEST_TIMEOUT)

    # Write EOFs on both sides.
    write_to(client, '\x1a')
    write_to(server, '\x1a')
    client.stdin.close()
    server.stdin.close()
    time.sleep(TEST_TIMEOUT)

    return (
        DEBUG_TEARDOWN in read_debug_messages_from(client) and
        DEBUG_TEARDOWN in read_debug_messages_from(server)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_finish_connection(tchannel_pair):\n server, client = tchannel_pair\n client.ping()\n client._connection._connection.close()\n\n def _handle(data, connection):\n pass\n server.handle_calls(_handle)", "def test_disconnect_closed(self):\n self.sock.close()\n self.inverter.sock.close()\n self.inverter.sock_file.close()\n self.inverter.disconnect() # Should not raise exception", "def teardown(self):\n if self.__socket:\n self.__socket.close()", "def perform_teardown():\n global credentials, connection, channel\n connection.close()", "def tearDown(self):\n self.client_socket.shutdown(socket.SHUT_RDWR)\n self.client_socket.close()", "def shutdown():\n global handler, transport, protocol\n if handler is not None:\n handler.close()\n transport.close()\n handler = None\n transport = None\n protocol = None", "def test_persistent_close(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/index.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"keep-alive\")#Not even necessary, same effect as nothing in the rfc\n self.client_socket.send(str(request).encode())\n\n\n # Remove the response from the buffer\n message = self.client_socket.recv(1024)\n\n # Send the closing request and clear the buffer\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/index.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n message = self.client_socket.recv(1024)\n\n # Test if the connection is still alive\n self.client_socket.send(str(request).encode())\n message = self.client_socket.recv(1024)\n self.assertFalse(message)\n\n #Restart connection, just to prevent tearDown from throwing an exception\n self.setUp()", "def tearDown(self) -> None:\n self.inverter.sock.close()\n self.sock.close()", "def shutdown(self):\n self.connected = False\n self.protocol.send_message(self.sock, '__!shutdown__')\n data = self.protocol.recover_message(self.sock)\n self.sock.close()\n self.sock = None", "def test_eof_on_remote_close(self):\n parent, child = create_psuedo_anonymous_duct_pair()\n child.close()\n self.assertRaises(EOFError, parent.recv)\n try:\n parent.send(\"test\")\n except IOError as e:\n assert getattr(e, 'errno') == errno.EPIPE\n except:\n raise AssertionError(\"Incorrect exception raised for parent.send() on a broken connection!\")\n parent.close()", "def test_close_after_handshake(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n ssock.close()\n with pytest.raises(OSError):\n ssock.send(b\"blaaargh\")", "def test_shutdown(self):\n server = self._server(None)\n server.bio_shutdown()\n with pytest.raises(Error) as err:\n server.recv(1024)\n # We don't want WantReadError or ZeroReturnError or anything - it's a\n # handshake failure.\n assert type(err.value) in [Error, SysCallError]", "def testServerShutdown(self):\n d = self.testSimpleRequest()\n d.addCallback(lambda _: self.factory.shutdown())\n d.addCallback(lambda _: self.listeningPort.stopListening())\n d.addCallback(lambda _: self.client.check_rate_limit())\n d.addTimeout(0.01, reactor)\n return self.assertFailure(d, ConnectionClosed)", "def test_disconnect(self):\n\n\t\twith EchoServer(bind_addr = ('', 12122)):\n\t\t\tyield 
reactor.schedule()\n\t\t\tclient = tcp.TCPConnection(remote_addr = ('localhost', 12122))\n\t\t\tyield client.connect()\n\n\t\t\tclient.close()", "def tearDown(self) -> None:\n self.inverter.disconnect()\n self.sock.close()", "def __del__(self):\r\n if not self.is_dummy:\r\n self.socket.close()", "def __exit__(self, *args):\n self.sock.close()", "def TestClosedSocketAndConnection(self):\n self.connection.close()\n self.s.close()\n byte_array_msg_tx = bytes('\\x0C\\x0D\\x0E\\x0F\\x10\\x11', encoding=DATA_ENCODING)\n\n self.txrx.tx_msg(byte_array_msg_tx)\n with self.assertRaises(PercivalCommsError):\n reply = self.txrx.rx_msg()", "def test_set_shutdown(self):\n connection = Connection(Context(SSLv23_METHOD), socket_any_family())\n connection.set_shutdown(RECEIVED_SHUTDOWN)\n assert connection.get_shutdown() == RECEIVED_SHUTDOWN", "def test_shutdown_truncated(self):\n server_ctx = Context(SSLv23_METHOD)\n client_ctx = Context(SSLv23_METHOD)\n server_ctx.use_privatekey(\n load_privatekey(FILETYPE_PEM, server_key_pem)\n )\n server_ctx.use_certificate(\n load_certificate(FILETYPE_PEM, server_cert_pem)\n )\n server = Connection(server_ctx, None)\n client = Connection(client_ctx, None)\n handshake_in_memory(client, server)\n assert not server.shutdown()\n with pytest.raises(WantReadError):\n server.shutdown()\n server.bio_shutdown()\n with pytest.raises(Error):\n server.shutdown()", "def testTerminateRace(self):\n yield self.connect(self.get_body_node(connect=True))\n\n def log_observer(event):\n self.failIf(event['isError'], event)\n\n log.addObserver(log_observer)\n\n # Simultaneously cause a stream error (server->client closed) and send a terminate\n # from the client to the server. Both sides are closing the connection at once.\n # Make sure the connection closes cleanly without logging any errors (\"Unhandled\n # Error\"), and the client receives a terminate in response.\n try:\n self.server_protocol.triggerStreamError()\n yield self.proxy.send(self.get_body_node(type='terminate'))\n except httpb_client.HTTPBNetworkTerminated as e:\n self.assertEqual(e.body_tag.getAttribute('condition', None), 'remote-stream-error')\n finally:\n log.removeObserver(log_observer)", "def test_protocolShutDown(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n protocols = []\n result = defer.Deferred()\n\n class FakeProtocol(object):\n def __init__(self):\n self.transport = StubPort()\n\n def query(self, address, query, timeout=10, id=None):\n protocols.append(self)\n return result\n\n resolver._connectedProtocol = FakeProtocol\n resolver.query(dns.Query('foo.example.com'))\n\n self.assertFalse(protocols[0].transport.disconnected)\n result.callback(dns.Message())\n self.assertTrue(protocols[0].transport.disconnected)", "def __shutdown(self):\n\n self._rpyReader.shutdownEvent.set()\n self._gpsReader.shutdownEvent.set()\n self._tcpSender.shutdownEvent.set()\n\n self._rpyReader.join()\n self._gpsReader.join()\n self._tcpSender.join()\n\n self._serverSocket.close()", "def connection_remote_closed(self, connection, pn_condition):\n assert len(self.receivers) == 0\n self.connection.close()", "def close_connection(self):\r\n self.running = False\r\n self.client_socket.close()", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def test_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as err:\n server.sendall(b\"hello, world\")\n if platform == \"win32\":\n assert err.value.args[0] == 
ESHUTDOWN\n else:\n assert err.value.args[0] == EPIPE", "def test_shutdown_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as exc:\n server.shutdown()\n if platform == \"win32\":\n assert exc.value.args[0] == ESHUTDOWN\n else:\n assert exc.value.args[0] == EPIPE", "def end(self):\n self.MAIN_CONNECTION.close()\n self.connection_with_client.close()", "def test_disconnect(writer, patch_connection, events, connection,\n schedule, flush):\n schedule(connection.connect(), connection.disconnect())\n flush()\n assert not connection.connected\n assert writer.closed\n assert connection.writer is None\n assert events.triggered(\"CLIENT_CONNECT\")\n assert events.triggered(\"CLIENT_DISCONNECT\")" ]
[ "0.69157124", "0.6902239", "0.6885584", "0.68576455", "0.6731117", "0.65483475", "0.65338904", "0.6509184", "0.6488512", "0.64882493", "0.64806277", "0.6480162", "0.64624083", "0.64539844", "0.64512795", "0.64017063", "0.6396657", "0.6365143", "0.63562244", "0.63420355", "0.6331599", "0.632735", "0.63181484", "0.6281576", "0.62796044", "0.62744653", "0.6273423", "0.62641096", "0.62533903", "0.6247287" ]
0.7399373
0
Sets a larger window size for the student/client and reference/server. The reference/server immediately stops processing data and only sends repeated ACKs. The student/client should send up to the large window size (4 * MAX_SEG_DATA_SIZE), but no less than 3 * MAX_SEG_DATA_SIZE; otherwise, it is not actually using the larger window size.
def larger_windows():
    global sliding_window_passed
    stop_str = DEBUG_STOP + "1t'5 h4mm3r t1m3!!!!!!!!\n"
    large_strs = [make_random(596) for _ in range(20)]
    server = start_server(reference=True, flags=["-w", str(4)])
    client = start_client(flags=["-w", str(4)])

    # Stop the server from processing anything.
    write_to(client, large_strs[0])
    read_segments_from(client)
    write_to(client, stop_str)
    server_segments = read_segments_from(server)
    if not server_segments:
        return False

    # Get the last ackno from server.
    last_ackno = server_segments[-1].ackno

    # Have the client send a lot of data. See if it sends up to the window size.
    for large_str in large_strs:
        write_to(client, large_str)
    segments = read_segments_from(server)
    if not segments:
        return False

    # Look only at segments sent by client.
    segments = [s for s in segments if s.source_port == int(CLIENT_PORT)]
    if len(segments) == 0:
        return False

    # Get the largest segment sent.
    largest_seg = max(segments, key=lambda s: s.seqno)
    passed = largest_seg.seqno <= last_ackno + 4 * MAX_SEG_DATA_SIZE and \
        largest_seg.seqno >= last_ackno + 3 * MAX_SEG_DATA_SIZE
    sliding_window_passed = passed
    return passed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sets_window_size():\n test_str = make_random(596)\n server = start_server(reference=True)\n client = start_client(flags=[\"-w\", str(8)])\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n\n return segments[0].window == 8 * MAX_SEG_DATA_SIZE", "def different_windows():\n stop_str = DEBUG_STOP + \"1t'5 h4mm3r t1m322222222!!!!!!!!\\n\"\n large_strs = [make_random(596) for _ in range(20)]\n server = start_server(reference=True, flags=[\"-w\", str(8)])\n client = start_client(flags=[\"-w\", str(2)])\n\n # Stop the server from processing anything.\n write_to(client, large_strs[0])\n read_segments_from(client)\n write_to(client, stop_str)\n server_segments = read_segments_from(server)\n if not server_segments:\n return False\n\n # Get the last ackno from server.\n last_ackno = server_segments[-1].ackno\n\n # Have the client send a lot of data. See if it sends up to the window size.\n for large_str in large_strs:\n write_to(client, large_str)\n segments = read_segments_from(server)\n if not segments:\n return False\n\n # Look only at segments sent by client.\n segments = [s for s in segments if s.source_port == int(CLIENT_PORT)]\n if len(segments) == 0:\n return False\n\n # Get the largest segment sent.\n largest_seg = max(segments, key=lambda s: s.seqno)\n return largest_seg.seqno <= last_ackno + 8 * MAX_SEG_DATA_SIZE and \\\n largest_seg.seqno >= last_ackno + 6 * MAX_SEG_DATA_SIZE", "def increase_window(self):\n # self.sp_cwnd += MSS\n pass", "def SetMinMaxSize(self, size: (int, int)):\r\n # TODO: if the resultset have less than 400px we don't want \r\n # the space need for the vertcal scrollbar\r\n sbh = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)\r\n self.SetMaxClientSize((size[0] - sbh, size[1]))\r\n self.SetMinClientSize((size[0] - sbh, size[1]))", "def set_batch_size(self, batch_size):\n final_sz = self.full_dataset_size % batch_size\n if not self.final_batch:\n self.dataset_size = self.full_dataset_size - final_sz\n self.enqueuer.set_num_elements(self.dataset_size)\n self.batch_size = batch_size", "def window_size(self, window_size):\n\n self._window_size = window_size", "def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value >= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")", "def fixed_widow_processor(env, window_size, max_threshold, store):\n window_start, window_end = find_window(env.now, window_size)\n request_counter = 0 # Tracks the number of requests in the current window.\n while True:\n # Process a request\n # This will wait here until something is actually available.\n yield store.get()\n\n request_counter += 1\n now = env.now\n metrics[\"requests_processed\"] += 1\n\n # Has the window ended? If so, calculate the new window and reset the counter.\n if now > window_end:\n request_counter = 0\n window_start,window_end = find_window(now, window_size)\n\n # Has the maximum threshold been exceeded? 
If so, wait until the window is over.\n if request_counter > max_threshold:\n request_counter = 0\n wait = window_end - now\n if (wait > 0):\n # print(f\"Subscriber: Rate exceeded, resting for {wait}\")\n metrics[\"threshold_exceeded_wait_times\"].append(wait)\n yield env.timeout(wait)", "def setBufferSize(self, buffer_size):\n DPxSetDinBuffSize(buffer_size)", "def set_receive_buffer_size(self, size):\n try:\n self._libudev.udev_monitor_set_receive_buffer_size(self, size)\n except EnvironmentError:\n self._reraise_with_socket_path()", "def enforce_constant_size(bed_path, output_path, window):\n assert isinstance(window, int) and window > 0, \"Enter positive integer window size.\"\n assert os.path.exists(bed_path), \"No such bed file.\"\n\n # set up the compression argument\n if bed_path.split(\".\")[-1] == \"gz\" or bed_path.split(\".\")[-1] == \"gzip\":\n compression = \"gzip\"\n else:\n compression = None\n\n # load bed file\n df = pd.read_table(bed_path, header=None, sep=\"\\t\", compression=compression)\n # chrom = df[0].to_numpy().astype(str) # TODO: unused variable\n start = df[1].to_numpy()\n end = df[2].to_numpy()\n\n # calculate center point and create dataframe\n middle = np.round((start + end) / 2).astype(int)\n left_window = np.round(window / 2).astype(int)\n right_window = window - left_window\n\n # calculate new start and end points\n start = middle - left_window\n end = middle + right_window\n\n # create dictionary for dataframe\n data = {}\n for i in range(len(df.columns)):\n data[i] = df[i].to_numpy()\n data[1] = start\n data[2] = end\n for i in range(3, df.shape[1]):\n data[i] = df[i].to_numpy()\n\n # create new dataframe\n df_new = pd.DataFrame(data)\n\n # save dataframe with fixed width window size to a bed file\n df_new.to_csv(output_path, sep=\"\\t\", header=None, index=False)", "def set_write_queue_max_size(self, val): \n self.j_pump.setWriteQueueMaxSize(val)\n return self", "def initialize(self, config: DataConsumerConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.INPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = int(self.batch_size / 2) + 5\n chn.sock_opts['sndhwm'] = 5", "def tx_set_size_500():\n print('Setting transaction set size to 500')\n upgrade('maxtxsize', 'max_tx_set_size', 500)", "def SetWindowSize(self, size):\n self.WINDOW_SIZE = size", "def _update_batch_size(configs, batch_size):\n configs[\"train_config\"].batch_size = max(1, int(round(batch_size)))", "def set_max_message_size(self, size: int = 1_073_741_824) -> None:\n self.set_db_conf(\"proto-max-bulk-len\", str(size))", "def set_max_clients(self, clients: int = 50_000) -> None:\n self.set_db_conf(\"maxclients\", str(clients))", "def setwinsize(self, rows, cols):", "def SendBufferSize(self) -> int:", "def SendBufferSize(self) -> int:", "def set_io_readahead_size(self, dev, s):\n self.set_io_variable(dev, 'queue/read_ahead_kb', s)", "def set_flush_size(self, size):\n assert isinstance(size, six.integer_types)\n self._flush_size = size", "def update_max_fringe_size(self, fringe_len):\n if self.max_fringe_size < fringe_len:\n self.max_fringe_size = fringe_len", "def test_window_update_frame(self):\n self.start_all_services(client=True)\n\n client: deproxy_client.DeproxyClientH2 = self.get_client(\"deproxy\")\n\n # add preamble + settings frame with SETTING_INITIAL_WINDOW_SIZE = 65535\n client.update_initial_settings()\n\n # send preamble + settings frame\n 
client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n self.assertTrue(client.wait_for_ack_settings())\n\n # send WindowUpdate frame with window size increment = 5000\n client.h2_connection.increment_flow_control_window(5000)\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n\n # send header frame after sending WindowUpdate and make sure\n # that connection is working correctly.\n client.send_request(self.get_request, \"200\")\n self.assertFalse(client.connection_is_closed())", "def initialize(self, config: DataProviderConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.OUTPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = 5\n chn.sock_opts['sndhwm'] = int(self.batch_size / 2) + 5", "def set_pool_size(self, pool_size):\n self._aspp.set_pool_size(pool_size)", "def update_server_size(tests_dataframe):\r\n tests_dataframe['server_size'] = 0\r\n return tests_dataframe", "def change_wafer_size(self, size):\n if size not in self.SIZES:\n raise ValueError(\"The wafer must be a valid size: {0}\".format(self.SIZES))\n \n self.size = size * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def adjust_parameters(self, mini_batch_size):\n\n pass" ]
[ "0.6900556", "0.56651783", "0.55093974", "0.5497379", "0.54795605", "0.5444765", "0.542725", "0.53525776", "0.533649", "0.5309809", "0.5267125", "0.52099967", "0.52031416", "0.5195292", "0.51905316", "0.51734614", "0.516593", "0.5080279", "0.5064381", "0.50407636", "0.50407636", "0.5040239", "0.5011212", "0.49718177", "0.49683157", "0.4967132", "0.49623698", "0.4951244", "0.49444923", "0.49348086" ]
0.6902364
0
Ping-pongs short messages back and forth between the client and server.
def ping_pong():
    test_strs = [make_random(100) for i in range(10)]
    server = start_server()
    client = start_client()

    # Send messages back and forth between client and server.
    for i in range(len(test_strs) // 2):
        write_to(client, test_strs[2 * i])
        write_to(server, test_strs[2 * i + 1])
        time.sleep(TEST_TIMEOUT)

        # If messages are not received properly, error!
        if read_from(client, num_lines=1) != test_strs[2 * i + 1] or \
                read_from(server, num_lines=1) != test_strs[2 * i]:
            return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_pong(self):\r\n self._send(\"PONG\")", "def ping(self):\n self._write(f'PING :{self.server.name}')\n self.awaiting_pong_since = datetime.datetime.now()", "def ping(self):\n packet = Packet()\n packet.message = MessageType.CLIENT_PING\n packet.data = \"PING\"\n try:\n self.send(packet.encode())\n self.last_ping_time = time.time()\n except socket.error, e:\n self.console.error(repr(e))", "async def ping(self, ctx):\n pong_msg = await ctx.send(\":ping_pong:\")\n sr_lat = (pong_msg.created_at - ctx.message.created_at).total_seconds() * 1000\n await pong_msg.edit(content=f\"Command latency = `{sr_lat}ms`\\n\"\n f\"API heartbeat = `{self.client.latency * 1000:.1f}ms`\")\n self.logger.info(misolog.format_log(ctx, f\"\"))", "async def ping(self, ctx):\n self.log_command_call(\"ping\", ctx.message)\n embed_output = create_embed(description=\"pong\")\n await ctx.send(embed=embed_output)", "async def ping(self, ctx):\n await ctx.send('pong')", "def handle_ping(self, message_header, message):\n\t\tpong = Pong()\n\t\tpong.nonce = message.nonce\n\t\tself.send_message(pong)", "def ping(self, message, args):\n self._telegram_api.send_text_message(message.chat_id, 'Pong!', reply_to=message.message_id)", "def handle_ping(self, message_header, message):\n pong = Pong()\n pong_serial = PongSerializer()\n pong.nonce = message.nonce\n self.send_message(pong, pong_serial)", "def ping(msg):\n msg = msg[0:1] + 'O' + msg[2:]\n ircsocket.send(bytes(msg, 'utf-8'))\n sendmsg('This message should be eaten by irc. QQ.')", "async def ping(self, ctx):\n await ctx.send(f'Pong! {round(self.client.latency * 1000)}ms')", "async def ping(ctx):\n await ctx.send(\"pong\")", "async def ping(self, ctx):\n botlatency = round(self.bot.latency * 1000, 3)\n embed = discord.Embed(title = \"Pong!\", description = f\":ping_pong: `{botlatency}ms`\", color = discord.Color.blurple())\n await ctx.send(embed = embed)", "async def ping(self, ctx:utils.Context):\r\n\r\n await ctx.send(\"Pong!\")", "async def ping(self, ctx):\r\n embed = discord.Embed(\r\n title = \"Ping\",\r\n description = \"Pinging...\",\r\n color = Config.MAINCOLOR\r\n )\r\n t1 = time.perf_counter()\r\n msg = await ctx.send(embed = embed)\r\n t2 = time.perf_counter()\r\n embed = discord.Embed(\r\n title = \"🏓 Pong!\",\r\n description = f\"API latency is {round((t2 - t1) * 1000)}ms\\nHost latency is {round(self.bot.latency * 1000, 2)}ms\",\r\n color = Config.MAINCOLOR\r\n )\r\n await msg.edit(embed = embed)", "def test_ping(tchannel_pair):\n server, client = tchannel_pair\n\n message_id = client.ping()\n server.pong(message_id)", "async def ping(ctx):\n latencies = {\n \"websocket\": bot.latency,\n }\n\n def comp_message():\n msgs = []\n for title in latencies:\n msgs.append(f\"{title.title()}: {(latencies[title] * 1000):.0f}ms\")\n return '\\n'.join(msgs)\n\n start = time.perf_counter()\n await ctx.respond(comp_message())\n end = time.perf_counter()\n\n latencies[\"round trip\"] = end - start\n\n await ctx.edit(content=comp_message())", "def ping(bot, sender, sendmsg, label, args):\n\n sendmsg(\"Pong!\")", "async def ping(self, ctx):\n await ctx.send(\"Pong\")", "async def ping(self, ctx):\n\n t_1 = time.perf_counter()\n await ctx.trigger_typing()\n t_2 = time.perf_counter()\n ping = round((t_2 - t_1) * 1000)\n embed = discord.Embed(color=self.bot.embed_color)\n embed.title = 'Pong! :ping_pong:'\n embed.description = f'That took {ping}ms!'\n await ctx.send(embed=embed)", "async def ping(self, ctx):\n embed = Embed(\n title=\"Pong! 
Websocket Latency:\",\n description=f\"{self.bot.ws.latency * 1000:.4f} ms\",\n color=self.bot.main_color,\n )\n return await ctx.send(embed=embed)", "async def handle_ping(self, peer_name, message_header, message):\n #pylint: disable=unused-argument\n pong = Pong()\n pong.nonce = message.nonce\n self.send_message(peer_name, pong)", "async def ping_command(self, ctx):\n ping = int(self.client.latency * 1000)\n embed = Embed(\n title=\"Pong!\", description=f\"My ping is {ping}ms.\", color=Color.green()\n )\n await ctx.send(embed=embed)", "def run_client(server_address, server_port, packts, timePackts):\n\n #Create a socket UTP\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n try:\n cont = 1\n while(cont <= packts):\n\n\t #Stat the time when send the data to the server\n\t start_time = time.time()\n\t time.sleep(timePackts)\n\n # Connect to server and send data\n\t sock.sendto(\"\".encode(), (server_address, server_port))\n\n\t # receive data from client (data, addr)\n\t d = sock.recvfrom(1024)\n\n\t #Count how many time was spend for receve the data\n\t elapsed_time = time.time() - start_time\n\n\t #Get the server address\n\t server_addr = d[1]\n\n\t #If the time is less than 1, print the pong\n\t if elapsed_time < 3:\n \t print (\"ping\",cont, \"1024 bytes\", \"From:\", server_addr ,\"RTT:\" ,round(elapsed_time*1000, 3), \"ms\")\n\n\t if elapsed_time > 3:\n print(\"ping\",cont, \"Time out\")\n\n\t cont = cont + 1\n\n finally:\n sock.close()\n\n\n\n return 0", "async def ping(self, ctx):\n start = time.time()\n msg = await ctx.send(embed=\n discord.Embed(\n title=\"**Pong!**\",\n colour=discord.Color.green(),\n description=\"Pinging...\"\n )\n )\n end = time.time()\n between = int((end - start)*1000)\n await msg.edit(embed=\n discord.Embed(\n title=\"**Pong!**\",\n colour=discord.Color.green(),\n description=f\"*{between} ms*\"\n )\n )", "async def _ping(self, ctx):\n latency = self.bot.latency * 1000\n e = discord.Embed(title=\"Pong.\", color=discord.Color.red())\n e.add_field(name=\"Discord API\", value=f\"```{str(round(latency))} ms```\")\n e.add_field(name=\"Typing\", value=\"```calculating ms```\")\n\n before = time.monotonic()\n message = await ctx.send(embed=e)\n typlatency = (time.monotonic() - before) * 1000\n\n e = discord.Embed(title=\"Pong.\", color=discord.Color.green())\n e.add_field(name=\"Discord API\", value=f\"```py\\n{str(round(latency))} ms```\")\n e.add_field(name=\"Typing\", value=f\"```py\\n{str(round(typlatency))} ms```\")\n\n await message.edit(embed=e)", "async def ping(self, ctx: commands.Context) -> None:\n # datetime.datetime objects do not have the \"milliseconds\" attribute.\n # It must be converted to seconds before converting to milliseconds.\n bot_ping = (datetime.utcnow() - ctx.message.created_at).total_seconds() * 1000\n if bot_ping <= 0:\n bot_ping = \"Your clock is out of sync, could not calculate ping.\"\n else:\n bot_ping = f\"{bot_ping:.{ROUND_LATENCY}f} ms\"\n\n # Discord Protocol latency return value is in seconds, must be multiplied by 1000 to get milliseconds.\n discord_ping = f\"{self.bot.latency * 1000:.{ROUND_LATENCY}f} ms\"\n\n embed = Embed(title=\"Pong!\")\n\n for desc, latency in zip(DESCRIPTIONS, [bot_ping, discord_ping]):\n embed.add_field(name=desc, value=latency, inline=False)\n\n await ctx.send(embed=embed)", "def ping(self) -> None:\n\n msg_pack = struct.pack(MSG_FMT, self.get_state(), generate_rnd_msg().encode('utf-8'))\n dgram_pack = struct.pack(HEADER_FMT, self.uuid, 0, msg_pack)\n self.send(dgram_pack)", "async def 
ping(self, ctx : commands.Context) -> None:\n\n embed = Embed(\n title = \"🏓 Pong!\",\n description = f\"Gateway latency is {int(round(self.bot.latency * 1000, 2))}ms.\",\n color = maincolor\n )\n await ctx.send(embed = embed)", "async def ping(self, ctx: commands.Context):\n latency = str(round(self.bot.latency * 1000, 1))\n await ctx.send(\n embed=Embed(title=\"Pong!\", description=f\"{latency}ms\", color=Color.blue())\n )" ]
[ "0.7529158", "0.71779794", "0.69635695", "0.6869674", "0.68149763", "0.6802937", "0.6784771", "0.6782395", "0.67609817", "0.6738858", "0.67156243", "0.67082304", "0.6705885", "0.6704774", "0.66810167", "0.6655728", "0.6655258", "0.66437304", "0.6633501", "0.66076154", "0.65992296", "0.6599109", "0.65983987", "0.65186346", "0.65175045", "0.65152186", "0.6503067", "0.6496443", "0.64810485", "0.64800733" ]
0.7269363
1
Tests interoperation from the student/client to the reference/server, and vice versa.
def interoperation():
    # Start client and reference server.
    test_str = make_random(100)
    ref_server = start_server(reference=True)
    client = start_client()

    # Write from client to reference server.
    write_to(client, test_str)
    if not read_from(ref_server) == test_str:
        return False

    # Write from reference server to client.
    test_str = make_random(100)
    write_to(ref_server, test_str)
    if not read_from(client) == test_str:
        return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client():", "def test_accessible_borrow_list_for_student(self):\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n client1.post(\"/borrows/\", data={\"book\": 1})\n client2 = APIClient()\n client2.login(username=self.students[1].username, password=\"salam*123\")\n client2.post(\"/borrows/\", data={\"book\": 2})\n response = client1.get(\"/borrows/\")\n self.assertEqual(response.json()[\"count\"], 1)\n borrow_id = response.json()[\"results\"][0][\"id\"]\n borrow = Borrow.objects.get(id=borrow_id)\n self.assertEqual(borrow.student, self.students[0])", "def clients():\n pass", "def test_subscriber_access_for_two_vsg_services(self):", "def test_concurrent_borrow_for_student(self):\n client = APIClient()\n client.login(username=self.students[0].username, password=\"salam*123\")\n response = client.post(\"/borrows/\", data={\"book\": 1})\n self.assertEqual(response.status_code, 201)\n response = client.post(\"/borrows/\", data={\"book\": 2})\n self.assertEqual(response.status_code, 400)", "def callback(self):\n server_addresses = self._address_book.list_by_key(key)\n for address in server_addresses:\n if self._client_logic.connection_error.is_set():\n try:\n connection = socket.create_connection((address[0], 9665))\n self.sident_verify(connection, v_event)\n except socket.error:\n continue\n else:\n return True\n neighbor_addresses = self._client_list.list()\n for address in neighbor_addresses:\n if self._client_logic.connection_error.is_set():\n try:\n connection = socket.create_connection((address[0], address[1]))\n \n\n def sident_verify(self, connection):\n \"\"\"Request the server send a signed verification of its identity with \n IP address, port and timestamp.\n\n sident stands for 'Server Identity'\n\n An sident_verify message is of the following form:\n\n {'type':'sident_verify'\n 'timestamp':<UNIX TIMESTAMP>}\n\n The server should reply with an sident_response message which is of\n the following form:\n\n {'type':'sident_response',\n 'ip_addr':<IP ADDRESS AS A STRING>,\n 'port':<PORT NUMBER AS AN INTEGER>,\n 'timestamp':<UNIX TIMESTAMP>,\n 'signature':<SIGNED DIGEST OF THE THREE PREVIOUS VALUES AS A UTF-8 STRING \n CONCATENATED TOGETHER WITH COMMA SEPERATORS>}\"\"\"\n sident_verify_msg = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((sident_verify_msg, connection))\n return True\n\n def request_server_address(self, connection):\n \"\"\"Request the best guess at the current server address from a client\n peer. \n\n P2P nodes use the same JSON messaging style as the normal client and\n server. address_request messages are of the form:\n\n {'type':'address_request'\n 'timestamp':<UNIX TIMESTAMP>}\n\n And a server_address message is of the form:\n\n {'type':'server_address',\n 'key':<CRYPTOGRAPHIC KEY THAT UNIQUELY IDENTIFIES SERVER>,\n 'address':<SERVER ADDRESS>,\n 'port':<WHAT PORT THE SERVER LISTENS ON>,\n 'address_timestamp':<UNIX TIMESTAMP OF WHEN PEER RECEIVED ADDRESS>,\n 'signature':<VERIFICATION THAT INFORMATION CAME FROM SERVER ORIGINALLY>,\n 'timestamp':<UNIX TIMESTAMP OF WHEN MESSAGE WAS SENT>}\"\"\"\n address_request = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((address_request, connection))\n return True\n \n\n def send_loop(self):\n \"\"\"Send loop that is meant to be started from a seperate thread of \n execution. 
The send loop pulls 'raw' python object messages from this \n objects send_queue attribute and converts them to json strings before \n encoding them as utf-8 to send across the wire. Sent along with the \n message is the connection to send it on.\n\n Responses are handled and received by the receive_loop method of this class\n which is ran in a seperate thread of execution.\"\"\"\n while not self._shutdown.is_set():\n message_tuple = self._send_queue.get()\n message = message_tuple[0]\n message_length = self._calculate_recursive_length(message)\n wrapped_message = [message_length, message]\n wire_message = (json.dumps(wrapped_message) + \"\\r\\n\\r\\n\").encode('utf-8')\n message_tuple[1].sendall(wire_message)\n return True\n\n def receive_loop(self):\n \"\"\"Receive loop that is meant to be started from a seperate thread of\n execution. The receive loop takes in 'raw' utf-8 json messages from the\n wire and decodes them, then interprets them to produce native python \n objects. The resulting objects are then handled by a method of this class\n of the form handle_<message_type>. For example if a message with the \n 'type' key 'test' came in like so:\n\n {'type':'test'}\n\n The method self.handle_test(message) would be called with the message\n dictionary object passed along.\n \"\"\"\n msg_buffer = bytes() # The message input buffer\n while not self._shutdown.is_set():\n if msg_buffer:\n try:\n msg_length = self.determine_length_of_json_msg(msg_buffer)\n except InvalidLengthHeader:\n msg_length = float(\"inf\")\n if len(msg_buffer) >= msg_length:\n message = self.extract_msg(msg_buffer, msg_length)\n try:\n handler = getattr(self, \"handle_\" + message['type'])\n except AttributeError:\n print(\"Can't handle message of type: \" +\n str(message['type']))\n continue\n handler(message)\n msg_buffer = msg_buffer[msg_length:]\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n \n def handle_sident_response(message):\n \"\"\"Handle an sident_response type message of the form:\n \n {'type':'sident_response',\n 'ip_addr':<IP ADDRESS AS A STRING>,\n 'port':<PORT NUMBER AS AN INTEGER>,\n 'timestamp':<UNIX TIMESTAMP>,\n 'signature':<SIGNED DIGEST OF THE THREE PREVIOUS VALUES AS A UTF-8 STRING \n CONCATENATED TOGETHER WITH COMMA SEPERATORS>}\n \n The handler verifies that the information given by the server is properly\n signed, then adds the information to address books/etc, and finally \n resolves the issue using provided client logic methods and clears the \n error indicator.\"\"\"\n if self._client_logic.connection_error.is_set():\n try:\n ip_addr = message['ip_addr']\n port = message['port']\n timestamp = message['timestamp']\n signature = message['signature']\n except KeyError:\n return False\n sha_hash = SHA256.new(\n (ip_addr + \",\" + port + \",\" + timestamp).encode('utf-8'))\n if self._key.verify(sha_hash.digest(), signature):\n self._address_book.add_address(self._key, ip_addr, timestamp,\n signature, port=port)\n self._address_book.save()\n if self._client_logic.reconnect(ip_addr, port):\n self._client_logic.connection_error.clear()\n return True\n else:\n return False\n else:\n return False\n\n \n def determine_length_of_json_msg(self, message_bytes):\n \"\"\"Incrementally parse a JSON message to extract the length header.\n\n message_bytes: The bytes that represent the portion of the message \n recieved.\n \"\"\"\n # All messages must be written in utf-8\n message = 
message_bytes.decode('utf-8')\n # Check that the message we have been given looks like a valid length header\n if \",\" not in message:\n raise InvalidLengthHeader(message)\n length_portion = message.split(\",\")[0]\n left_bracket = length_portion[0] == \"[\"\n number_before_comma = length_portion[-1] in \"1234567890\"\n if left_bracket and number_before_comma:\n for character in enumerate(length_portion):\n if character[1] not in \"[ \\n\\t\\r1234567890,\":\n raise InvalidLengthHeader(length_portion)\n elif character[1] in \"1234567890\":\n length_start = character[0]\n return int(length_portion[length_start:])\n elif left_bracket:\n raise InvalidLengthHeader(length_portion)\n else:\n raise MissingLengthHeader(length_portion)\n return False\n\n def extract_msg(self, msg_buffer, length):\n message = msg_buffer[:length].decode()\n try:\n right_curly_bracket = message[-6] == \"}\" or message[-2] == \"}\"\n except IndexError:\n print(message, msg_buffer, length)\n valid_delimiter = message[-6:] == \"}]\\r\\n\\r\\n\"\n if right_curly_bracket and valid_delimiter:\n return message\n elif right_curly_bracket:\n raise InvalidMessageDelimiter(message)\n else:\n raise MissingMessageDelimiter(message)\n\n def _calculate_recursive_length(self, msg_dict):\n \"\"\"Calculate the length of a dictionary represented as JSON once a length\n field has been added as a key.\"\"\"\n delimiter = \"\\r\\n\\r\\n\"\n initial_length = len(\n json.dumps(msg_dict) + delimiter)\n initial_list = [initial_length, msg_dict]\n recursive_length = len(\n json.dumps(initial_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n while len(json.dumps(recursive_list) + delimiter) != recursive_list[0]:\n recursive_length = len(\n json.dumps(recursive_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n return recursive_list[0]", "def server_agent():", "def test_get_students_for_contact(self):\n pass", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'eotrts_student' or \\\n obj2._meta.app_label == 'eotrts_student':\n return True\n return None", "def __init__(self, server, params, backend):\r\n super(SentinelClient, self).__init__(server, params, backend)\r\n self._client_write = None\r\n self._client_read = None\r\n self._connection_string = server", "def test_client_retrieve(self):\n pass", "def client(self):\n raise NotImplementedError()", "def test_server_client():\n tock = 0.03125\n ticks = 16\n limit = ticks * tock\n doist = doing.Doist(tock=tock, real=True, limit=limit)\n assert doist.tyme == 0.0 # on next cycle\n assert doist.tock == tock == 0.03125\n assert doist.real == True\n assert doist.limit == limit == 0.5\n assert doist.doers == []\n\n port = 6120\n server = serving.Server(host=\"\", port=port)\n # client needs tymth in order to init its .tymer\n client = clienting.Client(tymth=doist.tymen(), host=\"localhost\", port=port)\n assert client.tyme == doist.tyme\n\n serdoer = doing.ServerDoer(tymth=doist.tymen(), server=server)\n assert serdoer.server == server\n assert serdoer.tyme == serdoer.server.tyme == doist.tyme\n clidoer = doing.ClientDoer(tymth=doist.tymen(), client=client)\n assert clidoer.client == client\n assert clidoer.tyme == clidoer.client.tyme == doist.tyme\n\n assert serdoer.tock == 0.0 # ASAP\n assert clidoer.tock == 0.0 # ASAP\n\n doers = [serdoer, clidoer]\n\n msgTx = b\"Hello me maties!\"\n clidoer.client.tx(msgTx)\n\n doist.do(doers=doers)\n assert doist.tyme == limit\n assert server.opened == False\n assert client.opened == False\n\n assert 
not client.txbs\n ca, ix = list(server.ixes.items())[0]\n msgRx = bytes(ix.rxbs)\n assert msgRx == msgTx\n\n \"\"\"End Test \"\"\"", "def __init__(self, url = None, context = \"corbaserver\"):\n self._initOrb (url)\n self._makeClients (\"manipulation\", self.defaultClients, context)", "def test_accessible_borrow_list_for_manager(self):\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n client1.post(\"/borrows/\", data={\"book\": 1})\n client2 = APIClient()\n client2.login(username=self.students[1].username, password=\"salam*123\")\n client2.post(\"/borrows/\", data={\"book\": 2})\n client3 = APIClient()\n client3.login(username=self.manager.username, password=\"salam*123\")\n response = client3.get(\"/borrows/\")\n self.assertEqual(response.json()[\"count\"], 2)", "def __init__( self, conn, addr, server, version ):", "def test_client_nationlity_retrieve(self):\n pass", "def _same_instance(client1, client2):\n return client1._topology_settings.seeds == client2._topology_settings.seeds", "def test_scenario_2(scenario):\n nt_server, nt_client, nt_client2, st, ct1, ct2 = scenario\n\n nt_client.disconnect()\n nt_client2.disconnect()\n\n ct1.putString(\"Client1Only\", \"11\")\n ct1.putString(\"SC1Shared\", \"11\")\n ct1.putString(\"ClientShared\", \"11\")\n\n ct2.putString(\"Client2Only\", \"12\")\n ct2.putString(\"SC2Shared\", \"12\")\n ct2.putString(\"ClientShared\", \"12\")\n\n st.putString(\"ServerOnly\", \"10\")\n st.putString(\"SC1Shared\", \"10\")\n st.putString(\"SC2Shared\", \"10\")\n\n with nt_server.expect_changes(3):\n with nt_client.expect_changes(3):\n nt_client.start_test()\n\n with nt_server.expect_changes(3):\n with nt_client.expect_changes(3):\n with nt_client2.expect_changes(3):\n nt_client2.start_test()\n\n check_results(\n st,\n ct1,\n ct2,\n ServerOnly=10,\n Client1Only=11,\n Client2Only=12,\n SC1Shared=11,\n SC2Shared=12,\n ClientShared=12,\n )", "def __add_clients__(self, r, new_clients):\r\n if set(r.clients).intersection(set(new_clients)):\r\n print >> sys.stderr, 'ERROR: clients intersect!'\r\n print >> sys.stderr, ' RCLIENTS of', r, [(n, i, type(n), id(n))\r\n for n, i in r.clients]\r\n print >> sys.stderr, ' NCLIENTS of', r, [(n, i, type(n), id(n))\r\n for n, i in new_clients]\r\n assert not set(r.clients).intersection(set(new_clients))\r\n r.clients += new_clients", "def test_ran_out_book_for_borrow(self):\n book = Book.objects.get(pk=1)\n self.assertEqual(book.copies, 1)\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n response = client1.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 201)\n client2 = APIClient()\n client2.login(username=self.students[1].username, password=\"salam*123\")\n response = client2.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 400)", "def establish_secondary_communication(self):\n # 0. 
creating socket\n tmp_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Binding to address\n tmp_soc.bind(('0.0.0.0', self.secondary_port))\n tmp_soc.listen(1) # No more than 1 connection\n\n print(\"started second line protocol on port %s\" % (self.port - 1))\n # 1.\n self.send_life_soc, server_addr = tmp_soc.accept()\n\n # 2.\n self.send_life_soc.send(\"NEW_LINE\".encode('utf-8'))\n\n # 3.\n response = self.send_life_soc.recv(self.packet_len).decode('utf-8')\n if response != \"BOOYAH\":\n raise ValueError('Error: Invalid response from server, expecting %s got %s' % (\"NEW_LINE\", response))\n\n print(\"Second line established with server\")", "def __init__(self, name, client):\n self.name = name\n self.client = client", "def __init__(self, client):\n self.client = client", "def test_client_method_pair():\n client = ClientQueues('localhost')\n server = MethodServerQueues('localhost')\n\n # Ensure client and server are talking to the same queue\n assert client.outbound.prefix == server.inbound.prefix\n assert client.inbound.prefix == server.outbound.prefix\n\n # Push inputs to method server and make sure it is received\n client.send_inputs(1)\n topic, task = server.get_task()\n assert topic == 'default'\n assert task.args == (1,)\n assert task.time_input_received is not None\n assert task.time_created < task.time_input_received\n\n # Test sending the value back\n task.set_result(2)\n server.send_result(task)\n result = client.get_result()\n assert result.value == 2\n assert result.time_result_received > result.time_result_sent", "def connect_to_master():", "def test_set_verify_callback_reference(self):\n serverContext = Context(TLSv1_2_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n\n clientContext = Context(TLSv1_2_METHOD)\n\n clients = []\n\n for i in range(5):\n\n def verify_callback(*args):\n return True\n\n serverSocket, clientSocket = socket_pair()\n client = Connection(clientContext, clientSocket)\n\n clients.append((serverSocket, client))\n\n clientContext.set_verify(VERIFY_PEER, verify_callback)\n\n gc.collect()\n\n # Make them talk to each other.\n for serverSocket, client in clients:\n server = Connection(serverContext, serverSocket)\n server.set_accept_state()\n client.set_connect_state()\n\n for _ in range(5):\n for s in [client, server]:\n try:\n s.do_handshake()\n except WantReadError:\n pass", "def _handshake_test(self, serverContext, clientContext):\n serverSocket, clientSocket = socket_pair()\n\n with serverSocket, clientSocket:\n server = Connection(serverContext, serverSocket)\n server.set_accept_state()\n\n client = Connection(clientContext, clientSocket)\n client.set_connect_state()\n\n # Make them talk to each other.\n for _ in range(3):\n for s in [client, server]:\n try:\n s.do_handshake()\n except WantReadError:\n select.select([client, server], [], [])", "def Client(self) -> Socket:", "def Client(self) -> Socket:" ]
[ "0.5642053", "0.5606343", "0.55808353", "0.5401694", "0.5297133", "0.5279599", "0.51710415", "0.5139281", "0.5129775", "0.5092337", "0.50627875", "0.503359", "0.5033415", "0.5019344", "0.49896622", "0.49710542", "0.4970392", "0.49493286", "0.49429607", "0.49402353", "0.4933798", "0.4921553", "0.49109742", "0.4910444", "0.49078986", "0.49025735", "0.48965827", "0.4896544", "0.48957413", "0.48957413" ]
0.58996457
0
Makes sure there are only 5 retransmissions of a segment (6 total transmissions). Sends a segment from the student/client to the reference/server; the reference/server will ignore the segment.
def no_excessive_retrans():
    test_str = DEBUG_IGNORE + "r3tr4n5m15510ns~~~~~~~\n"
    server = start_server(reference=True)
    client = start_client()

    # Send a segment to reference server, which should ignore it. See how many
    # times it was sent.
    write_to(client, test_str)
    segments = read_segments_from(server)
    if not segments or len(segments) != 6:
        return False

    # All segments should have the same content.
    orig_segment = segments[0]
    for segment in segments:
        if (
            segment.source != orig_segment.source or
            segment.source_port != orig_segment.source_port or
            segment.dest != orig_segment.dest or
            segment.dest_port != orig_segment.dest_port or
            segment.seqno != orig_segment.seqno or
            segment.ackno != orig_segment.ackno or
            segment.length != orig_segment.length or
            not segment.has_same_flags(orig_segment) or
            segment.window != orig_segment.window or
            segment.checksum != orig_segment.checksum
        ):
            return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment_truncated():\n test_str = \"n0t trunc4t3d 139482793 912847 192874 1928\\n\"\n truncated_str = DEBUG_TRUNCATE + \"trunc4t3d 139482793 912847 192874 1928\\n\"\n server = start_server()\n client = start_client(reference=True)\n\n # Send full segment.\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) != test_str:\n return False\n\n # Write the truncated segment. Nothing should be read from the server.\n write_to(client, truncated_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) == truncated_str:\n return False\n\n return True", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def client_sends():\n test_str = \"t35t1nG cl13nT 53nd1nG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n\n # The first segment should be one sent from the client, and should have the\n # correct length.\n segment = segments[0]\n return (\n str(segment.source_port) == CLIENT_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )", "def master(count=1): # count = 5 will transmit the list 5 times\n # set address of RX node into a TX pipe\n nrf.open_tx_pipe(address)\n # ensures the nRF24L01 is in TX mode\n nrf.listen = False\n\n success_percentage = 0\n for _ in range(count):\n now = time.monotonic() * 1000 # start timer\n \n #returns True for each element successfully sent. Possibly send in pairs dictionary style to confirm receipt\n #if result contains false send again up to 3 times? \n result = nrf.send(buffer)\n \n print('Transmission took', time.monotonic() * 1000 - now, 'ms') \n for r in result:\n #break out of method if failure\n if r==False:\n return False\n \n success_percentage += 1 if r else 0\n success_percentage /= (len(buffer)) * count\n print('Successfully sent', success_percentage * 100, '%')\n return True", "def flow_control():\n test_strs = [make_random(288) for _ in range(10)]\n stop_str = DEBUG_STOP + \"1t'5 h4mm3r t1m3!!!!!!!!\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # First write some segments to the server, then tell it to stop processing\n # segments. 
Get the last ackno from the server.\n write_to(client, test_strs[0])\n write_to(client, test_strs[1])\n time.sleep(TEST_TIMEOUT)\n read_segments_from(client)\n write_to(client, stop_str)\n server_segments = read_segments_from(server)\n if not server_segments:\n return False\n last_ackno = server_segments[-1].ackno\n\n # Send more segments.\n for i in range(2, len(test_strs)):\n write_to(client, test_strs[i])\n\n # Look at the last segment sent by the client.\n segments = read_segments_from(server)\n if not segments:\n return False\n segment = [s for s in segments if s.source_port == int(CLIENT_PORT)][-1]\n\n # If this sequence number is greater than the window size, then no flow\n # control was done.\n return segment.seqno <= last_ackno + MAX_SEG_DATA_SIZE", "def sweep_relay():", "def handleSent(): \r\n global sentAck\r\n sentAck = True", "def schc_fragmenter_send(msg, s, opt):\n assert type(msg) == bytearray # avoid compatibility problems\n debug_print(2, \"message:\", msg)\n # XXX assuming that the rule_id is not changed in a session.\n\n # check if the L2 size is enough to put the message.\n if opt.l2_size >= len(msg):\n debug_print(1, \"no need to fragment this message.\")\n return\n\n # prepare fragmenting\n factory = sfs.fragment_factory(frr, logger=debug_print)\n factory.setbuf(msg, dtag=opt.dtag)\n\n # main loop\n debug_print(1, \"L2 payload size: %s\" % opt.l2_size)\n\n global n_packet\n n_packet = 0\n\n while True:\n\n # CONT: send it and get next fragment.\n # WAIT_ACK: send it and wait for the ack.\n # DONE: dont need to send it.\n # ERROR: error happened.\n ret, tx_obj = factory.next_fragment(opt.l2_size)\n n_packet += 1\n\n # error!\n if ret == sfs.STATE.FAIL:\n raise AssertionError(\"something wrong in fragmentation.\")\n elif ret == sfs.STATE.DONE:\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n if opt.func_packet_loss and opt.func_packet_loss() == True:\n debug_print(1, \"packet dropped.\")\n else:\n print(\"SEND:\", tx_obj.packet)\n address = get_sockaddr(RECV_UDP_ADDRESS, RECV_UDP_PORT)\n s.sendto(tx_obj.packet, address)\n debug_print(1, \"sent :\", tx_obj.dump())\n debug_print(2, \"hex :\", tx_obj.full_dump())\n\n if factory.R.mode != SCHC_MODE.NO_ACK and ret != sfs.STATE.CONT:\n # WAIT_ACK\n # a part of or whole fragments have been sent and wait for the ack.\n debug_print(1, \"waiting an ack.\", factory.state.pprint())\n try:\n rx_data, peer = s.recvfrom(DEFAULT_RECV_BUFSIZE)\n debug_print(1, \"message from:\", peer)\n #\n ret, rx_obj = factory.parse_ack(rx_data, peer)\n debug_print(1, \"parsed:\", rx_obj.dump())\n debug_print(2, \"hex :\", rx_obj.full_dump())\n #\n if ret == sfs.STATE.DONE:\n # finish if the ack against all1 is received.\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n except Exception as e:\n if \"timeout\" in repr(e):\n debug_print(1, \"timed out to wait for the ack.\")\n else:\n debug_print(1, \"Exception: [%s]\" % repr(e))\n debug_print(0, traceback.format_exc())\n\n time.sleep(opt.interval)", "def client_receives():\n test_str = \"t35t1nG cl13nT r3c31\\/1NG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments:\n return False\n\n # The first segment should be one received from the client, and should have\n # the correct length.\n segment = segments[0]\n return (\n str(segment.dest_port) == SERVER_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )", "def messageHandler_RemainderMulticast(self, msg):\n\n data = 
msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received msg {1} from ID {2}'.format(self.CommID, data, sender))\n if data[0] == 'remainder':\n origin = data[1]\n remainder = copy.deepcopy(data[2])\n path = copy.deepcopy(data[3])\n path_schedules = copy.deepcopy(data[4])\n\n if not self.isGasBoiler():\n\n # is BES's load included in the received remainder?\n if self.CommID in path: #load included\n\n # find BES's index in path\n for p in range(len(path)):\n if path[p] == self.CommID:\n break\n\n #find origin index in list of origins\n for o in range(len(self.origins)):\n if self.origins[o] == origin:\n break\n\n if self.OPTcriterion == 'maxmindiff':\n criterion_1 = max(remainder) - min(remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion_1 = 0\n for a in range(len(remainder)):\n criterion_1 += abs(remainder[a])\n\n #print 'ID {0}: I am in path at index {1} ({2}) | origin is {3} at index {4} ({5}) | max-min-diff is {6}, global min for this origin is {7}'.format(self.CommID, p, path[p], origin, o, self.origins[o], criterion_1, self.globalMin[o])\n\n if len(path) == self.pathLengths[o]: # if remainder has maximal known path length\n # try to improve it by choosing a new schedule\n\n self.chosenScheduleIndex = copy.deepcopy(path_schedules[p])\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n self.selectBestSchedule(copy.deepcopy(remainder))\n\n new_remainder = copy.deepcopy(remainder)\n #update remainder\n for t in range(len(remainder)):\n new_remainder[t] -= self.EConsumptionScheduleCurves[path_schedules[p]][t]\n new_remainder[t] += self.EConsumptionChosenSchedule[t]\n\n #new minimum origin??\n if self.OPTcriterion == 'maxmindiff':\n criterion_2 = max(new_remainder) - min(new_remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion_2 = 0\n for a in range(len(remainder)):\n criterion_2 += abs(new_remainder[a])\n\n if self.globalMin[o] - criterion_2 > 0.1:\n #print 'ID {0}: found better max-min-diff for origin {1} | {2} --> {3}'.format(self.CommID, origin, self.globalMin[o], copy.deepcopy(criterion_2))\n\n new_path_schedules = copy.deepcopy(path_schedules)\n\n new_path_schedules[p] = copy.deepcopy(self.chosenScheduleIndex)\n\n self.globalMin[o] = copy.deepcopy(criterion_2)\n # check the functionality of the nex line, was:\n # self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.globalMinSchedIdx[o] = copy.deepcopy(new_path_schedules[p])\n self.pathLengths[o] = len(path)\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n for n in range(len(self.Neighbors)):\n self.sendMessage(self.Neighbors[n], 70 , ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(path), copy.deepcopy(new_path_schedules)])\n # =============================================================================================\n elif self.globalMin[o] - criterion_1 > 0.1:\n self.globalMin[o] = copy.deepcopy(criterion_1)\n self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.pathLengths[o] = len(path)\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n #multicast to all neighbors except sender:\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n # 
=============================================================================================\n #else:\n #print 'ID {0}: NO IMPROVEMENT WITH NEW SCHEDULE'.format(self.CommID)\n\n elif len(path) > self.pathLengths[o]:\n #print 'ID {0}: path is longer than known path for origin {1}'.format(self.CommID, origin)\n self.pathLengths[o] = len(path)\n\n self.globalMin[o] = copy.deepcopy(criterion_1)\n self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n #multicast to all neighbors except sender:\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n\n #elif self.globalMin[o] - criterion_1 > 0.1 and len(path) == self.pathLengths[o]: #new minimum\n # #print 'ID {0}: found better max-min-diff for origin {1}'.format(self.CommID, origin)\n # self.globalMin[o] = copy.deepcopy(criterion_1)\n # self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n # self.pathLengths[o] = len(path)\n # self.min_path[o] = copy.deepcopy(path)\n # self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n # #multicast to all neighbors except sender:\n # for n in range(len(self.Neighbors)):\n # if self.Neighbors[n] != sender:\n # self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n else:\n self.log_message('ID {0}: NOT DOING ANYTHING WITH REMAINDER')\n\n else: #load NOT included\n self.log_message('ID {0}: I am not in path and my load is NOT included in the remainder'.format(self.CommID))\n\n # assume no schedule to be chosen before and choose best fitting schedule for this remainder\n self.chosenScheduleIndex = -1\n self.selectBestSchedule(copy.deepcopy(remainder))\n\n new_remainder = copy.deepcopy(remainder)\n\n #update remainder with chosen load\n for t in range(len(remainder)):\n new_remainder[t] += self.EConsumptionChosenSchedule[t]\n\n if self.OPTcriterion == 'maxmindiff':\n criterion = max(new_remainder) - min(new_remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion = 0\n for a in range(len(remainder)):\n criterion += abs(new_remainder[a])\n\n #max_min_diff = max(new_remainder) - min(new_remainder)\n\n new_path = copy.deepcopy(path)\n new_path_schedules = copy.deepcopy(path_schedules)\n\n #update path and path_schedule fields\n new_path.append(self.CommID)\n new_path_schedules.append(self.chosenScheduleIndex)\n\n if origin in self.origins: # if origin of remainder is known\n\n #find origin index in list of origins\n for o in range(len(self.origins)):\n if self.origins[o] == origin:\n break\n\n #new minimal criterion?\n if self.globalMin[o] - criterion > 0.1 and len(new_path) == self.pathLengths[o]: #new minimal criterion\n self.globalMin[o] = copy.deepcopy(criterion)\n self.globalMinSchedIdx[o] = copy.deepcopy(self.chosenScheduleIndex)\n self.pathLengths[o] = len(new_path)\n self.min_path[o] = copy.deepcopy(new_path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n\n elif len(new_path) > self.pathLengths[o]:\n 
self.globalMin[o] = copy.deepcopy(criterion)\n self.globalMinSchedIdx[o] = copy.deepcopy(self.chosenScheduleIndex)\n self.pathLengths[o] = len(new_path)\n self.min_path[o] = copy.deepcopy(new_path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n else: #new origin\n self.origins.append(copy.deepcopy(origin))\n self.globalMin.append(copy.deepcopy(criterion))\n self.globalMinSchedIdx.append(copy.deepcopy(self.chosenScheduleIndex))\n self.pathLengths.append(len(new_path))\n self.min_path.append(copy.deepcopy(new_path))\n self.min_path_schedules.append(copy.deepcopy(new_path_schedules))\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n\n\n min_criterion = min(self.globalMin)\n\n #find index\n for m in range(len(self.globalMin)):\n if self.globalMin[m] == min_criterion:\n break\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[m]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[m]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n\n elif data[0] == 'minimalorigin':\n min_origin = copy.deepcopy(data[1])\n min_criterion = copy.deepcopy(data[2])\n #path_length = copy.deepcopy(data[3])\n min_path = copy.deepcopy(data[3])\n min_path_schedules = copy.deepcopy(data[4])\n\n\n # if number of participating BES in arrived solution is greater than known maximal path length\n if self.overall_max_path_length < len(min_path) and self.CommID in min_path:\n #print 'ID {0}: received longer path (old: {1}, new {2})'.format(self.CommID, self.overall_max_path_length, len(min_path))\n self.overall_max_path_length = len(min_path)\n self.overall_min = copy.deepcopy(min_criterion)\n\n #find index\n for u in range(len(min_path)):\n if min_path[u] == self.CommID:\n break\n\n #print 'ID {0}: choosing new schedule with index {1}'.format(self.CommID, min_path_schedules[u])\n #choose schedule corresponding to min origin\n self.chosenScheduleIndex = min_path_schedules[u]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #multicast information to all neighbors except sender\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(min_origin), copy.deepcopy(min_criterion), copy.deepcopy(min_path), copy.deepcopy(min_path_schedules)])\n #\n # else:\n # print 'ID {0}: unable to choose new schedule because I dont know origin {1}.'.format(self.CommID, min_origin)\n #\n #\n #\n # #if number of participating BES in arrived solution is equal to known maximal path length\n # elif self.overall_max_path_length == 
len(min_path):\n\n #print 'ID {0}: received new criterion with maximal known path length of {1}'.format(self.CommID, self.overall_max_path_length)\n elif self.overall_min - min_criterion > 0.1 and self.overall_max_path_length == len(min_path) and self.CommID in min_path: #received better criterion\n #print 'ID {0}: received better criterion (old: {1}, new {2})'.format(self.CommID, self.overall_min, min_criterion)\n self.overall_min = copy.deepcopy(min_criterion)\n\n\n #find index\n for u in range(len(min_path)):\n if min_path[u] == self.CommID:\n break\n\n #print 'ID {0}: received better criterion with path length {2}| choosing new schedule with index {1}'.format(self.CommID, min_path_schedules[u], len(min_path))\n #choose schedule corresponding to min origin\n self.chosenScheduleIndex = min_path_schedules[u]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #multicast information to all neighbors except sender\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(min_origin), copy.deepcopy(min_criterion), copy.deepcopy(min_path), copy.deepcopy(min_path_schedules)])\n\n else:\n self.log_message('ID {0}: EITHER PATH IS SMALLER THAN LONGEST KNOWN OR MINIMUM IS WORSE'.format(self.CommID))\n #else:\n # print 'ID {0}: received smaller path length {1}, ignore!'.format(self.CommID, len(min_path))", "def test_modifying_nb_segments_limits(self):\n blanket1 = paramak.BlanketFPPoloidalSegments(thickness=20, start_angle=0, stop_angle=180, rotation_angle=180)\n\n blanket2 = paramak.BlanketFPPoloidalSegments(\n thickness=20,\n start_angle=0,\n stop_angle=180,\n rotation_angle=180,\n cut=blanket1,\n )\n\n blanket1.nb_segments_limits = (4, 8)\n blanket2.nb_segments_limits = (3, 8)\n\n assert blanket2.volume() != 0", "def client_side_sfsa_round3(communication, client_socket, FEDSUBAVG_SELF_STORAGE, FEDSUBAVG_OTHERS_STORAGE):\r\n start_time = time.time()\r\n # U2: Except myself\r\n fedsubavg_u2_live = list(set(FEDSUBAVG_SELF_STORAGE['U2']) - set([FEDSUBAVG_SELF_STORAGE['my_index']]))\r\n # U1/U2\r\n fedsubavg_u2_drop = FEDSUBAVG_SELF_STORAGE['U1\\U2']\r\n\r\n # Shares of self mask's seed for live clients\r\n fedsubavg_live_b_shares = dict()\r\n for client_index_live in fedsubavg_u2_live:\r\n fedsubavg_live_b_shares[client_index_live] = FEDSUBAVG_OTHERS_STORAGE[client_index_live]['share_b']\r\n fedsubavg_live_b_shares[FEDSUBAVG_SELF_STORAGE['my_index']] = FEDSUBAVG_SELF_STORAGE['my_share_b']\r\n\r\n # Shares of mutual mask's secret key for dropped clients\r\n fedsubavg_drop_s_shares = dict()\r\n for client_index_drop in fedsubavg_u2_drop:\r\n fedsubavg_drop_s_shares[client_index_drop] = FEDSUBAVG_OTHERS_STORAGE[client_index_drop]['share_ssk']\r\n\r\n write_csv(FEDSUBAVG_SELF_STORAGE['client_computation_time_path'], [FEDSUBAVG_SELF_STORAGE['communication_round_number'], \\\r\n \"sfsa_U3\", time.time() - start_time])\r\n\r\n # Send shares to the server\r\n fedsubavg_shares = {'client_ID': FEDSUBAVG_SELF_STORAGE['my_index'],\r\n 'live_b_shares': fedsubavg_live_b_shares,\r\n 'drop_s_shares': fedsubavg_drop_s_shares}\r\n communication.send_np_array(fedsubavg_shares, client_socket)\r\n print('Client %d sent secret shares of live and dropped clients in round 2 to server in 
secure federated submodel averaging'\\\r\n % FEDSUBAVG_SELF_STORAGE['my_index'])\r\n sys.stdout.flush()\r\n\r\n del fedsubavg_live_b_shares\r\n del fedsubavg_drop_s_shares", "def handleSent(): \n global sentAck\n sentAck = True", "def messageHandler_MulticastBasedCoordination(self, msg):\n\n data = msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received msg {1} from ID {2}'.format(self.CommID, data, sender))\n if data[0] == 'remainder':\n origin = data[1]\n remainder = copy.deepcopy(data[2])\n path = copy.deepcopy(data[3])\n path_schedules = copy.deepcopy(data[4])\n\n if not self.isGasBoiler():\n\n # is BES's load included in the received remainder?\n if self.CommID in path: #load included\n\n # find BES's index in path\n for p in range(len(path)):\n if path[p] == self.CommID:\n break\n\n #find origin index in list of origins\n for o in range(len(self.origins)):\n if self.origins[o] == origin:\n break\n\n if self.OPTcriterion == 'maxmindiff':\n criterion_1 = max(remainder) - min(remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion_1 = 0\n for a in range(len(remainder)):\n criterion_1 += abs(remainder[a])\n\n #print 'ID {0}: I am in path at index {1} ({2}) | origin is {3} at index {4} ({5}) | max-min-diff is {6}, global min for this origin is {7}'.format(self.CommID, p, path[p], origin, o, self.origins[o], criterion_1, self.globalMin[o])\n\n if len(path) == self.pathLengths[o]: # if remainder has maximal known path length\n # try to improve it by choosing a new schedule\n\n self.chosenScheduleIndex = copy.deepcopy(path_schedules[p])\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n self.selectBestSchedule(copy.deepcopy(remainder))\n\n new_remainder = copy.deepcopy(remainder)\n #update remainder\n for t in range(len(remainder)):\n new_remainder[t] -= self.EConsumptionScheduleCurves[path_schedules[p]][t]\n new_remainder[t] += self.EConsumptionChosenSchedule[t]\n\n #new minimum origin??\n if self.OPTcriterion == 'maxmindiff':\n criterion_2 = max(new_remainder) - min(new_remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion_2 = 0\n for a in range(len(remainder)):\n criterion_2 += abs(new_remainder[a])\n\n if self.globalMin[o] - criterion_2 > 0.1:\n #print 'ID {0}: found better max-min-diff for origin {1} | {2} --> {3}'.format(self.CommID, origin, self.globalMin[o], copy.deepcopy(criterion_2))\n\n new_path_schedules = copy.deepcopy(path_schedules)\n\n new_path_schedules[p] = copy.deepcopy(self.chosenScheduleIndex)\n\n self.globalMin[o] = copy.deepcopy(criterion_2)\n # check the functionality of the nex line, was:\n # self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.globalMinSchedIdx[o] = copy.deepcopy(new_path_schedules[p])\n self.pathLengths[o] = len(path)\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n for n in range(len(self.Neighbors)):\n self.sendMessage(self.Neighbors[n], 70 , ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(path), copy.deepcopy(new_path_schedules)])\n # =============================================================================================\n elif self.globalMin[o] - criterion_1 > 0.1:\n self.globalMin[o] = copy.deepcopy(criterion_1)\n self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.pathLengths[o] = len(path)\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n #multicast 
to all neighbors except sender:\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n # =============================================================================================\n #else:\n #print 'ID {0}: NO IMPROVEMENT WITH NEW SCHEDULE'.format(self.CommID)\n\n elif len(path) > self.pathLengths[o]:\n #print 'ID {0}: path is longer than known path for origin {1}'.format(self.CommID, origin)\n self.pathLengths[o] = len(path)\n\n self.globalMin[o] = copy.deepcopy(criterion_1)\n self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n self.min_path[o] = copy.deepcopy(path)\n self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n #multicast to all neighbors except sender:\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n\n #elif self.globalMin[o] - criterion_1 > 0.1 and len(path) == self.pathLengths[o]: #new minimum\n # #print 'ID {0}: found better max-min-diff for origin {1}'.format(self.CommID, origin)\n # self.globalMin[o] = copy.deepcopy(criterion_1)\n # self.globalMinSchedIdx[o] = copy.deepcopy(path_schedules[p])\n # self.pathLengths[o] = len(path)\n # self.min_path[o] = copy.deepcopy(path)\n # self.min_path_schedules[o] = copy.deepcopy(path_schedules)\n\n # #multicast to all neighbors except sender:\n # for n in range(len(self.Neighbors)):\n # if self.Neighbors[n] != sender:\n # self.sendMessage(self.Neighbors[n], 70, ['remainder', copy.deepcopy(origin), copy.deepcopy(remainder), copy.deepcopy(path), copy.deepcopy(path_schedules)])\n else:\n self.log_message('ID {0}: NOT DOING ANYTHING WITH REMAINDER')\n\n else: #load NOT included\n self.log_message('ID {0}: I am not in path and my load is NOT included in the remainder'.format(self.CommID))\n\n # assume no schedule to be chosen before and choose best fitting schedule for this remainder\n self.chosenScheduleIndex = -1\n self.selectBestSchedule(copy.deepcopy(remainder))\n\n new_remainder = copy.deepcopy(remainder)\n\n #update remainder with chosen load\n for t in range(len(remainder)):\n new_remainder[t] += self.EConsumptionChosenSchedule[t]\n\n if self.OPTcriterion == 'maxmindiff':\n criterion = max(new_remainder) - min(new_remainder)\n elif self.OPTcriterion == 'absremainder':\n criterion = 0\n for a in range(len(remainder)):\n criterion += abs(new_remainder[a])\n\n #max_min_diff = max(new_remainder) - min(new_remainder)\n\n new_path = copy.deepcopy(path)\n new_path_schedules = copy.deepcopy(path_schedules)\n\n #update path and path_schedule fields\n new_path.append(self.CommID)\n new_path_schedules.append(self.chosenScheduleIndex)\n\n if origin in self.origins: # if origin of remainder is known\n\n #find origin index in list of origins\n for o in range(len(self.origins)):\n if self.origins[o] == origin:\n break\n\n #new minimal criterion?\n if self.globalMin[o] - criterion > 0.1 and len(new_path) == self.pathLengths[o]: #new minimal criterion\n self.globalMin[o] = copy.deepcopy(criterion)\n self.globalMinSchedIdx[o] = copy.deepcopy(self.chosenScheduleIndex)\n self.pathLengths[o] = len(new_path)\n self.min_path[o] = copy.deepcopy(new_path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n\n # multicast remainder to all neighbors\n for n in 
range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n\n elif len(new_path) > self.pathLengths[o]:\n self.globalMin[o] = copy.deepcopy(criterion)\n self.globalMinSchedIdx[o] = copy.deepcopy(self.chosenScheduleIndex)\n self.pathLengths[o] = len(new_path)\n self.min_path[o] = copy.deepcopy(new_path)\n self.min_path_schedules[o] = copy.deepcopy(new_path_schedules)\n\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n else: #new origin\n self.origins.append(copy.deepcopy(origin))\n self.globalMin.append(copy.deepcopy(criterion))\n self.globalMinSchedIdx.append(copy.deepcopy(self.chosenScheduleIndex))\n self.pathLengths.append(len(new_path))\n self.min_path.append(copy.deepcopy(new_path))\n self.min_path_schedules.append(copy.deepcopy(new_path_schedules))\n\n # multicast remainder to all neighbors\n for n in range(len(self.Neighbors)):\n new_data = ['remainder', copy.deepcopy(origin), copy.deepcopy(new_remainder), copy.deepcopy(new_path), copy.deepcopy(new_path_schedules)]\n self.sendMessage(self.Neighbors[n], 70, new_data)\n\n\n\n min_criterion = min(self.globalMin)\n\n #find index\n for m in range(len(self.globalMin)):\n if self.globalMin[m] == min_criterion:\n break\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[m]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[m]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n\n elif data[0] == 'minimalorigin':\n min_origin = copy.deepcopy(data[1])\n min_criterion = copy.deepcopy(data[2])\n #path_length = copy.deepcopy(data[3])\n min_path = copy.deepcopy(data[3])\n min_path_schedules = copy.deepcopy(data[4])\n\n\n # if number of participating BES in arrived solution is greater than known maximal path length\n if self.overall_max_path_length < len(min_path) and self.CommID in min_path:\n #print 'ID {0}: received longer path (old: {1}, new {2})'.format(self.CommID, self.overall_max_path_length, len(min_path))\n self.overall_max_path_length = len(min_path)\n self.overall_min = copy.deepcopy(min_criterion)\n\n #find index\n for u in range(len(min_path)):\n if min_path[u] == self.CommID:\n break\n\n #print 'ID {0}: choosing new schedule with index {1}'.format(self.CommID, min_path_schedules[u])\n #choose schedule corresponding to min origin\n self.chosenScheduleIndex = min_path_schedules[u]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #multicast information to all neighbors except sender\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(min_origin), copy.deepcopy(min_criterion), copy.deepcopy(min_path), copy.deepcopy(min_path_schedules)])\n #\n # 
else:\n # print 'ID {0}: unable to choose new schedule because I dont know origin {1}.'.format(self.CommID, min_origin)\n #\n #\n #\n # #if number of participating BES in arrived solution is equal to known maximal path length\n # elif self.overall_max_path_length == len(min_path):\n\n #print 'ID {0}: received new criterion with maximal known path length of {1}'.format(self.CommID, self.overall_max_path_length)\n elif self.overall_min - min_criterion > 0.1 and self.overall_max_path_length == len(min_path) and self.CommID in min_path: #received better criterion\n #print 'ID {0}: received better criterion (old: {1}, new {2})'.format(self.CommID, self.overall_min, min_criterion)\n self.overall_min = copy.deepcopy(min_criterion)\n\n\n #find index\n for u in range(len(min_path)):\n if min_path[u] == self.CommID:\n break\n\n #print 'ID {0}: received better criterion with path length {2}| choosing new schedule with index {1}'.format(self.CommID, min_path_schedules[u], len(min_path))\n #choose schedule corresponding to min origin\n self.chosenScheduleIndex = min_path_schedules[u]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #multicast information to all neighbors except sender\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] != sender:\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(min_origin), copy.deepcopy(min_criterion), copy.deepcopy(min_path), copy.deepcopy(min_path_schedules)])\n\n else:\n self.log_message('ID {0}: EITHER PATH IS SMALLER THAN LONGEST KNOWN OR MINIMUM IS WORSE'.format(self.CommID))\n #else:\n # print 'ID {0}: received smaller path length {1}, ignore!'.format(self.CommID, len(min_path))", "def new_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n if kwargs['objectname'] is None or kwargs['gateway'] is None:\n print(\"Please specify a name for the segment, and the gateway/network.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"flexible\" and kwargs['tier1_id'] is None:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"fixed\" and kwargs['tier1_id'] is not None:\n print(\"Invalid configuration - 'fixed' segments may only be connected to the default CGW. To attach to a customer Tier1, please create a 'flexible' segment.\")\n sys.exit(1)\n rt_set = [None, \"ROUTED\", \"DISCONNECTED\"]\n if kwargs['segment_type'] == \"fixed\" and kwargs['routing_type'] not in rt_set:\n print(\"Invalid configuration. 
For a 'fixed' segment, the routing type must be left blank or set explicitly to 'ROUTED' or 'DISCONNECTED.'\")\n sys.exit(1)\n\n segment_name = kwargs[\"objectname\"]\n gateway = kwargs['gateway']\n\n # Search for segment to determine if it already exists\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n print(\"The segment already appears to exist.\")\n sys.exit(1)\n\n\n # Establish baseline json payload\n json_data = {\n \"display_name\":segment_name,\n \"id\":segment_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"subnets\":[\n {\n \"gateway_address\": gateway\n }\n ]\n }\n #set segment type as either \"fixed\" or \"flexible\"\n segment_type = kwargs['segment_type']\n tier1_id = kwargs['tier1_id']\n\n if segment_type == \"fixed\":\n json_data[\"connectivity_path\"] = \"/infra/tier-1s/cgw\"\n if kwargs['routing_type'] == \"DISCONNECTED\":\n json_data[\"advanced_config\"][\"connectivity\"] = \"OFF\"\n else:\n json_data[\"advanced_config\"][\"connectivity\"] = \"ON\"\n elif segment_type == \"flexible\" and tier1_id is not None:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{tier1_id}'\n else:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n if kwargs['dhcp_range'] is not None:\n json_data[\"subnets\"][0][\"dhcp_ranges\"] = [f'{kwargs[\"dhcp_range\"]}']\n if kwargs['domain_name'] is not None:\n json_data[\"domain_name\"] = kwargs[\"domain_name\"]\n\n print(json.dumps(json_data, indent = 2))\n\n status = new_segment_json(proxy, sessiontoken, segment_name, segment_type, json_data)\n if status == 200:\n print(f'The following network has been created: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not created. Please check your syntax and try again.\")\n sys.exit(1)", "async def send_drones_to_minerals(self):\n if self.vespene >= 100 or self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n for drone in self.workers.filter(lambda w: w.is_carrying_vespene):\n self.do(drone.gather(self.mineral_field.closer_than(10, drone).closest_to(drone)))", "def test_sync_4(self):\n try:\n self.mit_1_3.amount_ram = 5.0\n except ValidationError:\n self.fail(\n \"When sync is disabled, it should allow changing synced field\"\n \" on instance.\")", "def TCP(conn, addr):\n buffer = array('B', [0] * 300)\n cnt = 0\n while True:\n if cnt < 60000: cnt = cnt + 1\n else: cnt = 1\n try:\n conn.recv_into(buffer)\n TID0 = buffer[0] #Transaction ID to sync\n TID1 = buffer[1] #Transaction ID \n ID = buffer[6] #Unit ID\n FC = buffer[7]\n mADR = buffer[8]\n lADR = buffer[9]\n ADR = mADR * 256 + lADR\n LEN = buffer[10] * 256 + buffer[11]\n BYT = LEN * 2\n print(\"Received = \", buffer[0:13 + buffer[12]])\n if (FC in [1, 2, 3, 4]): # Read Inputs or Registers\n DAT = array('B')\n if FC < 3:\n BYT = ceil(LEN / 8) # Round off the no. of bytes\n v = 85 # send 85,86.. 
for bytes.\n for i in range(BYT):\n DAT.append(v)\n v = (lambda x: x + 1 if (x < 255) else 85)(v)\n else:\n DAT = array('B', np.arange(cnt, LEN+cnt, dtype=np.dtype('>i2')).tobytes())\n print(\"TID = %d, ID= %d, Fun.Code= %d, Address= %d, Length= %d\" \\\n %((TID0 * 256 + TID1), ID, FC, ADR, LEN))\n conn.send(\n array('B', [TID0, TID1, 0, 0, 0, BYT + 3, ID, FC, BYT]) + DAT)\n elif (FC in [5, 6, 15, 16]): # Write Registers\n BYT = buffer[12]\n conn.send(\n array('B', [TID0, TID1, 0, 0, 0, 6, ID, FC, mADR, lADR, buffer[10], buffer[11]]))\n buf = buffer[13:(13 + BYT)]\n print(\"TID = %d, ID= %d, Fun.Code= %d, Address= %d, Length= %d, Bytes= %d\" \\\n %((TID0 * 256 + TID1), ID, FC, ADR, LEN, BYT))\n if FC == 5 or FC == 15:\n message = 'bytes: '+ str(unpack('B' * BYT, buf))\n elif FC == 6 or FC == 16:\n message = str(unpack('>' + 'H' * int(BYT / 2), buf))\n print(\"Received Write Values =\", message)\n else:\n print(\"Funtion Code %d Not Supported\" % FC)\n exit()\n except Exception as e:\n print(e, \"\\nConnection with Client terminated\")\n exit()", "def send(self,data):\r\n # Get the data length\r\n fullDataLength = len(data)\r\n \r\n # Input sanity\r\n if fullDataLength == 0:\r\n raise ValueError, \"Cannot send a null data-set!\"\r\n \r\n # Send chunks of data until it is all sent\r\n while True:\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Make sure we have available outgoing bandwidth\r\n self.socketLocks[\"outgoing\"].acquire()\r\n try:\r\n self.socketLocks[\"outgoing\"].release()\r\n except:\r\n # Some weird timing issues can cause an exception, but it is harmless\r\n pass\r\n \r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Get our own lock\r\n self.socketLocks[\"send\"].acquire()\r\n \r\n # How much outgoing traffic is available?\r\n outgoingAvailable = self.bufferInfo[\"outgoing\"]\r\n \r\n # If we can, just send it all at once\r\n if len(data) < outgoingAvailable:\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, data)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] -= len(data)\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # We need to explicitly leave the loop\r\n break\r\n \r\n # We need to send chunks, while waiting for more outgoing B/W\r\n else:\r\n # Get a chunk of data, and send it\r\n chunk = data[:outgoingAvailable]\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, chunk)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] = 0\r\n\r\n # Lock the outgoing lock, so that we block until we get a MULTIPLEXER_CONN_BUF_SIZE message\r\n self.socketLocks[\"outgoing\"].acquire()\r\n \r\n # Trim data to only what isn't sent syet\r\n data = data[outgoingAvailable:]\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # If there is no data left to send, then break\r\n if len(data) == 0:\r\n break\r\n \r\n # Return bytes sent, which is always the full message\r\n # since we will block indefinately until everything is sent.\r\n return fullDataLength", "def rtrSweep(self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True):\n #set up file for writing\n now = datetime.datetime.now()\n datestr 
= now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+datestr+\"_rtr.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n dataWriter.writerow(['# Time Error Bytes 1-13']);\n dataWriter.writerow(['#' + \"rtr sweep from %d to %d\"%(lowID,highID)])\n if( verbose):\n print \"started\"\n #self.client.serInit()\n #self.spitSetup(freq)\n \n #for each id\n for i in range(lowID,highID+1, 1):\n self.client.serInit()\n self.spitSetup(freq) #reset the chip to try and avoid serial timeouts\n #set filters\n standardid = [i, i, i, i]\n self.addFilter(standardid, verbose = True)\n \n #### split SID into different areas\n SIDlow = (standardid[0] & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n SIDhigh = (standardid[0] >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n #create RTR packet\n packet = [SIDhigh, SIDlow, 0x00,0x00,0x40]\n dataWriter.writerow([\"#requested id %d\"%i])\n #self.client.poke8(0x2C,0x00); #clear the CANINTF register; we care about bits 0 and 1 (RXnIF flags) which indicate a message is being held \n #clear buffer\n packet1 = self.client.rxpacket();\n packet2 = self.client.rxpacket();\n #send in rtr request\n self.client.txpacket(packet)\n ## listen for 2 packets. one should be the rtr we requested the other should be\n ## a new packet response\n starttime = tT.time()\n while ((time.time() - starttime) < duration): #listen for the given duration time period\n packet = self.client.rxpacket()\n if( packet == None):\n continue\n # we have sniffed a packet, save it\n row = []\n row.append(\"%f\"%time.time()) #timestamp\n row.append(0) #error flag (not checkign)\n row.append(\"rtrRequest_%d\"%i) #comment\n row.append(duration) #sniff time\n row.append(1) # filtering boolean\n for byte in packet:\n row.append(\"%02x\"%ord(byte));\n dataWriter.writerow(row)\n print self.client.packet2parsedstr(packet)\n trial= 2;\n # for each trial repeat\n while( trial <= attempts):\n print \"trial: \", trial\n self.client.MCPrts(TXB0=True);\n starttime = time.time()\n # this time we will sniff for the given amount of time to see if there is a\n # time till the packets come in\n while( (time.time()-starttime) < duration):\n packet=self.client.rxpacket();\n if( packet == None):\n continue\n row = []\n row.append(\"%f\"%time.time()) #timestamp\n row.append(0) #error flag (not checking)\n row.append(\"rtrRequest_%d\"%i) #comment\n row.append(duration) #sniff time\n row.append(1) # filtering boolean\n for byte in packet:\n row.append(\"%02x\"%ord(byte));\n dataWriter.writerow(row)\n print self.client.packet2parsedstr(packet)\n trial += 1\n print \"sweep complete\"\n outfile.close()", "def transmission(self):\n return 1", "def test_updating_a_segment(self):\n pass", "def verify( fasta1, fasta2, num_iterations, fragment_size,\n stdout = sys.stdout, quiet = False ):\n if not quiet:\n options.stdout.write(\"verifying %s and %s using %i random segments of length %i\\n\" %\\\n (fasta1.getDatabaseName(),\n fasta2.getDatabaseName(),\n num_iterations,\n fragment_size ))\n options.stdout.flush()\n nerrors = 0\n for x in range(num_iterations):\n contig, strand, start, end = fasta1.getRandomCoordinates( fragment_size )\n s1 = fasta1.getSequence(contig,strand,start,end)\n s2 = fasta2.getSequence(contig,strand,start,end)\n if s1 != s2:\n if not quiet:\n options.stdout.write(\"discordant segment: %s:%s:%i:%i\\n%s\\n%s\\n\" %\\\n (contig, strand, start, end, s1, s2) )\n nerrors += 1\n return nerrors", "def ignores_bad_seqno():\n test_str = 
\"cs144--cs144--cs144--cs144--cs144--cs144--cs144--cs144\\n\"\n bad_seqno_str = DEBUG_BAD_SEQNO + \"cs144cs144cs144cs144cs144cs144cs144cs144\\n\"\n server = start_server()\n client = start_client(reference=True)\n\n # Send full segment.\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) != test_str:\n return False\n segments = read_segments_from(server)\n first_segment = segments[0] if len(segments) > 0 else None\n\n # Write the bad segment. Nothing should be read from the server and no\n # ACKs should be sent.\n write_to(client, bad_seqno_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) == bad_seqno_str:\n return False\n\n # Make sure no ACKs are sent to the bad segment, or if an ACK is sent,\n # it is a duplicate ACK to a previous segment.\n segments = read_segments_from(server)\n if not segments:\n return False\n for segment in segments:\n if \"ACK\" in segment.flags and segment.source_port == CLIENT_PORT and \\\n (first_segment is None or segment.ackno != first_segment.ackno):\n return False\n\n return True", "def send_req(self):\n self.n_send_req += 1", "def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = Segment()\n new_segment.save()\n name = \"%s_seg_%s\" % (self.PREFIX, new_segment.id)\n node = Node.objects(id=node_oid)[0]\n list_id = DripCampaign.objects(id=node[\"drip_campaign_id\"])[0][\"list_id\"]\n node.update(set__segment_oid=new_segment.id, set__updated_at=datetime.utcnow())\n\n # gather all users that apply for this node after triggers on previous nodes\n all_euids = set()\n if node[\"initial\"]:\n all_euids = set(List.objects(list_id=list_id)[0][\"members_euid\"])\n else:\n for trg in Trigger.objects(node_to=node_oid):\n for euids, to_node_oid in self.segment_by_triggers(trg[\"node_from\"]):\n if to_node_oid == node_oid:\n all_euids.update(set(euids))\n\n # # intersect euids with current state of the list\n # # it might be the case that some people are removed from the list since previous email\n self.fetch_members_for_list(list_id)\n all_euids = all_euids & set(List.objects(list_id=list_id)[0][\"members_euid\"])\n\n all_euids = list(all_euids)\n\n # apply the user list to segment n stuff\n # if user list is empty, save only meta info and don't actually work with mailchimp\n if all_euids:\n segment_id = self.mw.create_segment(list_id, name)\n self.mw.update_segment_members(list_id, segment_id, all_euids)\n else:\n segment_id = None\n new_segment.update(set__segment_id=segment_id, set__name=name, members_euid=all_euids,\n set__updated_at=datetime.utcnow())", "def test_subscriber_access_if_vsg1_goes_down(self):", "def syncloss(self):\n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def test_one_fragment(self):\n pkt = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / ICMPv6EchoRequest()\n / Raw(\"X\" * 1600)\n )\n frags = fragment_rfc8200(pkt, 1, 400)\n\n # send a fragment with known id\n self.send_and_expect(self.src_if, [frags[0]], self.dst_if)\n\n # send an atomic fragment with same id - should be reassembled\n pkt = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / 
IPv6ExtHdrFragment(id=1)\n / ICMPv6EchoRequest()\n )\n rx = self.send_and_expect(self.src_if, [pkt], self.dst_if)\n\n # now forward packets matching original reassembly, should still work\n rx = self.send_and_expect(self.src_if, frags[1:], self.dst_if)", "def handle_max_cons_per_ip(self):\n msg = \"Too many connections from the same IP address.\"\n self.respond(\"421 %s\" %msg)\n self.log(msg)\n self.close_when_done()" ]
[ "0.5473814", "0.5183754", "0.50722843", "0.49788105", "0.49673986", "0.4951302", "0.48840412", "0.48675218", "0.48543423", "0.48015732", "0.47839195", "0.4771921", "0.47533", "0.47516027", "0.47364625", "0.47244492", "0.4713454", "0.46841288", "0.46820426", "0.4659991", "0.4642309", "0.4629776", "0.46262386", "0.46236724", "0.4615001", "0.4590572", "0.45875135", "0.45837006", "0.4581482", "0.4578946" ]
0.6117153
0
Sends a complete segment from reference/client to student/server, which should be processed correctly. Then sends a segment with a sequence number completely out of scope, which should be ignored.
def ignores_bad_seqno(): test_str = "cs144--cs144--cs144--cs144--cs144--cs144--cs144--cs144\n" bad_seqno_str = DEBUG_BAD_SEQNO + "cs144cs144cs144cs144cs144cs144cs144cs144\n" server = start_server() client = start_client(reference=True) # Send full segment. write_to(client, test_str) time.sleep(TEST_TIMEOUT) if read_from(server, num_lines=1) != test_str: return False segments = read_segments_from(server) first_segment = segments[0] if len(segments) > 0 else None # Write the bad segment. Nothing should be read from the server and no # ACKs should be sent. write_to(client, bad_seqno_str) time.sleep(TEST_TIMEOUT) if read_from(server, num_lines=1) == bad_seqno_str: return False # Make sure no ACKs are sent to the bad segment, or if an ACK is sent, # it is a duplicate ACK to a previous segment. segments = read_segments_from(server) if not segments: return False for segment in segments: if "ACK" in segment.flags and segment.source_port == CLIENT_PORT and \ (first_segment is None or segment.ackno != first_segment.ackno): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment_truncated():\n test_str = \"n0t trunc4t3d 139482793 912847 192874 1928\\n\"\n truncated_str = DEBUG_TRUNCATE + \"trunc4t3d 139482793 912847 192874 1928\\n\"\n server = start_server()\n client = start_client(reference=True)\n\n # Send full segment.\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) != test_str:\n return False\n\n # Write the truncated segment. Nothing should be read from the server.\n write_to(client, truncated_str)\n time.sleep(TEST_TIMEOUT)\n if read_from(server, num_lines=1) == truncated_str:\n return False\n\n return True", "def _done_sending(self):\n self.sfile.write('\\n')\n self.sfile.flush()", "def client_sends():\n test_str = \"t35t1nG cl13nT 53nd1nG\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n\n # The first segment should be one sent from the client, and should have the\n # correct length.\n segment = segments[0]\n return (\n str(segment.source_port) == CLIENT_PORT and\n segment.length == CTCP_HEADER_LEN + len(test_str)\n )", "def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags", "def handleSent(): \r\n global sentAck\r\n sentAck = True", "def handleSent(): \n global sentAck\n sentAck = True", "def flow_control():\n test_strs = [make_random(288) for _ in range(10)]\n stop_str = DEBUG_STOP + \"1t'5 h4mm3r t1m3!!!!!!!!\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # First write some segments to the server, then tell it to stop processing\n # segments. Get the last ackno from the server.\n write_to(client, test_strs[0])\n write_to(client, test_strs[1])\n time.sleep(TEST_TIMEOUT)\n read_segments_from(client)\n write_to(client, stop_str)\n server_segments = read_segments_from(server)\n if not server_segments:\n return False\n last_ackno = server_segments[-1].ackno\n\n # Send more segments.\n for i in range(2, len(test_strs)):\n write_to(client, test_strs[i])\n\n # Look at the last segment sent by the client.\n segments = read_segments_from(server)\n if not segments:\n return False\n segment = [s for s in segments if s.source_port == int(CLIENT_PORT)][-1]\n\n # If this sequence number is greater than the window size, then no flow\n # control was done.\n return segment.seqno <= last_ackno + MAX_SEG_DATA_SIZE", "def no_excessive_retrans():\n test_str = DEBUG_IGNORE + \"r3tr4n5m15510ns~~~~~~~\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # Send a segment to reference server, which should ignore it. 
See how many\n # times it was sent.\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments or len(segments) != 6:\n return False\n\n # All segments should have the same content.\n orig_segment = segments[0]\n for segment in segments:\n if (\n segment.source != orig_segment.source or\n segment.source_port != orig_segment.source_port or\n segment.dest != orig_segment.dest or\n segment.dest_port != orig_segment.dest_port or\n segment.seqno != orig_segment.seqno or\n segment.ackno != orig_segment.ackno or\n segment.length != orig_segment.length or\n not segment.has_same_flags(orig_segment) or\n segment.window != orig_segment.window or\n segment.checksum != orig_segment.checksum\n ):\n return False\n\n return True", "def handle_write(self):\n self.initiate_send()", "def variant_call_single_end(sam_file):\n\n\ttotal_reads_number = wccount(sam_file)\n\tpercentage_of_total_file = 0\n\n\tchr_seq = get_ref_geno(chr_name)\n\n\tglobal table_name\n\tcon = lite.connect(db_name)\n\twith con:\n\t\tcur = con.cursor()\n\n\t\tinputfile_sam = open(currentPath + sam_file, \"r\")\n\t\tsam_line_first = inputfile_sam.readline() # the first read line in a pair\n\t\ttotal_reads_num = 0\n\t\tcovered_snp_total_number = 0\n\n\t\tinsert_size_lower_bond = 0\n\t\tinsert_size_upper_bond = 1000\n\n\t\twhile sam_line_first != '':\n\t\t\tif not sam_line_first.startswith(\"@\"):\n\t\t\t\tcurrent_percent = int(float(total_reads_number * percentage_of_total_file) / 100)\n\t\t\t\tif total_reads_num == current_percent:\n\t\t\t\t\tprint \"current progress: \", percentage_of_total_file\n\t\t\t\t\tpercentage_of_total_file += 10\n\n\t\t\t\ttotal_reads_num += 1\n\t\t\t\telements_first = sam_line_first.strip().split()\n\t\t\t\ttry:\n\t\t\t\t\tread_ID_first = elements_first[0].strip()\n\t\t\t\t\tchrName_first = elements_first[2].strip()\n\t\t\t\t\tinsert_size_first = abs(int(elements_first[8].strip())) # insert_size for second read is negative\n\t\t\t\texcept:\n\t\t\t\t\tprint \"error in first read:\", sam_line_first\n\t\t\t\t#print \"this is a new read\"\t\n\t\t\t\tif (insert_size_first >= insert_size_lower_bond) and (insert_size_first <= insert_size_upper_bond):\n\t\t\t\t\tif True:\n\t\t\t\t\t\tif chrName_first.startswith(chr_name):\n\t\t\t\t\t\t\t# first read\n\t\t\t\t\t\t\tqName_first = elements_first[0].strip()\n\t\t\t\t\t\t\tflag_first = elements_first[1].strip()\n\t\t\t\t\t\t\tstart_position_first = int(elements_first[3].strip())\n\t\t\t\t\t\t\tread_sequence_first = elements_first[9].strip()\n\t\t\t\t\t\t\tread_length_first = len(read_sequence_first)\n\t\t\t\t\t\t\tquality_score_sequence_first = elements_first[10].strip()\n\n\t\t\t\t\t\t\tif len(read_sequence_first)\t== len(quality_score_sequence_first):\n\t\t\t\t\t\t\t\tfor i in range(read_length_first):\n\t\t\t\t\t\t\t\t\tcurrent_base_position = start_position_first + i\n\t\t\t\t\t\t\t\t\tA_depth = 0\n\t\t\t\t\t\t\t\t\tT_depth = 0\n\t\t\t\t\t\t\t\t\tC_depth = 0\n\t\t\t\t\t\t\t\t\tG_depth = 0\n\n\t\t\t\t\t\t\t\t\tcovered_snp = read_sequence_first[i] # ith position is the covered snp\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tquality_score_symbol = quality_score_sequence_first[i]\n\t\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\t\tprint \"error in\", sam_line_first\n\t\t\t\t\t\t\t\t\t\tquality_score_symbol = 'N'\n\t\t\t\t\t\t\t\t\tif (not covered_snp == 'N') and (\n\t\t\t\t\t\t\t\t\t\t(ord(quality_score_symbol) - 33) > quality_score_threshold): # check quality_score\n\t\t\t\t\t\t\t\t\t\tif covered_snp == \"A\":\n\t\t\t\t\t\t\t\t\t\t\tA_depth += 
1\n\t\t\t\t\t\t\t\t\t\telif covered_snp == \"T\":\n\t\t\t\t\t\t\t\t\t\t\tT_depth += 1\n\t\t\t\t\t\t\t\t\t\telif covered_snp == \"C\":\n\t\t\t\t\t\t\t\t\t\t\tC_depth += 1\n\t\t\t\t\t\t\t\t\t\telif covered_snp == \"G\":\n\t\t\t\t\t\t\t\t\t\t\tG_depth += 1\n\n\t\t\t\t\t\t\t\t\t\tcur.execute(\"SELECT * from \" + table_name + \" where position=\" + str(\n\t\t\t\t\t\t\t\t\t\t\tcurrent_base_position))\n\t\t\t\t\t\t\t\t\t\trow = cur.fetchone()\n\t\t\t\t\t\t\t\t\t\tif row == None:\n\t\t\t\t\t\t\t\t\t\t\tinset_querry = \"INSERT INTO \" + table_name + \\\n\t\t\t\t\t\t\t\t\t\t\t \" (position, chr, ref_allele, A_depth, T_depth, C_depth, G_depth ) VALUES (\" + \\\n\t\t\t\t\t\t\t\t\t\t\t str(current_base_position) + \\\n\t\t\t\t\t\t\t\t\t\t\t \",'\" + chrName_first + \"','\" + chr_seq[\n\t\t\t\t\t\t\t\t\t\t\t\t current_base_position - 1] + \"',\" + str(A_depth) + \",\" + str(\n\t\t\t\t\t\t\t\t\t\t\t\tT_depth) \\\n\t\t\t\t\t\t\t\t\t\t\t + \",\" + str(C_depth) + \",\" + str(G_depth) + \")\"\n\t\t\t\t\t\t\t\t\t\t\t#print inset_querry\n\t\t\t\t\t\t\t\t\t\t\tcur.execute(inset_querry)\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tA_depth += int(row[3])\n\t\t\t\t\t\t\t\t\t\t\tT_depth += int(row[4])\n\t\t\t\t\t\t\t\t\t\t\tC_depth += int(row[5])\n\t\t\t\t\t\t\t\t\t\t\tG_depth += int(row[6])\n\t\t\t\t\t\t\t\t\t\t\tupdate_querry = \"UPDATE \" + table_name + \" set A_depth=\" + str(A_depth) + \\\n\t\t\t\t\t\t\t\t\t\t\t \", T_depth=\" + str(T_depth) + \", C_depth=\" + str(\n\t\t\t\t\t\t\t\t\t\t\t\tC_depth) + \", G_depth=\" + \\\n\t\t\t\t\t\t\t\t\t\t\t str(G_depth) + \" where position=\" + str(current_base_position)\n\t\t\t\t\t\t\t\t\t\t\t#print update_querry\n\t\t\t\t\t\t\t\t\t\t\tcur.execute(update_querry)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"different in read length and quality length\", sam_line_first\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint \"first and second read ID do not match\", read_ID_first\n\t\t\tsam_line_first = inputfile_sam.readline()\n\t\tinputfile_sam.close()\n\treturn total_reads_num", "def resend(self, seqno, address=None):\n super(Sender, self).send(self.window.get(seqno), address)", "def StartRecording( self ):\r\n\r\n self._socket.write( 'B' ) \r\n \r\n return self.GetServerResponse()", "def test_save_send(self):\r\n # Don't really know how to test this effectively...\r\n # Would require to simulate a blocking socket on the recipient side...\r\n pass", "def send_req(self):\n self.n_send_req += 1", "def send_segment_document_to_xray_daemon(segment_document):\n try:\n xray_daemon = get_xray_daemon()\n except XRayDaemonNotFoundError:\n LOGGER.error(\"X-Ray Daemon not running, skipping send\")\n return\n header = (json.dumps(XRAY_DAEMON_HEADER),)\n document = json.dumps(segment_document, ensure_ascii=False, cls=StringJSONEncoder,)\n message = f\"{header}\\n{document}\"\n\n send_data_on_udp(\n ip_address=xray_daemon.ip_address, port=xray_daemon.port, data=message,\n )", "def done_sending(self):\r\n self._flush(True)", "def send_message(self, message):\n for segment in self._gen_segments(message):\n self._segment_queue.append(segment)\n self._attempt_enabling_looping_send()", "def seqno_sent(self, seqno):\n self._gc()\n self._queue.append((seqno, time.time()))", "def write(self, segment, result):\n pass", "def test_updating_a_segment(self):\n pass", "def _cmd_segment(args):\n cnarr = read_cna(args.filename)\n variants = load_het_snps(\n args.vcf,\n args.sample_id,\n args.normal_id,\n args.min_variant_depth,\n args.zygosity_freq,\n )\n results = segmentation.do_segmentation(\n cnarr,\n 
args.method,\n args.diploid_parx_genome,\n args.threshold,\n variants=variants,\n skip_low=args.drop_low_coverage,\n skip_outliers=args.drop_outliers,\n save_dataframe=bool(args.dataframe),\n rscript_path=args.rscript_path,\n processes=args.processes,\n smooth_cbs=args.smooth_cbs,\n )\n\n if args.dataframe:\n segments, dframe = results\n with open(args.dataframe, \"w\") as handle:\n handle.write(dframe)\n logging.info(\"Wrote %s\", args.dataframe)\n else:\n segments = results\n tabio.write(segments, args.output or segments.sample_id + \".cns\")", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def split_segment(self):\n # Selection management\n selected_segment = \\\n self.controller.shared_data.obj_track.selected_segment_idx\n\n if len(selected_segment) > 1:\n messagebox.showerror('Warning',\n 'More than one segment is selected')\n return\n elif len(selected_segment) == 0:\n messagebox.showerror('Warning',\n 'No segment is selected')\n return\n else:\n segment_idx = selected_segment[0]\n df_segment = \\\n self.controller.shared_data.obj_track.get_segment(segment_idx)\n\n # Create interactivity\n del self.split_segment_interaction\n self.split_segment_interaction = SplitSegmentCallback(\n self.controller.shared_data,\n df_segment)\n\n self.split_segment_interaction.connect()", "def _done_sending():\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def acknowledge(self, sequence: int):\n ackPacket = Rudp.Packet(self.seq, sequence)\n frame = ackPacket.construct()\n self.seqPlusOne()\n self.socket.sendto(frame, self.client)", "def schc_fragmenter_send(msg, s, opt):\n assert type(msg) == bytearray # avoid compatibility problems\n debug_print(2, \"message:\", msg)\n # XXX assuming that the rule_id is not changed in a session.\n\n # check if the L2 size is enough to put the message.\n if opt.l2_size >= len(msg):\n debug_print(1, \"no need to fragment this message.\")\n return\n\n # prepare fragmenting\n factory = sfs.fragment_factory(frr, logger=debug_print)\n factory.setbuf(msg, dtag=opt.dtag)\n\n # main loop\n debug_print(1, \"L2 payload size: %s\" % opt.l2_size)\n\n global n_packet\n n_packet = 0\n\n while True:\n\n # CONT: send it and get next fragment.\n # WAIT_ACK: send it and wait for the ack.\n # DONE: dont need to send it.\n # ERROR: error happened.\n ret, tx_obj = factory.next_fragment(opt.l2_size)\n n_packet += 1\n\n # error!\n if ret == sfs.STATE.FAIL:\n raise AssertionError(\"something wrong in fragmentation.\")\n elif ret == sfs.STATE.DONE:\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n if opt.func_packet_loss and opt.func_packet_loss() == True:\n debug_print(1, \"packet dropped.\")\n else:\n print(\"SEND:\", tx_obj.packet)\n address = get_sockaddr(RECV_UDP_ADDRESS, RECV_UDP_PORT)\n s.sendto(tx_obj.packet, address)\n debug_print(1, \"sent :\", tx_obj.dump())\n debug_print(2, \"hex :\", tx_obj.full_dump())\n\n if factory.R.mode != SCHC_MODE.NO_ACK and ret != sfs.STATE.CONT:\n # WAIT_ACK\n # a part of or whole fragments have been sent and wait for the ack.\n debug_print(1, \"waiting an ack.\", factory.state.pprint())\n try:\n rx_data, peer = s.recvfrom(DEFAULT_RECV_BUFSIZE)\n debug_print(1, \"message from:\", peer)\n #\n ret, rx_obj = factory.parse_ack(rx_data, peer)\n debug_print(1, \"parsed:\", rx_obj.dump())\n debug_print(2, \"hex :\", rx_obj.full_dump())\n #\n if ret == sfs.STATE.DONE:\n # finish if the ack against all1 is received.\n debug_print(1, \"done.\")\n break\n # end of the main loop\n\n except Exception 
as e:\n if \"timeout\" in repr(e):\n debug_print(1, \"timed out to wait for the ack.\")\n else:\n debug_print(1, \"Exception: [%s]\" % repr(e))\n debug_print(0, traceback.format_exc())\n\n time.sleep(opt.interval)", "def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = Segment()\n new_segment.save()\n name = \"%s_seg_%s\" % (self.PREFIX, new_segment.id)\n node = Node.objects(id=node_oid)[0]\n list_id = DripCampaign.objects(id=node[\"drip_campaign_id\"])[0][\"list_id\"]\n node.update(set__segment_oid=new_segment.id, set__updated_at=datetime.utcnow())\n\n # gather all users that apply for this node after triggers on previous nodes\n all_euids = set()\n if node[\"initial\"]:\n all_euids = set(List.objects(list_id=list_id)[0][\"members_euid\"])\n else:\n for trg in Trigger.objects(node_to=node_oid):\n for euids, to_node_oid in self.segment_by_triggers(trg[\"node_from\"]):\n if to_node_oid == node_oid:\n all_euids.update(set(euids))\n\n # # intersect euids with current state of the list\n # # it might be the case that some people are removed from the list since previous email\n self.fetch_members_for_list(list_id)\n all_euids = all_euids & set(List.objects(list_id=list_id)[0][\"members_euid\"])\n\n all_euids = list(all_euids)\n\n # apply the user list to segment n stuff\n # if user list is empty, save only meta info and don't actually work with mailchimp\n if all_euids:\n segment_id = self.mw.create_segment(list_id, name)\n self.mw.update_segment_members(list_id, segment_id, all_euids)\n else:\n segment_id = None\n new_segment.update(set__segment_id=segment_id, set__name=name, members_euid=all_euids,\n set__updated_at=datetime.utcnow())", "def _done_sending(self):\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def __send(self):\r\n self.msgLock.acquire()\r\n if self.numMsg > 0:\r\n self.socket.send(self.msg.pop(0))\r\n self.numMsg -= 1\r\n self.msgLock.release()", "def req_scan_bin(self):\n message = b'SBD' + self.end_mess_bytes\n print('Requesting scan...')\n self.sock.sendall(message)" ]
[ "0.58423656", "0.55498934", "0.5530398", "0.5483263", "0.5479365", "0.5384695", "0.53782636", "0.5260664", "0.52126765", "0.5156179", "0.51479036", "0.51272166", "0.50434875", "0.49900317", "0.49877423", "0.49753916", "0.49564484", "0.49411252", "0.49073324", "0.49045774", "0.48989716", "0.48973608", "0.4877163", "0.4872885", "0.48685467", "0.48660675", "0.48323593", "0.48288417", "0.48243886", "0.4815414" ]
0.59031177
0
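The ignores_bad_seqno test above only observes the receiver from the outside; the rule it exercises is a per-segment window check. A minimal sketch of that check, assuming a receiver that tracks the next expected byte (rcv_nxt) and a fixed receive window (rcv_wnd); these names are illustrative and not part of the cTCP starter code:

def segment_in_window(seqno, length, rcv_nxt, rcv_wnd):
    """Accept a segment only if part of its payload falls inside
    [rcv_nxt, rcv_nxt + rcv_wnd); anything else is dropped and, at most,
    re-ACKed with the previous ackno (a duplicate ACK)."""
    if length == 0:  # bare ACK/FIN probes are judged by seqno alone
        return rcv_nxt <= seqno < rcv_nxt + rcv_wnd
    first, last = seqno, seqno + length - 1
    return not (last < rcv_nxt or first >= rcv_nxt + rcv_wnd)

# The bad segment carries a seqno far outside the window, so it is ignored;
# the in-order segment is accepted.
assert not segment_in_window(10_000_000, 40, rcv_nxt=1, rcv_wnd=1024)
assert segment_in_window(1, 56, rcv_nxt=1, rcv_wnd=1024)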
Student/server receives FIN. It should still send data to the client. Checks that a FIN was received first.
def send_after_fin(): test_str = make_random(100) test_str_fin = "s3nd 4ft3r f1N\n" server = start_server() client = start_client() # Write an EOF character to client so it sends a FIN. write_to(server, test_str) write_to(client, '\x1a') client.stdin.close() # Check that a FIN was received. time.sleep(1) segments = read_segments_from(server) if not segments: return False if not "FIN" in [flag for segment in segments for flag in segment.flags]: return False # Write to server STDIN. It should continue sending data to the client. write_to(server, test_str_fin) return len(read_segments_from(server)) > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags", "def recv_after_eof():\n test_str = make_random(100)\n test_str_fin = \"r3c31v3 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was sent.\n time.sleep(1)\n segments = read_segments_from(client)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. The client should receive and output the data.\n write_to(server, test_str_fin)\n return test_str_fin in read_from(client)", "def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)", "def eof_received(self):\n self.connection_lost('EOF')\n return False", "def eofReceived(self):\n channel.SSHChannel.eofReceived(self)\n # print 'DirectTcpIpChannelClient:: remote eof'\n self.loseConnection()", "def flushin(self, s):\n client = self.connections[s]\n\n try:\n data = s.recv(Connection.BUFSIZE)\n logging.info(\"Received data from %s. Message:\\n%r\", client, data)\n except:\n logging.exception(\"flushin: recv(%s)\", client)\n logging.error(\"Received invalid data from %s. Closing\", client)\n self.delClient(s)\n else:\n if len(data) > 0:\n reqs = client.parseReqs(data)\n for req in reqs:\n self.handleRequest(s, req)\n else:\n self.delClient(s, exists=True)", "def _send_fin(self):\n fin_packet = packet.Packet.from_data(\n 0,\n self.dest_addr,\n self.own_addr,\n ack=self._next_expected_seqnum,\n fin=True\n )\n self._schedule_send_out_of_order(fin_packet)", "def test_eof_on_recv(self):\n self.sock.close() # Mimic inverter closed connection\n with self.assertRaises(InverterEOFError):\n self.inverter.receive()", "def handshake(self):\n print(\"No: \"+str(len(self.threads)))\n indexes_to_del = []\n if len(self.threads)>2:\n raise IOError\n for i in range(0,len(self.threads)):\n if not self.threads[i].is_alive():\n indexes_to_del.append(i)\n \n for i in indexes_to_del:#do this otherwise if deleted above, out of index error occurs\n del self.threads[i]\n \n while True:\n data = self.s.recv(1024)\n if data ==\"O\":\n print(\"Hanshake Received\")\n return", "def on_request_complete(self) -> Union[socket.socket, bool]:\n pass # pragma: no cover", "def on_send_eof(self):\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n # self.close()\n return flag, msg_s", "def socket_thread_stopped(self):\n self.done = True", "def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n 
if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")", "def tcp_fin(self):\n return self.tcp_flags & dpkt.tcp.TH_FIN != 0", "def serve_client():\n # begin to serve client\n with socket(AF_INET, SOCK_STREAM) as sock:\n sock.bind((BOB_HOST, BOB_PORT))\n sock.listen()\n conn, addr = sock.accept()\n with conn:\n print('Bob: connection from client with address', addr)\n while True:\n # get session key and client name from NS auth\n ssn_key = client_name = None\n result = ns_authentication(conn)\n if result:\n ssn_key, client_name = result\n else:\n return print(\"Bob: something went wrong with authentication, exiting...\")\n print(\"Bob: using session key {} from client {}\".format(ssn_key, client_name))\n\n # get file name and mode of transfer\n request = aes.decrypt(ssn_key, conn.recv(1024))\n file_name, mode = request.split(',')\n response = aes.encrypt(ssn_key, SIG_GOOD)\n print(\"Bob: recieved request of file {} for mode {}\".format(file_name, mode))\n\n # serve to upload or download the file\n if mode == UPLOAD:\n conn.sendall(response)\n serve_upload(conn, ssn_key, file_name, client_name)\n\n # if download, check if file exists\n elif mode == DOWNLOAD:\n file_path = \"{}/{}\".format(client_name, file_name)\n if os.path.isfile(file_path):\n conn.sendall(response)\n serve_download(conn, ssn_key, file_name, client_name)\n else:\n response = aes.encrypt(ssn_key, SIG_BAD)\n conn.sendall(response)\n return print(\"Bob: {} does not exist in server, exiting...\".format(file_name))\n # done, stop server\n return print(\"Bob: transfer complete, shutting down...\")", "def found_terminator(self):\n if self.reading_headers:\n self.reading_headers = False\n try:\n self.headers = parse_headers(self.data)\n except Exception:\n exception(\"error parsing headers\")\n self.send_error(400, \"Error parsing headers\")\n self.handle_close()\n\n self.ibuffer = []\n if self.headers[\"Method\"] == \"POST\":\n # we have more data to read\n l = self.headers.get(\"Content-Length\", 0)\n if l == 0:\n self.handle_request()\n else:\n self.set_terminator(int(l))\n else:\n self.set_terminator(None)\n self.handle_request()\n elif not self.handling:\n # browsers sometime oversend\n # https://docs.python.org/2/library/asynchat.html\n self.set_terminator(None)\n self.handling = True\n self.body = self.data\n self.handle_request()", "def handle_write(self):\n if self.established:\n return self.initiate_send()\n self._handshake()", "def server_close(self):\n\t\tself.socket.close()", "def _handle_data(self):\n\n # Once connected, keep receiving and sending the data, raise exception in case of errors\n try:\n\n # Send the frame\n self._client_socket.sendall(self._frame)\n\n # Mark that the frame was sent\n self._client_socket.sendall(self._end_payload)\n\n # Wait for the acknowledgement\n self._client_socket.recv(128)\n\n except (ConnectionResetError, ConnectionAbortedError, timeout):\n raise self.DataError", "def end(self):\n self.send_all(\"SHUTDOWN\") #On indique a tout le monde qu'on ferme\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()", "def EndSession( self ):\r\n\r\n self._socket.write( 'X' ) \r\n # self._connection.write( 'X' ).flush() \r\n\r\n return self.GetServerResponse()", "def checkConnection(self,msg):\n if (len(msg) == 0):\n sleep(self.m_to/2)\n print >>sys.stderr, 'Closing due to possible server fault'\n self.close()", "def 
stopAndWaitRecv(self, f):\n self.listen()\n size = self.getSize()\n\n while True:\n try:\n data = self.recvData()\n except Exception as e:\n print(format(e))\n if data:\n f.write(data)\n f.flush()\n print(\"file size: \", Rudp.getFileSize(f))\n if Rudp.getFileSize(f) == size:\n f.close()\n log.info(\"Transmission Complete, exit...\")\n break", "def response_received(self, event):\n super().response_received(event)\n\n stream_id = event.stream_id\n response_stream = self.receive_streams.get(stream_id)\n if response_stream is None:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n return\n\n headers = response_stream.headers\n\n if int(headers.get(\"grpc-status\", 0)) > 0:\n error = GrpcError.from_headers(headers)\n response_stream.close(error)\n del self.receive_streams[stream_id]", "def testStreamFlushOnError(self):\n yield self.connect(self.get_body_node(connect=True))\n\n # Set got_testing_node to true when the XMPP server receives the <testing/> we\n # send below.\n got_testing_node = [False] # work around Python's 2.6 lack of nonlocal\n wait = defer.Deferred()\n def received_testing(a):\n got_testing_node[0] = True\n wait.callback(True)\n self.server_protocol.addObserver(\"/testing\", received_testing)\n\n # Ensure that we always remove the received_testing listener.\n try:\n # Send <body type='terminate'><testing/></body>. This should result in a\n # HTTPBNetworkTerminated exception.\n try:\n yield self.proxy.send(self.get_body_node(ext='<testing/>', type='terminate'))\n except httpb_client.HTTPBNetworkTerminated as e:\n self.failUnlessEqual(e.body_tag.getAttribute('condition', None), None)\n\n # Wait until <testing/> is actually received by the XMPP server. The previous\n # request completing only means that the proxy has received the stanza, not that\n # it's been delivered to the XMPP server.\n yield wait\n\n finally:\n self.server_protocol.removeObserver(\"/testing\", received_testing)\n\n # This should always be true, or we'd never have woken up from wait.\n self.assertEqual(True,got_testing_node[0])", "def handle_close(self):\r\n self.end_time = time.time()\r\n self.time_ran = self.end_time - self.start_time\r\n if self.status != 'PASS':\r\n server_log.info('Client {} aborted!'.format(self.client_id))\r\n self.status = 'ABORTED'\r\n self.close()", "def _process_fin_packet(self, rudp_packet):\n self.shutdown()", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n # if packet.is_fin:\n # print(\"2nd wan sees a fin\")\n\n if packet.is_fin and len(packet.payload) == 0:\n # print(\"empty fin, foward fin\")\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash not in self.hash_to_raw_data.keys():\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = False)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # reset buffer\n self.send(packet, self.address_to_port[packet.dest]) # forward empty fin\n return\n \n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n \n if packet.is_raw_data:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n pack_buff += packet.payload\n\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n for block_to_send in block_list:\n block_hash = 
get_hash(block_to_send)\n # print(\"sending1\")\n if block_hash in self.hash_to_raw_data.keys():\n # send extract data from hash in packet\n block_to_send = self.hash_to_raw_data[block_hash]\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n\n if remaining_buff:\n # print(\"wan to client remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n block_hash = get_hash(remaining_buff)\n block_to_send = remaining_buff\n # print(\"sending2\")\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n # print(\"sending fin1\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n if packet.is_fin:\n # print(\"sending fin2\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff\n else:\n block_hash = packet.payload\n block_to_send = self.hash_to_raw_data[block_hash]\n # print(\"sending3\")\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n if packet.is_fin:\n # print(\"sending fin3\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n # self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # TESTING\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_fin and len(packet.payload) == 0:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = True)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n self.send(packet, self.wan_port)\n return\n\n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n\n pack_buff += packet.payload\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n\n # send off all completed blocks\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n\n if remaining_buff:\n # print(\"wan to wan remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n # print(\"finfin\")\n block_to_send = remaining_buff\n block_hash = get_hash(block_to_send)\n if 
block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.wan_port)\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff", "def _on_close(self):\n self._stream = None\n self._is_secure = False\n self._process_request()", "def server_close(self):\n\t\tpass" ]
[ "0.7651779", "0.72565854", "0.67559373", "0.6544908", "0.6085609", "0.6053574", "0.60232884", "0.5952287", "0.588818", "0.58216536", "0.57556015", "0.5739564", "0.5722135", "0.57121927", "0.57035065", "0.56975365", "0.56864864", "0.5676624", "0.56664944", "0.56625754", "0.5655476", "0.56539375", "0.56406385", "0.5635348", "0.5634388", "0.5629848", "0.56263703", "0.5623574", "0.560346", "0.55807894" ]
0.7525104
1
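send_after_fin depends on TCP-style half-close: a received FIN ends only the peer-to-us direction. A small sketch of the bookkeeping, with illustrative flag names (a real implementation would fold this into its connection state):

class HalfClose:
    """Receiving a FIN closes only the inbound direction; outbound data
    may keep flowing until we send our own FIN."""
    def __init__(self):
        self.recv_closed = False  # peer sent FIN
        self.send_closed = False  # we sent FIN

    def on_fin_received(self):
        self.recv_closed = True

    def can_send(self):
        return not self.send_closed

conn = HalfClose()
conn.on_fin_received()
assert conn.can_send()  # the server may still stream data to the client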
Client reads an EOF and should send a FIN. It should still be able to receive data from the server.
def recv_after_eof(): test_str = make_random(100) test_str_fin = "r3c31v3 4ft3r f1N\n" server = start_server() client = start_client() # Write an EOF character to client so it sends a FIN. write_to(server, test_str) write_to(client, '\x1a') client.stdin.close() # Check that a FIN was sent. time.sleep(1) segments = read_segments_from(client) if not segments: return False if not "FIN" in [flag for segment in segments for flag in segment.flags]: return False # Write to server STDIN. The client should receive and output the data. write_to(server, test_str_fin) return test_str_fin in read_from(client)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)", "def send_after_fin():\n test_str = make_random(100)\n test_str_fin = \"s3nd 4ft3r f1N\\n\"\n server = start_server()\n client = start_client()\n\n # Write an EOF character to client so it sends a FIN.\n write_to(server, test_str)\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check that a FIN was received.\n time.sleep(1)\n segments = read_segments_from(server)\n if not segments:\n return False\n if not \"FIN\" in [flag for segment in segments for flag in segment.flags]:\n return False\n\n # Write to server STDIN. It should continue sending data to the client.\n write_to(server, test_str_fin)\n return len(read_segments_from(server)) > 0", "def eof_received(self):\n self.connection_lost('EOF')\n return False", "def fin_sent():\n test_str = \"f1N s3nt\\n\"\n server = start_server()\n client = start_client()\n\n # First write some data.\n write_to(client, test_str)\n if not read_segments_from(client):\n return False\n time.sleep(1)\n\n # Write an EOF character.\n write_to(client, '\\x1a')\n client.stdin.close()\n\n # Check to see that segment sent from client is a FIN.\n segments = read_segments_from(client)\n if not segments:\n return False\n return \"FIN\" in segments[0].flags", "def eofReceived(self):\n channel.SSHChannel.eofReceived(self)\n # print 'DirectTcpIpChannelClient:: remote eof'\n self.loseConnection()", "def test_eof_on_recv(self):\n self.sock.close() # Mimic inverter closed connection\n with self.assertRaises(InverterEOFError):\n self.inverter.receive()", "def handle_read(self):\n while True:\n try:\n content = self.recv(1024)\n if content:\n self.rbuf.write(content.decode('utf-8'))\n if len(content) < 1024:\n break\n except Exception as e:\n print(e)\n self.handle_rpc()", "def read(self):\n buff = self.conn.recv(4096)\n if (self.algo == \"rsa\"):\n buff = self.rsa_decrypt(buff)\n if (self.algo == \"des\"):\n buff = self.des_decrypt(buff)\n if (self.algo == \"3des\"):\n buff = self.triple_des_decrypt(buff)\n if (self.algo == \"aes\"):\n buff = self.aes_decrypt(buff)\n\n while buff.strip() != self.exitcode and len(buff) > 0:\n print 'Message received: ', buff.strip()\n #buff = self.rsa_decrypt(buff)\n buff = self.conn.recv(4096)\n\n if (self.algo == \"rsa\"):\n buff = self.rsa_decrypt(buff)\n if (self.algo == \"des\"):\n buff = self.des_decrypt(buff)\n if (self.algo == \"3des\"):\n buff = self.triple_des_decrypt(buff)\n if (self.algo == \"aes\"):\n buff = self.aes_decrypt(buff)\n # client disconnected\n self.stopWrite", "def test_unexpected_EOF(self):\n server_conn, client_conn = loopback()\n client_conn.sock_shutdown(SHUT_RDWR)\n with pytest.raises(SysCallError) as err:\n server_conn.recv(1024)\n if platform == \"win32\":\n assert err.value.args == (10054, \"WSAECONNRESET\")\n else:\n assert err.value.args == (-1, \"Unexpected EOF\")", "def test_eof_on_send(self):\n self.sock.close() # Mimic inverter closed connection\n with self.assertRaises((BrokenPipeError, ConnectionAbortedError)):\n self.inverter.send(b\"\\x00\\x01\\x02\", b\"\")\n # For Windows it only raises an error on the second try and it\n # raises ConnectionAbortedError instead of BrokenPipeError\n self.inverter.send(b\"\\x00\\x01\\x02\", b\"\")", "def handle_client(self, conn):\r\n\r\n while True:\r\n # Receive message\r\n msg = conn.recv(1024).decode()\r\n res = self.validateCommand(msg)\r\n\r\n print(res)\r\n\r\n # Send response\r\n conn.sendall(res.encode())\r\n\r\n if msg == '/exit':\r\n 
break\r\n\r\n # Close client connection\r\n print('Client disconnected...')\r\n conn.close()", "def server_client():\r\n MESSAGE = input(\"Mesaj vanzator:\")\r\n\r\n tcpClientA = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n tcpClientA.connect((host, bank_port))\r\n\r\n while MESSAGE != 'stop':\r\n tcpClientA.send(MESSAGE.encode())\r\n data = tcpClientA.recv(BUFFER_SIZE)\r\n print(\"Vanzator a primit:\", data)\r\n MESSAGE = input(\"Mesaj vanzator:\")\r\n\r\n tcpClientA.close()", "def serve_client():\n # begin to serve client\n with socket(AF_INET, SOCK_STREAM) as sock:\n sock.bind((BOB_HOST, BOB_PORT))\n sock.listen()\n conn, addr = sock.accept()\n with conn:\n print('Bob: connection from client with address', addr)\n while True:\n # get session key and client name from NS auth\n ssn_key = client_name = None\n result = ns_authentication(conn)\n if result:\n ssn_key, client_name = result\n else:\n return print(\"Bob: something went wrong with authentication, exiting...\")\n print(\"Bob: using session key {} from client {}\".format(ssn_key, client_name))\n\n # get file name and mode of transfer\n request = aes.decrypt(ssn_key, conn.recv(1024))\n file_name, mode = request.split(',')\n response = aes.encrypt(ssn_key, SIG_GOOD)\n print(\"Bob: recieved request of file {} for mode {}\".format(file_name, mode))\n\n # serve to upload or download the file\n if mode == UPLOAD:\n conn.sendall(response)\n serve_upload(conn, ssn_key, file_name, client_name)\n\n # if download, check if file exists\n elif mode == DOWNLOAD:\n file_path = \"{}/{}\".format(client_name, file_name)\n if os.path.isfile(file_path):\n conn.sendall(response)\n serve_download(conn, ssn_key, file_name, client_name)\n else:\n response = aes.encrypt(ssn_key, SIG_BAD)\n conn.sendall(response)\n return print(\"Bob: {} does not exist in server, exiting...\".format(file_name))\n # done, stop server\n return print(\"Bob: transfer complete, shutting down...\")", "def test_eofReceived(self):\n stdio = FakeStdio()\n channel = SSHSession()\n channel.stdio = stdio\n channel.eofReceived()\n self.assertTrue(stdio.writeConnLost)", "def run(self):\n print(\"Client: Started\", flush=True)\n ack = Packet()\n ack_data = b''\n\n request = \"download\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n self.recv_img(self.img_save_to)\n\n ack = Packet()\n ack_data = b''\n request = \"upload\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n self.send_img(self.img_to_send)\n\n sleep(5)\n\n ack = Packet()\n ack_data = b''\n request = \"exit\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n print(\"Client: Exiting...\")\n # close socket when finished\n self.client_socket.close()", "def _handle_client(self, client_reader, client_writer):\n while True:\n data = (yield from client_reader.readline()).decode(\"utf-8\")\n if not data: # an empty string means the client disconnected\n break\n cmd, *args = data.rstrip().split(' ')\n if cmd == 'add':\n arg1 = float(args[0])\n arg2 = float(args[1])\n retval = arg1 + arg2\n 
client_writer.write(\"{!r}\\n\".format(retval).encode(\"utf-8\"))\n elif cmd == 'repeat':\n times = int(args[0])\n msg = args[1]\n client_writer.write(\"begin\\n\".encode(\"utf-8\"))\n for idx in range(times):\n client_writer.write(\"{}. {}\\n\".format(idx+1, msg)\n .encode(\"utf-8\"))\n client_writer.write(\"end\\n\".encode(\"utf-8\"))\n else:\n print(\"Bad command {!r}\".format(data), file=sys.stderr)\n\n # This enables us to have flow control in our connection.\n yield from client_writer.drain()", "def read_for_remote_control(self):\n try:\n if self.is_connect():\n print_msg(self.name, \"Receiving socket package\")\n b_data = self.client_sock.recv(1024)\n print \"Received from Android: %s\" % b_data\n if len(b_data) != 0:\n print_msg(self.name, \"decoding\")\n message = self.__decode_n_execute(b_data)\n self.write(message)\n except IOError:\n print_msg(self.name, \"disconnected\")\n self.is_connected = False\n self.client_sock.close()\n self.disconnect()\n sys.exit(-1)", "def handle_accept(self):\n \"\"\"Called when remote client initiates a connection.\"\"\"\n if not self.cmd_channel.connected:\n return self.close()\n try:\n sock, addr = self.accept()\n except TypeError:\n # sometimes accept() might return None (see issue 91)\n return\n except socket.error, err:\n # ECONNABORTED might be thrown on *BSD (see issue 105)\n if err.args[0] != errno.ECONNABORTED:\n self.log_exception(self)\n return\n else:\n # sometimes addr == None instead of (ip, port) (see issue 104)\n if addr == None:\n return\n\n # Check the origin of data connection. If not expressively\n # configured we drop the incoming data connection if remote\n # IP address does not match the client's IP address.\n if self.cmd_channel.remote_ip != addr[0]:\n if not self.cmd_channel.permit_foreign_addresses:\n try:\n sock.close()\n except socket.error:\n pass\n msg = 'Rejected data connection from foreign address %s:%s.' \\\n %(addr[0], addr[1])\n self.cmd_channel.respond(\"425 %s\" % msg)\n self.log(msg)\n # do not close listening socket: it couldn't be client's blame\n return\n else:\n # site-to-site FTP allowed\n msg = 'Established data connection with foreign address %s:%s.'\\\n % (addr[0], addr[1])\n self.log(msg)\n # Immediately close the current channel (we accept only one\n # connection at time) and avoid running out of max connections\n # limit.\n self.close()\n # delegate such connection to DTP handler\n if self.cmd_channel.connected:\n handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel, self.stream_rate)\n if handler.connected:\n self.cmd_channel.data_channel = handler\n self.cmd_channel._on_dtp_connection()", "def flushin(self, s):\n client = self.connections[s]\n\n try:\n data = s.recv(Connection.BUFSIZE)\n logging.info(\"Received data from %s. Message:\\n%r\", client, data)\n except:\n logging.exception(\"flushin: recv(%s)\", client)\n logging.error(\"Received invalid data from %s. 
Closing\", client)\n self.delClient(s)\n else:\n if len(data) > 0:\n reqs = client.parseReqs(data)\n for req in reqs:\n self.handleRequest(s, req)\n else:\n self.delClient(s, exists=True)", "async def handle_echo(reader, writer):\r\n addr = writer.get_extra_info('peername')\r\n message = f\"{addr} is connected !!!!\"\r\n CLIENT_DICTIONARY[addr[1]] = Server()\r\n print(message)\r\n while True:\r\n data = await reader.read(10000)\r\n message = data.decode().strip()\r\n if message == 'quit':\r\n CLIENT_DICTIONARY[addr[1]].removelog()\r\n break\r\n print(f\"Received {message} from {addr}\")\r\n reply = CLIENT_DICTIONARY[addr[1]].split(message)\r\n print(f\"Send: {reply}\")\r\n #hello = 'successful'\r\n if reply != '' or reply != 'None':\r\n writer.write(reply.encode())\r\n else:\r\n reply = '.'\r\n writer.write(reply.encode())\r\n await writer.drain()\r\n print(\"Close the connection\")\r\n writer.close()", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def handle_read(self):\n if self.established:\n return self._handle_read()\n self._handshake()", "def run_server(self):\n print('Starting socket server (host {}, port {})'.format(self.host, self.port))\n\n client_sock, client_addr = self.sock.accept()\n\n print('Client {} connected'.format(client_addr))\n\n stop = False\n while not stop:\n if client_sock:\n # Check if the client is still connected and if data is available:\n try:\n rdy_read, rdy_write, sock_err = select.select([client_sock,], [], [])\n except select.error:\n print('Select() failed on socket with {}'.format(client_addr))\n return 1\n\n if len(rdy_read) > 0:\n read_data = client_sock.recv(255)\n # Check if socket has been closed\n if len(read_data) == 0:\n print('{} closed the socket.'.format(client_addr))\n stop = False # True\n client_sock, client_addr = self.sock.accept()\n print(\"New connection opened\")\n else:\n print('>>> Received: {}'.format(read_data.rstrip()))\n if read_data.rstrip() == 'quit':\n stop = False #True\n else:\n if read_data == 'right':\n self.moveRight(0.5)\n elif read_data == 'left':\n self.moveLeft(0.5)\n elif read_data == 'forward':\n self.moveForward(0.5)\n self.setGPIO(0,0,0,0,.01)\n client_sock.send(read_data)\n else:\n print(\"No client is connected, SocketServer can't receive data\")\n #stop = True\n time.delay(1)\n client_sock, client_addr = self.sock.accept()\n print(\"New connection opened\")\n\n # Close socket\n print('Closing connection with {}'.format(client_addr))\n client_sock.close()\n return 0", "def flushInput(self):\n self.sock.setblocking(0)\n try:\n while len(self.sock.recv(1)) > 0:\n pass\n except BaseException:\n pass\n self.sock.setblocking(1)\n self.sock.settimeout(self.__timeout)", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", 
line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def _read_data(self):\n while True:\n try:\n data = yield from asyncio.wait_for(self._socket.recv(), 1)\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n except ConnectionClosed:\n break\n\n self._push_packet(data)\n\n self._loop.call_soon(self.close)", "def handle_client(self, client_socket):\n while True:\n response = client_socket.recv(self.HEADER_LENGTH)\n if not response:\n continue\n message_length = int(response.decode(self.FORMAT))\n message = client_socket.recv(message_length)\n response = self.parse_message(message)\n utils.send_message(response, client_socket, self.HEADER_LENGTH, self.FORMAT)", "def handle(self):\n line = b\"\"\n try:\n while True:\n raw = self.request.recv(1024)\n if not raw:\n return\n raw = bytearray(raw)\n while True:\n splitter = raw.find(b\"\\r\")\n if splitter > -1:\n line = raw[1:splitter]\n raw = raw[splitter + 1 :]\n else:\n break\n\n self.handle_line(line.decode())\n except Exception as exc:\n _LOGGER.error(\n \"TCP: Handle: last line %s gave error: %s\", line.decode(), str(exc)\n )\n return", "def dispecher(self):\r\n while True:\r\n connection, address = self._sockobj.accept()\r\n print('server connected by', address)\r\n print('at', self.now())\r\n thread.start_new(self.handleClient, (connection, address,))", "def __async_read_callback(self, data, err) -> None:\n if err != 0:\n logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()" ]
[ "0.78284335", "0.73806006", "0.7076207", "0.7013", "0.69987947", "0.6878545", "0.64714414", "0.6408276", "0.6382915", "0.6336182", "0.62248504", "0.62145495", "0.61686313", "0.6118387", "0.6106558", "0.6099158", "0.6079726", "0.60613066", "0.604967", "0.6037784", "0.6025446", "0.6023158", "0.6019335", "0.59960306", "0.597516", "0.5973219", "0.59726846", "0.59575754", "0.5924058", "0.591231" ]
0.7851663
0
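The negatives closing above are all variants of socket/asyncio read-and-respond loops. As a point of comparison, here is a minimal, self-contained asyncio echo handler sketching the pattern those snippets share; host, port, and buffer size are illustrative assumptions, not taken from any one snippet:

import asyncio

async def handle_client(reader, writer):
    # Read until the peer closes the connection, echoing each chunk back
    while True:
        data = await reader.read(1024)
        if not data:
            break
        writer.write(data)
        await writer.drain()  # flow control, as in the snippet above
    writer.close()
    await writer.wait_closed()

async def main():
    server = await asyncio.start_server(handle_client, "127.0.0.1", 8888)
    async with server:
        await server.serve_forever()

# asyncio.run(main())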
Calculate depth data for points at requestedValues, given depthData and covarianceFunc
def getDepthArrayExperimental(depthData: numpy.ndarray, requestedValues: numpy.ndarray, covarianceFunc: Callable):
    """
    xPrev = requestedValues[0]
    for xVal in requestedValues:
        # Calculate distance to previous
        distToPrev = abs(xVal - xPrev)  # 2D distance
        # Calculate covariance with previous
        covariance = covarianceFunc(distToPrev)
    """
    # Calculate distance matrix between all requested points
    # NOTE: the original call was left empty; the arguments below are an assumed completion
    distanceMatrix = numpy.abs(numpy.subtract.outer(requestedValues, requestedValues))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def depth_estimation(x_left, x_right, f=33.4, d=114):\n depth = abs(f * d / ((x_left - x_right) / 72 * 2.54)) / 100 # - 0.418879\n return depth", "def disc_val(self, val_data, batch_size):\n fakes = self.generate_poses(len(val_data))\n labels = np.array([1] * len(val_data) + [0] * len(fakes))\n data = np.concatenate([val_data, fakes])\n return self.discriminator.evaluate(data, labels,\n batch_size=batch_size)", "def get_depth(depth, calibration_extrinsics, intrinsics_color,\n intrinsics_depth_inv):\n img_height, img_width = depth.shape[0], depth.shape[1]\n depth_ = np.zeros_like(depth)\n x = np.linspace(0, img_width-1, img_width)\n y = np.linspace(0, img_height-1, img_height)\n xx, yy = np.meshgrid(x, y)\n xx = np.reshape(xx, (1, -1))\n yy = np.reshape(yy, (1, -1))\n ones = np.ones_like(xx)\n pcoord_depth = np.concatenate((xx, yy, ones), axis=0)\n depth = np.reshape(depth, (1, img_height*img_width))\n ccoord_depth = np.dot(intrinsics_depth_inv, pcoord_depth) * depth\n ccoord_depth[1,:] = - ccoord_depth[1,:]\n ccoord_depth[2,:] = - ccoord_depth[2,:]\n ccoord_depth = np.concatenate((ccoord_depth, ones), axis=0)\n ccoord_color = np.dot(calibration_extrinsics, ccoord_depth)\n ccoord_color = ccoord_color[0:3,:]\n ccoord_color[1,:] = - ccoord_color[1,:]\n ccoord_color[2,:] = depth\n\n pcoord_color = np.dot(intrinsics_color, ccoord_color)\n pcoord_color = pcoord_color[:,pcoord_color[2,:]!=0]\n pcoord_color[0,:] = pcoord_color[0,:]/pcoord_color[2,:]+0.5\n pcoord_color[0,:] = pcoord_color[0,:].astype(int)\n pcoord_color[1,:] = pcoord_color[1,:]/pcoord_color[2,:]+0.5\n pcoord_color[1,:] = pcoord_color[1,:].astype(int)\n pcoord_color = pcoord_color[:,pcoord_color[0,:]>=0]\n pcoord_color = pcoord_color[:,pcoord_color[1,:]>=0]\n pcoord_color = pcoord_color[:,pcoord_color[0,:]<img_width]\n pcoord_color = pcoord_color[:,pcoord_color[1,:]<img_height]\n\n depth_[pcoord_color[1,:].astype(int),\n pcoord_color[0,:].astype(int)] = pcoord_color[2,:]\n return depth_", "def GetDepth(*args, **kwargs):\n return _gdi_.DC_GetDepth(*args, **kwargs)", "def evaluate_grad_cov(self, parameters_kernel, points):\n\n if self.type_kernel[0] == PRODUCT_KERNELS_SEPARABLE:\n inputs = separate_numpy_arrays_in_lists(points, self.kernel_dimensions[1])\n inputs_dict = {}\n for index, input in enumerate(inputs):\n inputs_dict[self.type_kernel[index + 1]] = input\n grad_cov = self.class_kernel.evaluate_grad_defined_by_params_respect_params(\n separate_numpy_arrays_in_lists(parameters_kernel, self.number_parameters[1]),\n inputs_dict,\n self.dimensions[1:], self.type_kernel[1:], **self.additional_kernel_parameters)\n elif self.type_kernel[0] == SCALED_KERNEL:\n grad_cov = self.class_kernel.evaluate_grad_defined_by_params_respect_params(\n parameters_kernel, points, self.dimensions[0],\n *([self.type_kernel[1]],))\n else:\n grad_cov = self.class_kernel.evaluate_grad_defined_by_params_respect_params(\n parameters_kernel, points, self.dimensions[0], **self.additional_kernel_parameters)\n\n return grad_cov", "def get_depth(\n filepath,\n ):\n with xr.open_dataset(filepath) as fmesh:\n if 'refZMid' in fmesh.variables.keys():\n z = fmesh.data_vars['refZMid'].values\n elif 'refBottomDepth' in fmesh.variables.keys():\n bottom_depth = fmesh.data_vars['refBottomDepth'].values\n z = np.zeros_like(bottom_depth)\n z[0] = -0.5*bottom_depth[0]\n z[1:] = -0.5*(bottom_depth[0:-1]+bottom_depth[1:])\n else:\n raise LookupError('Neither \\'refZMid\\' or \\'refBottomDepth\\' is found.')\n depth = xr.DataArray(\n z,\n dims=('nVertLevels'),\n 
coords={'nVertLevels': z},\n attrs={'units': 'm', 'long_name': 'depth'},\n )\n return depth", "def infer_depths(self):\n self.infer_depths_edges_from_cells()\n self.infer_depths_cells_from_edges()\n self.copy_depths_to_subgrid(depth_stat='depth_mean')", "def get_depths(self, variables):\n\n return [0.]", "def refDepth_callback(self, msg):\n self.mutex.acquire()\n depth_ref = np.array([0, 0, msg.depth]).reshape((3, 1))\n\n if not (self.reference_flags['depth']):\n # first assignment\n self.eta1_ref_body.last_value = self.controller.vehicle.ned2body_linear(deepcopy(depth_ref), self.eta2)\n self.eta1_ref_body.last_sampling = rospy.Time.now()\n self.reference_flags['depth'] = True\n else:\n self.eta1_ref_body.value = self.controller.vehicle.ned2body_linear(depth_ref, self.eta2)\n dt = rospy.Time.now() - self.eta1_ref_body.last_sampling\n\n # compute derivative\n self.eta1_ref_body.dot = (self.eta1_ref_body.value - self.eta1_ref_body.last_value) / dt.to_sec()\n self.eta1_ref_body.last_value = deepcopy(self.eta1_ref_body.value)\n self.eta1_ref_body.last_sampling = rospy.Time.now()\n\n self.mutex.release()\n rospy.loginfo(\"%s receive depth reference\", self.node_name)", "def get_depth(self, search_items, csv=False, output_dir=None, extra_param=None):\n\n # Get data from api and create objects\n api_datas = self.call_api(search_items, \"probability\", \"depth\", \"property\", extra_param=extra_param)\n product = [ProbabilityDepth(api_data) for api_data in api_datas]\n\n if csv:\n csv_format.to_csv(product, \"probability\", \"depth\", output_dir=output_dir)\n\n logging.info(\"Probability Depth Data Ready.\")\n\n return product", "def z(self):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True)\n for i in range(self.n_levels()):\n if self.profile_data[i]['Missing']: continue\n data[i] = self.profile_data[i]['Depth']\n return data", "def __get_kde_values(\n self,\n spacings_range: Tuple[float, float],\n nnnsd: bool = False,\n kde_gridsize: int = 1000,\n ) -> fArr:\n spacings = np.sort(self.vals[2:] - self.vals[:-2]) if nnnsd else self.spacings\n kde = KDE(spacings)\n kde.fit(kernel=\"gau\", bw=\"scott\", cut=0, fft=False, gridsize=10000)\n s = np.linspace(spacings_range[0], spacings_range[1], kde_gridsize)\n # evaluated = np.empty_like(s)\n # for i, _ in enumerate(evaluated):\n # evaluated[i] = kde.evaluate(s[i])\n evaluated: fArr = np.array(kde.evaluate(s))\n return evaluated", "def get_data(datauri): \n print(datauri)\n nc = netCDF4.Dataset(datauri)\n time = nc.variables['JULD']\n depth = nc.variables['PRES']\n\n checkdepth = 0\n findepth = np.zeros(time.shape[0])\n for i in range (0, depth.shape[0]):\n maxdepth = np.amax(depth[i])\n findepth[i] = maxdepth\n if (maxdepth > checkdepth):\n dd=i\n checkdepth = maxdepth\n maxdepth = findepth[dd]\n \n temperature = nc.variables['TEMP'][dd] \n tempadj=nc.variables['TEMP_ADJUSTED'][dd]\n depthnew = nc.variables['PRES'][dd] \n depthadj = nc.variables['PRES_ADJUSTED'][dd] \n\n latitude = nc.variables['LATITUDE'][dd]\n longitude = nc.variables['LONGITUDE'][dd]\n\n lonm=nc.variables['LONGITUDE'][dd].mask\n latm=nc.variables['LATITUDE'][dd].mask\n timm=nc.variables['JULD'][dd].mask\n\n if (lonm == True or latm == True):\n longitude=-999.9\n latitude=-999.9\n\n\n out = {}\n out['latitude'] = nc.variables.pop('LATITUDE')[dd]\n out['longitude'] = nc.variables.pop('LONGITUDE')[dd]\n out['temperature'] = nc.variables.pop('TEMP')[dd]\n out['temperatureadj'] = nc.variables.pop('TEMP_ADJUSTED')[dd]\n out['salinity'] = nc.variables.pop('PSAL')[dd]\n 
out['salinityadj'] = nc.variables.pop('PSAL_ADJUSTED')[dd]\n out['depth'] = nc.variables.pop('PRES')[dd]\n out['depthadj'] = nc.variables.pop('PRES_ADJUSTED')[dd]\n \n return out", "def _get_depth(self, data): \r\n\r\n data = data.astype(np.float32)\r\n\r\n normalized = np.dot(data, [65536.0, 256.0, 1.0]) \r\n normalized /= (256 * 256 * 256 - 1)\r\n in_meters = 1000 * normalized\r\n\r\n return in_meters", "def _calc_ecdf(self):\n for numerator, vals in self.lift.items():\n for denominator, lift in vals.items():\n raw_data = np.array(lift)\n cdfx = np.sort(np.unique(lift))\n x_values = np.linspace(start=min(cdfx),\n stop=max(cdfx),\n num=len(cdfx))\n size_data = raw_data.size\n y_values = []\n for i in x_values:\n temp = raw_data[raw_data <= i]\n value = temp.size / size_data\n y_values.append(value)\n temp = {}\n temp['x'] = x_values\n temp['y'] = y_values\n if numerator not in self.ecdf.keys():\n self.ecdf[numerator] = {}\n self.ecdf[numerator][denominator] = temp\n else:\n self.ecdf[numerator][denominator] = temp", "def retrieve_EchoDepth(\n ds,\n threshold,\n variable=\"zFactorFinal\",\n radar_frequency=\"Ku\",\n min_threshold=0,\n mask_liquid_phase=False,\n):\n # Retrieve required DataArrays\n da = get_variable_dataarray(ds, variable=variable)\n if len(da[\"radar_frequency\"].data) != 1:\n da = da.sel({\"radar_frequency\": radar_frequency})\n da_height = ds[\"height\"].copy()\n # Mask height bin where not raining\n da_mask_3d_rain = da > min_threshold\n da_height = da_height.where(da_mask_3d_rain)\n\n # Mask heights where Z is not above threshold\n da_mask_3d = da > threshold\n da_height_masked = da_height.where(da_mask_3d)\n\n # Mask liquid phase\n if mask_liquid_phase:\n da_liquid_mask = get_liquid_phase_mask(ds)\n da_height_masked = da_height_masked.where(~da_liquid_mask)\n\n # Retrieve min and max echo height\n da_max_height = da_height_masked.max(dim=\"range\")\n da_min_height = da_height_masked.min(dim=\"range\")\n\n # OLD MASKING\n # if mask_liquid_phase:\n # da_isnan = np.isnan(da_min_height)\n # da_height_melting = ds[\"heightZeroDeg\"]\n # da_height_melting = da_height_melting.where(~da_isnan)\n # # If max is below the 0 °C isotherm --> set to nan\n # da_max_height = da_max_height.where(da_max_height > da_height_melting)\n # # If min below the 0 °C isoterm --> set the isotherm height\n # da_min_height = da_min_height.where(da_min_height > da_height_melting, da_height_melting)\n\n # Compute depth\n da_depth = da_max_height - da_min_height\n\n # Add attributes\n da_depth.name = f\"EchoDepth{threshold}dBZ\"\n da_depth.attrs[\"units\"] = \"m\"\n return da_depth", "def contourf_node_values(self,values,*args,**kwargs):\n ax=kwargs.pop('ax',None) or plt.gca()\n tri=self.mpl_triangulation()\n return ax.tricontourf(tri,values,*args,**kwargs)", "def depth_c(self) -> pd.DataFrame:\n return self._load_fetch(self.DEPTH_C)", "def gen_val(self, num_poses, batch_size):\n noise = self.make_noise(num_poses)\n labels = [1] * num_poses\n self.update_disc_copy()\n\n rv = self.nested_generator.evaluate(\n noise, labels, batch_size=batch_size)\n self.update_disc_copy()\n\n # Same trick we pull in disc_val\n fit_data = (noise[:100], labels[:100])\n self.nested_generator.fit(\n *fit_data,\n batch_size=batch_size,\n nb_epoch=1,\n verbose=0,\n validation_data=fit_data)\n\n return rv", "def depth_callback(self, data):\n cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding=\"passthrough\")\n self.depth_mutex.acquire()\n self.depth_image = cv_image\n self.depth_mutex.release()", "def 
spatial_covariance(distances, z, eval_distances, tolerance=0.2):\n if distances[np.triu_indices(distances.shape[0])].max() > 1000:\n sub_distances = distances\n else:\n sub_distances = np.array(distances, copy=True)\n sub_distances[np.triu_indices(sub_distances.shape[0])] = 999999\n covariances = np.zeros(eval_distances.size)\n z_flat = z.ravel()\n for d, eval_distance in enumerate(eval_distances):\n points_a, points_b = np.where(np.abs(sub_distances - eval_distance) <= tolerance)\n covariances[d] = np.sum((z_flat[points_a] - z_flat[points_a].mean()) *\n (z_flat[points_b] - z_flat[points_b].mean())) / (float(points_a.size) - 1.0)\n covariances[d] /= z_flat[points_a].std() * z_flat[points_b].std()\n return covariances", "def calculate_d_vals(self) -> None:\n # Skip last point if path is non-cyclic\n point_inds = range(self.num_points) if self.is_cyclic else range(self.num_points - 1)\n for i in point_inds:\n z_i = self.points[i % self.num_points]\n z_j = self.points[(i + 1) % self.num_points]\n z_i.d_val = abs(z_i - z_j)", "def cdf_discretize(self,variables=[]):\n #the errors in the code are due to the deleted files that require packages to be installed on the computer\n for i in variables:\n x=unique(self.data[:,i])\n m=max(x)-min(x)\n f=lambda x0,y0: array([m*(x0+y0)/(1+m**2), (x0*m+y0)/(1+m**2)])\n cdf=array([np.sum(self.data[:,i]<=t) for t in x])\n d=array([norm(array([x0,cdf[k]])-f(x0,cdf[k])) for k,x0 in\\\n enumerate(x)])", "def disc_val(self, val_data, batch_size):\n fakes = self.generate_poses(len(val_data))\n labels = np.array([1] * len(val_data) + [0] * len(fakes))\n data = np.concatenate([val_data, fakes])\n rv = self.discriminator.evaluate(data, labels, batch_size=batch_size)\n\n # Calling .fit() stores .validation_data on self.discriminator. The\n # TensorBoard callback can then use that to make an activation\n # histogram (or whatever it does). 
Need to pick first [:100] or\n # TensorFlow runs out of memory :P\n fit_indices = np.random.permutation(len(data))[:100]\n fit_data = (data[fit_indices], labels[fit_indices])\n self.discriminator.fit(\n *fit_data,\n batch_size=batch_size,\n nb_epoch=1,\n verbose=0,\n validation_data=fit_data)\n\n return rv", "def _gdal_preprocessing(self, nodatavalue: float = 1000000.0, z_positive_up: bool = True,\n layer_names: tuple = ('depth', 'vertical_uncertainty')):\n\n if self.is_vr:\n raise NotImplementedError(\"VR surfacing doesn't currently return gridded data arrays yet, have to figure this out\")\n\n layerdata = []\n geo_transform = []\n finalnames = []\n for cnt, layer in enumerate(layer_names):\n nodex, nodey, nodez, valid, newmins, newmaxs = self.return_surf_xyz(layer)\n if cnt == 0:\n cellx = nodex[0] - self.min_grid_size / 2 # origin of the grid is the cell, not the node\n celly = nodey[-1] + self.min_grid_size / 2\n geo_transform = [np.float32(cellx), self.min_grid_size, 0, np.float32(celly), 0, -self.min_grid_size]\n if z_positive_up:\n if layer.lower() == 'depth':\n nodez = nodez * -1 # geotiff depth should be positive up, make all depths negative\n layer = 'Elevation'\n nodez = nodez[:, ::-1]\n nodez[np.isnan(nodez)] = nodatavalue\n layerdata.append(nodez)\n finalnames.append(layer)\n return layerdata, geo_transform, layer_names", "def gen_depth_data(scan_folder, dst_folder, normalize=False):\n # specify the goal folder\n dst_folder = os.path.join(dst_folder, 'depth')\n try:\n os.stat(dst_folder)\n print('generating depth data in: ', dst_folder)\n except:\n print('creating new depth folder: ', dst_folder)\n os.mkdir(dst_folder)\n \n # load LiDAR scan files\n scan_paths = load_files(scan_folder)\n\n depths = []\n \n # iterate over all scan files\n for idx in range(len(scan_paths)):\n # load a point cloud\n current_vertex = np.fromfile(scan_paths[idx], dtype=np.float32)\n current_vertex = current_vertex.reshape((-1, 4))\n \n proj_range, _, _, _ = range_projection(current_vertex)\n \n # normalize the image\n if normalize:\n proj_range = proj_range / np.max(proj_range)\n \n # generate the destination path\n dst_path = os.path.join(dst_folder, str(idx).zfill(6))\n \n # save the semantic image as format of .npy\n np.save(dst_path, proj_range)\n depths.append(proj_range)\n print('finished generating depth data at: ', dst_path)\n\n return depths", "def depth(self, v):\n # method here", "def cum_density_func(xs,norm=True,rank=False,data_range='data',pdf=None):\n if pdf is None:\n pdf = prob_density_func(xs,False,data_range)\n pdfk = sorted(pdf.keys())\n pdfv = map(pdf.get,pdfk)\n if not rank:\n cdfv = np.cumsum(pdfv)\n if norm:\n cdfv = cdfv/np.sum(pdfv)\n else:\n cdfv = np.arange(1,len(pdfk)+1)\n if norm:\n cdfv = cdfv/float((len(pdfk)+1))\n return dict(zip(pdfk,cdfv))", "def method_RVKDE(data, xs, ys, DIMENSION = 2):\n mu, sigma = rvkde_sigmas(data, int(len(data)/10), DIMENSION)\n return get_density(xs, ys, mu, sigma, DIMENSION)", "def cdf(self, points):\n if self._y_cdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = 
tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )" ]
[ "0.5295056", "0.5242438", "0.5220693", "0.5207092", "0.5097792", "0.50083566", "0.5006679", "0.4986816", "0.49857244", "0.49732733", "0.49697098", "0.49440157", "0.49382728", "0.4915959", "0.4913597", "0.49062333", "0.49047622", "0.49037802", "0.4896051", "0.48945013", "0.48668328", "0.48666185", "0.48435456", "0.48236924", "0.48217386", "0.4813901", "0.48130557", "0.47864926", "0.47808757", "0.47409683" ]
0.81340986
0
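The document above stops at the distance-matrix step. A hedged sketch of how a covariance-weighted (simple-kriging-style) depth interpolation could continue from there; depth_x/depth_z are assumed to be coordinates and values unpacked from depthData, covariance_func is assumed to accept arrays elementwise, and all names are hypothetical:

import numpy

def get_depth_array(depth_x, depth_z, requested_values, covariance_func):
    # Pairwise distances among the known depth points
    d_known = numpy.abs(numpy.subtract.outer(depth_x, depth_x))
    # Distances from each requested point to each known point
    d_req = numpy.abs(numpy.subtract.outer(requested_values, depth_x))
    # Covariances from the user-supplied covariance function
    c_known = covariance_func(d_known)
    c_req = covariance_func(d_req)
    # Simple-kriging-style weights: solve c_known @ w = c_req.T
    weights = numpy.linalg.solve(c_known, c_req.T)
    # Predicted depth at each requested point
    return weights.T @ depth_z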
Dump data for domain to stream.
def dump(self, output_stream): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def dump(self, data: dict, file: IO):", "def dumpData(self,out):\n #--Get sizes\n for record in self.records:\n #--Text\n if record.name == 'NAME':\n #--Truncate text?\n if len(self.text) > 511:\n self.text = self.text[:511]\n record.data = self.text\n record.size = len(self.text)\n #--Speaker\n elif record.name == 'ONAM':\n record.data = self.spId+'\\x00'\n record.size = len(self.spId) + 1\n record.getSize()\n record.dump(out)", "def dumpData(self,out):\n raise AbstractError", "def dumpData(self,out):\n raise AbstractError", "def dump(self, stream):\n def _dump_part(stream, key, values):\n stream.write(\"{}:\\n\".format(key))\n for value in values:\n if not value:\n stream.write(\" .\\n\")\n elif value == \".\":\n stream.write(\" ..\\n\")\n else:\n stream.write(\" {}\\n\".format(value))\n for key, value in self.data.items():\n if isinstance(value, (list, tuple)):\n _dump_part(stream, key, value)\n elif isinstance(value, str) and \"\\n\" in value:\n values = value.split(\"\\n\")\n if not values[-1]:\n values = values[:-1]\n _dump_part(stream, key, values)\n else:\n stream.write(\"{}: {}\\n\".format(key, value))\n stream.write(\"\\n\")", "def dump(self):\n return self._data.dump()", "def dump(): # pragma: no cover\n dods = sys.stdin.read()\n dds, xdrdata = dods.split(b'\\nData:\\n', 1)\n dataset = dds_to_dataset(dds)\n xdr_stream = io.BytesIO(xdrdata)\n data = unpack_dap2_data(xdr_stream, dataset)\n pprint.pprint(data)", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "def export(self, stream):\n pass", "def dump(self) -> None:\n ...", "def write_data():", "def save(datastream):", "def dumpData(self,out):\n out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n out.packSub('FNAM',self.type)\n out.packSub('FLTV','f',self.value)", "def dump_data(self,filename,dump_id):\n # get pure data copy\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( data, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def dump_data(self,filename,dump_id):\n import pickle\n from Auxiliary import tdc_Filenames\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n dump_dict={}\n dump_dict['fft_data'] = data\n dump_dict['fitting_type'] = self.fft_fit.type \n dump_dict['nk_plot'] = self.fft_fit.nk_plot\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( dump_dict, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def dump(self):\n # dump self.data\n pickle.dump(self.data, open(self.data_dir + DATA_PATH, 'wb+'))\n # dump self.code2desc\n pickle.dump(self.code2desc, open(self.data_dir + CODE2DESC_PATH, 'wb+'))\n # dump self.family2tf\n pickle.dump(self.family2tf, open(self.data_dir + FAMILY2TF_PATH, 'wb+'))\n # dump self.word2tf\n pickle.dump(self.word2tf, open(self.data_dir + WORD2TF_PATH, 'wb+'))\n # dump self.word2df\n pickle.dump(self.word2df, open(self.data_dir + WORD2DF_PATH, 'wb+'))\n return None", "def dump(data):\n if Utils.memdmp is None:\n Utils.memdmp = list()\n if 
NS.LOG_DISABLED is True:\n Utils.memdmp.append(data)\n if Utils.dmpfile is None and NS.LOG_DISABLED is False:\n Utils.dmpfile = open(r\"C:\\\\ProgramData\\\\Dmp.txt\", \"w\") \n if NS.LOG_DISABLED is False:\n Utils.dmpfile.write(str(data))", "def dumpData(self,out):\n out.packSub0('INAM',self.id)\n out.packSub0('PNAM',self.prevId)\n out.packSub0('NNAM',self.nextId)\n if not self.isDeleted:\n out.packSub('DATA','2i4B',\n self.type, self.spDisp, self.spRank, self.spSex, self.pcRank, self.unk02)\n if self.spId: out.packSub0('ONAM',self.spId)\n if self.spRace: out.packSub0('RNAM',self.spRace)\n if self.spClass: out.packSub0('CNAM',self.spClass)\n if self.spFaction: out.packSub0('FNAM',self.spFaction)\n if self.cell: out.packSub0('ANAM',self.cell)\n if self.pcFaction: out.packSub0('DNAM',self.pcFaction)\n if self.speak: out.packSub0('SNAM',self.speak)\n if self.text: out.packSub('NAME',self.text)\n if self.qflag == 0:\n pass\n if self.qflag == 1: out.packSub('QSTN','\\x01')\n if self.qflag == 2: out.packSub('QSTF','\\x01')\n if self.qflag == 3: out.packSub('QSTR','\\x01')\n for index,test in enumerate(self.tests):\n if test: test.dumpData(out,index)\n if self.script: out.packSub('BNAM',self.script)\n if self.isDeleted: out.pack('DELE','i',0)", "def dump(self, stream):\n items = (\n ('time', self.time),\n ('inc', self.inc),\n )\n # use ordered dict to retain order\n ts = collections.OrderedDict(items)\n json.dump(dict(ts=ts), stream)", "def stream(self):\n d = self.dictionary()\n # binary data comes after dict\n self.maybe_spaces_or_comments()\n return self._stream(d)", "def dump(self, data_points):\n print(data_points)", "def dump(self):\n self._dump(self._head, [])", "def dumpo(self):\n return self.do_all()", "def dump(self):\n return", "def _export_domain (self, fp, sp=\" \"):\n\n fp.write(\"(define\" + \"\\n\")\n\n # domain name\n fp.write (sp)\n fp.write (\"(domain %s)%s\" % (self.domain_name, \"\\n\"))\n\n # requirements\n if len (self.types) > 1 or list(self.types)[0] != Predicate.OBJECT:\n fp.write (sp + \"(:requirements :strips :typing)\\n\")\n else:\n fp.write (sp + \"(:requirements :strips)\\n\")\n\n # types\n #TODO likely wrong, doesn't capture the type hierarchy\n s = \" \".join ([t for t in self.types if t!= Predicate.OBJECT])\n fp.write (sp + \"(:types %s)%s\" %(s, \"\\n\"))\n\n # predicates\n fp.write (sp + \"(:predicates \" + \"\\n\")\n for p in self.predicates:\n fp.write (p.export (2, sp) + \"\\n\")\n fp.write (sp + \")\" + \"\\n\")\n\n # actions\n for action in self.actions:\n fp.write (action.export (1, sp) + \"\\n\")\n\n fp.write (\")\") # close define", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def export(self, f_domain, f_problem):\n\n # write domain file\n sp = \" \"\n fp = open(f_domain, \"w\")\n self._export_domain (fp, sp)\n fp.close()\n\n if self.init is not None:\n fp = open (f_problem, \"w\")\n self._export_problem (fp, sp)\n fp.close ()", "def dumpData(self,out):\n #--Header\n out.packSub('SCHD','32s5i',\n self.id, \n self.numShorts, self.numLongs, self.numFloats, \n self.dataSize, self.varSize)\n #--Others\n for record in [getattr(self,srName.lower(),None) for srName in Scpt.subRecordNames]:\n if not record: continue\n record.size = len(record.data)\n record.dump(out)", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))", "def dump_stream(self, iterator, stream):\n batches = (self._create_batch(series) for series in iterator)\n super(ArrowStreamPandasSerializer, self).dump_stream(batches, 
stream)" ]
[ "0.6574605", "0.64592576", "0.637863", "0.637863", "0.63642424", "0.6257538", "0.6221386", "0.6161159", "0.6138144", "0.61353606", "0.60472876", "0.6043062", "0.59851927", "0.5921861", "0.5884786", "0.5884686", "0.5857817", "0.5850663", "0.58451045", "0.580958", "0.580143", "0.578416", "0.57830715", "0.5779357", "0.5751696", "0.5748256", "0.5717584", "0.5707562", "0.5693172", "0.5676121" ]
0.6548187
1
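The document's dump is an abstract stub. One hypothetical concrete implementation that serializes a domain as JSON onto the given stream (class and field names are illustrative):

import io
import json

class JsonDomainDumper:
    def __init__(self, domain_data):
        self.domain_data = domain_data

    def dump(self, output_stream):
        # Write the domain data as pretty-printed JSON to the stream
        json.dump(self.domain_data, output_stream, indent=2)
        output_stream.write("\n")

# Usage: dump into an in-memory text stream
buf = io.StringIO()
JsonDomainDumper({"name": "example-domain"}).dump(buf)
print(buf.getvalue())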
make the union of the trees of x and y
def union(self,x,y):
    assert x in self and y in self
    rx,ry = self.find(x),self.find(y)
    if rx!=ry:
        nx,ny = self.__rank[rx],self.__rank[ry]
        if nx<=ny:
            self.__parent[rx] = ry
            self.__size[ry] += self.__size[rx]
            if nx==ny:
                self.__rank[ry]+=1
        else:
            self.__parent[ry] = rx
            self.__size[rx] += self.__size[ry]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def union(self, x, y):\n\t\trx, ry = self.find(x), self.find(y)\n\t\tkrx, kry = self.__rank[rx], self.__rank[ry]\n\t\tif(krx >= kry):\n\t\t\tself.__parent[ry] = rx\n\t\t\tif(krx == kry): self.__rank[rx] = self.__rank[rx] + 1\n\t\telse: self.__parent[rx] = ry", "def union(self, x, y):\n \n px, py = self.find(x), self.find(y)\n if px != py:\n \n if self.size[px] > self.size[py]:\n px, py = py, px\n \n\n self.parent[px] = py\n self.size[py] += self.size[px]", "def union(self, x, y):\n rootx = self.__find_root(x)\n rooty = self.__find_root(y)\n\n if self.weights[rootx] >= self.weights[rooty]:\n self.data[rooty] = rootx\n self.weights[rootx] += self.weights[rooty]\n else:\n self.data[rootx] = rooty\n self.weights[rooty] += self.weights[rooty]", "def union(self, node1, node2):\n\n root1 = self.root(node1)\n root2 = self.root(node2)\n\n if root1 == root2:\n return\n\n if node1 < node2:\n self.set[root2] = root1\n self.root(node2)\n else:\n self.set[root1] = root2\n self.root(node1)", "def union(set1, set2):", "def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)", "def union(self, other): # -> BaseGeometry:\n ...", "def union(node1, node2):\n node1_root = find(node1)\n node2_root = find(node2)\n if node1_root == node2_root:\n return\n if node1_root.rank < node2_root.rank:\n node1_root.parent = node2_root\n elif node2_root.rank > node2_root.rank:\n node2_root.parent = node1_root\n else:\n node2_root.parent = node1_root\n node1_root.rank = node1_root.rank + 1", "def union(one, other):\n left = min(one.left, other.left)\n right = max(one.right, other.right)\n top = min(one.top, other.top)\n bottom = max(one.bottom, other.bottom)\n return BBox([[left, top], [right, bottom]])", "def union(self, *args):\n return self.phy2abs.union(*args)", "def union(self, other):\n return self._geomgen(capi.geom_union, other)", "def union(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_union = Automaton()\n nfa_union.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_union.states.append('S')\n nfa_union.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_union.final = list(set(nfa1_star.final).union(nfa2_star.final))\n nfa_union.change_start_state('S')\n nfa_union.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n nfa_union.transition['S, .'] = [nfa1_star.q_0, nfa2_star.q_0]\n\n self.aut_stack.append(nfa_union)", "def union(self, *objects):\n roots = [self[x] for x in objects]\n # Find the heaviest root according to its weight.\n heaviest = max(roots, key=lambda r: self.weights[r])\n for r in roots:\n if r != heaviest:\n self.weights[heaviest] += self.weights[r]\n self.parents[r] = heaviest", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def union(a, b):\r\n return list(set(a) | set(b))", "def union(self, data1, data2):\n root1 = self.find_set(data1)\n root2 = self.find_set(data2)\n\n if root1 == root2:\n return False\n\n elif root1.rank >= root2.rank:\n if root1.rank == root2.rank:\n root1.rank = 
root1.rank + 1\n root2.parent = root1\n else:\n root1.parent = root2\n\n return True", "def union(self, data1, data2):\n root1 = self.find_set(data1)\n root2 = self.find_set(data2)\n\n if root1 == root2:\n return False\n\n elif root1.rank >= root2.rank:\n if root1.rank == root2.rank:\n root1.rank = root1.rank + 1\n root2.parent = root1\n else:\n root1.parent = root2\n\n return True", "def union_all(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n return bind_rows(x, y, __calling_env=CallingEnvs.REGULAR)", "def union(self,i,j):\r\n root_i = self.find(i)\r\n root_j = self.find(j)\r\n if root_i != root_j:\r\n self.parent[root_j] = root_i\r\n return 1\r\n return 0", "def union(first, second):\n # Put your code here.", "def union(a, b):\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def tree_vdot(tree_x, tree_y):\n vdots = tree_multimap(_vdot_safe, tree_x, tree_y)\n return tree_reduce(operator.add, vdots)", "def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1", "def union(self, other):\n self.find_set()._link(other.find_set())", "def union(self, *args):\n _ub = None\n for _obj in args:\n if _ub is None:\n _ub = self.bbox(_obj)\n else:\n _b = self.bbox(_obj)\n _x = np.sort(np.array([_b[:, 0], _ub[:, 0]]), axis=None)\n _y = np.sort(np.array([_b[:, 1], _ub[:, 1]]), axis=None)\n _ub = np.array([[_x[0], _y[0]], [_x[3], _y[3]]])\n return _ub", "def union(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n\n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n\t\t for v in chain(G.successors(u), H.successors(u)))\n deg = make_deg(n_nodes, edges) \n edges = ((u, v) for u in G.nodes()\n\t\t for v in chain(G.successors(u), H.successors(u)))\n GC = make(n_nodes, G.size() + H.size(), edges, deg)\n return GC", "def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result", "def union(self, x, y):\n xr, yr = self.find(x), self.find(y)\n if xr == yr:\n return False\n\n if self.sz[xr] < self.sz[yr]:\n xr, yr = yr, xr\n\n self.par[yr] = xr\n self.sz[xr] += 
self.sz[yr]\n self.sz[yr] = self.sz[xr]\n\n return True" ]
[ "0.7064512", "0.705985", "0.70128745", "0.6741631", "0.66894627", "0.66152054", "0.6579406", "0.64984643", "0.6430702", "0.63348776", "0.62756675", "0.62601674", "0.62502754", "0.6192367", "0.6139346", "0.61291367", "0.61289513", "0.61289513", "0.6125513", "0.61071765", "0.6057918", "0.6054801", "0.6054801", "0.60364527", "0.6033353", "0.6020496", "0.6015355", "0.6005051", "0.6003612", "0.60021114" ]
0.7619764
0
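The union method in the document assumes a disjoint-set container exposing find plus private parent/rank/size tables. A minimal hypothetical host class that makes it runnable; because the attributes are name-mangled, the document's union must be defined on this same class:

class DisjointSet:
    def __init__(self, items):
        self.__parent = {x: x for x in items}
        self.__rank = {x: 0 for x in items}
        self.__size = {x: 1 for x in items}

    def __contains__(self, x):
        return x in self.__parent

    def find(self, x):
        # Iterative find with path halving
        parent = self.__parent
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

# Usage, once union from the document is attached to DisjointSet:
#   ds = DisjointSet(["a", "b", "c"])
#   ds.union("a", "b")
#   assert ds.find("a") == ds.find("b")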
The supported lengths of the predictor
def supportedLength(self): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return len(self.train) + len(self.val) + len(self.test)", "def __len__(self):\r\n return len(self.train_data)", "def _predict_feature_sizes(self):\n return self._feature_sizes", "def get_pred_length(self):\n return self.prediction_length", "def __len__(self):\n\n value_length = []\n for v in chain(self.values(), self.metainfo_values()):\n if isinstance(v, LabelData):\n value_length.append(v.label.shape[0])\n elif is_splitable_var(v):\n value_length.append(len(v))\n else:\n continue\n\n # NOTE: If length of values are not same or the current data sample\n # is empty, return length as 1\n if len(list(set(value_length))) != 1:\n return 1\n\n length = value_length[0]\n return length", "def len(self):\r\n return len(self.input), len(self.target)", "def __len__(self) -> int:\n return len(self.samples)", "def __len__(self):\n return self.n_samples", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n elif self.mode.lower() == 'val':\n return len(self.val_data)\n elif self.mode.lower() == 'test':\n return len(self.test_data)\n else:\n raise RuntimeError(\"Unexpected dataset mode. \"\n \"Supported modes are: train, val and test\")", "def __len__(self):\n return max(self.A_size, self.B50_size, self.B100_size, self.B150_size)", "def __len__(self):\n return self.__n_samples", "def __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)", "def getOutputLength(self):\n return len(self.Y[0])", "def __len__(self):\n return self.data.num_samples", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n if self.mode.lower() == 'val':\n return len(self.val_data)\n if self.mode.lower() == 'test':\n return len(self.test_data)\n\n raise RuntimeError(\"Unexpected dataset mode. \"\n \"Supported modes are: train, val and test\")", "def __len__(self):\n return self._num_samples", "def __len__(self):\n return len(self.samples)", "def __len__(self):\n return len(self.samples)", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def n_train(self):\n return self.factors[0].shape[0]", "def __len__(self):\n return self.total_num_sequences", "def __len__(self) -> int:\n\n length = self.n_classes * 100\n\n return length", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n return len(self.labels)", "def size(self):\n\t\treturn len(self.lables)", "def length(self) -> int:\n return len(self.__samples)", "def __len__(self):\r\n\r\n return self.yInput.shape[1]" ]
[ "0.736361", "0.6998112", "0.6977338", "0.6975378", "0.69442517", "0.6935557", "0.68784916", "0.6864196", "0.68472785", "0.684683", "0.67938626", "0.67794424", "0.67620033", "0.6754116", "0.67379314", "0.6736056", "0.67266524", "0.67266524", "0.66896105", "0.66629016", "0.66589046", "0.6644809", "0.6637302", "0.6631234", "0.66299325", "0.66299325", "0.66145676", "0.65958685", "0.6594782", "0.6582858" ]
0.7227685
1
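supportedLength in the document is abstract; concrete predictors typically return the set of peptide lengths they can score. A hypothetical example mirroring the stub's signature:

class ExamplePredictor:
    def supportedLength(self):
        # This (made-up) predictor scores 8- to 11-mer peptides
        return frozenset([8, 9, 10, 11])

assert 9 in ExamplePredictor().supportedLength()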
Converts alleles into the internal allele representation of the predictor and returns a string representation
def convert_alleles(self, alleles): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_process_output(self,predictions,convert_to_string):\n normalized = [(p[0],REVERSE_ARROWS.get(p[1],p[1]),p[2]) for p in predictions]\n if convert_to_string:\n return ' '.join([\"%s%s\" % (p[0],p[1]) for p in normalized])\n return normalized", "def __str__(self):\n analysis = []\n for analyze in self.analysis:\n if self.analysis[analyze] is not None:\n analysis.append(self.analysis[analyze])\n return \"Analises: {} \\n\".format(analysis)", "def _post_process_output(self,predictions,convert_to_string):\n if convert_to_string:\n return ' '.join([\"%s-%s\" % (p[0],p[1]) for p in predictions])\n return predictions", "def _to_str(self):\n\t\tprint(\"predictors: {}, types: {} \\n method: {}, preprocessing: {}\\\n\t\t\t \\n partition_rate: {}, metric: {}, file name: {}\".format(\n\t\t\t self.predictors, self.predictors_types, self.method_name,\n\t\t\t self.preprocessing_methods, self.data_split, self.metric,\n\t\t\t self.plotting_file_name))", "def convert_allele_list_to_ags(hla_allele_list):\n\tallele_list_dict = {}\n\tag_list = []\n\tbw4_6_list = []\n\tfor allele in hla_allele_list:\n\t\tallele = allele.rstrip(\"p P g G\")\n\t\tif allele in allele_to_ag_dict:\n\t\t\tag = \"\"\n\t\t\trule = \"\"\n\t\t\tag = allele_to_ag_dict[allele][0]\n\t\t\trule = allele_to_ag_dict[allele][1]\n\t\t\tbw4_6 = allele_to_ag_dict[allele][2]\n\t\t\tag_list.append(ag)\n\t\t\tbw4_6_list.append(bw4_6)\n\t\t\t\n\t\telse:\n\t\t\tag = \"NA\"\n\t\t\tag_list.append(ag)\n\t\n\tallele_list_dict = {\"Allele_list\": hla_allele_list, \"UNOS antigens\": ag_list, \"Bw4/6 epitopes\": bw4_6_list}\n\t\t\t\t\n\treturn allele_list_dict", "def to_string(self):\n return Classifier.to_string(self).format(\n name=self.Name,\n args=self._parameters_to_string(self.__multi_layer_perceptron.get_params()),\n )", "def _repr_(self):\n return \"Lie algebra %s over %s\" % (self._classification,\n self.base_ring())", "def convert_allele_to_ag(allele):\n\tallele_dict = {}\n\tallele = allele.rstrip(\"p P g G\")\n\tif allele in allele_to_ag_dict:\t\n\t\tag = allele_to_ag_dict[allele][0]\n\t\trule = allele_to_ag_dict[allele][1]\n\t\tbw4_6 = allele_to_ag_dict[allele][2]\n\t\n\telse:\n\t\tag = \"NA\"\n\tallele_dict[allele] = [ag, bw4_6]\n\treturn allele_dict", "def __str__(self):\n s = \"\"\n for v in self.vectors:\n s += str(v) + \"\\n\"\n return s", "def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st", "def inverse_transform_lemmas(self, predictions):\n pred_lemmas = []\n if self.include_lemma == 'generate':\n for pred in predictions:\n pred_lem = ''\n for positions in pred:\n top_idx = np.argmax(positions) # winning position\n c = self.lemma_char_idx[top_idx] # look up corresponding char\n if c in ('$', '%'):\n continue\n if c == '|':\n break\n else:\n pred_lem += c # add character\n pred_lemmas.append(pred_lem)\n\n elif self.include_lemma == 'label':\n predictions = np.argmax(predictions, axis=1)\n pred_lemmas = self.lemma_encoder.inverse_transform(predictions) \n \n return pred_lemmas", "def __str__(self):\n state = ''\n state += ' '.join([str(x) for x in self.pos]) + ' '\n state += ''.join([str(x) + ' ' + str(y) + ' ' for x,\n y in zip(self.BU, self.BD)])\n for e in self.BF:\n state += ' '.join([str(x) for x in e])\n state += ' '\n state += ' '.join([str(x) for x in self.LU]) + ' '\n state += ' '.join([str(x) for x in self.LD]) + ' '\n\n return state", "def toGenomeRepresentation(self):\n s = \"\"\n s += str(self.axiom)\n s += \"||\"+str(self.niterations) # The iterations must be shown as well\n for 
prod in self.productions:\n s += \"||\"\n s += prod.toGenomeRepresentation()\n return s", "def __str__(self) -> str:\n return '\\n'.join([' '.join([str(u) for u in row]) for row in self.adjacency_matrix])", "def __str__(self, depth=1):\n if self.isLeaf():\n return \"Predict: \\\"{:s}\\\"\".format(str(self.predict))\n else:\n s = \"if features[{:d}] != \\\"{:s}\\\" then:\\n {:s} \\n{:s}else:\\n {:s}\"\n return s.format(self.feature, \n str(self.value), \n \"\\t\" * depth+self.left.__str__(depth+1),\n \"\\t\" * (depth-1),\n \"\\t\" * depth+self.right.__str__(depth+1))", "def generate_amr_string_from_triples(self):\n def get_alignment(f_concept_var):\n \"\"\"\n Get alignment for a single concept\n \"\"\"\n for triplet, a in self.amr_obj.alignments().items():\n if f_concept_var == triplet[0] and triplet[1] == ':instance-of':\n return int(a.split('.')[1].split(',')[0])\n\n def get_all_amr_string(f_concept_var):\n \"\"\"\n Get all amr string from the concept\n \"\"\"\n def get_triples(key):\n result_triples = []\n f_triples = self.amr_obj.triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n if f_triples:\n result_triples.extend(f_triples)\n f_triples = self.amr_obj.triples(head=key)\n if f_triples:\n result_triples.extend(f_triples)\n return result_triples\n entry = defaultdict(int)\n q = []\n q.append((amr.Var('TOP'), ':top', f_concept_var))\n entry[f_concept_var] += 1\n reentrancies = self.amr_obj.reentrancies()\n all_triples = []\n while q:\n u = q.pop()\n all_triples.append(u)\n triples = get_triples(u[2])\n for triplet in triples[::-1]:\n if triplet[2] in reentrancies:\n if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n q.append(triplet)\n entry[triplet[2]] += 1\n else:\n q.append(triplet)\n entry[triplet[2]] += 1\n s = ''\n stack = []\n instance_fulfilled = None\n align = role_align = {}\n concept_stack_depth = {\n None: 0} # size of the stack when the :instance-of triple was encountered for the variable\n for h, r, d in all_triples + [(None, None, None)]:\n align_key = align.get((h, r, d), '')\n role_align_key = role_align.get((h, r, d), '')\n if r == ':top':\n s += '(' + d()\n stack.append((h, r, d))\n instance_fulfilled = False\n elif r == ':instance-of':\n s += ' / ' + d(align_key)\n instance_fulfilled = True\n concept_stack_depth[h] = len(stack)\n elif r == ':wiki':\n continue\n elif h == stack[-1][2] and r == ':polarity': # polarity gets to be on the same line as the concept\n s += ' ' + r + role_align_key + ' ' + d(align_key)\n else:\n while len(stack) > concept_stack_depth[h]:\n h2, r2, d2 = stack.pop()\n if instance_fulfilled is False:\n # just a variable or constant with no concept hanging off of it\n # so we have an extra paren to get rid of\n align_key2 = align.get((h2, r2, d2), '')\n s = s[:-len(d2(align_key2)) - 1] + d2(align_key2, append=not instance_fulfilled)\n else:\n s += ')'\n instance_fulfilled = None\n if d is not None:\n s += ' \\n' + ' ' * 4 * len(stack) + r + role_align_key + ' (' + d(align_key)\n stack.append((h, r, d))\n instance_fulfilled = False\n return s\n\n # def get_all_alignments(concept_var, sep, left=True):\n # '''\n # Get all alignments from the concept\n # '''\n #\n # # def alignment_to_text(alignments):\n # # '''\n # # Convert all alignments to text\n # # '''\n # # def filter(idxs, tol):\n # # '''\n # # Resulting only the longest contiguous elements\n # # '''\n # # diffs = [idxs[i + 1] - idxs[i] for i in range(len(idxs) - 1)]\n # # start = False\n # # max_length = -1\n # # for i in range(len(diffs)):\n # # if diffs[i] <= tol:\n # # if not 
start:\n # # start = True\n # # length = 1\n # # start_idx = i\n # # else:\n # # length += 1\n # # else:\n # # if start:\n # # start = False\n # # end_idx = i\n # # if length >= max_length:\n # # max_length = length\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # if start:\n # # end_idx = i + 1\n # # if length >= max_length:\n # # max_start_idx = start_idx\n # # max_end_idx = end_idx\n # # return [idxs[i] for i in range(max_start_idx, max_end_idx + 1)]\n # #\n # # doc = en_nlp(\" \".join(self.amr_obj.tokens()))\n # # alignments = sorted(list(set(alignments)))\n # # # We used noun chunks to prevent orphaned noun\n # # noun_chunks = list(doc.noun_chunks)\n # # new_alignments = set()\n # # for a in alignments:\n # # new_alignments.add(a)\n # # # Insert all noun chunks to new alignment\n # # for noun in noun_chunks:\n # # if noun.start <= a <= noun.end:\n # # new_alignments.update([i for i in range(noun.start, noun.end)])\n # # text = [self.amr_obj.tokens()[idx] for idx in filter(sorted(list(new_alignments)), 3)]\n # # text = \" \".join(text)\n # # return text\n #\n # def get_triplet(key):\n # result_triplets = []\n # triples = self.amr_obj.f_triples(dep=key, rel=':ARG-of', normalize_inverses=True)\n # if triples:\n # result_triplets.extend(triples)\n # triples = self.amr_obj.f_triples(head=key)\n # if triples:\n # result_triplets.extend(triples)\n # return result_triplets\n #\n # triplets_stor = {}\n # entry = defaultdict(int)\n # q = queue.Queue()\n # q.put(concept_var)\n # entry[concept_var] += 1\n # result_alignments = []\n # alignments = self.amr_obj.alignments()\n # role_alignments = self.amr_obj.role_alignments()\n # reentrancies = self.amr_obj.reentrancies()\n # while not q.empty():\n # u = q.get()\n # triples = get_triplet(u)\n # for triplet in triples:\n # if triplet not in triplets_stor:\n # triplets_stor[triplet] = 0\n # if type(triplet[2]) is amr.Var:\n # if entry[triplet[2]] <= reentrancies[triplet[2]] + 1:\n # q.put(triplet[2])\n # entry[triplet[2]] += 1\n #\n # def is_pos_correct(idx, sep, left=True):\n # if left:\n # return True if idx < sep else False\n # else:\n # return True if idx > sep else False\n #\n # if triplet in alignments:\n # idx = int(alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # if triplet in role_alignments:\n # idx = int(role_alignments[triplet].split('.')[1])\n # #if is_pos_correct(idx, sep, left):\n # result_alignments.append(idx)\n # return alignment_to_text(result_alignments)\n\n if self.triples == {}:\n return ''\n\n results = []\n for key, triple in self.triples.items():\n result_1 = get_alignment(triple[1])\n if result_1 is None:\n continue\n if triple[0] is not None:\n result_0 = get_all_amr_string(triple[0])\n else:\n result_0 = ''\n for concept_var in triple[2]:\n if concept_var:\n result_2 = get_all_amr_string(concept_var)\n if len(result_2.split(' ')) == 1:\n if not result_2.startswith('('):\n result_2 = '(' + result_2 + ')'\n results.append((result_0, self.amr_obj.var2concept()[triple[1]]._name, result_2))\n\n # f = open('amr_string.txt', 'w')\n # for l, m, r in results:\n # if l != '':\n # f.write(l+'\\n')\n # if r != '':\n # f.write(r+'\\n')\n # f.close()\n return results", "def format_prediction(self, prediction, params=None):\n return str(prediction)", "def __str__(self):\n L = []\n for s,e in self.normalized():\n if s == e:\n L.append(str(s))\n else:\n L.append(str(s) + \"-\" + str(e))\n return \",\".join(L)", "def __str__(self):\n return 
self.get_ascii_trunk() + self.get_ascii_leaves()", "def postprocess(output):\n text=''\n order = output.argsort()[::-1][:6]\n # print('\\n------- predictions --------')\n for i in range(1):\n # print ('prediction ' + str(i) + ' (probability ' + str(output[order[i]]*100) + '%) is ' + gNetworkCategories[order[i]] + ' label index is: ' + str(order[i]) )\n text=text+str(gNetworkCategories[order[i]])\n\n return text", "def output_aa_string(residues):\n # Dictionary of 3 letter to 1 letter AA conversion\n aa_dict = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',\n 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',\n 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',\n 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}\n\n s = ''\n for res in residues:\n s = s + aa_dict.get(res.type)\n return s", "def onehot_to_string(self, one_hot_seq):\n gen_ints = [np.where(r==1)[0][0] for r in one_hot_seq]\n gen_char_list = self.int_to_char(gen_ints)\n generated_text = ''.join(gen_char_list)\n return generated_text", "def __str__(self):\n return str(self.asMatrix())", "def to_string(self, smirnoff_data):\n pass", "def recodeAllele(allele, zero, ones):\n\tif allele==\"0\":\n\t\t\treturn zero\n\tif allele==\"1\":\n\t\t\treturn ones\n\treturn allele", "def __str__(self):\n return str(self.array)", "def __str__(self):\n return np.array2string(self.graph.toarray())", "def __str__(self):\n return str(self.arr)", "def __str__(self):\t\t\n\t\tcadena = []\n\t\tactual = self.prim\t\t\n\t\twhile actual:\n\t\t\tif type(actual.dato) == str:\n\t\t\t\tcadena.append(\"'\" + str(actual.dato) + \"'\")\n\t\t\telse:\t\n\t\t\t\tcadena.append(str(actual.dato))\n\t\t\tactual = actual.prox\n\t\treturn \"[\" + \", \".join(cadena) + \"]\"", "def reverter_1(frase: str) -> str:\n lista_de_palavras = frase.split() # Tempo e memória linear\n palavras_reordenadas = reversed(lista_de_palavras)\n return ' '.join(palavras_reordenadas)" ]
[ "0.6184396", "0.5768091", "0.5712769", "0.56190765", "0.55800796", "0.55560637", "0.54867584", "0.54019606", "0.54016256", "0.53793234", "0.53760767", "0.53371274", "0.528604", "0.527637", "0.52741337", "0.52704126", "0.52626795", "0.5242563", "0.520618", "0.5175799", "0.5162493", "0.5118752", "0.5116797", "0.51125264", "0.5109482", "0.5067157", "0.50651836", "0.50541735", "0.5046252", "0.5041646" ]
0.6403247
0
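convert_alleles in the document is abstract; a hypothetical concrete conversion that strips separators from standard HLA nomenclature into an assumed internal format:

class ExamplePredictor:
    def convert_alleles(self, alleles):
        # "HLA-A*02:01" -> "HLA-A0201" (the internal format is an assumption)
        return [a.replace("*", "").replace(":", "") for a in alleles]

print(ExamplePredictor().convert_alleles(["HLA-A*02:01", "HLA-B*07:02"]))
# ['HLA-A0201', 'HLA-B0702']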
Predicts the binding affinity for a given peptide or peptide lists for a given list of alleles. If alleles is not given, predictions for all valid alleles of the predictor is performed. If, however, a list of alleles is given, predictions for the valid allele subset is performed.
def predict(self, peptides, alleles=None, **kwargs): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, peptides, **kwargs):\n raise NotImplementedError", "def predict_all(self, all_ex_words: List[List[str]]) -> List[int]:\n return [self.predict(ex_words) for ex_words in all_ex_words]", "def eval_predicted(predicted, inputs, outputs, parse_beam_fn):\n best_p, best_score = None, -1\n\n # predicted shape [beam_size, length]\n for beam in predicted:\n try:\n p = parse_beam_fn(beam)\n p_outs = [p(inp) for inp in inputs]\n score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])\n if score > best_score:\n best_p, best_score = p, score\n except: # pylint: disable=bare-except\n pass\n if best_score >= len(inputs): # Found solution.\n break\n return best_p, best_score", "def predict(self, list_of_tokenized_text):\n self.eval()\n MAX_MINIBATCH_SIZE = 128\n if len(list_of_tokenized_text) <= MAX_MINIBATCH_SIZE:\n return self._predict_minibatch(list_of_tokenized_text)\n else:\n preds = []\n max_confs = []\n\n minibatch_count = ceil(len(list_of_tokenized_text) / MAX_MINIBATCH_SIZE)\n print(f'{minibatch_count} minibatches in total')\n for i in range(minibatch_count):\n print(f'processing minibatch {i + 1}')\n preds_minibatch, max_confs_minibatch = self._predict_minibatch(\n list_of_tokenized_text[i * MAX_MINIBATCH_SIZE: (i + 1) * MAX_MINIBATCH_SIZE])\n preds.extend(preds_minibatch)\n max_confs.extend(max_confs_minibatch)\n return preds, max_confs", "def eval_prediction(minibatches, dataset_name, predict_fn, norm_mus=None,\n norm_sigmas=None, nb_evals=1, rescale_inputs=False):\n \n print(\"\\nEvaluating performance on %s set with missing SNPs\" % dataset_name)\n \n # Make the minibatches into a list so we can iterate over it as many times\n # as we need to.\n minibatches = [m for m in minibatches]\n\n list_percent_missing_snp = [0.01, 0.02, 0.05, 0.10, 0.20, 0.25, 0.50,\n 0.75, 0.80, 0.90, 0.95, 0.98, 0.99]\n\n for percent_missing_snp in list_percent_missing_snp:\n\n per_batch_correct_predictions = []\n\n for batch in minibatches:\n inputs, targets_onehot = batch\n\n targets = targets_onehot.argmax(1)\n n_feats = inputs.shape[1]\n\n batch_correct_predictions = []\n\n for eval_idx in range(nb_evals):\n\n # If the proportion of missing SNPs is higher than 0.50,\n # instead of separating all the SNPs into groups we will remove\n # one by one, we separate the SNPs into groups we will *keep*\n # one by one. 
This is done by changing the group sizes here\n # and flipping the SNP binary mask later\n if percent_missing_snp > 0.50:\n percent_kept_snp = 1 - percent_missing_snp\n snp_group_size = int(math.ceil(n_feats * percent_kept_snp))\n reverse_mask = True\n else:\n snp_group_size = int(math.ceil(n_feats * percent_missing_snp))\n reverse_mask = False\n\n # Separate all the Snps into random groups of identical size\n snp_indices = np.arange(n_feats)\n np.random.shuffle(snp_indices)\n\n snp_groups = [snp_indices[i:i + snp_group_size] for i in\n xrange(0, len(snp_indices), snp_group_size)]\n\n for snp_group_idx in range(len(snp_groups)):\n group_indices = snp_groups[snp_group_idx]\n\n # Create masks that will be used to identify SNPs to impute\n input_mask = np.zeros((n_feats,), dtype=\"float32\")\n np.put(input_mask, group_indices, 1)\n\n # If the groups represent SNPs to keep and not SNPs to\n # discard, flip the SNP mask\n if reverse_mask:\n input_mask = (1 - input_mask)\n \n if norm_mus is not None and norm_sigmas is not None:\n normed_inputs = (inputs - norm_mus) / norm_sigmas\n else:\n normed_inputs = inputs\n\n # If needed, rescale the inputs to compensate for the\n # proportion of missing SNPs\n if rescale_inputs:\n scale_factor = 1 / (1 - percent_missing_snp)\n else:\n scale_factor = 1.0\n\n # Obtain the model's prediction with masked inputs\n masked_inputs = normed_inputs * (1 - input_mask) * scale_factor\n predictions = predict_fn(masked_inputs)\n\n # Compute the number of successful ethnicity predictions\n batch_correct_predictions.append(predictions == targets)\n\n batch_correct_predictions = np.vstack(batch_correct_predictions)\n per_batch_correct_predictions.append(batch_correct_predictions)\n\n per_batch_correct_predictions = np.hstack(per_batch_correct_predictions)\n\n print(\"%s prediction accuracy at with %i percent of missing SNPs : %f +- %f\" %\n (dataset_name, percent_missing_snp * 100,\n per_batch_correct_predictions.mean(),\n per_batch_correct_predictions.mean(1).std()))", "def _predict_triples_batched(\n model: Model,\n mapped_triples: MappedTriples,\n batch_size: int,\n *,\n mode: Optional[InductiveMode],\n) -> torch.FloatTensor:\n return torch.cat(\n [\n model.predict_hrt(hrt_batch=hrt_batch, mode=mode)\n for hrt_batch in mapped_triples.split(split_size=batch_size, dim=0)\n ],\n dim=0,\n )", "def predictionMultiple(files, refArrays, species, files2, Data, target2, nbC, repeat=0,\r\n param=None, predAn='prediction', predType='rand', ratio=1 / 7.0, random_state=None,\r\n save='show', cwd='', average=True):\r\n\r\n if param is None:\r\n param = ['FL1-A', 'FL3-A', 'FSC-A']\r\n data, target, species = f.treat(refArrays, species, nbC, mode='train')\r\n #data = data[['FSC-A', 'SSC-A', 'FL1-A', 'FL4-A']]#TODO remove after the feature importance done\r\n scaler, classifier, predict_lbl, known_lbl, predict_data = f.learning(predType, data, target, ratio, random_state,\r\n species,rept=repeat)\r\n #LIME TODO REMOVE AT THE END\r\n\r\n conf = confusion_matrix(known_lbl, predict_lbl,labels=species)\r\n statistics = f.statAnalysis(predict_lbl, known_lbl, species)\r\n fls = []\r\n for file in files:\r\n fls.append(file.split('/')[-1][:-4])\r\n f.exportStatistics(statistics, ['-'.join(fls)], cwd, 'align')\r\n if repeat == 0:\r\n f.graph3d(predict_data, predict_lbl, known_lbl, species, param, statistics, save, cwd, repeat,\r\n name='reference repeat 1', predtype='training')\r\n\r\n statistics2 = []\r\n predict_lbl2 = []\r\n conf2 = []\r\n if predAn == 'prediction':\r\n for i in 
range(len(Data)):\r\n pred_lbl2 = f.predict(predType, scaler, classifier, Data[i], species)\r\n predict_lbl2.append(pred_lbl2)\r\n statistics2.append(f.statAnalysis(pred_lbl2, target2[i], species))\r\n if not average and save is not None and repeat == 0:\r\n f.graph3d(Data[i], pred_lbl2, list(target2[i]), species + ['unknown'], param, statistics2[i], save, cwd,\r\n repeat,\r\n name=files2[i].split('/')[-1][:-4] + ' repeat 1', predtype=predAn)\r\n else:\r\n predict_lbl2 = f.predict(predType, scaler, classifier, Data[0], species)\r\n print('PREDICTION')\r\n statistics2.append(f.statAnalysis(predict_lbl2, target2[0], species))\r\n conf2 = confusion_matrix(target2[0], predict_lbl2, labels=species)\r\n predict_lbl2 = [predict_lbl2]\r\n\r\n if not average and save is not None and repeat == 0:\r\n f.graph3d(Data[0], predict_lbl2[0], list(target2[0]), species, param, statistics2[0], save, cwd, repeat,\r\n name='Tool analysis prediction repeat 1', predtype=predAn)\r\n return statistics, statistics2, predict_lbl2, target2, conf, conf2, species, Data", "def predict_all():\n\n # need train dir to list category names\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n eval_type = cfg.get('args', 'eval_type')\n train_xml_dir = os.path.join(base, cfg.get('data', 'train_xml_dir'))\n\n if eval_type == 'sparse':\n predict_sparse(train_xml_dir)\n else:\n predict_dense(train_xml_dir)", "def predict(self, grid : np.array, array : np.array) :\n self.set_grid_array(grid, array)\n guess = []\n candidates = []\n for i in range(len(grid)//AUTO_width2) :\n x = i*AUTO_width2\n for j in range(len(grid[i])//AUTO_width2):\n y = j*AUTO_width2\n if self.check_masked([x,y]):\n candidates.append([x,y])\n vectors = []\n for c in candidates :\n vectors.append(self.vector_convert(c))\n probs = self.clf.predict(np.array(vectors))\n for n in range(len(probs)) :\n # if np.argmax(probs[n]):\n if probs[n] :\n c = candidates[n]\n guess.append([min(c[0]+(AUTO_width1//2), self.m_x),min(c[1]+(AUTO_width1//2), self.m_y)])\n return guess", "def generatePredictions(method, hlas, peptides, cpus=1, verbose=False):\n if verbose:\n \"\"\"Create console handler and set level to debug\"\"\"\n logging.basicConfig(level=logging.INFO, format='%(levelname)s:%(asctime)s:%(message)s')\n logging.info('HLA prediction initialized for %d HLA allele(s) using method %s on %d CPU(s)', len(hlas), method, cpus)\n\n if cpus > 1:\n result = parmap.map(predictHLA_mhctools, hlas, method, peptides, verbose, pool=Pool(processes=cpus))\n else:\n result = parmap.map(predictHLA_mhctools, hlas, method, peptides, verbose, parallel=False)\n\n \"\"\"Remove None's\"\"\"\n outDf = pd.concat([r for r in result if not r is None], axis=0)\n\n \"\"\"Take the log of the prediction if neccessary.\"\"\"\n if outDf.affinity.max() > 100:\n outDf.loc[:, 'pred'] = np.log(outDf.affinity)\n\n if verbose:\n logging.info('Completed %d predictions (expected %d)', outDf.shape[0], len(hlas) * len(peptides))\n return outDf", "def predict_structure(prefix, model_runner_1: alphafold.model.model.RunModel,\n model_runner_3: alphafold.model.model.RunModel,\n feature_dict, Ls: list[int], model_params: haiku.Params, use_model, do_relax=False,\n random_seed=0):\n\n # Minkyung's code\n # add big enough number to residue index to indicate chain breaks\n idx_res = feature_dict['residue_index']\n L_prev = 0\n # Ls: number of residues in each chain\n for L_i in Ls[:-1]:\n idx_res[L_prev + L_i:] += 200\n L_prev += L_i\n chains = list(\"\".join([ascii_uppercase[n] 
* L for n, L in enumerate(Ls)]))\n feature_dict['residue_index'] = idx_res\n\n # Run the models.\n plddts, paes = [], []\n unrelaxed_pdb_lines = []\n relaxed_pdb_lines = []\n\n print(f\"Use_model {use_model}\")\n\n for model_name, params in model_params.items():\n if model_name in use_model:\n print(f\"running {model_name}\")\n # swap params to avoid recompiling\n # note: models 1,2 have diff number of params compared to models 3,4,5\n if any(str(m) in model_name for m in [1, 2]): model_runner = model_runner_1\n if any(str(m) in model_name for m in [3, 4, 5]): model_runner = model_runner_3\n model_runner.params = params\n\n processed_feature_dict: affeatures.FeatureDict = model_runner.process_features(feature_dict,\n random_seed=random_seed)\n # prediction_result is a dictionary of NumPy feature arrays\n prediction_result: dict = model_runner.predict(processed_feature_dict)\n unrelaxed_protein: protein.Protein = protein.from_prediction(processed_feature_dict, prediction_result)\n unrelaxed_pdb_lines.append(protein.to_pdb(unrelaxed_protein))\n plddts.append(prediction_result['plddt'])\n paes.append(prediction_result['predicted_aligned_error'])\n\n if do_relax:\n # Relax the prediction.\n amber_relaxer: relax.AmberRelaxation = relax.AmberRelaxation(max_iterations=0, tolerance=2.39,\n stiffness=10.0, exclude_residues=[],\n max_outer_iterations=20)\n relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)\n relaxed_pdb_lines.append(relaxed_pdb_str)\n\n # rerank models based on predicted lddt\n lddt_rank = np.mean(plddts, -1).argsort()[::-1]\n out = {}\n print(\"reranking models based on avg. predicted lDDT\")\n for n, r in enumerate(lddt_rank):\n print(f\"model_{n + 1} {np.mean(plddts[r])}\")\n\n unrelaxed_pdb_path = f'{prefix}_unrelaxed_model_{n + 1}.pdb'\n with open(unrelaxed_pdb_path, 'w') as f:\n f.write(unrelaxed_pdb_lines[r])\n set_bfactor(unrelaxed_pdb_path, plddts[r], idx_res, chains)\n\n if do_relax:\n relaxed_pdb_path = f'{prefix}_relaxed_model_{n + 1}.pdb'\n with open(relaxed_pdb_path, 'w') as f: f.write(relaxed_pdb_lines[r])\n set_bfactor(relaxed_pdb_path, plddts[r], idx_res, chains)\n\n out[f\"model_{n + 1}\"] = {\"plddt\": plddts[r], \"pae\": paes[r]}\n return out", "def process_predictions_and_anchors(self, anchor_list, valid_flag_list,\n cls_scores, bbox_preds, img_metas,\n gt_bboxes_ignore_list):\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n anchor_list_ = []\n valid_flag_list_ = []\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list_.append(torch.cat(anchor_list[i]))\n valid_flag_list_.append(torch.cat(valid_flag_list[i]))\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n\n num_levels = len(cls_scores)\n cls_score_list = []\n bbox_pred_list = []\n\n mlvl_cls_score_list = [\n cls_score.permute(0, 2, 3, 1).reshape(\n num_imgs, -1, self.num_base_priors * self.cls_out_channels)\n for cls_score in cls_scores\n ]\n mlvl_bbox_pred_list = [\n bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n self.num_base_priors * 4)\n for bbox_pred in bbox_preds\n ]\n\n for i in range(num_imgs):\n mlvl_cls_tensor_list = [\n mlvl_cls_score_list[j][i] for j in range(num_levels)\n ]\n 
mlvl_bbox_tensor_list = [\n mlvl_bbox_pred_list[j][i] for j in range(num_levels)\n ]\n cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0)\n cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0)\n cls_score_list.append(cat_mlvl_cls_score)\n bbox_pred_list.append(cat_mlvl_bbox_pred)\n return (anchor_list_, valid_flag_list_, num_level_anchors_list,\n cls_score_list, bbox_pred_list, gt_bboxes_ignore_list)", "def predict_triples(\n model: Model,\n *,\n triples: Union[None, MappedTriples, LabeledTriples, Union[Tuple[str, str, str], Sequence[Tuple[str, str, str]]]],\n triples_factory: Optional[CoreTriplesFactory] = None,\n batch_size: Optional[int] = None,\n mode: Optional[InductiveMode] = None,\n) -> ScorePack:\n # note: the models' predict method takes care of setting the model to evaluation mode\n # normalize input\n triples = get_mapped_triples(triples, factory=triples_factory)\n # calculate scores (with automatic memory optimization)\n scores = _predict_triples_batched(\n model=model, mapped_triples=triples, batch_size=batch_size or len(triples), mode=mode\n ).squeeze(dim=1)\n return ScorePack(result=triples, scores=scores)", "def _predict(self, feature_map_tensor_list):\n pass", "def compute_detections(depc, gid_list, config=None):\n logger.info('[ibs] Preprocess Detections')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n ibs.assert_valid_gids(gid_list)\n\n USE_CLASSIFIER = False\n\n if USE_CLASSIFIER:\n classifier_config = {\n 'classifier_weight_filepath': config['classifier_weight_filepath'],\n }\n # Filter the gids by annotations\n prediction_list = depc.get_property(\n 'classifier', gid_list, 'class', config=classifier_config\n )\n confidence_list = depc.get_property(\n 'classifier', gid_list, 'score', config=classifier_config\n )\n confidence_list = [\n confidence if prediction == 'positive' else 1.0 - confidence\n for prediction, confidence in zip(prediction_list, confidence_list)\n ]\n gid_list_ = [\n gid\n for gid, confidence in zip(gid_list, confidence_list)\n if confidence >= config['classifier_sensitivity']\n ]\n else:\n classifier_config = {\n 'classifier_two_weight_filepath': config['classifier_weight_filepath'],\n }\n # Filter the gids by annotations\n predictions_list = depc.get_property(\n 'classifier_two', gid_list, 'classes', config=classifier_config\n )\n gid_list_ = [\n gid\n for gid, prediction_list in zip(gid_list, predictions_list)\n if len(prediction_list) > 0\n ]\n\n gid_set_ = set(gid_list_)\n # Get the localizations for the good gids and add formal annotations\n localizer_config = {\n 'algo': config['localizer_algo'],\n 'config_filepath': config['localizer_config_filepath'],\n 'weight_filepath': config['localizer_weight_filepath'],\n 'grid': config['localizer_grid'],\n }\n bboxes_list = depc.get_property(\n 'localizations', gid_list_, 'bboxes', config=localizer_config\n )\n thetas_list = depc.get_property(\n 'localizations', gid_list_, 'thetas', config=localizer_config\n )\n confses_list = depc.get_property(\n 'localizations', gid_list_, 'confs', config=localizer_config\n )\n\n # Get the corrected species and viewpoints\n labeler_config = {\n 'labeler_weight_filepath': config['labeler_weight_filepath'],\n }\n # depc.delete_property('localizations_labeler', gid_list_, config=labeler_config)\n specieses_list = depc.get_property(\n 'localizations_labeler', gid_list_, 'species', config=labeler_config\n )\n viewpoints_list = depc.get_property(\n 'localizations_labeler', gid_list_, 'viewpoint', 
config=labeler_config\n )\n scores_list = depc.get_property(\n 'localizations_labeler', gid_list_, 'score', config=labeler_config\n )\n\n # Collect the detections, filtering by the localization confidence\n empty_list = [\n 0.0,\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n ]\n detect_dict = {}\n for index, gid in enumerate(gid_list_):\n bbox_list = bboxes_list[index]\n theta_list = thetas_list[index]\n species_list = specieses_list[index]\n # species_dict = {}\n # for species in species_list:\n # if species not in species_dict:\n # species_dict[species] = 0\n # species_dict[species] += 1\n # for tup in species_dict.iteritems():\n # logger.info('\\t%r' % (tup, ))\n # logger.info('----')\n viewpoint_list = viewpoints_list[index]\n conf_list = confses_list[index]\n score_list = scores_list[index]\n zipped = list(\n zip(\n bbox_list,\n theta_list,\n species_list,\n viewpoint_list,\n conf_list,\n score_list,\n )\n )\n zipped_ = []\n for bbox, theta, species, viewpoint, conf, score in zipped:\n if (\n conf >= config['localizer_sensitivity']\n and score >= config['labeler_sensitivity']\n ):\n zipped_.append([bbox, theta, species, viewpoint, conf * score])\n else:\n logger.info(\n 'Localizer {:0.02f} {:0.02f}'.format(\n conf, config['localizer_sensitivity']\n )\n )\n logger.info(\n 'Labeler {:0.02f} {:0.02f}'.format(\n score, config['labeler_sensitivity']\n )\n )\n if len(zipped_) == 0:\n detect_list = list(empty_list)\n else:\n detect_list = [0.0] + [np.array(_) for _ in zip(*zipped_)]\n detect_dict[gid] = detect_list\n\n # Filter the annotations by the localizer operating point\n for gid in gid_list:\n if gid not in gid_set_:\n assert gid not in detect_dict\n result = list(empty_list)\n else:\n assert gid in detect_dict\n result = detect_dict[gid]\n # logger.info(result)\n # raw_input()\n # logger.info('')\n # image = ibs.get_images(gid)\n # image = vt.resize(image, (500, 500))\n # cv2.imshow('', image)\n # cv2.waitKey(0)\n yield tuple(result)", "def predict(self, testing_set):\r\n # Run prediction by multiply inputs with the weight and map it\r\n # Through the activation function\r\n final_prob = 0\r\n probability = self.activation(self.weighted_sum(testing_set))\r\n prediction = self.threshold(probability)\r\n if prediction == 1:\r\n final_prob = probability\r\n else:\r\n final_prob = 1 - probability\r\n return [prediction, final_prob]", "def predict_all(self, imgs):\n return self._predict(imgs)", "def single_predict_proba(self, vec, n_nearest):\n\n most_sim_ind = self.annoy_index.get_nns_by_vector(vec, n_nearest)\n most_similar_doc_ids = [self.document_ids[x] for x in most_sim_ind]\n return self.ids2class.loc[most_similar_doc_ids].mean().\\\n sort_values(ascending=False)", "def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False):\n pred = {} # map {classname: pred}\n gt = {} # map {classname: gt}\n for img_id in pred_all.keys():\n for classname, bbox, score in pred_all[img_id]:\n if classname not in pred: pred[classname] = {}\n if img_id not in pred[classname]:\n pred[classname][img_id] = []\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n pred[classname][img_id].append((bbox,score))\n for img_id in gt_all.keys():\n for classname, bbox in gt_all[img_id]:\n if classname not in gt: gt[classname] = {}\n if img_id not in gt[classname]:\n gt[classname][img_id] = []\n gt[classname][img_id].append(bbox)\n\n rec = {}\n prec = {}\n ap = {}\n for classname in gt.keys():\n print('Computing AP for 
class: ', classname)\n rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric)\n print(classname, ap[classname])\n \n return rec, prec, ap", "def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result", "def predict_numba(\n coordinates, points, coeffs, result, greens_function\n): # pylint: disable=not-an-iterable\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n result[i] += coeffs[j] * greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def fit(\n self,\n base_models_predictions: np.ndarray,\n true_targets: np.ndarray,\n model_identifiers: List[Tuple[int, int, float]],\n ) -> 'SingleBest':\n return self", "def predict(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,0:nn]\n\n if self.weights == 'uniform':\n\n p = np.mean(self.ytrain[neigh_ind], axis=1)\n\n elif self.weights =='distance':\n\n p = np.empty((self.dist.shape[0], self.ytrain.shape[1]), dtype=np.float)\n\n for i in range(self.ytrain.shape[1]):\n p[:,i] = utilities.weighted_mean(self.ytrain[neigh_ind,i], self.dist[:,0:nn])\n\n ypred.append(p)\n\n self.ypred = ypred\n self.nn_list = nn_list\n return ypred", "def predictManyIpdFuncModel(self, refId):\n\n # Materialized the numpy wrapper around the shared data\n snipFunction = self.snippetFunc(refId, self.post, self.pre)\n\n def fMany(sites):\n contexts = [snipFunction(x[0], x[1]) for x in sites]\n return self.gbmModel.getPredictions(contexts)\n\n return fMany", "def predict(self, reps):\n return [self.classes_[self.predict_one(rep)] for rep in reps]", "def predict_multi_target_candidates(\n sparql, timeout, gps, sources, parallel=None, exclude_source=None):\n assert len(sources) > 1 and isinstance(sources[0], (URIRef, Literal))\n if parallel is None:\n parallel = config.PREDICTION_IN_PARALLEL\n if exclude_source is None:\n exclude_source = config.PREDICTION_EXCLUDE_SOURCE\n\n pq = partial(\n predict_multi_query,\n sparql, timeout,\n sources=sources,\n )\n map_ = parallel_map if parallel else map\n results = map_(pq, gps)\n # drop timings:\n res = [stcs for _, stcs in results]\n if exclude_source:\n res = [\n OrderedDict([\n (s, tcs - {s})\n for s, tcs in stcs.items()\n ])\n for stcs in res\n ]\n return res", "def predictRating(toPredict, candidateList):\n\n 
ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction", "def calc_performance_given_labels(self, cutoff, ligand_labels):\n performance = defaultdict(dict)\n num_ligands = np.shape(ligand_labels)[1]\n\n if num_ligands > 1: # ligand-type assessment\n # calc per-ligand assessment for multi-label prediction\n for i in range(0, num_ligands):\n tp = fp = tn = fn = 0\n cross_pred = [0, 0, 0, 0]\n for idx, lig in enumerate(ligand_labels):\n if self.predictions[idx, i] >= cutoff: # predicted as binding to this ligand\n cross_prediction = False\n true_prediction = False\n\n for j in range(0, num_ligands):\n if i == j: # same as predicted ligand\n if lig[j] >= self.bind_cutoff: # also annotated to this ligand\n tp += 1\n cross_pred[i] += 1\n true_prediction = True\n else:\n fp += 1\n else:\n if lig[j] >= self.bind_cutoff and not true_prediction:\n cross_pred[j] += 1\n cross_prediction = True\n\n if not true_prediction and not cross_prediction:\n # residues is not annotated to bind any of the ligands\n cross_pred[3] += 1\n else:\n if lig[i] >= cutoff:\n fn += 1\n else:\n tn += 1\n\n if i == 0:\n ligand = 'metal'\n elif i == 1:\n ligand = 'nucleic'\n else:\n ligand = 'small'\n\n bound = False\n if (tp + fn) > 0:\n bound = True\n acc, prec, recall, f1, mcc = PerformanceAssessment.calc_performance_measurements(tp, fp, tn, fn)\n # calculate performance measurements for negatives\n _, neg_p, neg_r, neg_f1, _ = PerformanceAssessment.calc_performance_measurements(tn, fn, tp, fp)\n\n performance[ligand] = {'tp': tp, 'fp': fp, 'tn': tn, 'fn': fn, 'acc': acc, 'prec': prec,\n 'recall': recall, 'f1': f1, 'neg_prec': neg_p, 'neg_recall': neg_r,\n 'neg_f1': neg_f1, 'mcc': mcc, 'bound': bound,\n 
'cross_prediction': cross_pred}\n\n # get overall performance\n reduced_labels = np.sum(ligand_labels > cutoff, axis=1)\n if len(self.predictions.shape) == 1:\n reduced_predictions = (self.predictions >= cutoff)\n else:\n reduced_predictions = np.sum(self.predictions >= cutoff, axis=1)\n\n tp = np.sum(np.logical_and(reduced_labels > 0, reduced_predictions > 0))\n fp = np.sum(np.logical_and(reduced_labels == 0, reduced_predictions > 0))\n tn = np.sum(np.logical_and(reduced_labels == 0, reduced_predictions == 0))\n fn = np.sum(np.logical_and(reduced_labels > 0, reduced_predictions == 0))\n\n acc, prec, recall, f1, mcc = PerformanceAssessment.calc_performance_measurements(tp, fp, tn, fn)\n _, neg_p, neg_r, neg_f1, _ = PerformanceAssessment.calc_performance_measurements(tn, fn, tp, fp)\n performance['overall'] = {'tp': tp, 'fp': fp, 'tn': tn, 'fn': fn, 'acc': acc, 'prec': prec, 'recall': recall,\n 'f1': f1, 'neg_prec': neg_p, 'neg_recall': neg_r, 'neg_f1': neg_f1, 'mcc': mcc,\n 'bound': True, 'cross_prediction': [0, 0, 0, 0]}\n\n return performance", "def _predict(self, feature_map_tensor_list):\n # [1 * 300, 4, 4, 1024]\n box_encodings, class_predictions = self._predict_boxes_and_classes(\n feature_map_tensor_list[0]) \n box_encoding_predictions_list = [box_encodings]\n class_score_predictions_list = [class_predictions]\n return box_encoding_predictions_list, class_score_predictions_list", "def get_data_predictions(ids, fasta_file):\n sequences = FileManager.read_fasta(fasta_file)\n\n max_length = ProteinInformation.determine_max_length(sequences, ids)\n labels = dict()\n for i in ids:\n prot_length = len(sequences[i])\n binding_tensor = np.zeros([prot_length, 3], dtype=np.float32)\n labels[i] = binding_tensor\n\n return sequences, max_length, labels" ]
[ "0.55614895", "0.5547054", "0.5459956", "0.54064333", "0.54053646", "0.52904963", "0.5285741", "0.51725566", "0.51354027", "0.51295596", "0.51257443", "0.5097779", "0.50609297", "0.50480694", "0.5019661", "0.5019566", "0.49970984", "0.49909043", "0.4950063", "0.494749", "0.49455565", "0.48972625", "0.4895079", "0.4886789", "0.48843226", "0.4880819", "0.48680115", "0.4862983", "0.48546878", "0.485122" ]
0.69647753
0
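Several of the negatives in this record share one minibatched-prediction pattern: split the inputs into fixed-size chunks, predict per chunk, and concatenate the outputs. A minimal sketch of that pattern, assuming a generic predict_fn that maps a list of examples to a list of labels (both names are hypothetical, not from the dataset):

from math import ceil

def predict_in_minibatches(predict_fn, examples, batch_size=128):
    # Process ceil(n / batch_size) chunks and concatenate their outputs.
    preds = []
    for i in range(ceil(len(examples) / batch_size)):
        batch = examples[i * batch_size:(i + 1) * batch_size]
        preds.extend(predict_fn(batch))
    return preds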
Returns the feature encoding for peptides
def encode(self, peptides): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEncoded(df, feat, column, out_name, oh=False, clip=False, write_out=False, thresh=False):\n le = preprocessing.LabelEncoder()\n le.fit(df[column])\n y = le.transform(df[column]) \n if write_out is True:\n df[out_name] = pd.Series(y)\n # One Hot encode features\n if oh is True:\n x = oneHot(df, y, clip, thresh=False)\n feat.append(x)\n return df, feat, le", "def getEncode(self, img):\n img_ = self.preprocess(img)\n fv = self.model_.predict(img_)\n fv = fv.reshape(-1, 1)\n return fv", "def encode(self, features, training=True):\n conditioning = self.preprocessor(features, training=training)\n return conditioning if self.encoder is None else self.encoder(conditioning)", "def _preorder_encode(self):\n features = np.expand_dims(self.get_features(), axis=0)\n\n features = np.pad(features, (1, 0),\n 'constant', constant_values=(0, 0))\n return features.transpose(1, 0), np.array([[1], [0], [0]])", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def get_feature_names(self):\n return [self.char]", "def encode(self, pt, en):\n pt = [self.tokenizer_pt.vocab_size] +\\\n self.tokenizer_pt.encode(pt.numpy()) +\\\n [self.tokenizer_pt.vocab_size+1]\n en = [self.tokenizer_en.vocab_size] +\\\n self.tokenizer_en.encode(en.numpy()) +\\\n [self.tokenizer_en.vocab_size+1]\n return (pt, en)", "def output_features(self) -> List[str]:\n return self._pipeline.features", "def getEncoding(self):\n noteSimple = self.simplify()\n return self.octaveIndices.get(noteSimple.getNoteName().lower(), None)", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def get_feature_names(self):\n\t\treturn np.array(['nouns', 'adjectives', 'verbs', 'adverbs'])", "def get_encoded(self):\n pass", "def get_encoded_faces_wanted():\n encoded = {}\n # c=0\n with open(\"wanted.pkl\",\"rb\") as f:\n encoded=pickle.load(f)\n return encoded", "def encoder_inference(self, features, states):\n with tf.name_scope(f\"{self.name}_encoder\"):\n outputs = tf.expand_dims(features, axis=0)\n outputs, new_states = self.encoder.recognize(outputs, states)\n return tf.squeeze(outputs, axis=0), new_states", "def binaryEncode(peptide):\n\n #do 1 hot encoding\n binaryPeptide=''\n for aa in peptide:\n binaryAmino=''\n if aa =='A':\n binaryAmino='10000000000000000000'\n if aa =='R':\n binaryAmino='01000000000000000000'\n if aa =='N':\n binaryAmino='00100000000000000000'\n if aa =='D':\n binaryAmino='00010000000000000000'\n if aa =='C':\n binaryAmino='00001000000000000000'\n if aa =='Q':\n binaryAmino='00000100000000000000'\n if aa =='E':\n binaryAmino='00000010000000000000'\n if aa =='G':\n binaryAmino='00000001000000000000'\n if aa =='H':\n binaryAmino='00000000100000000000'\n if aa =='I':\n binaryAmino='00000000010000000000'\n if aa =='L':\n binaryAmino='00000000001000000000'\n if aa =='K':\n binaryAmino='00000000000100000000'\n if aa =='M':\n binaryAmino='00000000000010000000'\n if aa =='F':\n binaryAmino='00000000000001000000'\n if aa =='P':\n binaryAmino='00000000000000100000'\n if aa =='S':\n binaryAmino='00000000000000010000'\n if aa =='T':\n binaryAmino='00000000000000001000'\n if aa =='W':\n binaryAmino='00000000000000000100'\n if aa =='Y':\n binaryAmino='00000000000000000010'\n if aa =='V':\n binaryAmino='00000000000000000001'\n binaryPeptide=binaryPeptide +binaryAmino\n if len(binaryPeptide) == 500*20:\n break \n \n while len(binaryPeptide) < 500*20:\n binaryPeptide = binaryPeptide +str(0)\n \n binaryPeptide = np.array(list(binaryPeptide),dtype=float)\n 
binaryPeptide = np.reshape(binaryPeptide,(binaryPeptide.shape[0],1))\n binaryPeptide = np.transpose(binaryPeptide)\n return binaryPeptide", "def get_labels_comp(F, is_p, is_m):\n labels = [\"C\"+str(idx+1)+\"|P\" if is_p[idx]\n else \"C\"+str(idx+1)+\"|M\" if is_m[idx]\n else \"C\"+str(idx+1) for idx in range(F.shape[0])]\n return labels", "def get_name_to_features(self):\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n 'label_ids': tf.io.FixedLenFeature([], tf.int64),\n }\n return name_to_features", "def encode(self, pt, en):\n tok_pt = [self.tokenizer_pt.vocab_size]+self.tokenizer_pt.encode(\n pt.numpy())+[(self.tokenizer_pt.vocab_size) + 1]\n tok_en = [self.tokenizer_en.vocab_size]+self.tokenizer_en.encode(\n en.numpy())+[(self.tokenizer_en.vocab_size) + 1]\n return tok_pt, tok_en", "def fit_transform(self):\n if self.enc_types == \"label\":\n return self._label_encoding()\n elif self.enc_types == \"ohe\":\n return self._one_hot_encoder()\n elif self.enc_types == \"binary\":\n return self._binarization()\n else:\n raise Exception(\"Encoding type not understood\")", "def get_encoded_label_from_example(example):\n return example.features.feature['label/encoded'].bytes_list.value[0]", "def extract_feat(self, img):\r\n _, _, x = self.pre_encoder(img)\r\n x = self.backbone(x)\r\n if self.with_neck:\r\n x = self.neck(x)\r\n return x", "def get_feature_labels(self):\n return self.feature_labels", "def getFeatures(self):\n return \"1:\" + str(self.getEnergy()) + \\\n \" 2:\" + str(self.getCentroid()) + \\\n \" 3:\" + str(self.getZCrossingRate()) + \\\n \" 4:\" + str(self.getBandwidth())", "def getFeatureNames(self):\n return [\"f100\", \"f103\", \"f104\"]", "def hotshot_encoding(\n self,\n peptides,\n peptide_length):\n shape = (len(peptides), peptide_length, 20)\n index_dict = self.index_dict()\n X = np.zeros(shape, dtype=bool)\n for i, peptide in enumerate(peptides):\n for j, amino_acid in enumerate(peptide):\n k = index_dict[amino_acid]\n X[i, j, k] = 1\n return X", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def one_hot_encode(self, meta_field):\n one_hot = pd.get_dummies(self.sample_meta[meta_field]).values\n return one_hot", "def out_featuretxt(self):\n return self.outputfrominput(inputformat='csv', stripextension='.csv', addextension='.features.csv')", "def encode_image(self, image):\n image = self.clip_preprocess(image).unsqueeze(0).to(self.device)\n image_features = self.clip_model.encode_image(image)\n return image_features.cpu().detach().numpy()" ]
[ "0.58020437", "0.56009674", "0.552618", "0.55103755", "0.5440784", "0.5409019", "0.5382088", "0.5371378", "0.5367779", "0.53632474", "0.5362326", "0.5335267", "0.5300006", "0.5298888", "0.5270705", "0.52702075", "0.52668303", "0.52648956", "0.5260709", "0.5254797", "0.52429825", "0.5237811", "0.5230884", "0.5228697", "0.52206933", "0.52184826", "0.52184826", "0.5216734", "0.5210861", "0.52097696" ]
0.65368515
0
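The positive document here only declares the interface; one of the negatives (hotshot_encoding) shows the usual realization: a boolean one-hot tensor of shape (n_peptides, peptide_length, 20). A minimal sketch under the assumption of a fixed 20-letter amino-acid alphabet — the alphabet ordering is an assumption, not taken from the dataset:

import numpy as np

AMINO_ACIDS = "ACDEFGHIKLMNPQRSTVWY"  # assumed 20-letter alphabet
INDEX = {aa: i for i, aa in enumerate(AMINO_ACIDS)}

def encode(peptides, peptide_length):
    # One peptide per row, one position per column, one channel per residue.
    X = np.zeros((len(peptides), peptide_length, len(AMINO_ACIDS)), dtype=bool)
    for i, peptide in enumerate(peptides):
        for j, aa in enumerate(peptide[:peptide_length]):
            X[i, j, INDEX[aa]] = True
    return X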
Parses external results and returns the result
def parse_external_result(self, file): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_result(self, result, *, verbose=False, **kwargs):\n return get_fermilat_datafile(result)", "def task_parse_results():\n pass", "def parse_result(href):\n url=URLS['result-base']+href\n parse_functions={\n \"lobbyist\":parse_lobbyist\n , \"client\":parse_client\n }\n return parse_functions[pagetype(href)](url)", "def parse_result(href):\n url=URLS['result-base']+href\n parse_functions={\n \"lobbyist\":parse_lobbyist\n , \"client\":parse_client\n }\n return parse_functions[pagetype(href)](url)", "def getResults():", "def _parse_results(self):\n for line in self.file_dic['output'].splitlines():\n if line.startswith(' * GAMESS VERSION = '):\n temp = line.split('=')[1]\n temp = temp.split('*')[0]\n self.version = temp.strip()\n\n if line[1:25] == 'FREE ENERGY OF SOLVATION' and line.find('1 ATM') == -1:\n temp = line.split()\n #Take the next number after =\n #In KCAL/MOL\n self.solvation_energy = float(temp[temp.index(\"=\") + 1])", "def parse_results(stdout):\n for line in stdout.split(b\"\\n\"):\n log.debug(\"processing line %s\", line)\n fields = line.strip().split()\n if len(fields) != 9:\n continue\n metric = fields[1].decode(\"ascii\")\n info = lm_function_map[metric]\n dtype = info['dtype']\n yield {\n \"metric\": metric,\n \"n_compart\": int(fields[3]),\n \"n_exclude\": int(fields[4].strip(b\"()\")),\n \"total\": dtype(fields[2]),\n \"min\": dtype(fields[5]),\n \"avg\": float(fields[6]),\n \"max\": dtype(fields[7]),\n \"units\": info[\"units\"],\n }", "def parse_results(self, result):\n\n interesting = []\n for item in result[\"hits\"][\"hits\"]:\n source = item[\"_source\"]\n meta = source.get(\"meta\")\n\n title = \"No title found\"\n descr = None\n os_path = None\n highlight = None\n\n if meta is not None:\n title = meta.get(\"title\") or \"No title found\"\n if meta.get(\"raw\") is not None:\n descr = meta.get(\"raw\").get(\"description\")\n\n path = source.get(\"path\")\n if path is not None:\n os_path = path.get(\"real\")\n\n highlight = \" \".join(item[\"highlight\"][\"content\"][0].split())\n\n temp = {\n \"id\": item[\"_id\"],\n \"title\": title,\n \"description\": descr,\n \"path\": os_path,\n \"highlight\": highlight,\n }\n interesting.append(temp)\n self.interesting = interesting\n return interesting", "def parse_result_file(self, filepath: str):\n\n raise NotImplementedError", "def parseResults(result):\n # Split the results based on newline characters\n results_cut = result.text.split('\\n')[12:-49]\n # Initialize lists of the values to be parsed from results_cut \n visit_id = []\n name = []\n ra_hour = []\n ra_min = []\n ra_sec = []\n dec_deg = []\n dec_min = []\n dec_sec = []\n v_mag = []\n ra_motion = []\n dec_motion = []\n # Iterate through results_cut and append them to the respective lists\n for line in results_cut:\n visit_id.append(int(line[6:12]))\n name.append(line[12:36])\n ra_hour.append(int(line[38:40]))\n ra_min.append(int(line[41:43]))\n ra_sec.append(float(line[44:48]))\n dec_deg.append(int(line[49:52]))\n dec_min.append(int(line[53:55]))\n dec_sec.append(int(line[56:58]))\n try:\n v_mag.append(float(line[60:64]))\n except ValueError:\n # If there is no reported v_mag for the object, return -99\n v_mag.append(-99.0)\n ra_motion.append('%s%i' % (line[84], int(line[82:84])))\n dec_motion.append('%s%i' % (line[91], int(line[89:91])))\n # Initialize the pandas dataframe to be returned\n results_df = pd.DataFrame(np.array([visit_id, name, ra_hour, ra_min, ra_sec, \n dec_deg, dec_min, dec_sec, v_mag, \n ra_motion, dec_motion]).T, \n 
columns=['visit_id', 'name', 'ra_hour', 'ra_min', 'ra_sec', \n 'dec_deg', 'dec_min', 'dec_sec', 'v_mag', \n 'ra_motion', 'dec_motion'])\n # Add the lists to the dataframe\n results_df['visit_id'] = pd.to_numeric(results_df['visit_id'])\n results_df['ra_hour'] = pd.to_numeric(results_df['ra_hour'])\n results_df['ra_min'] = pd.to_numeric(results_df['ra_min'])\n results_df['ra_sec'] = pd.to_numeric(results_df['ra_sec'])\n results_df['dec_deg'] = pd.to_numeric(results_df['dec_deg'])\n results_df['dec_min'] = pd.to_numeric(results_df['dec_min'])\n results_df['dec_sec'] = pd.to_numeric(results_df['dec_sec'])\n results_df['v_mag'] = pd.to_numeric(results_df['v_mag'])\n results_df['ra_motion'] = pd.to_numeric(results_df['ra_motion'])\n results_df['dec_motion'] = pd.to_numeric(results_df['dec_motion'])\n \n return results_df", "def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results", "def parse_results(results):\n # hold the results in a dictionary\n results_dict = {}\n # loop over each line (result)\n for result in results:\n # split the string based on spaces\n parts = result.split()\n # there should only be a name and an outcome ('w', 'l')\n if len(parts) > 2:\n raise Exception(\"the results file has a bad format\")\n # keep track of the name and the outcome so I don't have to use \n # parts[0] and parts[1]\n name = parts[0]\n outcome = parts[1]\n # add the name to the dictionary if it's not already there\n if name not in results_dict:\n results_dict[name] = (0, 0)\n # modify the results tuple according to whether its a win or loss\n if outcome == 'w':\n results_dict[name] = (results_dict[name][0]+1, results_dict[name][1])\n elif outcome == 'l':\n results_dict[name] = (results_dict[name][0], results_dict[name][1]+1)\n else:\n raise Exception(\"I didn't recognize the outcome\")\n return results_dict", "def parse_query_results(self):\n # TODO: nicely parsed needs defining; may depend on query\n return self.json_result", "def _read_result(self, stdout, stderr):\n parser = FasttreeParser()\n\n try:\n parser.parse(tree=stdout, other=stderr)\n result = parser.to_dict()\n except IOError as ioerr:\n logger.error('Error reading results')\n result = None\n except ParseException as parseerr:\n logger.error('Other parse error', parseerr)\n result = None\n\n return result", "def _parse_results(self, handle):\n result_reader = ResultsReader(handle)\n for result in result_reader:\n\n # Diagnostic messages may be returned in the results\n if isinstance(result, Message):\n logger.debug('[{}] {}'.format(result.type, result.message))\n\n # Normal events are returned as dicts\n elif isinstance(result, dict):\n result = dict(result)\n if '_time' in result:\n result['_time'] = SplunkAbstraction._to_datetime(result['_time'])\n yield {\n 'time': result['_time'] if '_time' in result else '',\n 'metadata': {k: v for k, v in result.items() if k.startswith('_')},\n 'state': {k: v for k, v in result.items() if not k.startswith('_')}\n }\n\n else:\n logger.warning('Unknown result type in _parse_results: {}'.format(result))\n\n assert result_reader.is_preview is False", "def parse_single_result(filePath):\r\n numThreads, queue, affinity = parse_parameters(filePath)\r\n\r\n # parse results\r\n for line in open(filePath):\r\n if \"reported_time\" in line:\r\n s = line.split(\" \")[0]\r\n \r\n bench = s.split(\".\")[3]\r\n runtime = float(get_last_column_number(line))\r\n\r\n 
model[bench][affinity][numThreads].append(runtime)\r\n \r\n #print(\"threads:\" + str(numThreads) + \" affinity:\" + str(affinity) + \" queue:\" + str(queue))\r", "def postparse(self, parse_result):\n return parse_result", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n result.append({})\n\n for prop in self.pattern.findall(msg):\n key = prop.split(\"=\")[0]\n value = prop.split('\"')[1]\n\n if key == \"CM\":\n try:\n value = float(value)\n except:\n pass\n if key == \"CLASSID\":\n try:\n value = int(value)\n except:\n pass\n result[-1][key] = value\n\n return result", "def __parse_document(self, results):\n fullname = self.__extract_fullname(results[0])\n if not results[1].startswith(\"-\"):\n raise ValueError(\"Invalid second line of output: '%s'. \"\\\n \"Expected a title underline.\"\n % text[1])\n results = results[2:] # trim off top two lines of header information\n maintests, cleanup = self.__split_on_cleanup(results)\n overall_success = not (maintests[0] == FAILURE_MARKER)\n\n if overall_success:\n testcases = self.__parse_success(fullname, maintests)\n else:\n testcases = self.__parse_failures(fullname, maintests)\n\n return testcases", "def get_data( filepath_query, filepath_results ):\n with open( filepath_query, 'r' ) as query_file:\n query = json.load( query_file )\n \n query_text = query['query']['multi_match']['query']\n query_scores = query['nlp_scores']\n query_data = {\n 'query_text' : query_text,\n 'bias_score' : query_scores['bias_score'],\n 'vocab_richness' : query_scores['stylo_scores']['vocab_richness'],\n 'hapax_legomena' : query_scores['stylo_scores']['hepax_legomena'],\n 'wordlength' : query_scores['stylo_scores']['readability_measures']['average_wordlength'],\n 'sentlength' : query_scores['stylo_scores']['readability_measures']['average_sentlength'],\n 'spelling_errors' : query_scores['stylo_scores']['spelling_errors'],\n 'topics' : query_scores['topics']\n }\n\n with open( filepath_results ) as results_file:\n results = json.load( results_file )\n \n results_data = []\n for doc in results:\n argID = doc['_source']['argsMeID']\n premise = doc['_source']['premise']\n average_wordlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_wordlength']\n average_sentlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_sentlength']\n bias_score = doc['nlp_scores']['bias_score']\n bias_distance = doc['bias_distance']\n stylo_distance = doc['stylo_distance']\n topic_match_count = doc['topic_match_count']\n old_score = doc['old_score']\n new_score = doc['new_score']\n scoring_distance = doc['scoring_distance']\n old_rank = doc['old_rank']\n new_rank = doc['new_rank']\n \n doc_data = {\n 'argID' : argID,\n 'premise' : premise,\n 'wordlength' : average_wordlength,\n 'sentlength' : average_sentlength,\n 'bias_score' : bias_score,\n 'bias_distance' : bias_distance,\n 'stylo_distance' : stylo_distance,\n 'topic_match_count' : topic_match_count,\n 'old_score' : old_score,\n 'new_score' : new_score,\n 'scoring_distance' : scoring_distance,\n 'old_rank' : old_rank,\n 'new_rank' : new_rank\n }\n results_data.append( doc_data )\n\n data_tuple = ( query_data, results_data )\n return data_tuple", "def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):\n rdict = SearchItem()\n\n if return_type in (ReturnType.FULL, return_type.LINK):\n link = single_result.find('a', class_='rc-DesktopSearchCard anchor-wrapper').get('href')\n\n rdict[\"links\"] 
= urljoin('https://www.coursera.org', link)\n\n if return_type in (ReturnType.FULL, return_type.TITLE):\n title = single_result.find('h2', class_=\"card-title\").get_text()\n rdict[\"titles\"] = title\n\n if return_type in (ReturnType.FULL,):\n partner_elem = single_result.find('span', class_='partner-name')\n partner = ''\n if partner_elem:\n partner = partner_elem.get_text()\n\n rating_avg_elem = single_result.find('span', class_='ratings-text')\n rating_avg = None\n if rating_avg_elem:\n rating_avg = float(rating_avg_elem.get_text())\n\n enrollment_elem = single_result.find('span', class_='enrollment-number')\n enrolment_number = None\n\n if enrollment_elem:\n enr_cl_txt = enrollment_elem.get_text().lower().replace(',', '').replace('.', '')\\\n .replace('m', '0' * 6).replace('k', '0' * 3)\n if enr_cl_txt.isdigit():\n enrolment_number = int(enr_cl_txt)\n\n difficulty_elem = single_result.find('span', class_='difficulty')\n difficulty = ''\n if difficulty_elem:\n difficulty = difficulty_elem.get_text()\n\n rating_count_elem = single_result.find('span', class_='ratings-count')\n rating_count = None\n if rating_count_elem:\n rating_count_elem = rating_count_elem.find('span')\n rating_count_cl = rating_count_elem.get_text().replace(',', '')\n if rating_count_cl.isdigit():\n rating_count = int(rating_count_cl)\n\n rdict.update({\n \"partners\": partner,\n \"ratings_avg\": rating_avg,\n \"ratings_count\": rating_count,\n \"enrolments_numbers\": enrolment_number,\n \"difficulties\": difficulty,\n })\n return rdict", "def __extract_info(self) -> Results:\n results: Results = []\n\n response = request(self.home_url)\n\n html = bs(response, \"lxml\")\n table = html.find(\"table\")\n for row in table.find_all(\"tr\")[1:]:\n col1, col2, col3 = row.find_all(\"td\")\n filename1, perc1 = col1.text.strip().split()\n filename2, perc2 = col2.text.strip().split()\n\n with ThreadPoolExecutor() as executor:\n future = executor.submit(self.__get_line_numbers, col1.a.get(\"href\"))\n lines = future.result()\n\n result_dict = Result(\n file1=filename1,\n file2=filename2,\n percentage_file1=perc_str_to_int(perc1),\n percentage_file2=perc_str_to_int(perc2),\n no_of_lines_matched=int(col3.text.strip()),\n lines_matched=lines,\n )\n results.append(result_dict)\n return results", "def parse(self):", "def get_results_from_script(self, script):\n raise NotImplementedError()", "async def get_result_data(\n work_dir,\n extra_adjust_parameters,\n results_file=None,\n group_id=None,\n artifact_id=None,\n ):\n manipulation_file_path = os.path.join(work_dir, SMEG_MANIPULATION_FILE_NAME)\n\n if os.path.isfile(manipulation_file_path):\n file_path = manipulation_file_path\n logger.info(\"Reading '{}' file with alignment result\".format(file_path))\n\n with open(file_path, \"r\") as f:\n # SMEG returns manipulations.json file already in correct format\n result = json.load(f)\n if group_id is not None and artifact_id is not None:\n logger.warning(\n \"Overriding the groupId of the result to: \" + group_id\n )\n result[\"VersioningState\"][\"executionRootModified\"][\n \"groupId\"\n ] = group_id\n\n logger.warning(\n \"Overriding the artifactId of the result to: \" + artifact_id\n )\n result[\"VersioningState\"][\"executionRootModified\"][\n \"artifactId\"\n ] = artifact_id\n return result\n else:\n return {\n \"VersioningState\": {\n \"executionRootModified\": {\n \"groupId\": group_id,\n \"artifactId\": artifact_id,\n \"version\": None,\n }\n },\n \"RemovedRepositories\": [],\n }", "def _get_result(url, etag=None, 
last_modified=None, use_discovery=False):\n _validate_url(url)\n\n result = feedparser.parse(url, etag=etag, modified=last_modified)\n # update URL for any redirects that feedparser followed\n url = result.get('href', url)\n\n if _is_not_modified_result(result):\n raise FeedNotModifiedError\n elif not _is_valid_result(result):\n if use_discovery:\n url = _discover_url(result)\n return _get_result(url)\n else:\n _fail(url, \"Failed to download or parse feed\")\n else:\n return url, result", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n\n list = self.pattern.findall(msg)\n for prop in list:\n if \"WORD\" in prop:\n value = prop.split('\"')[1]\n result.append(value)\n return result", "def _transform_results(self) -> List[BenchmarkResult]:\n with open(self.result_file, \"r\") as f:\n raw_results = json.load(f)\n\n parsed_results = []\n for suite in raw_results[\"suites\"]:\n parsed_results += self._parse_suite(\n results=suite,\n extra_tags={\"suite\": suite[\"name\"], \"source\": \"cpp-micro\"},\n )\n\n return parsed_results", "def decode_result(found):\n ...", "def read_fatlasa_results(filename):\n\n pass" ]
[ "0.712207", "0.70239043", "0.69925654", "0.69925654", "0.6953482", "0.6418458", "0.6294692", "0.6246567", "0.6133936", "0.6113321", "0.6071524", "0.60253865", "0.6020445", "0.5989748", "0.5982246", "0.59464705", "0.5942175", "0.5941691", "0.59284854", "0.5917822", "0.5897199", "0.58880794", "0.58804435", "0.58613646", "0.5858304", "0.5847517", "0.5843716", "0.5837484", "0.5834381", "0.5821012" ]
0.75591046
0
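Again the positive is only a stub, while the negatives parse tool output line by line. A minimal sketch of such a parser, assuming a purely hypothetical whitespace-delimited file of peptide/score pairs — real prediction tools emit richer formats, so this is illustrative only:

def parse_external_result(path):
    # Hypothetical format: one "peptide<whitespace>score" pair per line.
    results = {}
    with open(path) as handle:
        for line in handle:
            fields = line.split()
            if len(fields) != 2:
                continue  # skip headers, blanks, and malformed lines
            peptide, score = fields
            results[peptide] = float(score)
    return results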
Checks whether the specified execution command can be found in PATH
def is_in_path(self): exe = self.command.split()[0] for try_path in os.environ["PATH"].split(os.pathsep): try_path = try_path.strip('"') exe_try = os.path.join(try_path, exe).strip() if os.path.isfile(exe_try) and os.access(exe_try, os.X_OK): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for(command):\n if shutil.which(command) is None:\n print(colored(\"{} not available on system\".format(command),\"red\"))\n sys.exit(1)", "def find_on_path(command):\n\n if 'PATH' not in os.environ:\n return False\n\n path = os.environ['PATH']\n for element in path.split(os.pathsep):\n if not element:\n continue\n filename = os.path.join(element, command)\n if os.path.isfile(filename) and os.access(filename, os.X_OK):\n return True\n\n return False", "def command_exists(name, path=None):\n if path is None:\n path = sys.path\n\n for prefix in path:\n filename = os.path.join(prefix, name)\n is_executable = os.access(filename, os.X_OK)\n is_file = os.path.isfile(filename)\n if is_executable and is_file:\n return True\n\n return False", "def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False", "def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass", "def is_installed(cmd):\n rc, _, _ = execute(\"which %s\" % cmd, die=False)\n if rc:\n return False\n else:\n return True", "def check_path():\n print('[GenHub] Checking PATH for executables and scripts.')\n\n execs = ['gt', 'cd-hit', 'tidygff3', 'locuspocus', 'xtractore',\n 'canon-gff3', 'pmrna', 'lpdriver.py', 'uloci.py', 'seq-reg.py']\n paths = list()\n for exe in execs:\n try:\n proc = subprocess.Popen(['which', exe], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n resultcode = proc.wait()\n if resultcode == 0:\n procpath = next(proc.stdout)\n procpath = str(procpath).rstrip()\n paths.append((exe, procpath))\n else:\n paths.append((exe, None))\n except subprocess.CalledProcessError:\n paths.append((exe, None))\n\n missing = False\n for exe, path in paths:\n char = '+'\n if path is None:\n char = '-'\n path = '???'\n missing = True\n print('%s %-20s: %s' % (char, exe, path))\n if missing:\n print('Executables / scripts cannot be found in your PATH.', end='')\n print(' Certain build commands will not work.')", "def test_a_which_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"which\"])\n\t\t\ttry:\n\t\t\t\tif (str(\"/which\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception as err:\n\t\t\t\tprint(err.msg)\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\tassert theResult", "def test_process_path(path):\n try:\n subprocess.call([path, \"--version\"])\n return True\n except:\n print(\"Cannot find executable on {}\".format(path))\n return False", "def check_PATH_for_program(f):\n\n path = os.environ[\"PATH\"].split(\":\")\n\n for p in path:\n\n if os.path.isfile(os.path.join(p,f)):\n return True\n\n return False", "def _has_prog(prog):\n try:\n subprocess.check_output(f\"which {prog}\", shell=True)\n return True\n except subprocess.CalledProcessError:\n return False", "def which(cmd, 
path=None):\n if path is None:\n path = os.environ[\"PATH\"].split(os.pathsep)\n\n for prefix in path:\n filename = os.path.join(prefix, cmd)\n executable = os.access(filename, os.X_OK)\n is_not_directory = os.path.isfile(filename)\n if executable and is_not_directory:\n return True\n\n return False", "def checkForCommand(quickLogger, commandList):\n\n for command in commandList:\n\n cmd = \"which -s \" + command + \" > \" + os.devnull + \" 2>&1\"\n retcode = os.system(cmd)\n \n if(retcode):\n quickLogger.critical(\"unix command \"+command+\" not found.\")\n raise CommandNotFound", "def executable(command):\n\n\t\tif os.path.isabs(command):\n\t\t\tif os.access(command, os.X_OK):\n\t\t\t\treturn command\n\t\tfor path in os.environ.get(\"PATH\", []).split(os.pathsep):\n\t\t\tif os.access(os.path.join(path, command), os.X_OK):\n\t\t\t\treturn os.path.join(path, command)\n\t\treturn False", "def checkForBinary(binary):\n try:\n fullPath = subprocess.check_output(['which',binary])\n return True\n except subprocess.CalledProcessError as e:\n return False", "def verifyInstalled(cmd):\n\tprint \"Verifying %s works...\" % cmd\n\tif (sys.platform == 'win32'):\n\t\ttry:\n\t\t\tstatus = subprocess.call(shlex.split(cmd))\n\t\t\tprint \"Installation was successful.\"\n\t\t\treturn True\n\t\texcept OSError as e:\n\t\t\tprint >>sys.stderr, \"Execution failed with verification: \",e\n\t\t\tprint cmd + \" was not installed correctly.\"\n\t\t\treturn False\n\telse:\n\t\tstatus = os.system(cmd)\n\t\tif (status == NOT_INSTALLED):\n\t\t\tprint status\n\t\t\tprint \"An error occured with installation/environment variables. %s is still not installed.\" % cmd\n\t\t\treturn False\n\t\telse:\n\t\t\tprint \"Installation was successful.\"\n\t\t\treturn True", "def check_executable(op):\n try:\n proc = subprocess.Popen([op], stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n except OSError:\n return False\n try:\n if proc.poll():\n proc.kill()\n except OSError:\n return True\n return True", "def get_command_path(command):\n def excutable(command_path):\n return os.path.isfile(command_path) and os.access(command_path, os.X_OK)\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n command_path = os.path.join(path, command)\n if excutable(command_path):\n return command_path\n\n return None", "def _check_for_commands(keep_path):\n if not os.path.exists(keep_path):\n click.echo(\"You have not registered any command yet.\")\n quit()", "def IsInstalled(location=None):\n return not not base.Tool._GetExecutable(COMMAND, location)", "def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def which(cmd):\n for path in os.environ['PATH'].split(os.pathsep):\n path = path.strip('\"')\n cmd_path = os.path.join(path, cmd)\n if os.path.isfile(cmd_path) and os.access(cmd_path, os.X_OK):\n return cmd_path\n\n return None", "def check(*cmd):\n print >>sys.stderr, 'Run:', cmd\n subprocess.check_call(cmd)", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def is_command(line: str) -> bool:\n if 
line[0] == \"$\":\n return True\n return False", "def check_call(cmd):\r\n return subprocess.check_call(cmd)", "def which(exe):\n\n def wrapper(function):\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n if salt.utils.path.which(exe) is None:\n raise CommandNotFoundError(\n \"The '{}' binary was not found in $PATH.\".format(exe)\n )\n return function(*args, **kwargs)\n\n return wrapped\n\n return wrapper", "def program_exists(name):\n for path in os.environ['PATH'].split(os.path.pathsep):\n if path and os.path.exists(os.path.join(path, name)):\n return True\n return False", "def check_exec(p):\n if p.startswith('~'):\n raise OSError(p + ' executable has \"~\" in path.')\n try:\n fnull = open(os.devnull, 'w')\n subprocess.call(p, stdout=fnull, stderr=fnull)\n except OSError as e:\n raise OSError(p + ' executable not found.')" ]
[ "0.7577057", "0.75585085", "0.74920905", "0.74783725", "0.741286", "0.7362685", "0.73438174", "0.726088", "0.7077697", "0.70723623", "0.69637066", "0.6936767", "0.69339275", "0.68051726", "0.6798096", "0.6763038", "0.66706866", "0.66448194", "0.6632893", "0.6629532", "0.6569732", "0.65494955", "0.65334284", "0.65296113", "0.65109646", "0.6473664", "0.6471435", "0.6457848", "0.6449645", "0.643154" ]
0.7675532
0
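The positive document scans os.environ["PATH"] by hand; since Python 3.3 the standard library covers the same check with shutil.which, which performs the PATH walk plus the executable-bit test and returns the resolved path or None. An equivalent one-liner:

import shutil

def is_in_path(command):
    # shutil.which does the PATH scan and os.X_OK check internally.
    return shutil.which(command.split()[0]) is not None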
Benchmark the compute_iterative_fibonacci function.
def test_iterative_fibonacci_benchmark(benchmark): computed_iterative_value = benchmark( fibonacci.compute_iterative_fibonacci, value=19 ) assert computed_iterative_value == 4181
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_recursive_fibonacci_benchmark(benchmark):\n computed_recursive_value = benchmark(\n fibonacci.compute_recursive_fibonacci, value=19\n )\n assert computed_recursive_value == 4181", "def fib_cached(i):\n if i < 2: return 1\n return fib_cached(i-1) + fib_cached(i-2)", "def test_quicker_than_recursive(self):\n mult = 250\n reps = 1000\n res_fib = timeit.timeit(\"fib_rec(20)\",\n setup=\"from ex1 import fib_rec\",\n number=reps//mult)\n res = timeit.timeit(\"fib(20)\",\n setup=\"from ex1 import fib\",\n number=reps)\n self.assertGreater(res_fib, res)", "def fibonacci(n):", "def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b", "def fast_fibonacci(n):\n return _fast_fibonacci(n)[0]", "def fib_iterative(n: int) -> int:\n print(n)\n return 0", "def fibonacci():\n return sum_series(a=0, b=1)", "def test_fibonacci_single():\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(18)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(18)\n assert computed_iterative_value == 2584\n assert computed_recursive_value == 2584", "def fibonacci_iterative_memo(n, fib_cache={0: 0, 1: 1}):\n\n for i in range(2, n+1):\n fib_cache[i] = fib_cache[i-1] + fib_cache[i-2]\n\n return fib_cache[n]", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def fib_iterative(n: int) -> int:\n if n < 0:\n raise ValueError\n number1 = 0\n number2 = 1\n counter = 1\n while counter < n:\n counter += 1\n number1, number2 = number2, number1 + number2\n return number2", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def fibonacci_iterative(nth_nmb: int) -> int:\n old, new = 0, 1\n if nth_nmb in (0, 1):\n return nth_nmb\n for __ in range(nth_nmb - 1):\n old, new = new, old + new\n return new", "def _fast_fibonacci(n):\n if n == 0:\n return (0, 1)\n else:\n a, b = _fast_fibonacci(n // 2)\n c = a * (b * 2 - a)\n d = a**2 + b**2\n if n % 2 == 1:\n return (d, c + d)\n else:\n return (c, d)", "def fib_efficient(number: int) -> int:\n if number >= 0:\n return calculate(number)[0]\n return -calculate(-number)[0] if not number % 2 else calculate(-number)[0]", "def fib(n):\n a, b = 1, 1\n while n:\n a, b = b, a + b\n n -= 1\n return a", "def fib(i):\n if i < 2: return 1\n return fib(i-1) + fib(i-2)", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fib(f1, index):\n if index == 0:\n return 0\n if index == 1:\n return f1\n if (f1, index) not in FIBS_CACHE:\n FIBS_CACHE[f1, index] = fib(f1, index-1) + fib(f1, index-2)\n return FIBS_CACHE[f1, index]", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i 
+ j\n n = n - 1\n return i", "def t_fibonnaci():\n a = 1\n b = 1\n c = a + b\n while True:\n yield c\n a = b + c\n b = c + a \n c = a + b", "def _fib_iter(n=4000000):\n fib1 = 1\n fib2 = 2\n # Yield the first two fibonacci numbers\n yield fib1\n yield fib2\n fib_next = fib1 + fib2\n while fib_next < n:\n # iteratively gen\n yield fib_next\n fib1 = fib2\n fib2 = fib_next\n fib_next = fib1 + fib2", "def problem2():\n\n def _fib_iter(n=4000000):\n \"\"\" Generator for fibonacci numbers less than n \"\"\"\n fib1 = 1\n fib2 = 2\n # Yield the first two fibonacci numbers\n yield fib1\n yield fib2\n fib_next = fib1 + fib2\n while fib_next < n:\n # iteratively gen\n yield fib_next\n fib1 = fib2\n fib2 = fib_next\n fib_next = fib1 + fib2\n\n return sum(i for i in _fib_iter() if i % 2 == 0)", "def fib(index):\n return round((GR**index)/R5)", "def fib(n): # this line defines the function 'fib' where n is the input value\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fibi(n):\n if n == 0: return 0\n if n == 1: return 1\n f_n2, f_n1 = 1, 1\n for i in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1", "def fibonacci():\n\ta, b = 0, 1\n\tyield 0\n\twhile True:\n\t\ta, b = b, a + b\n\t\tyield a" ]
[ "0.80183786", "0.74013287", "0.7330856", "0.7119381", "0.70927745", "0.70785844", "0.7045561", "0.6983172", "0.6976063", "0.6937987", "0.68992245", "0.6839805", "0.6728518", "0.6724451", "0.66643965", "0.66435885", "0.6634579", "0.66091865", "0.65841097", "0.65841097", "0.65841097", "0.65816075", "0.65573466", "0.6537245", "0.6527036", "0.65202534", "0.6512493", "0.6503231", "0.64616597", "0.646013" ]
0.89311844
0
Benchmark the compute_recursive_fibonacci function.
def test_recursive_fibonacci_benchmark(benchmark): computed_recursive_value = benchmark( fibonacci.compute_recursive_fibonacci, value=19 ) assert computed_recursive_value == 4181
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iterative_fibonacci_benchmark(benchmark):\n computed_iterative_value = benchmark(\n fibonacci.compute_iterative_fibonacci, value=19\n )\n assert computed_iterative_value == 4181", "def test_quicker_than_recursive(self):\n mult = 250\n reps = 1000\n res_fib = timeit.timeit(\"fib_rec(20)\",\n setup=\"from ex1 import fib_rec\",\n number=reps//mult)\n res = timeit.timeit(\"fib(20)\",\n setup=\"from ex1 import fib\",\n number=reps)\n self.assertGreater(res_fib, res)", "def fib_cached(i):\n if i < 2: return 1\n return fib_cached(i-1) + fib_cached(i-2)", "def fast_fibonacci(n):\n return _fast_fibonacci(n)[0]", "def fibonacci(n):", "def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b", "def test_fibonacci_single():\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(18)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(18)\n assert computed_iterative_value == 2584\n assert computed_recursive_value == 2584", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def fib(index):\n return round((GR**index)/R5)", "def _fast_fibonacci(n):\n if n == 0:\n return (0, 1)\n else:\n a, b = _fast_fibonacci(n // 2)\n c = a * (b * 2 - a)\n d = a**2 + b**2\n if n % 2 == 1:\n return (d, c + d)\n else:\n return (c, d)", "def fibonacci():\n return sum_series(a=0, b=1)", "def fibonacci_recursive_memo(n, fib_cache={0: 0, 1: 1}):\n\n if n < 0:\n return -1\n\n if n not in fib_cache:\n fib_cache[n] = fibonacci_recursive_memo(n-1) + fibonacci_recursive_memo(n-2)\n return fib_cache[n]", "def fibonacci_iterative_memo(n, fib_cache={0: 0, 1: 1}):\n\n for i in range(2, n+1):\n fib_cache[i] = fib_cache[i-1] + fib_cache[i-2]\n\n return fib_cache[n]", "def fib(n):\n global counter\n if n==0 or n==1:\n return 1\n else:\n if n-1==2 or n-2==2:\n counter+=1\n return fib(n-1) + fib(n-2)", "def benchmark(trials:int):\n def benchmark_method(function:Callable[[int],int]) -> Callable[[int],Tuple[float,str]]:\n def time_wrapper(*args) -> Tuple[float,str]:\n \"\"\" Return the time taken to run a fibonacci method in microseconds \"\"\"\n t1 = time.time()\n for _ in range(trials):\n function(*args)\n return ((time.time()-t1)/trials) * 1e6, function.__name__\n return time_wrapper\n return benchmark_method", "def fib(f1, index):\n if index == 0:\n return 0\n if index == 1:\n return f1\n if (f1, index) not in FIBS_CACHE:\n FIBS_CACHE[f1, index] = fib(f1, index-1) + fib(f1, index-2)\n return FIBS_CACHE[f1, index]", "def fibonacci_iterative(nth_nmb: int) -> int:\n old, new = 0, 1\n if nth_nmb in (0, 1):\n return nth_nmb\n for __ in range(nth_nmb - 1):\n old, new = new, old + new\n return new", "def test_fib(n, result):\n from fib_reloaded import fib\n assert fib(n) == result", "def fib_rec(n):\n if n == 1:\n return 0\n elif n == 2:\n return 1\n\n return fib_rec(n - 1) + fib_rec(n - 2)", "def fib_iterative(n: int) -> int:\n print(n)\n return 0", "def fibonacci_memory(nth_nmb: int) -> int:\n pass # TODO: Replace with implementation!", "def 
fibonacci(n):\n\n ## Auxiliary functions for working in our polynomial ring.\n def poly_sqr((a, b)):\n a2 = a*a\n return 2*a*b + a2, a2 + b*b\n def poly_mul((a, b), (c, d)):\n ac = a*c\n return a*d + b*c + ac, ac + b*d\n\n ## Do the job. For negative indices, we take powers of t^{-1}.\n if n < 0: return power((1, -1), -n, (0, 1), poly_sqr, poly_mul)\n else: return power((1, 0), n, (0, 1), poly_sqr, poly_mul)", "def fib(n):\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)", "def get_fibonacci_last_digit_fast(n):\n fibonacci = [0 for i in range(n + 1)]\n fibonacci[1] = 1\n\n for i in range(2, n + 1):\n fibonacci[i] = (fibonacci[i - 1] + fibonacci[i - 2]) % 10\n\n return fibonacci[n]", "def test_fib_functools(n, result):\n from fib_reloaded import fib_functools\n assert fib_functools(n) == result", "def fib(n: int) -> int:\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)", "def test_fibonacci_recursive(self):\r\n result = fib.fibonacci_recursive(5)\r\n self.assertEqual(result, 8)", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def fastrecursivefibonacci(n):\n # Needs a termination point, this is done @ n = 0\n if n == 0:\n return 0,1\n else: # now we can recursively solve this\n if n % 2 == 0: # even case\n [Fn, Fnm] = fastrecursivefibonacci(n/2)\n else: \n [Fn, Fnm] = fastrecursivefibonacci((n-1)/2)\n \n # Solve the fast matrix form\n Fn2 = ( 2 * Fnm - Fn ) * Fn \n Fn2m = pow(Fn,2) + pow(Fnm,2)\n\n # This must be returned in the form n, n+1\n if n % 2 == 0:\n return Fn2, Fn2m\n else:\n return Fn2m, Fn2m+Fn2", "def fib(n: int) -> int:\n if n == 0:\n return 0\n if n == 1:\n return 1\n return fib(n-1) + fib(n-2)" ]
[ "0.81779134", "0.74096096", "0.6899512", "0.6888527", "0.6815627", "0.66488624", "0.6562564", "0.6527517", "0.64697", "0.63618094", "0.6347348", "0.6316405", "0.6313529", "0.6299702", "0.62782294", "0.6275454", "0.6226415", "0.621229", "0.61825955", "0.61655694", "0.6117408", "0.6112175", "0.60935855", "0.6087443", "0.6079735", "0.6075222", "0.60684645", "0.6060688", "0.60542285", "0.60469085" ]
0.83512175
0
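Every test row in this section imports a `fibonacci` module that is not itself part of the dump. A minimal sketch of the two functions the rows above and below exercise — consistent with the recorded assertions (fib(18) == 2584, fib(19) == 4181, so 1-indexed with fib(1) == fib(2) == 1) — might look like the following. Only the names and the `value` keyword are taken from the data; the bodies are assumptions:

    # fibonacci.py -- hypothetical module implied by the test rows; the bodies
    # below are a sketch, not the dataset authors' actual implementation.


    def compute_iterative_fibonacci(value: int) -> int:
        """Return the value-th Fibonacci number iteratively (fib(1) == fib(2) == 1)."""
        previous, current = 0, 1
        for _ in range(value - 1):
            previous, current = current, previous + current
        return current


    def compute_recursive_fibonacci(value: int) -> int:
        """Return the value-th Fibonacci number by naive recursion."""
        if value <= 2:
            return 1
        return compute_recursive_fibonacci(value - 1) + compute_recursive_fibonacci(value - 2)

The naive recursive form is exponential in `value`, which is presumably why the benchmark rows pin `value=19` rather than something larger.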
Checks the iterative and recursive fibonacci functions with multiple inputs.
def test_fibonacci_multiple(fibonacci_input, expected_answer): computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input) computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input) assert computed_iterative_value == expected_answer assert computed_recursive_value == expected_answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n return False\n if data[0] != 0 or data[1] != 1:\n return False\n for n in range(2, len(data)):\n if data[n] != data[n - 1] + data[n - 2]:\n return False\n return True", "def fibonacciSeries(userinput):\n try:\n isinstance(int(userinput), int)\n userinput = int(userinput)\n except ValueError as e:\n print(e)\n else:\n if isPerfectSquare(\n (5 *\n userinput *\n userinput) -\n 4)or isPerfectSquare(\n (5 *\n userinput *\n userinput) +\n 4):\n return True\n else:\n return False", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n raise ValueError(\"Not enough data entered\")\n while len(data) >= 3:\n a, b, c = data[0], data[1], data[2]\n if not _check_window(a, b, c):\n return False\n del data[0]\n return True", "def test_fibonacci_single():\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(18)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(18)\n assert computed_iterative_value == 2584\n assert computed_recursive_value == 2584", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n elif n > 2:\n return fibonacci(n - 1) + fibonacci(n - 2)\n else:\n return False", "def fibonacci(input) :\n\n\n\n\n\n# if input == 0:\n\n# return 0\n\n\n\n# elif input == 1:\n\n# return 1\n\n# else :\n\n# return fibonacci(input-1) + fibonacci(input-2)\n\n\n\n\n return sum_series(input)", "def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = fib(n)\n print(result)", "def check_fibonacci(data: Sequence[int]) -> bool:\n def fib_gen(n: int) -> Iterable:\n a, b = 0, 1\n while a <= n:\n yield a\n a, b = b, a + b\n\n last_el = data[-1]\n perfect_fib = list(el for el in fib_gen(last_el))\n\n if len(data) < 3:\n return False\n if data[::-1] != perfect_fib[:-len(data) - 1:-1]:\n return False\n return True", "def find_fibonacci(x: int) -> bool:\r\n # write your code here\r\n a = 1\r\n b = 1\r\n while True: #Looping sampai ketemu return\r\n if x == 0:\r\n return True\r\n elif b <= x:\r\n if b == x:\r\n return True\r\n else:\r\n temp = b\r\n b = b + a\r\n a = temp\r\n else:\r\n return False", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def test_fibonacci_recursive(self):\r\n result = fib.fibonacci_recursive(5)\r\n self.assertEqual(result, 8)", "def fib(x):\r\n if x == 0 or x == 1: #two base cases \r\n return 1\r\n else:\r\n return fib(x-1) + fib(x-2) #two recursive cases\r\n #so basically if there are two base cases then I need two recursive cases\r", "def is_fibonacci_number(x):\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()", "def fibonacci(n):", "def test_fib0(self):\n assert ten == fibonacci_procedure.recursion(10)", "def test_negative_check_fibonacci(arg, expected):\n assert major_and_minor_elem(arg) == expected", "def fibonacci(n: int) -> int:\n if n <= 0:\n raise ValueError(\"The number must be greater than zero.\")\n exit(code=1)\n if n == 1:\n return 0\n elif n == 2:\n return 
1\n else:\n return fibonacci(n-1) + fibonacci(n - 2)", "def isfib(number):\n\n num1 = 1\n num2 = 1\n while True:\n if num2 < number:\n tempnum = num2\n num2 += num1\n num1 = tempnum\n elif num2 == number:\n return True\n else:\n return False", "def fibi(n):\n if n == 0: return 0\n if n == 1: return 1\n f_n2, f_n1 = 1, 1\n for i in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1", "def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b", "def fibonacci(n):\n if n==0 :\n return 0\n elif n==1:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "def test_fibonacci_hypothesis(fibonacci_input):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input)\n previous_computed_iterative_value = fibonacci.compute_iterative_fibonacci(\n fibonacci_input - 1\n )\n previous_computed_recursive_value = fibonacci.compute_recursive_fibonacci(\n fibonacci_input - 1\n )\n goldenratio = 1.61803398875 # The golden ratio for fibonacci values.\n assert computed_iterative_value > 0\n if fibonacci_input <= 2:\n assert computed_iterative_value == 1\n else:\n assert computed_iterative_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_iterative_value)\n )\n assert computed_recursive_value > 0\n if fibonacci_input <= 2:\n assert computed_recursive_value == 1\n else:\n assert computed_recursive_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_recursive_value)\n )", "def get_fibonacci(a: int = 0, b: int = 1):\n values = [a, b]\n result = 0\n def inner():\n nonlocal result\n if not result:\n result = 1\n return result\n result = values[0] + values[1]\n values[0] = values[1]\n values[1] = result\n return result\n return inner", "def fibonacci(n):\n print(n)\n if n == 0 or n == 1:\n return 1\n\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci(n):\n\n if (n == 0):\n return 0\n elif (n == 1):\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci():\n return sum_series(a=0, b=1)", "def fib(n): #Describe \"n\" as a variable in fib sequence\n while n == 0:\n return 0 #establish that 0 position is equal to 0\n if n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci(n):\n if n in (0, 1):\n return n\n return fibonacci(n - 2) + fibonacci(n - 1)", "def fibonacci(n):\n\n if n == 1:\n return 1\n elif n < 1:\n return 0\n else:\n return fibonacci(n-1) + fibonacci(n-2)" ]
[ "0.7178264", "0.7143379", "0.7013435", "0.6936545", "0.676014", "0.6662881", "0.6642217", "0.66400933", "0.66117245", "0.6579734", "0.64329857", "0.64186317", "0.6415477", "0.63852334", "0.62665355", "0.6260179", "0.6218458", "0.62144464", "0.6206261", "0.6198005", "0.6196254", "0.618655", "0.6179084", "0.6165905", "0.61614275", "0.6150905", "0.6142256", "0.61416143", "0.6139068", "0.6132822" ]
0.7537205
0
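The test in the row above takes `fibonacci_input` and `expected_answer` as arguments, so it is presumably driven by a pytest parametrization that the dump does not record. A plausible (assumed) decoration — the (input, answer) pairs here are illustrative, not the dataset authors' values:

    # Hypothetical parametrization for test_fibonacci_multiple; the actual
    # (input, answer) pairs are not preserved in this dump.
    import pytest

    @pytest.mark.parametrize(
        "fibonacci_input,expected_answer",
        [(1, 1), (2, 1), (5, 5), (10, 55), (18, 2584)],
    )
    def test_fibonacci_multiple(fibonacci_input, expected_answer):
        ...  # body as shown in the document field above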
Check the iterative and recursive fibonacci functions with a single input.
def test_fibonacci_single(): computed_iterative_value = fibonacci.compute_iterative_fibonacci(18) computed_recursive_value = fibonacci.compute_recursive_fibonacci(18) assert computed_iterative_value == 2584 assert computed_recursive_value == 2584
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fibonacci_multiple(fibonacci_input, expected_answer):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input)\n assert computed_iterative_value == expected_answer\n assert computed_recursive_value == expected_answer", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n elif n > 2:\n return fibonacci(n - 1) + fibonacci(n - 2)\n else:\n return False", "def fibonacciSeries(userinput):\n try:\n isinstance(int(userinput), int)\n userinput = int(userinput)\n except ValueError as e:\n print(e)\n else:\n if isPerfectSquare(\n (5 *\n userinput *\n userinput) -\n 4)or isPerfectSquare(\n (5 *\n userinput *\n userinput) +\n 4):\n return True\n else:\n return False", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n return False\n if data[0] != 0 or data[1] != 1:\n return False\n for n in range(2, len(data)):\n if data[n] != data[n - 1] + data[n - 2]:\n return False\n return True", "def find_fibonacci(x: int) -> bool:\r\n # write your code here\r\n a = 1\r\n b = 1\r\n while True: #Looping sampai ketemu return\r\n if x == 0:\r\n return True\r\n elif b <= x:\r\n if b == x:\r\n return True\r\n else:\r\n temp = b\r\n b = b + a\r\n a = temp\r\n else:\r\n return False", "def test_fibonacci_recursive(self):\r\n result = fib.fibonacci_recursive(5)\r\n self.assertEqual(result, 8)", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = fib(n)\n print(result)", "def test_fib0(self):\n assert ten == fibonacci_procedure.recursion(10)", "def fibonacci(input) :\n\n\n\n\n\n# if input == 0:\n\n# return 0\n\n\n\n# elif input == 1:\n\n# return 1\n\n# else :\n\n# return fibonacci(input-1) + fibonacci(input-2)\n\n\n\n\n return sum_series(input)", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n raise ValueError(\"Not enough data entered\")\n while len(data) >= 3:\n a, b, c = data[0], data[1], data[2]\n if not _check_window(a, b, c):\n return False\n del data[0]\n return True", "def check_fibonacci(data: Sequence[int]) -> bool:\n def fib_gen(n: int) -> Iterable:\n a, b = 0, 1\n while a <= n:\n yield a\n a, b = b, a + b\n\n last_el = data[-1]\n perfect_fib = list(el for el in fib_gen(last_el))\n\n if len(data) < 3:\n return False\n if data[::-1] != perfect_fib[:-len(data) - 1:-1]:\n return False\n return True", "def isfib(number):\n\n num1 = 1\n num2 = 1\n while True:\n if num2 < number:\n tempnum = num2\n num2 += num1\n num1 = tempnum\n elif num2 == number:\n return True\n else:\n return False", "def is_fibonacci_number(x):\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()", "def fibonacci(n):", "def test_fibonacci_hypothesis(fibonacci_input):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = 
fibonacci.compute_recursive_fibonacci(fibonacci_input)\n previous_computed_iterative_value = fibonacci.compute_iterative_fibonacci(\n fibonacci_input - 1\n )\n previous_computed_recursive_value = fibonacci.compute_recursive_fibonacci(\n fibonacci_input - 1\n )\n goldenratio = 1.61803398875 # The golden ratio for fibonacci values.\n assert computed_iterative_value > 0\n if fibonacci_input <= 2:\n assert computed_iterative_value == 1\n else:\n assert computed_iterative_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_iterative_value)\n )\n assert computed_recursive_value > 0\n if fibonacci_input <= 2:\n assert computed_recursive_value == 1\n else:\n assert computed_recursive_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_recursive_value)\n )", "def fib(x):\r\n if x == 0 or x == 1: #two base cases \r\n return 1\r\n else:\r\n return fib(x-1) + fib(x-2) #two recursive cases\r\n #so basically if there are two base cases then I need two recursive cases\r", "def fibonacci(n: int) -> int:\n if n <= 0:\n raise ValueError(\"The number must be greater than zero.\")\n exit(code=1)\n if n == 1:\n return 0\n elif n == 2:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n - 2)", "def fib(n): #Describe \"n\" as a variable in fib sequence\n while n == 0:\n return 0 #establish that 0 position is equal to 0\n if n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)", "def fibonacci(n):\n print(n)\n if n == 0 or n == 1:\n return 1\n\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci(n):\n if n==0 :\n return 0\n elif n==1:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "def fibonacci(n):\n\n if (n == 0):\n return 0\n elif (n == 1):\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci(n):\n if n in (0, 1):\n return n\n return fibonacci(n - 2) + fibonacci(n - 1)", "def fib_iterative(n: int) -> int:\n print(n)\n return 0", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci(n):\n\n if n == 1:\n return 1\n elif n < 1:\n return 0\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "def fibo_element(n):\n f = ()\n if n < 0:\n print(\"Incorrect number\")\n elif n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibo_element(n-1) + fibo_element(n-2)", "def fib(x):\n if x < 0:\n raise ValueError('Must be greater than 0')\n elif x == 0:\n return 1\n elif x == 1:\n return 1\n else:\n return fib(x - 1) + fib(x - 2)", "def fibonacci(n):\n if n in (0, 1):\n return n\n return (fibonacci(n-2) + fibonacci(n-1))", "def test_negative_check_fibonacci(arg, expected):\n assert major_and_minor_elem(arg) == expected" ]
[ "0.75609815", "0.74707973", "0.74625546", "0.7423986", "0.73336613", "0.73101205", "0.72907925", "0.71682334", "0.7122653", "0.70729935", "0.7069784", "0.6992415", "0.69830483", "0.6973864", "0.69721997", "0.6952619", "0.69521326", "0.6895534", "0.68825495", "0.6838231", "0.68380654", "0.6830992", "0.68253005", "0.68156374", "0.68079495", "0.6797165", "0.67482156", "0.67421246", "0.6740011", "0.6730682" ]
0.7659369
0
Check that the fibonacci function returns correct values in a tuple.
def test_fibonacci_tuple(): computed_fibonacci_value = fibonacci.fibonacci_tuple(8) assert computed_fibonacci_value == (1, 1, 2, 3, 5, 8, 13, 21)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fibonacci_tuple(number: int) -> Tuple[int]:\n # TODO: Add all of the required source code for this tuple-based function\n # create an empty tuple that will ultimately contain the results\n result = ()\n return result", "def good_fibonacci(n):\n\n if n <= 1:\n return (0,n)\n else:\n (a,b) = good_fibonacci(n-1)\n return (a+b, a)", "def good_fibonacci(n):\n if n <= 1:\n return (n, 0)\n else:\n (a, b) = good_fibonacci(n-1)\n return (a+b, a)", "def test_fibonacci_multiple(fibonacci_input, expected_answer):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input)\n assert computed_iterative_value == expected_answer\n assert computed_recursive_value == expected_answer", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n return False\n if data[0] != 0 or data[1] != 1:\n return False\n for n in range(2, len(data)):\n if data[n] != data[n - 1] + data[n - 2]:\n return False\n return True", "def test_negative_check_fibonacci(arg, expected):\n assert major_and_minor_elem(arg) == expected", "def test_fibonacci_single():\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(18)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(18)\n assert computed_iterative_value == 2584\n assert computed_recursive_value == 2584", "def test_fibonacci(self):\n fibonacci = [x for x in generators.fibonacci(10)]\n self.assertEqual(fibonacci[7], 21)", "def fibonacci_list(number: int) -> Tuple[int]:\n # TODO: Add all of the required source code for this list-based function\n # create an empty list that will ultimately contain the results\n result = []\n return result", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def test_fibonacci_list():\n computed_fibonacci_value = fibonacci.fibonacci_list(8)\n assert computed_fibonacci_value == [1, 1, 2, 3, 5, 8, 13, 21]", "def check_fibonacci(data: Sequence[int]) -> bool:\n def fib_gen(n: int) -> Iterable:\n a, b = 0, 1\n while a <= n:\n yield a\n a, b = b, a + b\n\n last_el = data[-1]\n perfect_fib = list(el for el in fib_gen(last_el))\n\n if len(data) < 3:\n return False\n if data[::-1] != perfect_fib[:-len(data) - 1:-1]:\n return False\n return True", "def fibonacci(x):\n # Type Error\n if type(x) != int:\n raise TypeError(\"x is not a positive integer.\")\n # Value Error\n if x < 0:\n raise ValueError(\"x is not a positive integer.\")\n if x < 3:\n # fibonacci(0) = 0, fibonacci(1) = 1, fibonacci(2) = 1\n return (0, 1, 1)[x]\n else:\n # fibonacci(n) = fibonacci(n-1) + fibonacci(n-2)\n return fibonacci(x - 1) + fibonacci(x - 2)", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n raise ValueError(\"Not enough data entered\")\n while len(data) >= 3:\n a, b, c = data[0], data[1], data[2]\n if not _check_window(a, b, c):\n return False\n del data[0]\n return True", "def test_fib_digits(n, result):\n from even_digit_primes import f\n assert f(n) == result", "def fibo_element(n):\n f = ()\n if n < 0:\n print(\"Incorrect 
number\")\n elif n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibo_element(n-1) + fibo_element(n-2)", "def fibonacci(n):", "def find_fibonacci(x: int) -> bool:\r\n # write your code here\r\n a = 1\r\n b = 1\r\n while True: #Looping sampai ketemu return\r\n if x == 0:\r\n return True\r\n elif b <= x:\r\n if b == x:\r\n return True\r\n else:\r\n temp = b\r\n b = b + a\r\n a = temp\r\n else:\r\n return False", "def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = fib(n)\n print(result)", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n elif n > 2:\n return fibonacci(n - 1) + fibonacci(n - 2)\n else:\n return False", "def test_fibonacci(self):\n fibonacci = [x for x in iterators.FibonacciIterator(10)]\n self.assertEqual(fibonacci[7], 21)", "def fibonacci(n:int):\n\n # Special Cases\n if n < 0:\n return (\"n has to be a positive integer\")\n \n if n == 0:\n return 0\n\n if n <= 2:\n return 1\n\n # Return Value\n return (fibonacci(n-1) + fibonacci(n-2))", "def test_fib():\n from fib import fibinacci\n result = fibinacci(0)\n assert result == CORRECT_LIST[0]", "def is_fibonacci_number(x):\n a = math.sqrt(5 * x ** 2 + 4)\n b = math.sqrt(5 * x ** 2 - 4)\n return a.is_integer() or b.is_integer()", "def test_fibonacci(n, result):\n from series import fibonacci\n assert fibonacci(n) == result", "def fibonacci():\n return sum_series(a=0, b=1)", "def _fast_fibonacci(n):\n if n == 0:\n return (0, 1)\n else:\n a, b = _fast_fibonacci(n // 2)\n c = a * (b * 2 - a)\n d = a**2 + b**2\n if n % 2 == 1:\n return (d, c + d)\n else:\n return (c, d)", "def fibonacci(n):\n fibval = sum_series(n, 0, 1)\n print(fibval)\n return fibval", "def test_fibonacci_recursive(self):\r\n result = fib.fibonacci_recursive(5)\r\n self.assertEqual(result, 8)", "def test_fibonacci_hypothesis(fibonacci_input):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input)\n previous_computed_iterative_value = fibonacci.compute_iterative_fibonacci(\n fibonacci_input - 1\n )\n previous_computed_recursive_value = fibonacci.compute_recursive_fibonacci(\n fibonacci_input - 1\n )\n goldenratio = 1.61803398875 # The golden ratio for fibonacci values.\n assert computed_iterative_value > 0\n if fibonacci_input <= 2:\n assert computed_iterative_value == 1\n else:\n assert computed_iterative_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_iterative_value)\n )\n assert computed_recursive_value > 0\n if fibonacci_input <= 2:\n assert computed_recursive_value == 1\n else:\n assert computed_recursive_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_recursive_value)\n )" ]
[ "0.7495312", "0.7087924", "0.7006348", "0.6943637", "0.6943494", "0.6726427", "0.67202973", "0.6707711", "0.66854054", "0.661282", "0.660494", "0.65465456", "0.6528639", "0.6525882", "0.6490575", "0.64755964", "0.6462241", "0.6450144", "0.64456385", "0.64450544", "0.6440967", "0.6421626", "0.64215773", "0.64041954", "0.6358646", "0.6329931", "0.63296115", "0.62531936", "0.6244589", "0.62269384" ]
0.82736534
0
Check that the fibonacci function returns correct values in a list.
def test_fibonacci_list(): computed_fibonacci_value = fibonacci.fibonacci_list(8) assert computed_fibonacci_value == [1, 1, 2, 3, 5, 8, 13, 21]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fib():\n from fib import fibinacci\n result = fibinacci(0)\n assert result == CORRECT_LIST[0]", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n return False\n if data[0] != 0 or data[1] != 1:\n return False\n for n in range(2, len(data)):\n if data[n] != data[n - 1] + data[n - 2]:\n return False\n return True", "def test_fibonacci(self):\n fibonacci = [x for x in generators.fibonacci(10)]\n self.assertEqual(fibonacci[7], 21)", "def check_fibonacci(data: Sequence[int]) -> bool:\n def fib_gen(n: int) -> Iterable:\n a, b = 0, 1\n while a <= n:\n yield a\n a, b = b, a + b\n\n last_el = data[-1]\n perfect_fib = list(el for el in fib_gen(last_el))\n\n if len(data) < 3:\n return False\n if data[::-1] != perfect_fib[:-len(data) - 1:-1]:\n return False\n return True", "def test_fibonacci(self):\n fibonacci = [x for x in iterators.FibonacciIterator(10)]\n self.assertEqual(fibonacci[7], 21)", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def find_fibonacci(x: int) -> bool:\r\n # write your code here\r\n a = 1\r\n b = 1\r\n while True: #Looping sampai ketemu return\r\n if x == 0:\r\n return True\r\n elif b <= x:\r\n if b == x:\r\n return True\r\n else:\r\n temp = b\r\n b = b + a\r\n a = temp\r\n else:\r\n return False", "def test_fibonacci_multiple(fibonacci_input, expected_answer):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input)\n assert computed_iterative_value == expected_answer\n assert computed_recursive_value == expected_answer", "def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = fib(n)\n print(result)", "def fibonacci(n):", "def test_fibonacci_single():\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(18)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(18)\n assert computed_iterative_value == 2584\n assert computed_recursive_value == 2584", "def isfib(number):\n\n num1 = 1\n num2 = 1\n while True:\n if num2 < number:\n tempnum = num2\n num2 += num1\n num1 = tempnum\n elif num2 == number:\n return True\n else:\n return False", "def check_fibonacci(data: Sequence[int]) -> bool:\n if len(data) < 3:\n raise ValueError(\"Not enough data entered\")\n while len(data) >= 3:\n a, b, c = data[0], data[1], data[2]\n if not _check_window(a, b, c):\n return False\n del data[0]\n return True", "def fibonacci_list(number: int) -> Tuple[int]:\n # TODO: Add all of the required source code for this list-based function\n # create an empty list that will ultimately contain the results\n result = []\n return result", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n elif n > 2:\n return fibonacci(n - 1) + fibonacci(n - 2)\n else:\n return False", "def test_fib_digits(n, result):\n from even_digit_primes import f\n assert f(n) == result", "def test_fibonacci_recursive(self):\r\n result = fib.fibonacci_recursive(5)\r\n self.assertEqual(result, 8)", "def fibonacci(n):\n 
fibval = sum_series(n, 0, 1)\n print(fibval)\n return fibval", "def fib_in_range():\n limit1, limit2, nums_in_range = create_range()\n fib1, fib2 = 0, 1\n for elem in range(limit1, limit2):\n fib1, fib2 = fib2, fib1 + fib2\n if fib1 in nums_in_range:\n print(fib1)", "def fibonacci(a):\n fib = [1,1]\n x = 0\n i = 1\n while x < a:\n x = fib [i] + fib[i-1]\n i += 1\n fib.append(x)\n return i, fib", "def fibo_element(n):\n f = ()\n if n < 0:\n print(\"Incorrect number\")\n elif n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibo_element(n-1) + fibo_element(n-2)", "def fibonacci(n=100):\n result = [] # list\n a, b = 0, 1\n while a < n:\n result.append(a)\n a, b = b, a + b\n return result", "def test_fibonacci(n, result):\n from series import fibonacci\n assert fibonacci(n) == result", "def get_fibonacci(count_numbers: int) -> list:\n fibonacci = [0, 1]\n for _ in range(0, count_numbers - 2):\n sum_last_two = sum(fibonacci[-2:])\n fibonacci.append(sum_last_two)\n\n return fibonacci", "def ex2() :\r\n print(\" - Fibonacci Series - \")\r\n userNumber = int(input(\"Enter number of fibonacci numbers to show: \"))\r\n fiboList = [1,1] #list starts with 1,1\r\n if userNumber == 0 : print(0)\r\n if userNumber < 0 : print(\"Number entered can't be less than 0.\")\r\n elif userNumber == 1 : print(1) #fibonacci list rules \r\n elif userNumber == 2 : print(\"1,1\")\r\n else :\r\n while userNumber > len(fiboList) : #until our list reaches the desired number - \r\n fiboList.append(fiboList[-1] + fiboList[-2]) # - keep calculating the fibo number and adding it to the end of the list\r\n print(\"{}\".format(\",\".join(map(repr, fiboList)))) #format the list without spaces and comma inbetween\r", "def fibonacciSequence(number:int) -> List[int]:\n return [fibonacci(num) for num in range(number + 1)]", "def test_negative_check_fibonacci(arg, expected):\n assert major_and_minor_elem(arg) == expected", "def test_fibonacci_hypothesis(fibonacci_input):\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(fibonacci_input)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(fibonacci_input)\n previous_computed_iterative_value = fibonacci.compute_iterative_fibonacci(\n fibonacci_input - 1\n )\n previous_computed_recursive_value = fibonacci.compute_recursive_fibonacci(\n fibonacci_input - 1\n )\n goldenratio = 1.61803398875 # The golden ratio for fibonacci values.\n assert computed_iterative_value > 0\n if fibonacci_input <= 2:\n assert computed_iterative_value == 1\n else:\n assert computed_iterative_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_iterative_value)\n )\n assert computed_recursive_value > 0\n if fibonacci_input <= 2:\n assert computed_recursive_value == 1\n else:\n assert computed_recursive_value == (\n # pylint: disable=W1633\n round(goldenratio * previous_computed_recursive_value)\n )", "def fibonacci(number: int) -> int:\n fibs = [0] * (number + 2)\n fibs[0] = 0\n fibs[1] = 1\n for i in range(2, number + 1):\n fibs[i] = fibs[i - 1] + fibs[i - 2]\n return fibs[number]", "def fibonacci():\n return sum_series(a=0, b=1)" ]
[ "0.7688436", "0.74937403", "0.73775816", "0.73261154", "0.7310144", "0.7110707", "0.71075284", "0.7072487", "0.69218445", "0.68786097", "0.68698096", "0.68660873", "0.68476725", "0.6838543", "0.67813754", "0.6742429", "0.6730063", "0.6712836", "0.6694579", "0.6681888", "0.6673642", "0.6667664", "0.66481334", "0.6639598", "0.6638774", "0.6632774", "0.6619639", "0.65943074", "0.6583533", "0.657301" ]
0.81559217
0
Check that the fibonacci function returns a generator.
def test_fibonacci_generator(): computed_fibonacci_value = fibonacci.fibonacci_generator(8) assert isinstance(computed_fibonacci_value, types.GeneratorType) is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fibonacci(self):\n fibonacci = [x for x in generators.fibonacci(10)]\n self.assertEqual(fibonacci[7], 21)", "def fibonacci():\n\ta, b = 0, 1\n\tyield 0\n\twhile True:\n\t\ta, b = b, a + b\n\t\tyield a", "def yieldFibonacci():\n yield 1\n a = 1\n b = 2\n while True:\n yield b\n a, b = b, a + b", "def test_fibonacci():\n\n def isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n def isFibonacci(n):\n # n is Fibonacci if one of 5*n*n + 4 or 5*n*n - 4 or both is a perfect square\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n for _ in range(10000):\n num = random.randint(0, 1000)\n f = s7.fibonacci()\n for i in range(0, (num // 2) + 1):\n if num in s7.cache.values():\n break\n f()\n assert isFibonacci(num) is (num in s7.cache.values()), \"Check your Fibonacci implementation\"", "def find_fibonacci(x: int) -> bool:\r\n # write your code here\r\n a = 1\r\n b = 1\r\n while True: #Looping sampai ketemu return\r\n if x == 0:\r\n return True\r\n elif b <= x:\r\n if b == x:\r\n return True\r\n else:\r\n temp = b\r\n b = b + a\r\n a = temp\r\n else:\r\n return False", "def fib(number: int) -> int:\n return next(islice(generator(number), number, number + 1))", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def fibonacci(n):\n a = 0\n b = 1\n counter = 0\n while True:\n if (counter > n): return\n yield a\n a = b\n b = a + b\n counter += 1", "def fibonacci(a=1, b=2):\n while True:\n yield a\n a, b = b, b+a", "def t_fibonnaci():\n a = 1\n b = 1\n c = a + b\n while True:\n yield c\n a = b + c\n b = c + a \n c = a + b", "def test_fibonacci_single():\n computed_iterative_value = fibonacci.compute_iterative_fibonacci(18)\n computed_recursive_value = fibonacci.compute_recursive_fibonacci(18)\n assert computed_iterative_value == 2584\n assert computed_recursive_value == 2584", "def fibonacci_gen(n=1):\n a, b = 0, 1\n while True:\n yield a\n a, b = b, (a + b) * n", "def fibo_generator(count):\n try:\n if count <= 0:\n return\n a = 0\n b = 1\n yield a\n if count == 1:\n return\n yield b\n if count == 2:\n return\n for i in range(count - 2):\n c = a + b\n yield c\n a, b = b, c\n except TypeError:\n raise TypeError(\"Only integers allowed\")", "def test_fibonacci(self):\n fibonacci = [x for x in iterators.FibonacciIterator(10)]\n self.assertEqual(fibonacci[7], 21)", "def fibonacci() -> Iterator[int]:\n a, b = 0, 1\n while True:\n yield a\n a, b = b, a + b", "def fib(n):\n try:\n assert isinstance(n, int)\n except AssertionError:\n print(\"n must be an integer\")\n return None\n\n try:\n assert n >= 2\n except AssertionError:\n print(\"n must be greater than or equal to 2\")\n return None\n\n a, b = 0, 1\n while a < n:\n yield a\n a, b = b, a+b", "def fibonacci(n):", "def fibonacci(n):\n a, b, counter = 0, 1, 0\n while True:\n if counter > n:\n return\n yield a\n a, b = b, a + b\n counter += 1", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n elif n > 2:\n return fibonacci(n - 1) + fibonacci(n - 2)\n else:\n return False", "def fibonacci(n):\n a, b = 1, 1\n count = 0\n while count < n:\n yield a\n count += 1\n a, b = b, a+b", "def fibonacci(n: int):\n a, b, counter = 0, 1, 0\n while True:\n if (counter > n):\n return\n yield a\n a, b = b, a + b\n counter += 1", "def fibonacci_numbers():\n print(\"Problem: Fibonacci numbers\")\n\n n = int(input())\n\n result = 
fib(n)\n print(result)", "def fib():\n x, y = 0, 1\n while True:\n yield x\n x, y = y, x + y", "def fibonacci(n):\n a = 0\n b = 1\n counter = n\n placeholder = 1\n\n while(counter > 0):\n placeholder = a + b\n yield \"fib({})\".format(counter)\n a = b\n b = placeholder\n counter = counter - 1\n yield a\n return", "def isfib(number):\n\n num1 = 1\n num2 = 1\n while True:\n if num2 < number:\n tempnum = num2\n num2 += num1\n num1 = tempnum\n elif num2 == number:\n return True\n else:\n return False", "def fib(limit):\n a, b = 0, 1\n while a <= limit:\n yield a\n a, b = b, a + b", "def test_fib_digits(n, result):\n from even_digit_primes import f\n assert f(n) == result", "def fibonacci_generator():\n fib_prev = 0 # prev fib number\n fib_cur = 1 # next fib number\n i = 1 # number position\n while True:\n yield i, fib_cur\n i += 1\n fib_prev, fib_cur = fib_cur, fib_prev + fib_cur", "def test_fibonacci(n, result):\n from series import fibonacci\n assert fibonacci(n) == result", "def test_fibonacci_recursive(self):\r\n result = fib.fibonacci_recursive(5)\r\n self.assertEqual(result, 8)" ]
[ "0.77140635", "0.7277792", "0.7176902", "0.71680737", "0.71608746", "0.7140828", "0.711126", "0.7085678", "0.70451903", "0.7023813", "0.7023769", "0.7016897", "0.69939935", "0.6988844", "0.69802195", "0.6955412", "0.6863057", "0.6839489", "0.6838587", "0.6837633", "0.6831056", "0.6806507", "0.6778928", "0.6776697", "0.6732586", "0.67140436", "0.6707181", "0.66825366", "0.66690177", "0.66592556" ]
0.7865597
0
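The three preceding rows assert against `fibonacci_tuple`, `fibonacci_list`, and `fibonacci_generator`, whose implementations are also absent from the dump (the negatives include only TODO skeletons). A minimal sketch consistent with the assertions — eight terms starting 1, 1, 2, 3, ..., and a true generator object for the third variant — under the assumption that all three share one iteration scheme:

    # Hypothetical collection-returning variants implied by the three rows above.
    from typing import Generator, List, Tuple


    def fibonacci_list(number: int) -> List[int]:
        """Return the first `number` Fibonacci values as a list."""
        previous, current = 0, 1
        result = []
        for _ in range(number):
            result.append(current)
            previous, current = current, previous + current
        return result


    def fibonacci_tuple(number: int) -> Tuple[int, ...]:
        """Return the first `number` Fibonacci values as a tuple."""
        return tuple(fibonacci_list(number))


    def fibonacci_generator(number: int) -> Generator[int, None, None]:
        """Yield the first `number` Fibonacci values lazily."""
        previous, current = 0, 1
        for _ in range(number):
            yield current
            previous, current = current, previous + current

With this sketch, `fibonacci_list(8)` gives [1, 1, 2, 3, 5, 8, 13, 21], matching both collection assertions, and `isinstance(fibonacci_generator(8), types.GeneratorType)` holds because calling a generator function returns a generator object without executing its body.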
Play rock paper scissors with the computer.
def rock_paper_scissors(): # creates a random integer between 0 and 2 and converts it to rock, paper, or scissors computer_guess = random.randint(0, 2) if computer_guess == 0: computer_guess = 'rock' elif computer_guess == 1: computer_guess = 'paper' else: computer_guess = 'scissors' # takes user input and normalizes it to lower case and with no white space user_guess = input("Enter either rock, paper, or scissors: ") user_guess = user_guess.strip() user_guess = user_guess.lower() # prints whether you won, lost, or tied based on the possible outcomes available if not(user_guess == 'rock' or user_guess == 'paper' or user_guess == 'scissors'): print("you did not enter rock, paper, or scissors") elif user_guess == 'rock' and computer_guess == 'scissors': print('The computer guessed scissors, You won!') elif user_guess == 'paper' and computer_guess == 'rock': print('The computer guessed rock, You won!') elif user_guess == 'scissors' and computer_guess == 'paper': print('The computer guessed paper, You won!') elif user_guess == 'rock' and computer_guess == 'paper': print('The computer guessed paper, You lost!') elif user_guess == 'paper' and computer_guess == 'scissors': print('The computer guessed scissors, You lost!') elif user_guess == 'scissors' and computer_guess == 'rock': print('The computer guessed rock, You lost!') else: print('You tied!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rock_paper_scissors():\n rps = [\"rock\", \"paper\", \"scissors\"]\n wannaplay = input(\"do you want to play rock paper scissors? \")\n if wannaplay == \"yes\":\n computerpick = random.choice(rps)\n playerpick = input(\"what do you choose?\")\n if computerpick == \"rock\":\n if playerpick == \"scissors\":\n print(\"and I chose rock, I win. \")\n if playerpick == \"rock\":\n print(\"me too, tie. \")\n if playerpick == \"paper\":\n print(\"and I chose rock, you win. \")\n if computerpick == \"paper\":\n if playerpick == \"scissors\":\n print(\"and I chose paper, you win. \")\n if playerpick == \"rock\":\n print(\"and I chose paper, I win. \")\n if playerpick == \"paper\":\n print(\"and I chose paper, tie. \")\n if computerpick == \"scissors\":\n if playerpick == \"scissors\":\n print(\"and I chose scissors, tie. \")\n if playerpick == \"rock\":\n print(\"and I chose scissors, you win. \")\n if playerpick == \"paper\":\n print(\"and I chose scissors, I win. \")\n else:\n return", "async def rock_paper_scissors(self, ctx):\n authorize(ctx, \"mentions\") # check for a mentioned user\n\n p1 = Player.get(ctx.author.id)\n p2 = Player.get(ctx.message.mentions[0].id)\n\n # Ensure player is someone else\n if p1 == p2:\n raise UserInputError(\"You can't play against yourself\")\n\n # Create new game\n embed = discord.Embed(title=\"Rock Paper Scissors\",\n description=f\"{p1.name} **VS** {p2.name}\\n\\nCheck DMs for how to play\")\n await ctx.send(embed=embed)\n game = RPS(ctx.channel, p1, p2)\n await game.send_dms()", "async def rps(self, ctx):\r\n\r\n async def play():\r\n await ctx.send('Lets play **Rock, Paper, Scissors**. Choose your weapon:')\r\n choices = ('rock', 'paper', 'scissors')\r\n computer = choices[randint(0, 2)]\r\n player = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n player = player.content.lower()\r\n\r\n beats = {\r\n 'rock': ['paper'],\r\n 'paper': ['scissors'],\r\n 'scissors': ['rock']\r\n }\r\n\r\n if computer and player in choices:\r\n if computer == player:\r\n await ctx.send('**Tie!** You both chose **{}**.'.format(computer.title()))\r\n await gameover()\r\n elif player in beats[computer]:\r\n await ctx.send('**You win!** Viking chose: **{}** and you chose: **{}**.'.format(computer.title(), player.title()))\r\n await gameover()\r\n else:\r\n await ctx.send('**You lose!** Viking chose: **{}** and you chose: **{}**.'.format(computer.title(), player.title()))\r\n await gameover()\r\n else:\r\n await ctx.send('Please choose a weapon.')\r\n await play()\r\n\r\n async def gameover():\r\n await ctx.send('Do you want to play again? (Enter: **Yes** / **No**)')\r\n response = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n response = response.content.lower()\r\n\r\n if response == 'yes':\r\n await play()\r\n elif response == 'no':\r\n await ctx.send('Thanks for playing!')\r\n else:\r\n await ctx.send('Invalid option!')\r\n await gameover()\r\n\r\n await play()", "def rpsls(player_choice):\n \n # Convert the player's choice into numbers\n player_number = name_to_number(player_choice)\n\n # Prints error if input does not match any of the cases\n # and ends the game.\n if (player_number == -1):\n print \"ERROR! 
Please choose either rock, paper, scissors, lizard, or Spock!\\n\"\n \n else:\n # Prints the player's choice and also converts the\n # upper/lowercase letters into its lower/uppercase \n # letters\n print \"Player chooses \" + number_to_name(player_number)\n \n # Assigns a random choice for the computer's number\n comp_number = random.randrange(0,5)\n \n # Converts computer number into string\n comp_choice = number_to_name(comp_number)\t\n\n # Converts computer's number into string\n print \"Computer chooses \" + comp_choice\n\n # Takes the difference between the computer's and player's number\n difference_modulo = (player_number - comp_number) % 5\n \n # Decide who wins based on the difference\n if (difference_modulo == 0):\n print \"Player and computer tie!\\n\"\n elif (difference_modulo == 1 or difference_modulo == 2):\n print \"Player wins!\\n\"\n else:\n print \"Computer wins!\\n\"", "def play_game():\n clear_screen()\n user_choice = input(\"Please enter 'R' for Rock, 'P' for Paper, or 'S' for Scissors\\n>>> \").upper()\n if user_choice in list(options.keys()):\n print(\"You have selected {}.\".format(options[user_choice]))\n else:\n print(\"Please select a valid option\")\n exit()\n print(\"The computer is now selecting...\")\n sleep(1)\n computer_choice = random.choice(list(options.keys()))\n print(\"The computer has selected {}.\".format(options[computer_choice]))\n sleep(1)\n decide_winner(user_choice, computer_choice)", "def rps_function():\n randNumber = random.randint(1,3)\n companswer = \"Scissor\"\n if randNumber == 1:\n companswer = \"Rock\"\n elif randNumber == 2:\n companswer = \"Paper\"\n\n answer = q.select(\"Select Rock, Paper or Scissors\", choices=[\"Rock\", \"Paper\", \"Scissor\"]).ask()\n\n print(answer, companswer, sep=\" and \")\n\n if answer == companswer:\n print(Figlet(font='3x5', width=1000).renderText(\"It was a draw unfortunately\"))\n elif (answer == \"Rock\" and companswer == \"Paper\") or (answer == \"Paper\" and companswer == \"Scissor\") or (answer == \"Scissor\" and companswer == \"Rock\"):\n print(Figlet(font='3x5', width=100).renderText(\"Unfortunately you lost\"))\n else:\n print(Figlet(font='3x5').renderText(\"You won yay\"))", "async def rps(self, ctx, your_choice : RPSParser):\r\n author = ctx.message.author\r\n player_choice = your_choice.choice\r\n red_choice = choice((RPS.rock, RPS.paper, RPS.scissors))\r\n cond = {\r\n (RPS.rock, RPS.paper) : False,\r\n (RPS.rock, RPS.scissors) : True,\r\n (RPS.paper, RPS.rock) : True,\r\n (RPS.paper, RPS.scissors) : False,\r\n (RPS.scissors, RPS.rock) : False,\r\n (RPS.scissors, RPS.paper) : True\r\n }\r\n\r\n if red_choice == player_choice:\r\n outcome = None # Tie\r\n else:\r\n outcome = cond[(player_choice, red_choice)]\r\n\r\n if outcome is True:\r\n await self.bot.say(\"{} You win {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n elif outcome is False:\r\n await self.bot.say(\"{} You lose {}!\"\r\n \"\".format(red_choice.value, author.mention))\r\n else:\r\n await self.bot.say(\"{} We're square {}!\"\r\n \"\".format(red_choice.value, author.mention))", "def rock_p_scissor(user_input, comp_choice):\r\n if user_input.lower() == comp_choice:\r\n compare = results[2]\r\n score_update(scoreboard, compare)\r\n banner_text(\"*\")\r\n banner_text(\"Tie! 
We both chose {0}\".format(user_input))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n elif user_input.lower() == 'rock' and comp_choice == 'paper':\r\n compare = results[1]\r\n score_update(scoreboard, compare)\r\n print(\"You lost! you chose {0} and I chose {1}\".format(user_input, comp_choice))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n elif user_input.lower() == 'rock' and comp_choice == 'scissor':\r\n compare = results[0]\r\n score_update(scoreboard, compare)\r\n print(\"You won! you chose {0} and I chose {1}\".format(user_input, comp_choice))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n elif user_input.lower() == 'paper' and comp_choice == 'scissor':\r\n compare = results[1]\r\n score_update(scoreboard, compare)\r\n print(\"You lost! you chose {0} and I chose {1}\".format(user_input, comp_choice))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n elif user_input.lower() == 'paper' and comp_choice == 'rock':\r\n compare = results[0]\r\n score_update(scoreboard, compare)\r\n print(\"You won! you chose {0} and I chose {1}\".format(user_input, comp_choice))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n elif user_input.lower() == 'scissor' or user_input.lower() == 'scissors' and comp_choice == 'rock':\r\n compare = results[1]\r\n score_update(scoreboard, compare)\r\n print(\"You lost! you chose {0} and I chose {1}\".format(user_input, comp_choice))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n elif user_input.lower() == 'scissor' or user_input.lower() == 'scissors' and comp_choice == 'paper':\r\n compare = results[0]\r\n score_update(scoreboard, compare)\r\n print(\"You won! you chose {0} and I chose {1}\".format(user_input, comp_choice))\r\n banner_text(\"*\")\r\n print(scoreboard)\r\n banner_text(\"*\")\r\n else:\r\n print(\"Your input was not valid. 
Please try again\")", "def rpsls(player_choice):\r\n \r\n print \"Player chooses \"+ player_choice\r\n player_number = name_to_number(player_choice)\r\n comp_number = random.randrange(0,5)\r\n comp_choice = number_to_name(comp_number)\r\n\r\n print \"Computer chooses \"+ comp_choice\r\n\r\n result = (comp_number - player_number) % 5\r\n\r\n if result==1 or result==2:\r\n print \"Computer wins!\"\r\n elif result==3 or result==4:\r\n print \"Player wins!\"\r\n else:\r\n print \"Player and computer tie!\"\r\n print ''", "def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def rpsls(player_choice):\n print \n\n print \"Player chooses \" + player_choice\n\n player_number = name_to_number(player_choice)\n\n comp_number = random.randrange(0, 5)\n\n comp_choice = number_to_name(comp_number)\n\n print \"Computer chooses \" + comp_choice\n\n\n if 0 < (player_number - comp_number) % 5 <= 2:\n print \"Player wins!\"\n \n elif (player_number - comp_number) % 5 > 2:\n print \"Computer wins!\"\n \n elif (player_number - comp_number) % 5 == 0:\n print \"Player and computer tie!\"\n \n else:\n print \"Error!!!\"\n return 1\n\n return 0", "def main(self, win):\n\n # The rock, paper, scissor buttons\n rockButton = Button(white, 50, 400, 100, 50, 'ROCK')\n paperButton = Button(white, 200, 400, 100, 50, 'PAPER')\n scissorButton = Button(white, 350, 400, 100, 50, 'SCISSOR')\n\n # Player and computer scores\n player = 0\n computer = 0\n\n run = True\n while run:\n userChoice = 'none'\n compChoice = 'none'\n beginGame = False\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n run = False\n\n # Control mouse button events\n if event.type == pygame.MOUSEBUTTONDOWN:\n if rockButton.isOver(pos):\n userChoice = 'rock'\n compChoice = self.computer_generate()\n beginGame = True\n elif paperButton.isOver(pos):\n userChoice = 'paper'\n compChoice = self.computer_generate()\n beginGame = True\n elif scissorButton.isOver(pos):\n compChoice = self.computer_generate()\n userChoice = 'scissor'\n beginGame = True\n\n self.display_score(win, player, computer)\n self.display_playground(win, rockButton, paperButton, scissorButton)\n\n if beginGame:\n self.game_initiate(win)\n\n self.display_player(userChoice, win)\n self.display_computer(compChoice, win)\n\n if beginGame:\n scores = self.decide_winner(userChoice, compChoice)\n pygame.display.update()\n pygame.time.delay(1000)\n player += scores[0]\n computer += scores[1]\n\n pygame.display.update()\n 
pygame.time.delay(40)", "def play_match(self):\r\n time.sleep(1)\r\n print(\"Let's play Rock, Paper or Scissors!\\n\")\r\n while True:\r\n self.plyer_op()\r\n while self.you.score != 3 or self.opposite.score != 3:\r\n\r\n self.play_round()\r\n print('your score :' + str(self.you.score) + ' vs ' +\r\n \"the opposite player :\" +\r\n str(self.opposite.score) + '\\n')\r\n if self.you.score == 3:\r\n print(\"you win!! the match\\n\")\r\n break\r\n elif self.opposite.score == 3:\r\n print(\"opposite player win!! the match\\n\")\r\n break\r\n self.you.score = 0\r\n self.opposite.score = 0", "async def evaluate(self):\n if self.players[1].id == bot.user.id:\n self.p2_move = random.choice((\"Rock\", \"Paper\", \"Scissors\"))\n\n if None in self.moves:\n return\n\n if len(self.moves) == 1:\n tie_embed = discord.Embed(title=\"It's a Draw\")\n await self.channel.send(embed=tie_embed)\n return await self.end()\n\n if self.moves == {\"Rock\", \"Paper\"}:\n winner = \"Paper\"\n elif self.moves == {\"Scissors\", \"Paper\"}:\n winner = \"Scissors\"\n elif self.moves == {\"Rock\", \"Scissors\"}:\n winner = \"Rock\"\n\n # P1 Wins\n if self.p1_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[0].name}'s **{winner}** beats {self.players[1].name}'s **{self.p2_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[0])\n\n # P2 Wins\n elif self.p2_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[1].name}'s **{winner}** beats {self.players[0].name}'s **{self.p1_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[1])", "def rpsls(player_guess):\n\n\n # convert name to player_number using name_to_number\n \n player_number = name_to_number(player_guess)\n \n # compute random guess for comp_number using random.randrange()\n \n comp_number = random.randrange(0, 5)\n \n # compute difference of player_number and comp_number modulo five\n \n difference = (player_number - comp_number) % 5\n \n # use if/elif/else to determine winner (but don't forget that players can tie !)\n \n if difference == 1 or difference == 2:\n result = \"Player wins\"\n elif difference == 3 or difference == 4:\n result = \"Computer wins\"\n else:\n result = \"Player and computer tie!\"\n \n # convert comp_number to name using number_to_name\n \n comp_guess = number_to_name(comp_number)\n \n # print results\n \n print \"Player chooses\", player_guess\n print \"Computer chooses\", comp_guess\n print result\n print", "def rpsls(name):\n global player_choice, comp_choice, winner, image_num, win_method\n \n # convert name to player_number\n player_number = item_dict.get(name)\n \n # determine if input is valid\n if player_number == None:\n player_choice = 'Invalid input. 
No ' + name + \" in RPSLS.\"\n comp_choice = ''\n win_method = ''\n winner = ''\n image_num = 5\n return\n\n # compute random guess for comp_number using random.randrange()\n comp_number = random.randrange(0,5)\n\n # compute difference of player_number and comp_number modulo five\n # (could use verb_grid to compute winner, but d = p1 - p2 % 5 is probably faster & easier)\n difference = (player_number - comp_number) % 5\n \n # convert comp_number to name using number_to_name\n comp_name = number_to_name(comp_number)\n\n # use if/elif/else to determine winner\n if (difference == 1) or (difference == 2):\n winner = \"Player wins!\"\n image_num = player_number\n win_method = ' '.join([name, verb_grid[player_number][comp_number], comp_name + '...'])\n elif (difference == 3) or (difference == 4):\n winner = \"Computer wins!\"\n image_num = comp_number\n win_method = ' '.join([comp_name, verb_grid[comp_number][player_number], name + '...'])\n elif difference == 0:\n winner = \"Player and computer tie!\"\n image_num = 6\n win_method = ''\n else:\n winner = \"Error computing winner.\"\n \n # update player and computer choice for display\n player_choice = \"Player chooses \" + name\n comp_choice = \"Computer chooses \" + comp_name", "def play_game():\n try:\n # get selections for player and computer\n user_pick = utils.get_user_selection(user_selection.get())\n computer_pick = utils.get_computer_selection()\n\n # determine winner\n winner = utils.determine_winner(user_pick, computer_pick, \"pvc\")\n\n # display result\n player_selection.set(f\"player selection: {user_pick.name}\")\n computer_selection.set(f\"computer selection: {computer_pick.name}\")\n output.set(winner)\n except Exception:\n output.set(\"Invalid: choose any one -- 1, 2, 3\")\n player_selection.set(\"player selection: \")\n computer_selection.set(\"computer selection: \")", "async def playrps(self, context, *args):\n\n if len(self.rps_data) < 2 and len(args) == 1:\n if args[0].upper() in [\"R\", \"P\", \"S\", \"||R||\", \"||P||\", \"||S||\"]:\n await context.channel.delete_messages([context.message])\n self.rps_data.append((context.author, args[0]))\n else:\n if args[0].upper() == \"RESETSCORES\":\n self.reset_score(context.author)\n elif args[0].upper() == \"SCORES\":\n await context.channel.send(self.get_score(context.author))\n else:\n await context.channel.send('Wrong format')\n if len(self.rps_data) == 2:\n result = self.get_result()\n if result == \"Draw\":\n await context.channel.send(\"It's a Draw!\"\n \" for {} and {}\".format(self.rps_data[0][0].display_name,\n self.rps_data[1][0].display_name))\n elif result == \"First\":\n await context.channel.send(self.rps_data[0][0].display_name + \" won!\")\n else:\n await context.channel.send(self.rps_data[1][0].display_name + \" won!\")\n self.rps_data.clear()", "def test_play(self):\n\t\tRockAI, PaperAI = self.DummyAI('r'), self.DummyAI('p')\n\t\tresult = rps_main.play(RockAI, PaperAI, rounds=58, verbosity=0)\n\n\t\tself.assertTrue(len(result) == 2, \"Check that the result is a 2-tuple, \\\n\t\t\t\t\t\t\t\tor *some* kind of length 2 container, anyway.\")\n\t\tself.assertTrue(result[0] == 0, \"First AI should score 0 points.\")\n\t\tself.assertTrue(result[1] == 58, \"Second AI should score 58 points.\")", "def decide_winner(user_choice, computer_choice):\n if user_choice == \"P\" and computer_choice == \"R\":\n print(\"You win\")\n elif user_choice == \"R\" and computer_choice == \"S\":\n print(\"You win\")\n elif user_choice == \"S\" and computer_choice == \"P\":\n print(\"You 
win\")\n elif user_choice == computer_choice:\n print(\"You tied\")\n else:\n print(\"You lost\")", "def rocksPaperScissors(player1, player2, advanced = False ):\n \n valid_plays = None\n if advanced: valid_plays = [\"Rock\", \"Paper\", \"Scissors\", \"Spock\", \"Lizard\"]\n else: valid_plays = [\"Rock\", \"Paper\", \"Scissors\"]\n\n if player1 not in valid_plays or player2 not in valid_plays:\n print (\"One or both players did not provide a valid_hand\")\n return\n \n if player1 == player2: print (\"Tie\")\n else:\n d = list(map(lambda x: x[1], defeats[player1]))\n if player2 in d:\n verb = defeats[player1][d.index(player2)][0]\n print( player1 + \" \" + verb + \" \" + player2)\n print(\"Player 1 wins\")\n else:\n d = list(map(lambda x: x[1], defeats[player2]))\n verb = defeats[player2][d.index(player1)][0]\n print (player2 + \" \" + verb + \" \" + player1 )\n print (\"Player 2 wins\")", "def pro() -> None:\n global player\n global points\n global comp_points\n while (points < 3) and (comp_points < 3):\n choice = str(input(\"rock...paper...scissors...SHOOT!!!: \"))\n computer = game[randint(0, 2)]\n print(f\"My turn: {computer}\")\n if choice == rock and computer == paper:\n points = points\n comp_points = comp_points + 1\n if choice == rock and computer == scissors:\n points = points + 1\n comp_points = comp_points\n if choice == paper and computer == rock:\n points = points + 1\n comp_points = comp_points\n if choice == paper and computer == scissors:\n points = points\n comp_points = comp_points + 1\n if choice == scissors and computer == rock:\n points = points\n comp_points = comp_points + 1\n if choice == scissors and computer == paper:\n points = points + 1\n comp_points = comp_points\n if choice == computer:\n points = points\n comp_points = comp_points\n print(f\"{player}'s score: {points}\")\n print(f\"My score: {comp_points}\")\n if points == 3:\n print(f\"Good job {player}! YOU WIN {STAR_EYES}{STAR_EYES}{STAR_EYES}\")\n if comp_points == 3:\n print(f\"Sorry, {player}. YOU LOSE {SAD_FACE}{SAD_FACE}{SAD_FACE}\")", "def opponent_hand(self):\r\n\r\n # 1 = rock, 2 = paper, 3 = scissors\r\n random_hand = random.randint(1, 3)\r\n\r\n # Slows down the pace of the game with pauses\r\n self.loading(0.5)\r\n\r\n if random_hand == 1:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: ROCK\"\r\n self.opp_rock = True\r\n print(\"Opponent chose Rock.\")\r\n\r\n elif random_hand == 2:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: PAPER\"\r\n self.opp_paper = True\r\n print(\"Opponent chose Paper.\")\r\n\r\n elif random_hand == 3:\r\n\r\n opp_hand_value = (\" \" * 72) + \"OPPONENT: SCISSORS\"\r\n self.opp_scissors = True\r\n print(\"Opponent chose Scissors.\")\r\n\r\n # Clear the opponent hand entry box\r\n self.opp_hand_entry.delete(0, \"end\")\r\n\r\n # Insert the value of the randomized hand of the opponent\r\n self.opp_hand_entry.insert(0, opp_hand_value)", "def rock():\n typer.echo(\"🤖🤘\")", "def RPSf():\n rps_dict = {'r': 0,\n 'p': 1,\n 's': 2}\n rps_list = ['Rock', 'Paper', 'Scissors']\n count = [0, 0, 0] # index 0 is for our score, 1 for bot, 2 for draw.\n while True:\n computer = randint(0, 2)\n print(f\"\\nYour score: {count[0]}. Bot score: {count[1]} . Draw: {count[2]}\") # Prints the score\n user = input(\"Rock, Paper, Scissors, Go!: \").lower()\n if user not in 'rps' and user not in rps_list:\n if QuBa(user):\n return\n else:\n print(\"That is not a valid choice. 
Please try again: \")\n continue\n print(f\"The bot chooses {rps_list[computer]}!\")\n if (rps_dict[user] + 1) == computer or (computer == 0 and rps_dict[user] == 2):\n print('You lose!')\n count[1] += 1\n elif rps_dict[user] == computer:\n print('Draw')\n count[2] += 1\n else:\n print('You win!')\n count[0] += 1", "def play_poker(self) -> None:\r\n self.deal_opening_cards()\r\n for i in range(PokerRules.NUM_OF_ROUNDS):\r\n if self.active_players == 1:\r\n break\r\n self.play_round()\r\n PokerRules.winner(self.card_stack, self.money_stack, self.players)", "def test_if_paper_beats_rock():\n\tresult = game.evaluate_game('paper', 'rock')\n\tassert result == 'User'", "def rps():\n # do stuff to initialize\n moves_list = []\n result = []\n \n play = True\n while play:\n \n human = int(input(\"Enter Your Move (0 for rocks, 1 for paper, 2 for scissors): \"))\n while human not in [0,1,2]:\n human = int(input(\"Enter a valid input (0 for rocks, 1 for paper, 2 for scissors): \"))\n \n \n #should cpu_move go here?\n comp_int = cpu_move(moves_list)\n #comp_int = randint(0,3)\n\n moves_list.append([human,comp_int])\n\n\n if human == 0: #human chooses rock\n if comp_int == 0:\n #tie\n result.append(\"T\")\n print(str([human,comp_int]) + \"Both choose rock. Tie!\")\n elif comp_int == 1:\n #comp wins paper v. rock\n result.append(0)\n print(str([human,comp_int]) + \"CPU wins paper over rock!\")\n else:\n #human wins rock v. scissor\n result.append(1)\n print(str([human,comp_int]) + \"you win rock over scissor!\")\n \n elif human == 1: #human chooses paper\n if comp_int == 0:\n #human wins paper v. rock\n result.append(1)\n print(str([human,comp_int]) + \"you win paper over rock!\")\n elif comp_int == 1:\n #tie\n result.append(\"T\")\n print(str([human,comp_int]) + \"Both choose paper. Tie!\")\n else:\n #comp wins scissor v. paper\n result.append(0)\n print(str([human,comp_int]) + \"CPU wins scissor over paper!\")\n \n else: #human chooses scissor\n if comp_int == 0:\n #comp wins rock v. scissor\n result.append(0)\n print(str([human,comp_int]) + \"CPU wins rock over scissor!\")\n elif comp_int == 1:\n #human wins scissor v. paper\n result.append(1)\n print(str([human,comp_int]) + \"you win scissor over paper!\")\n else:\n #tie\n result.append(\"T\")\n print(str([human,comp_int]) + \"Both choose scissor. Tie!\")\n \n\n\n keep_play = input(\"Want to play again? (y/n): \")\n if keep_play == \"y\":\n play = True\n if keep_play == \"n\":\n play = False\n \n # summarize the results\n sumout = \"\"\n sumout += \"player cpu \\n\"\n sumout += \"----------- \\n\"\n for k in range(len(moves_list)):\n sumout += str(moves_list[k]) + \" \" + str(result[k]) + \"\\n\"\n pct = (result.count(1)/(result.count(1) + result.count(0)))*100\n \n if pct > 50:\n sumout += \"winner: player! \\n\"\n elif pct == 50:\n sumout += \"Tie! \\n\"\n else:\n sumout += \"winner: CPU! \\n\"\n\n sumout += \"win pct: {:.2f} \\n\".format(pct)\n\n print(sumout)", "def play_against(self, p2, print_result=False):\n\n if self == p2:\n print('Invalid match. 
A player can not compete against themselves.')\n return None\n c1 = c2 = None\n\n while c1 == c2:\n # This loop takes care of ties.\n c1 = self.next_choice(opponent=p2)\n c2 = p2.next_choice(opponent=self)\n Player.record_play(self, c1, p2, c2, winner=None)\n\n if (c1 == 'rock' and c2 == 'paper') or \\\n (c1 == 'paper' and c2 == 'scissors') or \\\n (c1 == 'scissors' and c2 == 'rock'):\n winner = p2\n win_choice = c2\n loser = self\n loss_choice = c1\n else:\n winner = self\n win_choice = c1\n loser = p2\n loss_choice = c2\n\n # Update stats:\n Player.record_play(self, c1, p2, c2, winner)\n # winner.record_win(weapon=win_choice, opponent=loser)\n # loser.record_loss(weapon=loss_choice, opponent=winner)\n if print_result:\n print('{:20s} {} {} {}'.format(winner.name + ' beat ' + loser.name + '.',\n win_choice,\n Player.win_verb[win_choice],\n loss_choice))\n return winner", "def play(pl1, ch1, pl2, ch2):\n \n if ch1 == ch2:\n print(\"It's a tie.\")\n return None\n if ch1 == 'Rock':\n if ch2 == 'Scissors':\n print(\"Congratulations,\", pl1, \". You WON! Rock beats Scissors!\")\n return pl1\n else:\n print(\"Congratulations,\", pl2, \". You WON! Paper beats Rock!\")\n return pl2\n elif ch1 == 'Scissors':\n if ch2 == 'Rock':\n print(\"Congratulations,\", pl2, \". You WON! Rock beats Scissors!\")\n return pl2\n else:\n print(\"Congratulations,\", pl1, \". You WON! Scissors beat Paper!\")\n return pl1 \n else:\n if ch2 == 'Rock':\n print(\"Congratulations,\", pl1, \". You WON! Paper beats Rock!\")\n return pl1\n else:\n print(\"Congratulations,\", pl2, \". You WON! Scissors beat Paper!\")\n return pl2" ]
[ "0.7982476", "0.75652486", "0.7221312", "0.7203815", "0.7198876", "0.70861614", "0.6967138", "0.6955682", "0.6877361", "0.67542064", "0.6753567", "0.672975", "0.66974074", "0.66720617", "0.6622933", "0.66221225", "0.6594134", "0.65698695", "0.65573245", "0.6549117", "0.6526606", "0.651671", "0.6407679", "0.6398142", "0.63521224", "0.63510114", "0.63238424", "0.6277903", "0.6265595", "0.6262363" ]
0.7786586
1
Function to plot the data saved into the files in real time
def real_time_plot(files):
    global len_data, first_iter, colors
    for i, F in enumerate(files):
        # Load data
        data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))
        # Check if new data
        if (len_data != len(data[:,0])):
            # Plot
            label = ntpath.basename(F)
            label = label[0:-4]
            ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)
            pyplot.draw()
            # Update globals
            len_data = len(data[:,0])
            if (first_iter == True):
                ax.legend()
                first_iter = False
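
For context, a minimal driver loop is sketched below; it is not part of the stored document, and the file names, colors, and refresh interval are assumptions. It shows how a polling helper like this is typically wired up with matplotlib's interactive mode, in the same module as the function, so that the pylab/ntpath/ax globals it reads actually exist and pyplot.draw() refreshes the figure.

import ntpath
import pylab
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)

files = ['run1.csv', 'run2.csv']   # hypothetical CSV logs being appended to
colors = ['r', 'b']                # one style per file, consumed via colors[i]
len_data, first_iter = 0, True     # globals read and updated by real_time_plot

fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
pyplot.ion()                       # interactive mode: draw() updates without blocking

while True:
    real_time_plot(files)          # re-reads the files and plots any new rows
    pyplot.pause(1.0)              # yield to the GUI event loop, poll once per second
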
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_data(self):", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def plot_history(self, filename):\r\n plt.figure(figsize=(12, 9))\r\n plt.plot(self.Objective_value)\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Value')\r\n plt.title('Objective Function Values')\r\n # plt.savefig(filename)\r\n plt.show()\r\n return", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def plot_and_save_2d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (2d)'+'-'*24\n \n print 'Loading data...',\n data = load_file(path_name+file_name)\n t = data['t']\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # Moment.\n plt.figure(1)\n plt.plot(t, data['dyn']['M'], t, data['static']['M'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment')\n plt.grid()\n plt.savefig('%sM.png' %pic_path)\n\n # Axial force.\n plt.figure(2)\n plt.plot(t, data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fa')\n plt.title('Fa')\n plt.grid()\n plt.savefig('%sFa.png' %pic_path)\n\n # Transverse force.\n plt.figure(3)\n plt.plot(t, data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Ft')\n plt.title('Ft')\n plt.grid()\n plt.savefig('%sFt.png' %pic_path)\n\n # Resultant force.\n plt.figure(4)\n plt.plot(t, np.sqrt(data['dyn']['FY']**2+data['dyn']['FZ']**2),\n t, np.sqrt(data['static']['FY']**2+data['static']['FZ']**2))\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fr')\n plt.title('Fr')\n plt.grid()\n plt.savefig('%sFr.png' %pic_path)\n print 'done'\n\n if show:\n plt.show()", "def plot_file(filename, params):\n\tarr = None\n\twith open(filename) as filep:\n\t\tarr = json.load(filep)\n\tplot_data(arr, params)", "def plot_1():\n p_files = []\n filename = \"energy_data_2D_80\"\n for file in sorted(os.listdir(folder)):\n if file.startswith(filename):\n p_files.append(os.path.join(folder,file))\n T_list = []\n fig, ax = plt.subplots()\n for p_file in p_files[3::3]:\n T = (os.path.splitext(os.path.basename(p_file))[0]).split('_',4)[4]\n #print(T)\n E = []\n t = []\n if (T not in T_list):\n T_list.append(T)\n with open(p_file) as csvfile:\n lines = csv.reader(csvfile, delimiter=' ')\n sweep = 0\n for row in lines:\n E.append(float(row[0]))\n t.append(sweep)\n sweep += 1\n ax.plot(t[0:200], E[0:200],label=\"T = \"+format(T[0:3]))\n ax.set_title(\"Energy per bond vs Time\")\n ax.set_ylabel(\"e / J\")\n ax.set_xlabel(\"t / sweeps\")\n ax.legend()\n\n fig.savefig(folder2+\"energy_vs_time.png\")\n fig.savefig(texfolder+\"energy_vs_time.pdf\")", "def __plot(name, x, y):\n import matplotlib.pyplot as 
plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def loadAndPlot1DMassData(dataFile='movingPointMassData/testPointMassData000.pkl'):\n # Load the data back\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Iterate over the different saved trajectores and plot out the results.\n for i in range(len(dataOut[0])):\n plt.figure(i)\n plt.plot(dataOut[0][i][1],dataOut[0][i][0])\n plt.show()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n 
self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()", "def plot_and_save_3d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (3d)'+'-'*24\n \n print 'Loading force data...', \n data = load_file(path_name+file_name)\n t = data['t']\n dyn = 1.0\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # x-moment\n plt.figure(1)\n plt.plot(t, dyn*data['dyn']['MX'], t, data['static']['MX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mx')\n plt.title('Moment (x)')\n plt.grid()\n plt.savefig('%sMx.png' %pic_path)\n\n # y-moment\n plt.figure(2)\n plt.plot(t, dyn*data['dyn']['MY'], t, data['static']['MY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment (y)')\n plt.grid()\n plt.savefig('%sMy.png' %pic_path)\n\n # z-moment\n plt.figure(3)\n plt.plot(t, dyn*data['dyn']['MZ'], t, data['static']['MZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mz')\n plt.title('Moment (z)')\n plt.grid()\n plt.savefig('%sMz.png' %pic_path)\n \n # x-force\n plt.figure(4)\n plt.plot(t, dyn*data['dyn']['FX'], t, data['static']['FX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fx')\n plt.title('Fx')\n plt.grid()\n plt.savefig('%sFx.png' %pic_path)\n\n # y-force\n plt.figure(5)\n plt.plot(t, dyn*data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fy')\n plt.title('Fy')\n plt.grid()\n plt.savefig('%sFy.png' %pic_path)\n\n # z-force\n plt.figure(6)\n plt.plot(t, dyn*data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fz')\n plt.title('Fz')\n plt.grid()\n plt.savefig('%sFz.png' %pic_path)\n print 'done'\n\n #nice_looking_plots(t, data['dyn'], data['static'])\n\n if show:\n plt.show()", "def worker_plot(fname):\n with Database() as base:\n _filter = base.get_filter(fname)\n plt.clf()\n plt.plot(_filter.trans_table[0], _filter.trans_table[1], color='k')\n plt.xlim(_filter.trans_table[0][0], _filter.trans_table[0][-1])\n plt.minorticks_on()\n plt.xlabel('Wavelength [nm]')\n plt.ylabel('Relative transmission')\n plt.title(\"{} filter\".format(fname))\n plt.tight_layout()\n plt.savefig(\"{}.pdf\".format(fname))", "def plotXY(xName,xDataRaw,yName, yDataRaw):\n scanFileHolder = getScanFileHolderXY(xName,xDataRaw,yName, yDataRaw) \n 
scanFileHolder.plot(xName, yName)\n return scanFileHolder", "def plot_files(n=15):\n lod = read_files(n)\n\n for i in range(len(lod)):\n plt.plot(range(len(lod[i])), lod[i])\n\n plt.show()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def SavingAndPlottingAfterRecursive(self):\n\n\n #Starting points of events in sec.\n startpoints=self.AnalysisResults[self.sig]['StartPoints']\n\n #Ending points of events in sec.\n endpoints=self.AnalysisResults[self.sig]['EndPoints']\n\n #Total number of events\n numberofevents=self.AnalysisResults[self.sig]['NumberOfEvents']\n\n #Current drop in nA\n deli=self.AnalysisResults[self.sig]['DeltaI']\n\n #end[i] - start[i] in sec.\n dwell=self.AnalysisResults[self.sig]['DwellTime']\n\n #current drop / current at start \n frac=self.AnalysisResults[self.sig]['FractionalCurrentDrop']\n\n #start[i+1] - start[i] in sec.\n dt=self.AnalysisResults[self.sig]['Frequency']\n\n #start[i+1] - start[i] in sec.\n localBaseline=self.AnalysisResults[self.sig]['LocalBaseline']\n\n # If plotting takes too much time: \"Don_t_plot...\" the graph will be deleted\n if not self.ui.actionDon_t_Plot_if_slow.isChecked():\n #clear signal plot\n self.p1.clear()\n # Event detection plot, Signal Plot\n self.p1.plot(self.t, self.data[self.sig], pen='b')\n # Draw green circles indicating start of event\n self.p1.plot(self.t[startpoints], self.data[self.sig][startpoints], pen=None, symbol='o', symbolBrush='g', symbolSize=10)\n # Draw red circles indicating end of event\n self.p1.plot(self.t[endpoints], self.data[self.sig][endpoints], pen=None, symbol='o', symbolBrush='r', symbolSize=10)\n #self.p1.plot(self.t[startpoints-10], localBaseline, pen=None, symbol='x', symbolBrush='y', symbolSize=10)\n \n # choice only data for current file name\n try:\n self.p2.data = self.p2.data[np.where(np.array(self.sdf.fn) != self.matfilename)]\n except:\n IndexError\n #Data frame for event info storage\n self.sdf = self.sdf[self.sdf.fn != self.matfilename]\n \n #Panda series with file name of data loaded (without ending .dat or some other) \n #repeated numberofevents times\n fn = pd.Series([self.matfilename, ] * numberofevents)\n \n #Same color identification repeated numberofevents\n color = pd.Series([self.cb.color(), ] * numberofevents)\n \n self.sdf = self.sdf.append(pd.DataFrame({'fn': fn, 'color': color, 'deli': deli,\n 'frac': frac, 'dwell': dwell,\n 'dt': dt, 'startpoints': startpoints,\n 'endpoints': endpoints, 'baseline': localBaseline}), 
ignore_index=True)\n \n # Create Scatter plot with\n # x = log10(dwell)\n # y = current drop / current at start\n #self.p2.addPoints(x=np.log10(dwell), y=frac, symbol='o', brush=(self.cb.color()), pen=None, size=10)\n self.p2.addPoints(x=dwell, y=frac,\n symbol='o', brush=(self.cb.color()), pen=None, size=10)\n self.w1.addItem(self.p2)\n self.w1.setLogMode(x=False, y=False)\n self.p1.autoRange()\n self.w1.autoRange()\n self.ui.scatterplot.update() # Replot Scatter plot\n # Set y - axis range\n self.w1.setRange(yRange=[0, 1])\n \n # Pandas series of colors. \n colors = self.sdf.color\n # If we have data from different experiments and different analyte\n # we can change the color for them \n for i, x in enumerate(colors):\n\n # Preparation for distribution histogram of fraction. Different color\n # Corresponds to different experiments\n # For better undarstanding see Feng et. al Indentification of single nucliotides\n # in MoS2 nanopores - different nucliotides for different colors \n # frac = current drop / current at start \n fracy, fracx = np.histogram(self.sdf.frac[self.sdf.color == x],\n bins=np.linspace(0, 1, int(self.ui.fracbins.text())))\n # Create pyqtgraph hist of Fraction data\n hist = pg.PlotCurveItem(fracx, fracy , stepMode = True, \n fillLevel=0, brush = x, pen = 'k') \n #Plot Frac histogram \n self.w2.addItem(hist) \n \n\n # Preparation for distribution histogram of Current drop in nA.\n # Idea of color choice is the same as in histogram above.\n deliy, delix = np.histogram(self.sdf.deli[self.sdf.color == x], \n bins=np.linspace(float(self.ui.delirange0.text()) *1e-9, \n float(self.ui.delirange1.text()) * 1e-9, \n int(self.ui.delibins.text())))\n # Create pyqtgraph hist of Deli data\n #hist = pg.BarGraphItem(height=deliy, x0=delix[:-1], x1=delix[1:], brush=x)\n hist = pg.PlotCurveItem(delix, deliy , stepMode = True, \n fillLevel=0, brush = x, pen = 'k')\n self.w3.addItem(hist) #Deli histogram plot\n self.w3.setRange(xRange=[float(self.ui.delirange0.text()) * 10 ** -9,\n float(self.ui.delirange1.text()) * 10 ** -9])\n \n # Preparation for distribution histogram of length of events expressed as\n # end[i] - start[i] in sec..\n # Idea of color choice is the same as in histogram above.\n #linspace for bins\n print('dwell = ' + str(self.sdf.dwell))\n bins_dwell = np.linspace(float(self.ui.dwellrange0.text()) * 1e-6, \n float(self.ui.dwellrange1.text()) * 1e-6, \n int(self.ui.dwellbins.text()))\n\n dwelly, dwellx = np.histogram((self.sdf.dwell[self.sdf.color == x]),\n bins=bins_dwell,range=(bins_dwell.min(),\n bins_dwell.max()))\n hist = pg.PlotCurveItem(dwellx, dwelly , stepMode = True, \n fillLevel=0, brush = x, pen = 'k')\n self.w4.addItem(hist)\n\n \n # Preparation for distribution histogram of start[i+1] - start[i] in sec. 
\n # \"Frequency\" of events expressed as\n # Idea of color choice is the same as in histogram above.\n\n dty, dtx = np.histogram(self.sdf.dt[self.sdf.color == x],\n bins=np.linspace(float(self.ui.dtrange0.text()), \n float(self.ui.dtrange1.text()),\n int(self.ui.dtbins.text())))\n hist = pg.PlotCurveItem(dtx, dty , stepMode = True, \n fillLevel=0, brush = x, pen = 'k')\n self.w5.addItem(hist) #Dt histogram plot", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def plot(self, csvDataset = None):\n for item in self.data_array:\n item.plot()\n # If csvDataset is not None, plots also the file\n csvDataset.plot(sampleName=item.file_name)", "def plot_data(fname):\n if not os.path.isfile(fname):\n print('No data has been generated yet, aborting...')\n sys.exit(1)\n\n with open(fname, 'r') as fd:\n data = json.load(fd)\n\n x = np.arange(0, max(data, key=lambda e: e[0])[0], 1)\n\n const = .55e-8\n func = lambda x: const * x**3\n\n plt.plot(\n *zip(*data),\n label=r'ShRec3D data points',\n linestyle='None', marker='h'\n )\n plt.plot(x, func(x), label=r'$ %.0e \\cdot x^3$' % const)\n\n 
plt.title(r'Complexity ($\\in \\Theta\\left(x^3\\right)$) visualization of ShRec3D')\n plt.xlabel('loci number')\n plt.ylabel('execution time (seconds)')\n plt.legend(loc='best')\n\n plt.savefig('time_comparison.png', dpi=300, bbox_inches='tight')\n plt.show()" ]
[ "0.73381495", "0.7053424", "0.70268565", "0.69449216", "0.69063354", "0.68788403", "0.6830094", "0.68118745", "0.66952527", "0.66853476", "0.6675326", "0.6637472", "0.6637472", "0.6637472", "0.6637472", "0.6637472", "0.663149", "0.66256434", "0.6610606", "0.6609929", "0.66070247", "0.6597106", "0.65903115", "0.658308", "0.6560318", "0.65484166", "0.65452933", "0.65443283", "0.65409034", "0.6538096" ]
0.7893361
0
Bind initial data to a formset
def bind_formset(formset):
    if formset.is_bound:
        # do nothing if the formset is already bound
        return formset
    bindData = {}
    # the formset.get_default_prefix() and form.add_prefix() methods add in the
    # dict keys that uniquely identify the various form fields with the individual
    # instance data
    # add formset management form data
    bindData[formset.get_default_prefix()+"-TOTAL_FORMS"] = str(formset.management_form['TOTAL_FORMS'].value())
    bindData[formset.get_default_prefix()+"-INITIAL_FORMS"] = str(formset.management_form['INITIAL_FORMS'].value())
    bindData[formset.get_default_prefix()+"-MIN_NUM_FORMS"] = str(formset.management_form['MIN_NUM_FORMS'].value())
    bindData[formset.get_default_prefix()+"-MAX_NUM_FORMS"] = str(formset.management_form['MAX_NUM_FORMS'].value())
    for form in formset:
        if form.instance:
            # field data, get these values from the instance
            for fieldName, fieldValue in form.fields.iteritems():
                try:
                    bindData[form.add_prefix(fieldName)] = getattr(form.instance, fieldName)
                except AttributeError:
                    # this is an added field (i.e. DELETE), not derived from the
                    # model, do nothing with it, since we are only binding instance
                    # data to the form
                    pass
            # hidden field data, get these from the field initial values set
            # when the form was created
            for field in form.hidden_fields():
                bindData[form.add_prefix(field.name)] = field.field.initial
    # create a new bound formset by passing in the bindData dict, this looks
    # to the formset constructor like a request.POST dict
    newFormset = formset.__class__(bindData, instance=formset.instance,
                                   error_class=formset.error_class)
    return newFormset
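
A hedged usage sketch follows; only bind_formset itself comes from the document, while the Author/Book models and the inline formset wiring are invented for illustration. Note the helper is Python 2 era code (fields.iteritems()), so on Python 3 one would read form.fields.items() instead.

from django.forms import inlineformset_factory
from myapp.models import Author, Book    # hypothetical parent/child models

BookFormSet = inlineformset_factory(Author, Book, fields=('title',))

author = Author.objects.first()
unbound = BookFormSet(instance=author)    # no data kwarg, so is_bound is False
bound = bind_formset(unbound)             # returns a formset that acts as if POSTed
assert bound.is_bound                     # instance data now sits in the bound data dict
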
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_formset_data(self, request, step, formset):\n return None", "def test_view_initializes_formset_with_audit_initial_data(self):\n audit = AuditFactory(num_doctypes=3) # fixture\n\n formset = DocumentFormSet(audit_pk=audit.pk)\n\n expected_labels = {dt.name for dt in audit.required_doctypes.all()}\n forms_labels = {form.fields['file'].label for form in formset}\n self.assertSetEqual(expected_labels, forms_labels)\n\n expected_doctype_pks = {dt.pk for dt in audit.required_doctypes.all()}\n forms_pks = {form.initial['doctype'] for form in formset}\n self.assertSetEqual(expected_doctype_pks, forms_pks)", "def test_missing_initial_fieldsets(self):\n original_initial_fieldsets = self.form.fieldsets\n print(\"========================= TEST UNABLE TO DELETE THE PROPERTY FOR TESTING ========================\")\n print(original_initial_fieldsets)\n print(\"--------------------------------------\")\n delattr(self.form, 'fieldsets')\n response_fieldsets = self.form.make_fieldsets()\n print(response_fieldsets)\n\n setattr(self.form, 'fieldsets', original_initial_fieldsets)", "def get_formset(self, **kwargs):\n return self.formsets[kwargs.get('prefix')](\n data=self.get_form_kwargs().get('data'),\n **kwargs\n )", "def process_show_formset(self, request, step, formset):\n pass", "def get_context_data(self, **kwargs):\n if \"formset\" not in kwargs:\n kwargs[\"formset\"] = self.get_formset()\n return super().get_context_data(**kwargs)", "def initial_form_data(self, request, step, form):\n return None", "def get_formset_form(formset: forms.BaseFormSet) -> forms.BaseForm:\n return formset._construct_form(0, **formset.get_form_kwargs(0)) # type: ignore", "def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n if hasattr(self, 'work_ids'):\n form.base_fields['works'].initial = self.work_ids\n return form", "def populate_form(self, **kwargs):\n for name, value in kwargs.items():\n self.populate_field(name, value)", "def test_make_fieldsets_uses_prep_fields(self):\n original_called_prep_fields = self.form.called_prep_fields = False\n full_fieldsets = self.form.make_fieldsets()\n\n self.assertFalse(original_called_prep_fields)\n self.assertIsInstance(full_fieldsets, (list, tuple))\n self.assertIsNotNone(getattr(self.form, '_fieldsets', None))\n self.assertTrue(self.form.called_prep_fields)\n\n self.form.called_prep_fields = original_called_prep_fields", "def test_make_fieldsets_saves_results(self):\n original_initial_fieldsets = getattr(self.form, 'fieldsets', None)\n initial_fieldsets = deepcopy(original_initial_fieldsets)\n original_computed_fieldsets = getattr(self.form, '_fieldsets', None)\n original_summary = getattr(self.form, '_fs_summary', None)\n self.assertIsNotNone(original_initial_fieldsets)\n self.assertIsNone(original_computed_fieldsets)\n self.assertIsNone(original_summary)\n response_fieldsets = self.form.make_fieldsets()\n label, summary = response_fieldsets.pop()\n self.assertIsNotNone(self.form._fieldsets)\n self.assertIsNotNone(self.form._fs_summary)\n self.assertEqual('summary', label)\n self.assertEqual(summary, self.form._fs_summary)\n self.assertEqual(response_fieldsets, self.form._fieldsets)\n self.assertEqual(initial_fieldsets, self.form.fieldsets)\n\n self.form.fieldsets = original_initial_fieldsets\n self.form._fieldsets = original_computed_fieldsets\n self.form._fs_summary = original_summary\n if original_computed_fieldsets is None:\n del self.form._fieldsets\n if original_summary is None:\n del self.form._fs_summary", "def 
_construct_form(self, i, **kwargs):\n form = super(NestedFormSet, self)._construct_form(i, **kwargs)\n form.empty_permitted = False\n return form", "def save_formset( self, request, form, formset, change ):\n formset.save()", "def get_form(self, form_class):\n formset = super(ChallengeVerifyView, self).get_form(form_class)\n challenges = Challenge.objects.select_related(\n 'candidate__user__userprofile', 'challenge_type').filter(\n verifying_user=self.request.user, candidate__term=self.display_term)\n for i, challenge in enumerate(challenges):\n formset[i].instance = challenge\n formset[i].initial = {\n 'verified': challenges[i].verified,\n 'reason': challenges[i].reason}\n return formset", "def get_initial(self, **kwargs):\n\n # Get an entity\n entity = get_entity(\n self.request.session.get('token', False),\n self.kwargs['aiid'],\n self.kwargs['entity_name']\n )\n\n # Prepare data for the form\n # TODO: should be a better way to do it in the form itself?\n entity['entity_values'] = settings.TOKENFIELD_DELIMITER.join(\n entity['entity_values']\n )\n\n self.initial = entity\n\n return super(EntitiesUpdateView, self).get_initial(**kwargs)", "def prepare(self, form):\n \n return form", "def set_form(self, form):\n self.parameters = form", "def get_context_data(self, **kwargs):\n context= super(Reasignar,self).get_context_data(**kwargs)\n current_us = self.get_object().userstory_set.all()\n formset= self.UserStoryFormset(self.request.POST if self.request.method == 'POST' else None, initial=[{'userStory':us, 'flujo':us.actividad.flujo, 'desarrollador':us.desarrollador} for us in current_us])\n self.__filtrar_formset__(formset)\n context['current_action'] = 'Editar'\n context['formset'] = formset\n return context", "def get_form_class(self):\n\t\treturn formset_factory(super(FormsetMixin, self).get_form_class(), **self.get_formset_kwargs())", "def get_model_as_formset(self, client_id):\n # Basement\n formset = {\n 'form-TOTAL_FORMS': str(len(self.storage)),\n 'form-INITIAL_FORMS': u'0',\n }\n # Fill the formset\n for record_index, record in enumerate(self.storage):\n prefix = 'form-%i' % record_index\n row = {'%s-client' % prefix: client_id,}\n for model_index, model_row in enumerate(MODEL_MAP_RAW):\n name, delegate, title, action, static = model_row\n key = '%s-%s' % (prefix, name)\n value = record[model_index]\n value_t = type(value)\n\n if value is None: # send None as empty string\n value = str()\n elif value_t is datetime:\n value = dt2str(value)\n elif value_t is date:\n value = date2str(value)\n elif value_t in (int, float):\n value = str(value)\n elif value_t is dict:\n if 'id' in value:\n value = value['id']\n else:\n value = 'no id found'\n\n row.update( {key: value} )\n formset.update( row )\n return formset", "def get_formset(self, formset_class=None):\n if formset_class is None:\n formset_class = self.get_formset_class()\n return formset_class(**self.get_formset_kwargs())", "def __init__(self, *args, **kwargs):\n\n\t\tsuper(CustomStatusFormset, self).__init__(*args, **kwargs)\n\n\t\tfor form in self.forms:\n\t\t\tfor field in form.fields:\n\t\t\t\tform.fields[field].widget.attrs.update({'class': 'form-control'})", "def setUpFormData(self):\n self.formData = {'labGroup': '5', 'abbrev': 'etoh', 'name': 'ethanol', 'CAS_ID': '64-17-5', 'CSID': '682',\n 'chemicalClasses': [ChemicalClass.objects.get(label='Solv').pk]}", "def get_initial(self):\n initial = super(PresentarView, self).get_initial()\n self.preguntas = self.get_question_list(self.diagnostico)\n initial.extend([{'pregunta': 
pregunta} for pregunta in self.preguntas])\n return initial", "def __init__(self, *args, **kwargs):\n if 'instance' in kwargs:\n initial = kwargs.setdefault('initial', {})\n # The widget for a ModelMultipleChoiceField expects a list of primary key for the selected data.\n initial['members'] = [\n t.pk for t in kwargs['instance'].recipient_set.all()\n ]\n\n forms.ModelForm.__init__(self, *args, **kwargs)", "def render_formset(self, request, step, formset, context):\n\n\n return render_to_response(self.get_template(request, step, formset),\n context, RequestContext(request))", "def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"event_list\"] = self.queryset\n context[\"formset\"] = self.form_class(queryset=self.queryset)\n return context", "def set_initial_values(self):\n\n pass", "def get_initial_data(self):\r\n data = {}\r\n for name, field in self.fields.items():\r\n if hasattr(field, 'widget') and 'ng-model' in field.widget.attrs:\r\n data[name] = self.initial and self.initial.get(name) or field.initial\r\n return data" ]
[ "0.75171286", "0.66315085", "0.6326573", "0.6272179", "0.62648076", "0.6243689", "0.6000025", "0.59879285", "0.59822977", "0.59440255", "0.5873928", "0.58384067", "0.58326924", "0.5798155", "0.57395196", "0.57047033", "0.570038", "0.56947863", "0.56623995", "0.5629219", "0.56098956", "0.5607753", "0.5584087", "0.5582158", "0.5544296", "0.5541061", "0.55264944", "0.55019444", "0.54773825", "0.5474231" ]
0.7631694
0
Take an email and put it in the inbox of the client it is addressed to.
def send(self, email):
    client = self.clients[email.addressee]
    client.receive(email)
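
To make the routing concrete, here is a self-contained sketch; the Mailman class name and the Email/Client stubs are assumptions, since the document stores only the send method itself, which presumes a clients dict keyed by addressee and recipients exposing receive().

class Email:
    def __init__(self, addressee, body):
        self.addressee = addressee        # key used to look up the recipient
        self.body = body

class Client:
    def __init__(self):
        self.inbox = []
    def receive(self, email):
        self.inbox.append(email)          # delivery lands in this client's inbox

class Mailman:
    def __init__(self):
        self.clients = {}
    def send(self, email):                # the method from the document
        client = self.clients[email.addressee]
        client.receive(email)

mailman = Mailman()
mailman.clients['alice'] = Client()
mailman.send(Email('alice', 'hello'))     # routed via self.clients[email.addressee]
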
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receive(self, email):\n self.inbox += email", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def compose(self, msg, recipient):\n email = Email(msg, self, recipient)\n self.mailman.send(email)", "def send_email(to, subject, body, attachment=None):\n outlook = win32.Dispatch('outlook.application')\n new_mail = outlook.CreateItem(0)\n new_mail.Subject = subject\n new_mail.HTMLBody = body\n new_mail.To = to\n\n if attachment:\n new_mail.Attachments.Add(attachment)\n\n new_mail.Send()", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def sendmail(self, *args, **kwargs):\n #FUTURE: the EmailMessage attributes could be found by introspecting\n # the encoded message.\n message = mail.EmailMessage('SUBJECT', 'BODY', 'FROM', ['TO'])\n mail.outbox.append(message)", "def _send(self, email_message): \n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def _send(self, email_message):\n if not email_message.to:\n return False\n try:\n if (isinstance(email_message,gmail.EmailMessage)):\n e = message\n elif (isinstance(email_message,mail.EmailMessage)):\n e = gmail.EmailMessage(sender=email_message.from_email,\n to=email_message.to,\n subject=email_message.subject,\n body=email_message.body)\n if email_message.extra_headers.get('Reply-To', None):\n e.reply_to = 
email_message.extra_headers['Reply-To']\n if email_message.bcc:\n e.bcc = list(email_message.bcc)\n #TODO - add support for html messages and attachments...\n e.send()\n except:\n if not self.fail_silently:\n raise\n return False\n return True", "def send(from_addr, from_name, to_list, subject, message, mailhost='mail.eecis.udel.edu'): \n s = smtplib.SMTP(mailhost)\n s.helo(mailhost)\n data = 'To: %s\\n' % (','.join(to_list))\n data += 'From: \"%s\" <%s>\\n' % (from_name, from_addr)\n data += 'Subject: %s\\n' % (subject)\n data += message\n data += \"\\n\\n\"\n s.sendmail(from_addr, to_list, data)", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def ad_inq_email(email, body, name='', reply_to = ''):\r\n return _feedback_email(email, body, Email.Kind.ADVERTISE, name = name,\r\n reply_to = reply_to)", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def enter_email(self, email):\n self.selib.input_text(self.locator.email, email)", "def send_email(to_address, from_address, subject, body):\n mail = \"\"\"echo \"From: %(from)s\\r\\nDate: $(date)\\r\\nSubject: %(subject)s\\r\\nMIME-Version: 1.0\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n%(body)s\" | ssmtp %(to)s\"\"\" % {\n \"to\": to_address,\n \"from\": from_address,\n \"subject\": subject,\n \"body\": body,\n }\n cmd(mail)", "async def handle_email(self, email):\n\t\tif 'logdir' in self.log_settings:\n\t\t\tfilename = 'email_%s_%s.eml' % (datetime.datetime.utcnow().isoformat(), str(uuid.uuid4()))\n\t\t\twith open(str(Path(self.log_settings['logdir'], 'emails', filename).resolve()), 'wb') as f:\n\t\t\t\tf.write(email.email.as_bytes())\n\n\t\tawait self.log('You got mail!')", "def send_email(recipient, subject, body) -> None:\n port = 465\n smtp_server = \"smtp.gmail.com\"\n sender_email = user['username']\n password = user['password']\n\n message = MIMEMultipart()\n message['From'] = sender_email\n message['To'] = recipient\n message['Subject'] = subject\n body = MIMEText(body) \n message.attach(body)\n\n server = smtplib.SMTP_SSL(smtp_server, port)\n server.login(sender_email, password)\n server.sendmail(sender_email, recipient, message.as_string())\n server.quit()", "def send_email(self, email):\n\n if not isinstance(email, str):\n raise TypeError('type of email must be str not %s' % type(email))\n\n message = self.get_message(email)\n self.server.send_message(message)", "def from_email(self, from_email):\n\n self._from_email = from_email", "def client_email(self, client_email):\n\n self._client_email = client_email", "def send_email_copy(message):\n receivers = [ receiver for receiver in message.receivers if receiver.player.user.email ]\n subject = message.header\n body = message.message\n if not (receivers):\n return\n\n msg = MIMEMultipart('alternative')\n msg['From'] = \"Winter's Oasis <[email protected]>\"\n msg['Subject'] = subject\n msg['Date'] = formatdate(localtime=True)\n\n # HTML email part.\n html_part = MIMEText('text', 'html')\n html_source = Template(HTML_TEMPLATE)\n value_map = {\n 'from' : ', '.join([ sender.name for sender in message.senders ]),\n 'message' : 
escape(unicode(body)).replace('\\n', '<br />'),\n 'recipients' : ', '.join([ receiver.name for receiver in message.receivers ]) }\n html_part.set_payload(html_source.substitute(value_map))\n\n value_map['message'] = unicode(body)\n text_source = Template(TEXT_TEMPLATE)\n body = text_source.substitute(value_map)\n text_part = MIMEText(unicode(body), 'plain', 'utf-8')\n msg.attach(text_part)\n msg.attach(html_part)\n\n for receiver in receivers:\n msg['To'] = receiver.db.email\n sendmail(SMTP_HOST, MAIL_FROM, receiver.player.user.email, msg.as_string())", "async def add_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list.keys():\n await ctx.send(\n \"There is already an email address configured, \"\n \"Please use update command to update it..!\")\n return\n else:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been configured successfully..!\")", "def _addressitem_from_email(emailstr):\n if len(emailstr) < 2:\n raise IOError(\"Error parsing email. Aborting.\")\n _name, _email = extract_sender(emailstr)\n return AddressItem(_email_address=_email, _name=_name)", "def send_email(self, froma, addrs, message=\"\"):\n with open(os.path.join(self.cache, \"notice.txt\"), 'w') as fd:\n fd.write(\"To \")\n fd.write(\" \".join(addrs))\n fd.write(\"\\n\")\n fd.write(\"From \"+froma)\n fd.write(\"\\n\")\n fd.write(message)", "def send_email(self, message):\n pass", "def get_inbox(character):\n messages = get_messages(character)\n return [ Mail(message) for message in messages ]", "def send_email(recipient,subject,message):\n msg = MIMEText(message)\n me = '[email protected]'\n \n msg['Subject'] = subject\n msg['From'] = me\n msg['To'] = recipient\n\n # Send the message via our own SMTP server, but don't include the\n # envelope header.\n username='cryolt2'\n password='Diamond=Geil!'\n\n server = smtplib.SMTP('smtp.gmail.com:587') \n server.starttls() \n server.login(username,password) \n server.sendmail(me, recipient, msg.as_string()) \n server.quit()" ]
[ "0.73855925", "0.61520404", "0.61520404", "0.59711707", "0.588577", "0.58609664", "0.5845908", "0.57926154", "0.57716554", "0.5729906", "0.57106847", "0.5706507", "0.5701675", "0.56726784", "0.5661653", "0.56558543", "0.5653912", "0.5635027", "0.56303835", "0.56113946", "0.56107146", "0.5608595", "0.56056863", "0.5578501", "0.5572308", "0.55716974", "0.5549148", "0.5547209", "0.55460197", "0.55426073" ]
0.6936672
1
Takes a client object and client_name and adds it to the clients instance variable.
def register_client(self, client, client_name):
    self.clients[client_name] = client
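
Again a hedged sketch: register_client is the natural companion to the send method above, populating the clients mapping that send() later reads from; the Mailman wrapper and the Client stub are assumed, not stored in the document.

class Client:
    def receive(self, email):
        pass                              # stub; a real client would store the email

class Mailman:
    def __init__(self):
        self.clients = {}
    def register_client(self, client, client_name):   # the method from the document
        self.clients[client_name] = client

mailman = Mailman()
mailman.register_client(Client(), 'bob')  # 'bob' is now addressable by send()
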
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_clients(client_name): # Crear nuevo Cliente\n global clients\n\n if client_name not in clients:\n clients.append(client_name)\n else:\n print('The client name is alredy in the client\\'s list')", "def add_client(name):\n return create_client(name)", "def update_client(client_name, updated_client_name): # Operacion modificar\n global clients\n\n if client_name in clients:\n index = clients.index(client_name)\n clients[index] = updated_client_name\n else:\n print(\"Client isn\\'t in the client list\")", "def register(self, client):\n self.clients.append(client)", "def add_client(self, client):\n\n now = int(time.time())\n\n self.send_line(\"%s N %s 1 %d %s %s +ik ]]]]]] %s :%s\" %\\\n (self.config[\"numeric\"], client.nick, now, client.user,\n client.host, client.uid, client.gecos))", "def __init__(self, name, client):\n self.name = name\n self.client = client", "def addClient(self, msg):\r\n guiControlClientId = msg[Messages.FIELD_GUI_CONTROL]\r\n if guiControlClientId != None:\r\n self.controllingClient.clear()\r\n self.controllingClient[guiControlClientId] = msg[Messages.FIELD_GUI_CONTROL_HOST]\r\n LOG(\"Set a new controlling client: \" + repr(guiControlClientId) + \" - \" +\r\n repr(self.controllingClient[guiControlClientId]))\r\n guiMonitoringClientId = msg[Messages.FIELD_GUI_LIST]\r\n # This list only contain one client reference\r\n if guiMonitoringClientId != None:\r\n self.monitoringClients[guiMonitoringClientId] = msg[Messages.FIELD_GUI_HOST_LIST]\r\n LOG(\"Added a new monitoring client: \" + repr(guiMonitoringClientId) + \" - \" +\r\n repr(self.monitoringClients[guiMonitoringClientId]))", "def __init__(self, client, name):\n if not isinstance(client, couch.Client):\n raise Exception(\"'client' arg must be instance of couch.Client\")\n\n self.client = client\n self.name = name", "def __add_clients__(self, r, new_clients):\r\n if set(r.clients).intersection(set(new_clients)):\r\n print >> sys.stderr, 'ERROR: clients intersect!'\r\n print >> sys.stderr, ' RCLIENTS of', r, [(n, i, type(n), id(n))\r\n for n, i in r.clients]\r\n print >> sys.stderr, ' NCLIENTS of', r, [(n, i, type(n), id(n))\r\n for n, i in new_clients]\r\n assert not set(r.clients).intersection(set(new_clients))\r\n r.clients += new_clients", "def clients():\n pass", "def add_client(self, cli):\n if self.clients.count(cli) is 0:\n self.clients.append(cli)", "def delete_client(client_name):\n global clients\n\n if client_name in clients:\n clients.remove(client_name)\n else:\n print(\"Client isn\\'t in the client list\")", "def list_clients(): # Listar clientes\n global clients\n\n for idx, client in enumerate(clients):\n print('{}: {}'.format(idx, client))", "def __init__(self, client, name):\n self._client = client\n self._attr_name = name", "def init_client(self, client):\n self.client = client", "def send_clients(self, client):\n listClients = ''\n for client in self.clients:\n listClients += str(client) + ', '\n for client in self.clients:\n self.send_message(listClients, client.get_socket())", "def manage_client(client):\r\n #information about the player\r\n msg_client('Ora inserisci il tuo nome: ', client)\r\n name = client.recv(BUFSIZ)\r\n clients[client] = name\r\n \r\n init_player(client)\r\n \r\n #get player's role\r\n msg_client('Il tuo ruolo è: ' + str(roles[client]), client)\r\n msg_client('Scrivi {quit} per uscire dal gioco', client)\r\n \r\n insert_number_player(client)\r\n \r\n start_question(client)\r\n \r\n check_player_ready(client)\r\n \r\n start_game(client)\r\n \r\n 
search_winner()\r\n \r\n close_client(client)", "def on_open(self, info):\n # When new client comes in, will add it to the clients list\n clients.add(self)", "def change_client_name(self, name, client):\n if self.name_is_unique(name):\n client.set_name(name)\n self.send_message('Usuario actualizado exitosamente.', client.get_socket())\n else:\n self.send_message('Nombre repetido.', client.get_socket())", "def __addNewClients(self):\n while True:\n client = self.nextPendingConnection()\n if (client == None):\n break\n \n # Add this socket to our list of clients\n self.__clients.append(client);\n \n # When the client disconnects, remove it from our list of clients.\n QObject.connect(client, SIGNAL(\"disconnected()\"), self.__removeClient)\n\n print \"connection from\", self.__clientName(client)", "def get_client_name(self, obj):\n\t\treturn obj.client.name", "def client_name(self, client_name):\n if client_name is None:\n raise ValueError(\"Invalid value for `client_name`, must not be `None`\") # noqa: E501\n\n self._client_name = client_name", "def start_client(self, name, address):\n if name not in self.clients:\n self.clients[name] = ProcessorClient()\n self.clients[name].connect(address)", "def remove_client(self, client):\n self.clients.remove(client)\n #print(\"removing:\" + str(client))", "def client_addresses(self, client_addresses):\n\n self._client_addresses = client_addresses", "def _add_clients(num_of_clients, version=None, version_separator=''):\n # TODO make a generic function that _add_clients can use\n if version and not isinstance(version, str):\n raise ValueError(\"version must be type string\")\n\n if not setup_bootstrap.pods:\n raise Exception(\"Could not find bootstrap node\")\n\n bs_info = setup_bootstrap.pods[0]\n\n client_key = 'client'\n if version:\n client_key += f'{version_separator}{version}'\n\n cspec = get_conf(bs_info, testconfig[client_key], testconfig['genesis_delta'])\n pods_names = add_multi_clients(testconfig, setup_bootstrap.deployment_id, cspec, size=num_of_clients)\n return pods_names", "def writeDataToClient(self, client, name):\n\n index = len(self.messagesList)\n\n while name in self.connected:\n if len(self.messagesList) > index:\n if self.messagesList[index][1] != name:\n client.send(self.messagesList[index][0])\n index += 1\n else:\n index += 1", "def test_list_clients(self):\n pass", "def test_01_add_client(self):\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n except DBException as error:\n print(error.get_message())", "def handle_client(client, name): # Takes client socket as argument.\n\n welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name\n client.send(bytes(welcome, \"utf8\"))\n msg = \"%s has joined the chat!\" % name\n broadcast(bytes(msg, \"utf8\"))\n clients[client] = name\n\n while True:\n msg = client.recv(BUFSIZ)\n if msg != bytes(\"{quit}\", \"utf8\"):\n broadcast(msg, name+\": \")\n else:\n client.send(bytes(\"{quit}\", \"utf8\"))\n client.close()\n del clients[client]\n broadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\n break" ]
[ "0.752612", "0.7367797", "0.7360235", "0.7343854", "0.6815909", "0.6779203", "0.67535335", "0.66095585", "0.6547394", "0.65369165", "0.6477211", "0.6461652", "0.644304", "0.6378969", "0.63568664", "0.6327192", "0.63241255", "0.63210046", "0.6295234", "0.623358", "0.6229814", "0.62281394", "0.6208052", "0.6152236", "0.6143943", "0.6065273", "0.6027914", "0.5996523", "0.59938145", "0.5977559" ]
0.79341835
0
Send an email with given message msg to the given recipient.
def compose(self, msg, recipient):
    email = Email(msg, self, recipient)
    self.mailman.send(email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def send_message(self, recipient: str, msg: str) -> None:\n print(f'{self._name} sends a message to {recipient}: {msg}')\n self._group.private_message(sender=self, recipient_name=recipient, msg=msg)", "def send_email(msg):\n\tprint(\"sendEmail: \" + msg)", "def sendmail(message, recipient):\n \n import smtplib\n\n fromaddr = \"[email protected]\"\n toaddrs = recipient + \"@someaddress.com\"\n\n # Add the From: and To: headers at the start!\n msg = (\"From: %s\\r\\nTo: %s\\r\\n\\r\\n\" %(fromaddr, \", \".join(toaddrs)))\n msg = msg + str(message[0]) + message[1]\n\n server = smtplib.SMTP('localhost')\n server.set_debuglevel(0)\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()", "def send(e_from, e_to, e_subj, msg):\n msg = smart_unicode(msg)\n e_subj = smart_unicode(e_subj)\n e_from = smart_unicode(e_from)\n e_to = smart_unicode(e_to)\n\n # Use ASCII if possible, otherwise UTF-8. This is done to avoid\n # base64, which is ugly and not human-readable.\n charset = 'utf8'\n try:\n msg.encode('ascii')\n charset = 'ascii'\n except:\n pass\n\n msg = MIMEText(msg.encode(charset), 'html', _charset=charset)\n msg['Subject'] = e_subj\n msg['From'] = e_from\n msg['To'] = e_to\n msg = msg.as_string()\n\t\n try:\n s = smtplib.SMTP(HOST)\n s.sendmail(e_from, [e_to], msg)\n s.quit()\n return 1\n except:\n \treturn None", "def send_mail(self, msg):\n mail_queue.put(msg)", "async def send(self, msg: Message, recipient: int):\n if not self._session:\n await self._create_session()\n \n if isinstance(msg, str):\n msg = Message(msg)\n assert isinstance(msg, Message)\n msg.set_recipient(recipient)\n msg.set_sender(self._network._robot.id)\n await self._session.put(self._network.SERVER_ADDR + '/api/send', json=msg.to_dict())\n return msg", "def send_email(recipient, subject, message):\n from_email = os.getenv(\"EMAIL_SENDER\")\n status = send_mail(subject, message, from_email, [recipient])\n return status", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def send_mail(to, sender, subject, message):\n\n msg = MIMEText(message)\n msg['From'] = sender\n msg['To'] = to\n msg['Subject'] = subject\n body = {'raw': base64.urlsafe_b64encode(msg.as_bytes()).decode()}\n MESSAGES.send(userId='me', body=body).execute()", "def send_email(self, to_addr, cc_addr, bcc_addr, topic, text, callback):\n\n\t\tmail_data = {\n\t\t\t\"key\": self.key,\n\t\t\t\"message\": self._prepare_message(to_addr, cc_addr, bcc_addr, topic, text)\n\t\t}\n\t\tbody = tornado.escape.json_encode(mail_data)\n\n\t\trequest = HTTPRequest(\n\t\t\turl=config.MANDRILL_URL + \"/messages/send.json\",\n\t\t\tconnect_timeout=config.TIMEOUT, request_timeout=config.TIMEOUT,\n\t\t\tbody=body, method='POST', validate_cert = False)\n\n\t\tresponse = yield tornado.gen.Task(\n\t\t\tself.http_client.fetch, request)\n\n\t\tbody = json.loads(response.body)\n\t\tif (int(response.code) == config.RESPONSE_OK\n\t\t\t\tand body[0]['status'] == 'sent'):\n\t\t\t# Each sent email gets assigned a different id. 
First (To address) used.\n\t\t\temail_id = body[0]['_id']\n\t\t\tcallback(config.SEND_STATUS.SENT, email_id)\n\t\t\treturn\n\t\telse:\n\t\t\tcallback(config.SEND_STATUS.FAILED, None)\n\t\t\treturn", "def send_email(to_address, from_address, subject, body):\n mail = \"\"\"echo \"From: %(from)s\\r\\nDate: $(date)\\r\\nSubject: %(subject)s\\r\\nMIME-Version: 1.0\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n%(body)s\" | ssmtp %(to)s\"\"\" % {\n \"to\": to_address,\n \"from\": from_address,\n \"subject\": subject,\n \"body\": body,\n }\n cmd(mail)", "def send_msg(\n self,\n msg: str,\n peer_id: int\n ) -> NoReturn:\n self.call_method(\n 'messages.send',\n dict(\n message=msg, peer_id=peer_id,\n disable_mentions=1, random_id=0))", "def send_email(recipient,subject,message):\n msg = MIMEText(message)\n me = '[email protected]'\n \n msg['Subject'] = subject\n msg['From'] = me\n msg['To'] = recipient\n\n # Send the message via our own SMTP server, but don't include the\n # envelope header.\n username='cryolt2'\n password='Diamond=Geil!'\n\n server = smtplib.SMTP('smtp.gmail.com:587') \n server.starttls() \n server.login(username,password) \n server.sendmail(me, recipient, msg.as_string()) \n server.quit()", "def sendmail(self, from_addr, to_addrs, msg, mail_options=()):\n\n # -i flag: do NOT treat bare dot as EOF\n cmd = [self.SENDMAIL, '-i']\n if from_addr: # envelope sender?\n cmd.append('-f%s' % from_addr)\n if isinstance(to_addrs, tuple): # be liberal\n to_addrs = list(to_addrs)\n elif not isinstance(to_addrs, list):\n to_addrs = [to_addrs]\n\n if sys.version_info[0] >= 3 or isinstance(msg, unicode):\n msg = msg.encode('utf-8')\n # need to force 8BIT (if length changed)?\n\n if '8bitmime' in mail_options:\n cmd.append('-B8BITMIME')\n # avoid shell / quoting issues\n proc = subprocess.Popen(cmd + to_addrs, shell=False,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg)\n ret = proc.returncode # not clearly documented?!\n if self.debug:\n print(\"ret: %d\" % ret)\n print(\"stdout:\")\n print(out)\n print(\"stderr:\")\n print(err)\n\n if ret != OS_OK:\n # values suggested by Mateo Roldan:\n raise SendmailException(ret, out, err)\n return {}", "def emailMessage(subjectTitle, recipientEmail, bodyMessage, attachmentName = None, attachmentFilePath = None):\n msg = Message(\n subjectTitle,\n sender = os.getenv(tag+'email',base_config['email']) \n )\n for email in recipientEmail: \n msg.add_recipient(email)\n\n msg.body = bodyMessage\n\n if attachmentName is not None and attachmentFilePath is not None:\n with app.open_resource(attachmentFilePath) as fp:\n msg.attach(attachmentName, \"text/plain\", fp.read())\n\n mail.send(msg)", "def send_message(to_ip: str, msg: str):\r\n\t_sending_queue.put((to_ip, msg))", "def send_email(my_email, password, message):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(my_email, password)\n # send from my_email to my_email (from, to, message)\n server.sendmail(my_email, my_email, message)\n server.quit()", "def send_email(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def send_email(self, email_from, email_to, message):\n logging.info(\"Attempting to send email from \" + email_from + \" to \" + email_to)\n self.conn.sendmail(email_from, email_to, message)\n logging.info(\"Email sent\")", "def sendmail(self, message=None, subject=None, recipients=None):\n\n if not recipients:\n recipients = 
self.recipients\n if len(recipients) == 0:\n return False\n if not message:\n message = self.message\n if len(message) == 0:\n return False\n if not subject:\n subject = self.subject\n message = message.replace(\"—\", \" - \")\n message = message.replace(\"”\", \"\\\"\")\n message = '\\n'.join(textwrap.wrap(message,72))\n if not self.session:\n self.session = smtplib.SMTP(self.smtpserver)\n if self.smtpuser:\n self.session.login(self.smtpuser, self.smtppass)\n error = \"\"\n for recipient in recipients:\n if type(recipient) == type([]) or type(recipient) == type(\n ('','')):\n mailaddr = recipient[1]\n recipient = email.utils.formataddr(recipient)\n else:\n realname, mailaddr = email.utils.parseaddr(recipient)\n\n smtpresult = self.session.sendmail(self.sender, [mailaddr],\n self.makeMessage(recipient, message,\n subject))\n if smtpresult:\n for recip in smtpresult.keys():\n error += \"Couldn't delivery mail to: %s Error: %s\\n\" % (\n recip, smtpresult[recip][0], smtpresult[recip][1])\n else:\n print(\"Message sent successfully to %s\" % recipient)\n\n if error != \"\":\n print(\"%s\" % error)", "def send_mail(mail_to: str, subject: str, msg: str, mail_from: str = settings.MAIL_FROM):\n\n if not (settings.MAIL_API and settings.MAIL_API_KEY):\n return False\n\n data = {\n \"to\": mail_to,\n \"from\": mail_from,\n \"subject\": subject,\n \"text\": msg,\n }\n\n request = requests.post(\n f\"{settings.MAIL_API}/messages\", auth=(\"api\", settings.MAIL_API_KEY), data=data,\n )\n return request", "def send(self, msg, recipient, callback=None):\n \n fut = asyncio.run_coroutine_threadsafe(self.coro.send(msg, recipient), self._robot._event_loop)\n if callback is not None:\n def cb(fut):\n try:\n res = fut.result()\n except Exception:\n print(f'Sending message {msg} failed with exception {Exception}. Retrying...', file=sys.stderr)\n self.send(msg, recipient) # Try again\n return\n \n callback(res)\n else:\n def cb(fut):\n try:\n fut.result()\n except Exception:\n print(f'Sending message {msg} failed with exception {Exception}. 
Retrying...', file=sys.stderr)\n self.send(msg, recipient) # Try again\n \n fut.add_done_callback(cb)", "def send_message():\n # @todo validation & error handling.\n sg = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n log(\"Message generated and sent at {}\".format(strftime('%x %H:%M:%S')))\n sg.client.mail.send.post(request_body=build_message())", "def send_email(self, message, from_addr=None, to_addrs=None,\n delay_send=0):\n if not isinstance(message, Message) and isinstance(message, str):\n smtp_meth = 'sendmail'\n if (from_addr is None) or (to_addrs is None):\n raise ValueError('If sending string email, please provide '\n 'from_addr and to_addrs.')\n elif isinstance(message, Message):\n smtp_meth = 'send_message'\n message = message.message\n else:\n raise ValueError('The message argument must either be an '\n 'auto_emailer.emailer.Message object or a string.')\n\n # delay sending by input value\n if delay_send:\n time.sleep(delay_send)\n\n # log in to email client if not already\n if not self._connected:\n self._login()\n\n # handle disconnect and connection errors by\n # quick login and attempt to send again\n try:\n delivery_meth = getattr(self._smtp, smtp_meth)\n delivery_meth(msg=message, from_addr=from_addr,\n to_addrs=to_addrs)\n except (smtplib.SMTPConnectError, smtplib.SMTPServerDisconnected):\n self._login()\n # needs to call getattr() again once it hits\n # here otherwise it will fail\n delivery_meth = getattr(self._smtp, smtp_meth)\n delivery_meth(msg=message, from_addr=from_addr,\n to_addrs=to_addrs)\n finally:\n self._logout()", "def send(self, recipient: Union[str, List[str]]) -> None:\n\n if isinstance(recipient, str):\n recipients = [recipient]\n elif isinstance(recipient, list):\n recipients = recipient\n else:\n raise TypeError('Argument \"recipients\" must be a string or a list of strings.')\n\n message = Message(self._subject, sender=self._sender, recipients=recipients)\n message.body = self._body_plain\n message.html = self._body_html\n\n application = get_app()\n thread = Thread(target=self._send, args=(message, application))\n thread.start()", "def send(self, address_to, message, emailSubject = \"Automated Email\", attachmentFilePath = None):\r\n\t\tmail = self._createEmail(address_to, message, emailSubject)\r\n\t\tif attachmentFilePath != None:\r\n\t\t\tmail.attachment = self._createAttachment(attachmentFilePath)\r\n\t\tsg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n\t\tresponse = sg.send(mail)\r\n\t\tif response.status_code == 202:\r\n\t\t\tprint(\"Email sent\")\r\n\t\telse:\r\n\t\t\tprint(\"Email not sent. 
Please check error codes below - \")\r\n\t\t\tprint(response.status_code)\r\n\t\t\tprint(response.headers)", "def send_message(self, to, message):\n\t\tmessage_dict = {\n\t\t\tACTION: MESSAGE,\n\t\t\tSENDER: self.username,\n\t\t\tDESTINATION: to,\n\t\t\tTIME: time.time(),\n\t\t\tMESSAGE_TEXT: message\n\t\t}\n\t\tclient_log.debug(f'Сформирован словарь сообщения: {message_dict}')\n\t\t# Необходимо дождаться освобождения сокета для отправки сообщения\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, message_dict)\n\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\tclient_log.info(f'Отправлено сообщение для пользователя {to}')", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)" ]
[ "0.7636544", "0.742264", "0.73602957", "0.72721535", "0.7057327", "0.7004801", "0.69507664", "0.6915788", "0.68165755", "0.6802773", "0.6533247", "0.65282345", "0.65067124", "0.64868224", "0.6430405", "0.6406949", "0.6379485", "0.6377416", "0.6361029", "0.6359102", "0.63530046", "0.6347446", "0.6343044", "0.6331065", "0.6317142", "0.6307567", "0.6299316", "0.62842476", "0.62753934", "0.6255951" ]
0.7442873
1
Take an email and add it to the inbox of this client.
def receive(self, email):
    self.inbox += email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "async def add_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list.keys():\n await ctx.send(\n \"There is already an email address configured, \"\n \"Please use update command to update it..!\")\n return\n else:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been configured successfully..!\")", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def add(self, item):\n if item.email and item.email not in [i.email for i in self.lst]:\n self.lst.append(item)\n else:\n print(\"WARN: Recipient not added because a recipient with that email address already exists: {}\", item)", "def Add_attendee(self, email):\n if (email not in self.Attendees) and (email not in self.Waitlist):\n self.Attendees[email] = Attendee(email)\n else:\n if email in self.Waitlist:\n print(\"Call Promote_from_waitlist() to move an applicant from \"\n \"the waitlist.\")\n raise PreexistingAddressException(email)", "def add_to_mailing_list(self, username, email, name, list_endpoint=None):\n if not (list_endpoint or self.default_list_endpoint) or not self.list_key:\n print('MAILING_LIST is turned on, but required fields are '\n 'missing.')\n return\n\n # if no endpoint provided, use default\n if list_endpoint is None:\n list_endpoint = self.default_list_endpoint\n\n try:\n res = requests.post(list_endpoint,\n auth=('nop', self.list_key),\n data=self.payload.format(\n email=email,\n name=name,\n username=username),\n timeout=1.5)\n\n if res.status_code != 200:\n print('Unexpected mailing list API response.. 
'\n 'status code: {0.status_code}\\n'\n 'content: {0.content}'.format(res))\n\n except Exception as e:\n if e is requests.exceptions.Timeout:\n print('Mailing list API timed out..')\n else:\n print('Adding to mailing list failed:', e)", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def enter_email(self, email):\n self.selib.input_text(self.locator.email, email)", "def _findAndAddContactByEmail(self, email):\n try:\n contact = self._findAndAddContactsByEmail(email)\n except TalkException as e:\n self.raise_error(e.reason)\n\n contact = contact.values()[0]\n\n for c in self.contacts:\n if c.id == contact.mid:\n self.raise_error(\"%s already exists\" % contact.displayName)\n return\n\n c = LineContact(self, contact)\n self.contacts.append(c)\n\n self.contacts.sort()\n return c", "def add_recipient(self, address):\n if not self.validate_email_address(address):\n raise Exception(\"Invalid email address '%s'\" % address)\n self._to.append(address)", "def add_email(cmd, *args):\n\n cfg = get_config()\n nick, email = None, None\n if len(args) == 0:\n print(add_email.__doc__)\n if len(args) >= 1:\n nick = args[0]\n if len(args) >= 2:\n email = args[1]\n if len(args) >= 3:\n print(\"only <nick> <email_addr> understood :(\")\n # other param breakdown here\n vcard_fn = nick + '.vcf'\n vcard_fn = os.path.join(cfg['vcard_dir'], vcard_fn)\n vCard = getvcard(vcard_fn)\n infoDict = dict_from_vcard(vCard)\n\n infoDict['email'] = email\n\n ## make changes to infodict here\n vcard_merge_in_dict(infoDict, vCard)\n rawdata = vCard.serialize()\n with open(vcard_fn, 'w+') as fh:\n fh.write(rawdata)", "def add_to_email_address(self, email_address, friendly_name: str=None):\n if isinstance(email_address, EmailAddress):\n self._to_recipients.append(email_address)\n\n if isinstance(email_address, str):\n self._to_recipients.append(EmailAddress(email_address, friendly_name))", "def add_emails_to_addressbook(self, id, emails):\n logger.info(\"Function call: add_emails_to_addressbook into: {}\".format(id, ))\n if not id or not emails:\n self.__handle_error(\"Empty addressbook id or emails\")\n try:\n emails = json.dumps(emails)\n except:\n logger.debug(\"Emails: {}\".format(emails))\n return self.__handle_error(\"Emails list can't be converted by JSON library\")\n return self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'POST', {'emails': emails}))", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def sendmail(self, *args, **kwargs):\n #FUTURE: the EmailMessage attributes could be found by introspecting\n # the encoded message.\n message = mail.EmailMessage('SUBJECT', 
'BODY', 'FROM', ['TO'])\n mail.outbox.append(message)", "def add_email_to_blacklist(self, email, comment=''):\n logger.info(\"Function call: add_email_to_blacklist for '{}'\".format(email, ))\n return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('blacklist', 'POST', {'emails': base64.b64encode(email), 'comment': comment}))", "def Add_to_waitlist(self, email):\n if email not in self.Waitlist:\n self.Waitlist.add(email)\n else:\n raise PreexistingAddressException(email)", "def subscribe(email: str) -> None:\n url = constants.EFF_SUBSCRIBE_URI\n data = {'data_type': 'json',\n 'email': email,\n 'form_id': 'eff_supporters_library_subscribe_form'}\n logger.info('Subscribe to the EFF mailing list (email: %s).', email)\n logger.debug('Sending POST request to %s:\\n%s', url, data)\n _check_response(requests.post(url, data=data, timeout=60))", "def _send(self, email_message): \n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def email(self, email: str):\n\n self._email = email" ]
[ "0.66209656", "0.66209656", "0.64795595", "0.64078385", "0.63901234", "0.63149035", "0.6155216", "0.60956836", "0.60834974", "0.6063719", "0.6047605", "0.604135", "0.59993863", "0.5913825", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.5892881", "0.58187807", "0.57953495", "0.57846487", "0.5704797", "0.56701577", "0.56561565" ]
0.7970683
0
Fit the lambda-means model
def fit(self, X, _, **kwargs):
    assert 'lambda0' in kwargs, 'Need a value for lambda'
    assert 'iterations' in kwargs, 'Need the number of EM iterations'
    lambda0 = kwargs['lambda0']
    iterations = kwargs['iterations']
    # Fit the model. NOTE: labels should not be used here.
    try:
        X = X.toarray()
    except AttributeError:
        pass
    num_of_samples = X.shape[0]

    # If no lambda0 is supplied, initialize it as the mean distance of
    # the samples to the global mean.
    if lambda0 == 0:
        mean = np.mean(X, 0)
        total = 0.0
        for i in range(num_of_samples):
            total += np.sqrt(np.square(X[i] - mean).sum())
        lambda0 = total / num_of_samples

    # Start with a single cluster centered at the global mean.
    self.miu = np.mean(X, 0).reshape(1, -1)
    num_clusters = 1

    for _ in range(iterations):
        # E-step: assign each sample to its nearest cluster mean; spawn
        # a new cluster when the nearest mean is farther than lambda0.
        y_hat = np.zeros(num_of_samples, dtype=int)
        for j in range(num_of_samples):
            current_cluster_index = 0
            min_distance = np.inf
            for k in range(num_clusters):
                temp_distance = np.sqrt(np.square(X[j] - self.miu[k]).sum())
                if temp_distance < min_distance:
                    current_cluster_index = k
                    min_distance = temp_distance
            if min_distance <= lambda0:
                y_hat[j] = current_cluster_index
            else:
                self.miu = np.vstack((self.miu, X[j, :]))
                y_hat[j] = num_clusters
                num_clusters += 1

        # M-step: recompute each cluster mean from its assigned samples.
        for k in range(num_clusters):
            self.miu[k] = np.mean(X[y_hat == k, :], 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n bestlambda = None\n err = 1\n\n for v in range(-19,20):\n if v>=0:\n val = float(\"1e+\"+str(v))\n else:\n val = float(\"1e\"+str(v))\n w = regularized_linear_regression(Xtrain,ytrain, val)\n error = mean_absolute_error(w, Xval,yval)\n if err > error:\n err = error\n bestlambda = val\n return bestlambda", "def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n\n bestlambda = None\n mean_abs_err = 10000000\n power = -19\n while power < 20:\n lambda0 = 10 ** (power)\n w = regularized_linear_regression(Xtrain, ytrain, lambda0)\n err = mean_absolute_error(w, Xval, yval)\n if err < mean_abs_err:\n mean_abs_err = err\n bestlambda = lambda0\n power = power + 1\n return bestlambda", "def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)", "def fit(self, x):\n self.alpha = np.mean(x)\n self.is_fit = True", "def fit(self, X):", "def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):\n\n # check input dataframe\n X = super().fit(X)\n\n self.lambda_dict_ = {}\n\n for var in self.variables_:\n _, self.lambda_dict_[var] = stats.boxcox(X[var])\n\n return self", "def fit(self, x):\n pass", "def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):\n\n lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())\n polynomials = np.array(k)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n\n j = 0\n for k in polynomials:\n print(k)\n\n model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)\n if method == 'Ridge':\n model.SVD()\n i = 0\n for lam in lambdas:\n\n if method == 'Ridge':\n beta = model.Ridge(lam = lam)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, max_iter = max_iter)\n\n z_tilde = model.z_tilde(beta = beta, X = model.X_test)\n MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)\n i += 1\n j += 1\n\n print('Method = ', method)\n lambdas_min = []\n for i in range(len(polynomials)):\n minimum_index = MSE[i].argmin()\n print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())\n lambdas_min.append(int(minimum_index))\n\n #plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)\n #plt.colorbar()\n #plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.contourf(lambdas, polynomials, MSE)\n plt.colorbar()\n plt.ylabel('Polynomial order', fontsize = 14)\n plt.xlabel('Lambda', fontsize = 14)\n try:\n plt.savefig(results_dir + save_fig + 'contour' + '.png')\n except:\n pass\n plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.plot(lambdas, MSE[plot_indexes[0], :], label = 'k = ' + str(polynomials[plot_indexes[0]]))\n plt.plot(lambdas, MSE[plot_indexes[1], :], label = 'k = ' + str(polynomials[plot_indexes[1]]))\n plt.plot(lambdas, MSE[plot_indexes[2], :], label = 'k = ' + str(polynomials[plot_indexes[2]]))\n if l_min:\n plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))\n else:\n pass\n 
plt.legend()\n plt.xlabel('Lambda', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.tight_layout()\n try:\n plt.savefig(results_dir + save_fig + '.png')\n except:\n pass\n plt.show()\n return lambdas_min", "def compute_lambda_mle(X: np.ndarray) -> float:\n\n Poisson._check_input_data(X=X)\n Poisson._check_support(X=X)\n\n lambda_ = X.mean()\n return lambda_", "def value(a, y, weights, lambda_):\n\t\treturn 0.5* (np.linalg.norm(a-y)**2) / (a.shape[0] * a.shape[1])\n\t\t# return unregularize + (0.5*lambda_*np.sum(np.square(weights[-1])) / (a.shape[0] * a.shape[1])) ", "def fit(self, x):\n raise NotImplementedError()", "def TrainGroupLasso(As, bs, groups, num_lambdas = 50, normalize=2):\n\n np.random.seed(0) # for consistancy\n\n m = len(As)\n n,D = As[0].shape\n\n # Normalize\n if normalize != 0:\n\n # get norm of each column\n candidate_norms = np.zeros(D)\n for i in range(D):\n candidate_norms[i] = Norm(np.vstack(A[:,i] for A in As), normalize)\n\n norm_bs = [m*Norm(b, normalize) for b in bs]\n\n # normalize \n for i in range(m):\n As[i] = As[i].dot(np.diag(candidate_norms**-1))\n bs[i] = bs[i]/norm_bs[i]\n\n # parameters for ADMM\n rho = 1e-3\n alpha = 1.5\n\n # Get array of lambdas to check\n # Looking at KKT conditions for group lasso, lambda higher than lambda_max will result in x=0\n # lambda_min is set arbitrailly to 1e-5 but if the optimal lambda turns out to be 0 or 1e-5, then one\n # could change this to check lower values\n lambda_max = np.max([np.sum([Norm(A[:,g].T.dot(b)) for (A,b) in zip(As,bs)]) for g in range(D)])\n lambda_min = 1e-5*lambda_max\n Lam = [0]+[np.exp(alpha) for alpha in np.linspace(np.log(lambda_min), np.log(lambda_max), num_lambdas)][:-1]\n\n # Test each value of lambda to find the best\n X = []\n Losses = []\n Histories = []\n\n for lam in Lam:\n x,history = GroupLassoADMM(As,bs,lam,groups,rho,alpha)\n X.append(x.reshape(D,m, order = 'F'))\n Losses.append(PDE_FIND_Loss(As,bs,x))\n Histories.append(history)\n\n if normalize != 0:\n for x in X:\n for i in range(D):\n for j in range(m):\n x[i,j] = x[i,j]/candidate_norms[i]*norm_bs[j]\n for i in range(m):\n As[i] = As[i].dot(np.diag(candidate_norms))\n bs[i] = bs[i]*norm_bs[i]\n\n return X,Lam,Losses,Histories", "def fit(self, y, x, n=1, epsilon=.01, regularization=None, _lambda=1.0):\n # Initialize the weight vector\n w_0 = np.zeros(x.shape[1])\n \n # Variables used for learning weights\n self._epsilon = epsilon\n self._num_training = x.shape[0]\n self._lambda = _lambda\n \n print 'Epsilon: {}'.format(self._epsilon)\n \n # Pick the correct update method\n if regularization == 'l1':\n print 'L1 regularization'\n print 'Lambda: {}'.format(self._lambda)\n update_func = self._l1\n elif regularization == 'l2':\n print 'L2 regularization'\n print 'Lambda: {}'.format(self._lambda)\n update_func = self._l2\n else:\n print 'No regularization'\n update_func = self._no_reg\n \n # Number of iterations\n for _ in range(n):\n\n # Loop over all the data points\n for i in range(x.shape[0]):\n \n y_minus_g = y[i] - sigmoid( np.dot(w_0, x[i]) )\n w_0 = update_func(y[i], x[i], y_minus_g, w_0)\n\n # Save the learned weights\n self.weights = w_0\n return None", "def fit(self, X, y):\n if not isinstance(X, np.ndarray):\n X = np.asarray(X, dtype=np.double)\n if len(X.shape) != 2:\n raise ValueError(\n \"The shape of the dimension of features (%d) should be Nxd.\"\n )\n\n if not isinstance(y, np.ndarray):\n y = np.asarray(y, dtype=np.double)\n\n if y.shape[0] != X.shape[0]:\n X = X.T\n if y.shape[0] != X.shape[0]:\n raise 
ValueError(\"Shape mismatches between X and Y.\")\n\n N = len(X)\n X = X.T\n y = y.reshape((1, N))\n\n X_kernel = self.kernel(X, X)\n V = X * y\n\n V_kernel = np.outer(y, y) * X_kernel\n # V_kernel = self.kernel(V, V)\n K = matrix(V_kernel)\n p = matrix(-np.ones((N, 1))) # all-one vector\n # Build A, b, G, h\n G = matrix(np.vstack((-np.eye(N), np.eye(N)))) # for all lambda_n >= 0\n h = matrix(np.vstack((np.zeros((N, 1)), self.C * np.ones((N, 1)))))\n\n A = matrix(y) # the equality constrain is actually y^T lambda = 0\n b = matrix(np.zeros((1, 1)))\n\n solvers.options[\"show_progress\"] = False\n sol = solvers.qp(K, p, G, h, A, b)\n lamda_matrix = np.array(sol[\"x\"])\n lamda_matrix_ravel = np.ravel(sol[\"x\"])\n\n # Find the support vectors\n epsilon = 1e-5 # just a small number, greater than 1e-9\n\n S = np.where(lamda_matrix > epsilon)[0]\n S2 = np.where(lamda_matrix < 0.999 * self.C)[0]\n\n M = [val for val in S if val in S2]\n\n self.margin_features = X[:, M]\n self.margin_labels = y[:, M]\n\n VS = V[:, S]\n XS = X[:, S]\n yS = y[:, S]\n lS = lamda_matrix[S]\n\n self.w = VS.dot(lS).T\n self.b = 0\n\n sv = lamda_matrix_ravel > epsilon\n\n ind = np.arange(len(lamda_matrix))[sv]\n\n a = lamda_matrix_ravel[sv]\n sv_y = y[0, sv]\n\n for n in range(len(a)):\n self.b += sv_y[n]\n self.b -= np.sum(a * sv_y * X_kernel[ind[n], sv])\n self.b /= len(a)\n\n self.support_vectors_ = XS.T\n self.support_vectors_label = yS\n self.lamda_support_vectors = lamda_matrix[S]", "def fit():\n pass", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def _beam_fit_fn_4(z, Theta):\n return (Theta*z)**2", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n #print('Training task {} with group lasso'.format(t))\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def trend_filter(rets_data, lambda_value):\r\n #USING CVXPY convex optimiser\r\n n_periods = rets_data.shape[0]\r\n rets = rets_data.to_numpy()\r\n\r\n D_full = np.diag([1]*n_periods) - np.diag([1]*(n_periods-1), 1)\r\n D = D_full[0:n_periods-1,]\r\n beta = cp.Variable(n_periods)\r\n lambd = cp.Parameter(nonneg=True)\r\n lambd.value = lambda_value\r\n\r\n def lasso_min(betas, rets, lambd):\r\n return cp.norm(rets-betas, 2)**2 + lambd*cp.norm(cp.matmul(D, betas), 1)\r\n\r\n problem = cp.Problem(cp.Minimize(lasso_min(beta, rets, lambd)))\r\n problem.solve()\r\n\r\n # NOT WORKING\r\n # n_periods = rets_data.shape[0]\r\n # D_full = np.diag([1] * n_periods) - np.diag([1] * (n_periods - 1), 1)\r\n # D = D_full[0:n_periods - 1, ]\r\n # def lasso_min(betas, rets, D, lambda_value):\r\n # return np.linalg.norm(rets-betas)**2 + lambda_value*np.linalg.norm(D@betas,1)\r\n #\r\n # init_guess = np.repeat(1/n_periods, n_periods)\r\n # bounds = Bounds(lb=0.0, ub=1.0)\r\n # results = minimize(fun=lasso_min,\r\n # args=(rets_data, D, lambda_value),\r\n # x0=init_guess,\r\n # bounds=bounds,\r\n # method='SLSQP',\r\n # options={'disp':False})\r\n # betas = pd.Series(results.x, index=rets_data.index)\r\n # return betas\r\n betas = pd.DataFrame(beta.value, index=rets_data.index.to_timestamp(), columns=['drift'])\r\n return betas", "def fit(self, X_train, y_train):\n 
\n # Number of examples where y = 0,1\n No_y_train_1 = np.sum(y_train)\n No_y_train_0 = y_train.shape[0] - No_y_train_1\n \n #Ratio of Number of examples where y=0,1 and the total number of examples\n self.theta_0 = No_y_train_0/y_train.shape[0]\n self.theta_1 = No_y_train_1/y_train.shape[0]\n \n #Ratio of Number of examples where x_j =1 and y=0,1 and Number of examples where y=0,1 respectively\n No_inst_j1 = X_train.T.dot(y_train.reshape([-1,1])) \n No_inst_j0 = X_train.T.dot(1-y_train.reshape([-1,1]))\n \n #Whether or not laplace smoothing is implemented or not\n if self.l_smooth:\n self.prob1 = (No_inst_j1 + 1)/(No_y_train_1 + 2)\n self.prob0 = (No_inst_j0 + 1)/(No_y_train_0 + 2)\n else:\n self.prob1 = No_inst_j1/No_y_train_1\n self.prob0 = No_inst_j0/No_y_train_0\n \n return self", "def fit(self, data, num_features, lambda_user, lambda_item, gamma):\n user_matrix, item_matrix = self.init_matrix(data, num_features)\n nnz_users, nnz_items = data.nonzero()\n nnz_data = list(zip(nnz_users, nnz_items))\n for it in tqdm(range(self.num_epochs)):\n gamma /= 1.1\n np.random.shuffle(nnz_data)\n for u, i in nnz_data:\n user = user_matrix[u, :]\n item = item_matrix[:, i]\n err = data[u, i] - user @ item\n user_matrix[u, :] += gamma * (err * item - lambda_user * user)\n item_matrix[:, i] += gamma * (err * user - lambda_item * item)\n\n self.user_matrix = user_matrix\n self.item_matrix = item_matrix", "def _lambda(self, x, y, t, x_his, y_his, t_his):\n lam = self.mu + tf.reduce_sum(self._kernel(x - x_his, y - y_his, t - t_his), axis=0)\n return lam", "def fit(self, X, y, max_iter=MAX_ITER):\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n X = self.normalize_data(X)\n X = self.add_bias(X)\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Fista(self, self.lambda_1)\n w = lasso.fit(xk=W[:, t], A=X[t], b=y[t], ind=self.groups,\n max_iter=max_iter)\n W[:, t] = w\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def fit(self, X, y, sample_weight=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ..." ]
[ "0.6499958", "0.6446894", "0.6319095", "0.61819756", "0.6141082", "0.59941757", "0.5992641", "0.5890863", "0.5857018", "0.5783564", "0.57232213", "0.57163835", "0.5709896", "0.57097226", "0.5689378", "0.56857336", "0.56857336", "0.56857336", "0.5657545", "0.56331474", "0.5629706", "0.56105983", "0.5595885", "0.5588485", "0.55729634", "0.5558294", "0.555672", "0.555672", "0.555672", "0.555672" ]
0.71139395
0
Takes in a sentence and removes escape and unicode characters
def clean_up(sentence):
    return unicode(sentence.strip().replace("\n", ""), errors='ignore').strip().replace("\x0c", "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def remove_special_chars(sentence):\r\n result = re.sub(r\"[^a-zA-Z0-9.]+\", ' ', re.sub('\\.\\.+', ' ', sentence))\r\n return result", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def clean_non_chinese_symbols(text):\n text = regex.sub('[!!]+', \"!\", text)\n text = regex.sub('[??]+', \"?\", text)\n text = regex.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" UNK \", text)\n return regex.sub(\"\\s+\", \" \", text)", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def clean_str(s):\n s = re.sub(r\"[^\\\\p{L}\\\\s]\", \" \", s) # This removes accents, which we want.\n s = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", s) #This removes accents, which we want.\n s = re.sub(r\"\\'s\", \"\", s)\n s = re.sub(r\"\\'ve\", \"have\", s)\n s = re.sub(r\"n\\'t\", \" not\", s)\n s = re.sub(r\"\\'re\", \" are\", s)\n s = re.sub(r\"\\'d\", \" would\", s)\n s = re.sub(r\"\\'ll\", \" will\", s)\n s = re.sub(r\",\", \"\", s) #s = re.sub(r\",\", \" ,\", s)\n s = re.sub(r\"!\", \"\", s)\n # s = re.sub(r\"\\(\", \"\\(\", s)\n # s = re.sub(r\"\\)\", \"\\) \", s)\n s = re.sub(r\"\\?\", \"\", s)\n s = re.sub(r\"\\s{2,}\", \" \", s)\n s = re.sub(r\" \", \" \", s)\n return s.strip().lower()", "def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text", "def _normalize_sentence(sent):\n #sent = sent[sent.find('\\n\\n\\n'):]\n removed = r'[\\n ]+'\n sent = re.sub(removed, ' ', sent)\n return sent.strip()", "def remove_unicode_diac(text):\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n 
text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text", "def clean_text_from_general_punctuation_unicode(text):\n return re.sub(r\"([\\u2000-\\u206F])\", \" \", text)", "def clean_text_from_general_punctuation_unicode(text):\n return re.sub(r\"([\\u2000-\\u206F])\", \" \", text)", "def _remove_diacritics(self, text: str) -> str:\n nfkd_form = unicodedata.normalize(\"NFKD\", text)\n return \"\".join([char for char in nfkd_form if not unicodedata.combining(char)])", "def preprocess_sentence(raw):\r\n\t\r\n\t# raw = re.sub(r\"[\\x80-\\xff]\",\" \",raw)\r\n\t\r\n\traw = regex_punct.sub(' ',raw)\r\n\traw = raw.strip()\r\n\traw = raw.lower()\r\n\t\r\n\twords = nltk.word_tokenize(raw)\r\n\twords = [replace_punctuation(w) for w in words if not w in stopwords and len(w) > 1]\r\n\t\r\n\treturn(' '.join(words))", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def punctuation_filter(sentence):\n filtered_sentence = sentence\n regexps = [(r\"[.!?,\\-()\\[\\]\\\\]\", \" \"),\n (r\"&\", \"und\"),\n (r\" {2,}\", \" \")]\n for find, replace in regexps:\n filtered_sentence = re.sub(find, replace, filtered_sentence)\n \n return filtered_sentence", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def clean_sentence(sentence):\n words = sentence.lower().split()\n clean_sent = \"\"\n for word in words:\n clean_sent += (''.join(list(map(lambda x: x if x in ascii_lowercase or x in \"1234567890\" else '', list(word))))) + \" \"\n return clean_sent[:-1]", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def remove_punct(self,text):", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()" ]
[ "0.7474423", "0.74422467", "0.7410658", "0.73010415", "0.72492003", "0.7232809", "0.7168041", "0.7168041", "0.71087813", "0.7105705", "0.7013335", "0.70034975", "0.6998236", "0.69968987", "0.69652265", "0.69615847", "0.6954357", "0.69324803", "0.69324803", "0.68823165", "0.68655854", "0.68461895", "0.68205136", "0.67995757", "0.67995757", "0.67904484", "0.6788046", "0.67780054", "0.677701", "0.67591125" ]
0.8358364
0
Checks if a string is empty or NaN
def check_nan(s):
    if s == "":
        return True
    if type(s) is not str:
        return np.isnan(s)
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_null_or_empty(string_val):\n if string_val and string_val.strip():\n return False\n return True", "def non_empty(val):\n return val is not None and val != \"\"", "def is_str_none_or_empty(val):\n if val is None:\n return True\n if isinstance(val, string_types):\n val = val.strip()\n if not val:\n return True\n return False", "def is_empty(string):\n return string == None or re.sub(\"\\\\s+\", \"\", string) == \"\"", "def check_empty_string(value: str):\n if not value:\n return True\n if not value.strip():\n return True\n else:\n return False", "def blank(string):\n if not string:\n return True\n else:\n return False", "def is_empty(val):\n return val in [None, ''] or val.isspace()", "def query_is_empty(input_string):\n if re.match(r'\\A\\s*\\Z', input_string) is None:\n return True\n else:\n return False", "def isnull(pd_val):\n \n return (pd.isnull(pd_val) or len(pd_val.strip()) == 0) if isinstance(pd_val, str) else pd.isnull(pd_val)", "def is_empty(value):\n logger.info('is_empty value:%s' % value )\n if not value.strip(' '):\n return True\n else:\n return False", "def is_empty_str(val):\n s = str(val)\n if not isinstance(s, str):\n return False\n if not s.strip():\n return True\n else:\n return False", "def IsEmptyString (s) :\n if s is None : return True\n elif isinstance (s, str) :\n return len (s) == 0 \n else :\n raise PQHException (\"the type is unexpected %s\" % str (type (s)))", "def check_empty(cell):\n return pd.isna(cell)", "def check_if_empty(a):\r\n if a == '0':\r\n return True\r\n elif a == '':\r\n return True\r\n else:\r\n return False", "def _is_bumf(value):\n if type(value) in (unicode, str):\n return value.strip() == ''\n return value is None", "def not_set(string):\n if string is None:\n return True\n elif string == '':\n return True\n return False", "def ISNA(value):\n return isinstance(value, float) and math.isnan(value)", "def non_empty_string(value):\n return value and bool(value.strip())", "def is_blank(text: str) -> bool:\n return len(text.strip()) == 0", "def _check_not_empty(self, string):\n if len(string) == 0:\n self._failed(u\"The given string has zero length\")", "def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n float(s)\n return True\n except ValueError:\n return False", "def validate_nonblank(value):\n return value", "def is_empty(series):\n return series.isna().all()", "def is_non_empty_value(value):\n if value is None:\n return False\n if isinstance(value, str) and len(value.strip()) == 0:\n return False\n if (isinstance(value, list) or isinstance(value, dict)) and not value:\n return False\n return True", "def is_field_empty(*args):\n for field in args:\n if field == \"\" or field is None:\n return True\n return False\n return \"NONDETERMINISTIC\"", "def is_na(subject):\n\n if isinstance(subject, str):\n na_versions = [\"n/a\", \"nan\"]\n if subject.lower() in na_versions:\n return True\n else:\n return False\n elif isinstance(subject, float):\n if math.isnan(subject):\n return True\n elif isinstance(subject, bool):\n return not subject\n else:\n return False", "def is_empty(val):\n if val is None or isinstance(val, Sized) and len(val) == 0: # Empty string is also Sized of len 0\n return True\n return False", "def _is_nan(self, x: any) -> bool:\n return isinstance(x, float) and math.isnan(x)", "def is_str_null(msg):\n\n if None == msg or \"\" == msg:\n return True\n return False", "def validate(value):\n if str.isdigit(value) or value == \"\":\n return True\n else:\n return 
False" ]
[ "0.7631642", "0.75115323", "0.74912184", "0.74632865", "0.73394746", "0.73057926", "0.7259161", "0.7225448", "0.7190422", "0.7146433", "0.714167", "0.7140877", "0.7089258", "0.7072765", "0.6981772", "0.69643974", "0.693484", "0.6930107", "0.689618", "0.6857327", "0.6751747", "0.6732148", "0.6727279", "0.67115587", "0.6705865", "0.6662721", "0.66352135", "0.66254354", "0.66103965", "0.6592278" ]
0.8371675
0
Pooled MFoM-EER, i.e., micro-averaging EER approximation
def pooled_mfom_eer(y_true, y_pred):
    y_neg = 1 - y_true
    # number of positive samples
    P = K.sum(y_true)
    # number of negative samples
    N = K.sum(y_neg)
    # smooth false negatives and false positives
    fn = y_pred * y_true
    fp = (1. - y_pred) * y_neg
    # smooth false negative and false positive rates
    fnr = K.sum(fn) / P
    fpr = K.sum(fp) / N
    smooth_eer = fpr + .5 * K.abs(fnr - fpr)
    return smooth_eer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def emm(dataset):\r\n\r\n ####################### CONFIGURE THIS ##############################\r\n\r\n #Define subgroup\r\n #subgroup = dataset[(dataset['dvce_type'] == 'Tablet')]\r\n subgroup = dataset[(dataset['os_timezone'].str.contains(\"Asia\") & (dataset['os_name'].str.contains(\"iPhone\")))]\r\n\r\n #Define target 1\r\n target1 = 'revenue'\r\n\r\n #Define target 2\r\n target2 = 'new_buttons'\r\n\r\n #####################################################################\r\n\r\n logging.info(\"Exceptional Model Mining. (Two targets)\")\r\n\r\n lengthDataset = len(dataset)\r\n logging.debug('Examples of the dataset {}'.format(lengthDataset)) \r\n logging.debug('Examples of subgroup: {} ({:.2f}%)'.format(len(subgroup), (len(subgroup)/lengthDataset) * 100))\r\n correlationTargets = phi_coefficient (dataset,target1,target2)\r\n logging.debug('Correlation of the two targets: {:.2f}'.format(correlationTargets))\r\n \r\n evaluate(QualityMeasure.SCD,ModelClass.PhiCoefficient,dataset,subgroup,target1,target2)", "def dRdE_millicharge(E, m_x, epsilon, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n A = Avals[target]\n \n eta = calcEta(vmin(E, A, m_x),vlag=vlag, sigmav=sigmav, vesc=vesc)\n amu = 931.5e3 # keV\n q1 = np.sqrt(2*A*amu*E)\n\n #Recoil momentum over nucleon mass\n qr = q1/amu\n \n # Required for form factors\n q2 = q1*(1e-12/1.97e-7)\n b = np.sqrt(41.467/(45*A**(-1.0/3.0) - 25*A**(-2.0/3.0)))\n y = (q2*b/2)**2\n \n rate = E*0.0\n \n #Calculate the coupling to protons\n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n m_p = 0.9315\n \n cn = 0\n cp = epsilon*e**2\n \n c = [cp + cn, cp - cn]\n \n for tau1 in [0,1]:\n for tau2 in [0,1]:\n \n c1 = c[tau1]\n c2 = c[tau2]\n \n R_M = c1*c2*eta/(q1*1e-6)**4\n rate += R_M*np.vectorize(WM.calcwm)(tau1, tau2, y, target)\n \n conv = (rho0/2./np.pi/m_x)*1.69612985e14 # 1 GeV^-4 * cm^-3 * km^-1 * s * c^6 * hbar^2 to keV^-1 kg^-1 day^-1\n\n rate = np.clip(rate, 0, 1e30)\n return (4*np.pi/(2*Jvals[target]+1))*rate*conv", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def run_em(self, r):\n self.tc.reset()\n self.pf.Y = r.T\n em_data = {}\n if self.print_mode:\n print 'The hessian trace is {}'.format(\n np.trace(self.tc.t_H.get_value()))\n\n print 'Running full EM'\n\n for u in range(self.n_itr):\n t0 = self.n_t * u / self.n_itr\n tf = self.n_t * (u + 1) / self.n_itr\n print('Iteration: {} | Running up to time {}'.format(u, tf))\n\n self.run_e(tf)\n self.run_m(t0, tf, r, n_g_itr=self.n_g_itr)\n\n iteration_data = {\n 'time_steps': tf,\n 'path_means': self.pf.means,\n 'path_sdevs': self.pf.sdevs,\n 'image_est': self.tc.image_est(),\n 'coeff_est': self.tc.get_A()\n }\n\n if self.save_pix_rf_coupling:\n xr = self.pf.XS[t0:tf, :, 0].transpose()\n yr = self.pf.XS[t0:tf, :, 1].transpose()\n w = self.pf.WS[t0:tf].transpose()\n tmp = self.tc.get_sp_rf_coupling(xr, yr, w)\n iteration_data['pix_rf_coupling'] = tmp\n\n if self.save_hessian:\n iteration_data['hessian'] = self.tc.t_H.get_value()\n\n\n em_data[u] = iteration_data\n em_data['mode'] = 'EM'\n\n if self.save_mode:\n self.data['EM_data'] = em_data", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def m_e(self, z, m, tl, phi):\n\t if phi>0.:\n\t return self.MMAX*(self.CIRC_1*((self.TH - tl)/(self.TH - self.TW) + 1.)*(self.BETA*(z - self.MU))**3. 
- self.BETA*(self.TH - tl)/(self.TH - self.TW)*(z - self.MU) + \\\n\t self.CIRC_2*(self.TH - tl)/(self.TH - self.TW) -(1- self.f_o(z))*(1-m/(m+self.ALPHA_1*self.MMAX))) \n\t else:\n\t return self.MMAX*(self.CIRC_1*((self.TH - tl)/(self.TH - self.TW) + 1.)*(self.BETA*(z - self.MU))**3. - self.BETA*(self.TH - tl)/(self.TH - self.TW)*(z - self.MU) + \\\n\t self.CIRC_2*(self.TH - tl)/(self.TH - self.TW)+ (1-self.f_o(z)))", "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def ED_ME_mixed_state(bra, ket, pol_vec = np.array([1,1,1]), reduced = False):\n ME = 0\n bra = bra.transform_to_omega_basis()\n ket = ket.transform_to_omega_basis()\n for amp_bra, basis_bra in bra.data:\n for amp_ket, basis_ket in ket.data:\n ME += amp_bra.conjugate()*amp_ket*ED_ME_coupled(basis_bra, basis_ket, pol_vec = pol_vec, rme_only = reduced)\n\n return ME", "def _measmod_ekf0(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def dRdE_magnetic(E, m_x, mu_x, target, vlag=232.0, sigmav=156.0, vesc=544.0):\n \n A = Avals[target]\n \n #See Eq. 
62 of https://arxiv.org/pdf/1307.5955.pdf, but note\n #that we're using some different normalisations for the operators\n #so there are some extra factors of m_x and m_p lurking around...\n \n amu = 931.5e3 # keV\n q1 = np.sqrt(2*A*amu*E) #Recoil momentum in keV\n \n alpha = 0.007297\n e = np.sqrt(4*np.pi*alpha)\n m_p = 0.9315\n \n #Proton and neutron g-factors\n gp = 5.59\n gn = -3.83\n \n #Bohr Magneton\n #Tesla = 194.6*eV**2 # Tesla in natural units (with e = sqrt(4 pi alpha))\n #muB = 5.7883818e-5*eV/Tesla # Bohr magneton\n mu_B = 297.45 #GeV^-1 (in natural units (with e = sqrt(4 pi alpha)))\n\n cp = [E*0.0 for i in range(11)]\n cn = [E*0.0 for i in range(11)]\n \n #Operator 1\n cp[0] = e*(mu_x*mu_B)/(2.0*m_x)\n \n #Operator 5\n cp[4] = 2*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n \n #Operator 4\n cp[3] = gp*e*(mu_x*mu_B)/m_p\n cn[3] = gn*e*(mu_x*mu_B)/m_p\n \n #Operator 6\n cp[5] = -gp*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n cn[5] = -gn*e*(mu_x*mu_B)*m_p/(q1*1e-6)**2\n\n return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)", "def m_e(self, z, m, tl, phi):\n\t if phi>0.:\n\t return self.mmax*(self.CIRC_1*((self.TH - tl)/(self.TH - self.TW) + 1.)*(self.BETA*(z - self.MU))**3. - self.BETA*(self.TH - tl)/(self.TH - self.TW)*(z - self.MU) + \\\n\t self.CIRC_2*(self.TH - tl)/(self.TH - self.TW) -(1- self.f_o(z))*(1-m/(m+self.ALPHA_1*self.mmax))) \n\t else:\n\t return self.mmax*(self.CIRC_1*((self.TH - tl)/(self.TH - self.TW) + 1.)*(self.BETA*(z - self.MU))**3. - self.BETA*(self.TH - tl)/(self.TH - self.TW)*(z - self.MU) + \\\n\t self.CIRC_2*(self.TH - tl)/(self.TH - self.TW)+ (1-self.f_o(z)))", "def Mtof(e,M):\n #first calculate eccentric anomaly (bigE)\n f = np.zeros(len(e))\n for i in np.arange(0,len(e)):\n n=0.\n delta=1000.\n bigE = M[i] - e[i]*np.sin(M[i]) \n while (n<1.e4 and delta>1.e-6):\n f1 = bigE - e[i]*np.sin(bigE) - M[i]\n fp = 1.0 - e[i]*np.cos(bigE)\n delta = -f1/fp\n bigE = bigE + delta\n n = n + 1\n f[i] = 2.*np.arctan( ((1. + e[i])/(1. - e[i]))**0.5 * np.tan(bigE/2.) )\n return f", "def _measmod_ekf1(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1 - ivp.jacobian(t, h0 @ x) @ h0\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def mtf_transformer_lm_moe():\n hparams = mtf_transformer.mtf_transformer_lm_baseline()\n hparams.decoder_layers = [\"att\", \"moe\"] * 4\n moe.set_default_moe_hparams(hparams)\n hparams.mesh_shape = \"all:8\"\n hparams.layout = \"batch:all;experts:all\"\n return hparams", "def em_mog(X, k, max_iter=20):\n\n # Initialize variables\n mu = None\n sigma = [np.eye(X.shape[1]) for i in range(k)]\n phi = np.ones([k,])/k\n ll_prev = float('inf')\n start = time.time()\n\n #######################################################################\n # TODO: #\n # Initialize the means of the gaussians. You can use K-means! 
#\n #######################################################################\n\n initKmeans = KMeans(n_clusters=k, max_iter=max_iter).fit(X)\n mu = initKmeans.cluster_centers_\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n for l in range(max_iter):\n # E-Step: compute the probabilities p(z==j|x; mu, sigma, phi)\n w = e_step(X, mu, sigma, phi)\n\n # M-step: Update the parameters mu, sigma and phi\n phi, mu, sigma = m_step(w, X, mu, sigma, phi, k)\n\n # Check convergence\n ll = log_likelihood(X, mu, sigma, phi)\n print('Iter: {}/{}, LL: {}'.format(l+1, max_iter, ll))\n if ll/ll_prev > 0.999:\n print('EM has converged...')\n break\n ll_prev = ll\n\n # Get stats\n exec_time = time.time()-start\n print('Number of iterations: {}, Execution time: {}s'.format(l+1, exec_time))\n\n # Compute final assignment\n w = e_step(X, mu, sigma, phi)\n\n return phi, mu, sigma, w", "def iteration( M, sign_num):\n M_bootstrap = bootstrap(M)\n model = NMF(n_components = sign_num, solver = 'mu', max_iter = 10000000, init = 'random')\n #P = np.random.rand(len(M_bootstrap), sign_num)\n #E = np.random.rand(sign_num, len(M_bootstrap[0]))\n P = model.fit_transform(M_bootstrap)\n E = model.components_\n error = model.reconstruction_err_\n P , E = normalize(P, E)\n return P, error", "def what_emo(self, mfccs, model, params_file, st, endpoint):\n print(\"\\nEntering what_emo... \")\n emotion = None\n emo_label = {0:'angry', 1:'fearful', 2:'happy', 3:'sad', 4:'calm'}\n if endpoint == \"\":\n #emo_gen = self.mfccs_to_emo(mfccs, model, params_file, st)\n #emo = next(emo_gen)[\"dense_1\"]\n emo_list = self.mfccs_to_emo(mfccs, model, params_file, st)\n print(\"\\nemo_list: \")\n print(emo_list)\n epis_var = emo_list[0][0][-1] # we take variance from logits_variance\n print(\"\\nAleatoric Variance: \")\n print(epis_var)\n emo = emo_list[1] # softmax_output\n print(\"\\nemo: \")\n print(emo)\n else:\n dict_im = {}\n service = googleapiclient.discovery.build('ml', 'v1')\n dict_im[\"input_1\"] = mfccs.tolist()\n predictions = service.projects().predict(name=endpoint, body={'instances':[dict_im]}).execute()[\"predictions\"]\n emo = np.array(predictions[0]['dense_1']) \n print(\"\\nemo: \")\n print(emo)\n res = np.argmax(emo)\n print(\"\\nres: \")\n print(res)\n emotion = emo_label[res] \n print(\"\\nPrediction: \" + str(emotion))\n return str(emotion), res, emo, epis_var", "def dnde_ee(_: PseudoScalarMediatorBase, egams, cme):\n return dnde_xx_to_p_to_ffg(egams, cme, me)", "def _em(self, X, measurement_dim, state_dim):\n from pykalman import KalmanFilter\n\n X_masked = np.ma.masked_invalid(X)\n estimate_matrices_ = self._get_estimate_matrices()\n\n kf = KalmanFilter(\n transition_matrices=self.state_transition,\n observation_matrices=self.measurement_function,\n transition_covariance=self.process_noise,\n observation_covariance=self.measurement_noise,\n transition_offsets=self.transition_offsets,\n observation_offsets=self.measurement_offsets,\n initial_state_mean=self.initial_state,\n initial_state_covariance=self.initial_state_covariance,\n n_dim_obs=measurement_dim,\n n_dim_state=state_dim,\n )\n\n kf = kf.em(X=X_masked, em_vars=estimate_matrices_)\n\n F = kf.transition_matrices\n H = kf.observation_matrices\n Q = kf.transition_covariance\n R = kf.observation_covariance\n transition_offsets = kf.transition_offsets\n measurement_offsets = kf.observation_offsets\n X0 = kf.initial_state_mean\n P0 = 
kf.initial_state_covariance\n\n return F, H, Q, R, transition_offsets, measurement_offsets, X0, P0", "def moment_update(model, model_ema, m):\r\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\r\n p2.data.mul_(m).add_(1 - m, p1.detach().data)\r\n # p2.data.mul_(m).add_(1 - m, p1.data)", "def metallicity(method, emsystem):\n if method == 'PG16':\n # Requires Hbeta, [OII], [OIII], [NII], [SII]\n R2 = (emsystem.get_emline('[OII] 3726').attrib['flux'] +\n emsystem.get_emline('[OII] 3729').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n R3 = (emsystem.get_emline('[OIII] 4959').attrib['flux'] +\n emsystem.get_emline('[OIII] 5007').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n N2 = (emsystem.get_emline('[NII] 6548').attrib['flux'] +\n emsystem.get_emline('[NII] 6584').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n S2 = (emsystem.get_emline('[SII] 6716').attrib['flux'] +\n emsystem.get_emline('[SII] 6731').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n # Proceed\n if np.log10(N2) < -0.6:\n r_val = 7.932 + 0.944*np.log10(R3/R2) + 0.695*np.log10(N2) + \\\n ((0.97 - 0.291*np.log10(R3/R2)) - 0.019*np.log10(N2))*np.log10(R2)\n\n s_val = 8.072 + 0.789*np.log10(R3/S2) + 0.726*np.log10(N2) + \\\n (1.069 - 0.170*np.log10(R3/S2) +0.022*np.log10(N2))*np.log10(S2)\n else:\n r_val = 8.589 + 0.022*np.log10(R3/R2) + 0.399*np.log10(N2) + \\\n (-0.137 + 0.164*np.log10(R3/R2) + 0.589*np.log10(N2))*np.log10(R2)\n\n s_val = 8.424 + 0.030*np.log10(R3/S2) + 0.751*np.log10(N2) + \\\n (-0.349 + 0.182*np.log10(R3/S2) +0.508*np.log10(N2))*np.log10(S2)\n return r_val.decompose().value, s_val.decompose().value", "def test_difficulties_eps_multi(self):\n well_w = self.get_w_well_behaviour()\n\n def get_beamformer(A, B):\n return get_mvdr_vector_souden(\n A, B,\n return_ref_channel=True\n )\n\n for args in [\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n ]:\n w, ref_channel = get_beamformer(*args)\n assert ref_channel == 2, ref_channel\n np.testing.assert_allclose(\n w,\n np.array([[0., 0., 0.], well_w])\n )\n\n for args in [\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n ]:\n with tc.assert_raises(AssertionError):\n get_beamformer(*args)", "def fix_montage(raw, timestamp):\n # These channels are not recorded during an EEG experiment or are not included in standard 10/20 montage.\n \n non_eeg = ['SaO2 SpO2', 'HR HR','Pulse Plet', 'ExG1', 'ExG2', 'EEG A1', 'EEG A2']\n \n #Check if EOG was recorded. 
If so, save it so it can later be added to the data.\n EOG_CHANNEL_FOUND = False\n if('ExG1' in raw.ch_names): \n eog_data = raw.copy().pick_channels(['ExG1']).get_data()\n EOG_CHANNEL_FOUND = True\n \n exclude = list(set(non_eeg).intersection(raw.ch_names))\n raw.drop_channels(exclude)\n \n raw.info['ch_names'] = [name.split(' ')[-1] for name in raw.info['ch_names']]\n\n orig_names = raw.ch_names\n montage = mne.channels.read_montage(kind = 'standard_1020', ch_names=raw.info['ch_names'])\n \n data = raw.get_data()\n \n channels_dict = {}\n \n for channel_name, channel_data in zip(orig_names, data):\n channels_dict[channel_name] = channel_data\n \n reordered_data = np.zeros(shape = data.shape) \n \n for idx, channel_name in enumerate(montage.ch_names):\n reordered_data[idx, :] = channels_dict[channel_name]\n \n new_info = mne.create_info(\n ch_names= list(montage.ch_names),\n sfreq = raw.info['sfreq'],\n ch_types = ['eeg'] * len(list(montage.ch_names)),\n #meas_date = [timestamp[0], 0] # Time of the first sample and something else. Not well documented.\n )\n \n # Create new dataset with reordered channels\n new_raw = mne.io.RawArray(reordered_data, new_info)\n # Set electrode localizations using standard 1020 montage\n new_raw.set_montage(montage)\n \n if(EOG_CHANNEL_FOUND): # Add it to other channels\n eog_channel = mne.io.RawArray(eog_data, mne.create_info( ch_names= ['ExG1'], sfreq = raw.info['sfreq'], ch_types = ['eog']))\n new_raw = new_raw.add_channels([eog_channel])\n \n return new_raw", "def Fermi(En,T):\n ev = 1.60218e-19\n kb = 1.380e-23\n return 1/(1+np.exp(En/(kb*T/ev)))", "def fermi(E,mu,T):\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)", "def TST_MMD_u(Fea, N_per, N1, Fea_org, sigma, sigma0, ep, alpha, device, dtype, is_smooth=True):\r\n mmd_vector = np.zeros(N_per)\r\n TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, ep, is_smooth)\r\n mmd_value = get_item(TEMP[0], is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()", "def EM(self, fe_params, cov_re, scale, niter_em=10,\n hist=None):\n\n xxtot = 0.\n for x in self.exog_li:\n xxtot += np.dot(x.T, x)\n\n xytot = 0.\n for x,y in zip(self.exog_li, self.endog_li):\n xytot += np.dot(x.T, y)\n\n pp = []\n for itr in range(niter_em):\n\n m1x, m1y, m2, m2xx = self.Estep(fe_params, cov_re, scale)\n\n fe_params = np.linalg.solve(xxtot, xytot - m1x)\n cov_re = m2 / self.n_groups\n\n scale = 0.\n for x,y in zip(self.exog_li, self.endog_li):\n scale += np.sum((y - np.dot(x, fe_params))**2)\n scale -= 2 * m1y\n scale += 2 * np.dot(fe_params, m1x)\n scale += np.trace(m2xx)\n scale /= self.n_totobs\n\n if hist is not None:\n hist.append([\"EM\", fe_params, cov_re, scale])\n\n return fe_params, cov_re, scale", "def env_EMG(emg, 
fs):\n EMGenv = np.copy(emg)\n \n #Remove line noise\n cof_50 = np.array([49, 51])\n Wn_50 = 2*cof_50/fs\n Wn_50[Wn_50 >= 1] = 0.99\n [B50, A50] = signal.butter(3, Wn_50, 'bandstop') #third order bandstop Butterworth filter\n EMGenv = signal.filtfilt(B50, A50, EMGenv, axis = 0)\n \n #BandPass filtering\n cof_1 = np.array([80, 500])\n Wn_1 = 2*cof_1/fs\n Wn_1[Wn_1 >= 1] = 0.99\n [B1, A1] = signal.butter(3, Wn_1, 'bandpass') #third order bandpass Butterworth filter\n EMGenv = signal.filtfilt(B1, A1, EMGenv, axis = 0)\n \n #Rectify\n EMGenv = abs(EMGenv)\n \n #LowPass filtering\n cof_2 = np.array([10])\n Wn_2 = 2*cof_2/fs\n Wn_2[Wn_2 >= 1] = 0.99\n [B2, A2] = signal.butter(3, Wn_2, 'lowpass') #third order lowpass Butterworth filter\n EMGenv = signal.filtfilt(B2, A2, EMGenv, axis = 0)\n \n return EMGenv", "def moment_update(model, model_ema, m):\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)", "def moment_update(model, model_ema, m):\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)" ]
[ "0.5958953", "0.57141393", "0.56649196", "0.55627", "0.5562114", "0.5515414", "0.5514347", "0.5499168", "0.54952323", "0.54772395", "0.5469913", "0.54049045", "0.5397329", "0.53947175", "0.5379511", "0.535299", "0.5349559", "0.53284127", "0.5324324", "0.5302682", "0.5300826", "0.5289601", "0.5284547", "0.527777", "0.52609617", "0.52540624", "0.52412015", "0.52285373", "0.52268225", "0.52268225" ]
0.5764583
1
MFoM micro F1, i.e. microaveraging F1 approximation (pool all scores and calculate errors)
def mfom_microf1(y_true, y_pred): p = 1. - y_pred numen = 2. * K.sum(p * y_true) denum = K.sum(p + y_true) smooth_f1 = numen / denum return 1.0 - smooth_f1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mfom_macrof1(y_true, y_pred):\n s = K.shape(y_true)\n y_true = K.reshape(y_true, (-1, s[-1]))\n y_pred = K.reshape(y_pred, (-1, s[-1]))\n y_neg = 1 - y_true\n # smooth counters per class\n tp = K.sum((1. - y_pred) * y_true, axis=0)\n fn = K.sum(y_pred * y_true, axis=0)\n fp = K.sum((1. - y_pred) * y_neg, axis=0)\n numen = 2. * tp\n denum = fp + fn + 2. * tp\n smooth_f1 = K.exp(K.log(numen + 1.) - K.log(denum + 1.))\n error_f1 = 1. - K.mean(smooth_f1)\n # debug output\n # tp = K.print_tensor(tp, message='TP is: ')\n # fn = K.print_tensor(fn, message='FN is: ')\n # fp = K.print_tensor(fp, message='FP is: ')\n # error_f1 = K.print_tensor(error_f1, message='error_f1: ')\n return error_f1", "def micro_f1(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=0)\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)\n\n \"\"\"Micro_F1 metric.\n \"\"\"\n precision = K.sum(true_positives) / (K.sum(predicted_positives)+K.epsilon())\n recall = K.sum(true_positives) / (K.sum(possible_positives)+K.epsilon())\n micro_f1 = 2 * precision * recall / (precision + recall + K.epsilon())\n return micro_f1", "def evaluate_f1_ml(predict, truth):\n label_same = []\n label_predict = []\n label_truth = []\n label_f1 = []\n\n division = lambda x, y: (x * 1.0 / y) if y else 0\n f1 = lambda p, r: 2 * p * r / (p + r) if p + r else 0\n\n batch, label_size = predict.size()\n for i in range(label_size):\n cur_predict = predict[:, i]\n cur_truth = truth[:, i]\n\n predict_max = cur_predict.gt(0.5).long()\n cur_eq_num = (predict_max * cur_truth).sum().item()\n\n cur_predict_num = predict_max.sum().item()\n cur_truth_num = cur_truth.sum().item()\n\n cur_precision = division(cur_eq_num, cur_predict_num)\n cur_recall = division(cur_eq_num, cur_truth_num)\n cur_f1 = f1(cur_precision, cur_recall)\n\n label_same.append(cur_eq_num)\n label_predict.append(cur_predict_num)\n label_truth.append(cur_truth_num)\n label_f1.append(cur_f1)\n\n macro_f1 = sum(label_f1) / len(label_f1)\n micro_precision = division(sum(label_same), sum(label_predict))\n micro_recall = division(sum(label_same), sum(label_truth))\n micro_f1 = f1(micro_precision, micro_recall)\n\n return macro_f1, micro_f1, micro_precision, micro_recall, label_f1", "def micro_f1(pred_cnts, gold_cnts, intr_cnts):\n numerator = sum(intr_cnts)\n micro_p = pr(numerator, sum(pred_cnts))\n micro_r = rc(numerator, sum(gold_cnts))\n micro_f = f1(micro_p, micro_r)\n\n ret_dict = {\"f1\": micro_f,\n \"p\": micro_p,\n \"r\": micro_r}\n return ret_dict", "def f1_score_model(self, model, X, y):\n\n prediction = model.predict_classes(X)\n f1_macro = f1_score(y, prediction, average='macro')\n f1_micro = f1_score(y, prediction, average='macro')\n print(\"f1_macro: \", f1_score(y, prediction, average='macro'))\n print(\"f1_micro: \", f1_score(y, prediction, average=\"micro\"))\n print(\"f1_weighted: \", f1_score(y, prediction, average=\"weighted\"))\n return f1_macro, f1_micro", "def f1(x):\n \n # Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += f1_part_i(x,m_ind) \n \n return f", "def auxminf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmin_f1_part_i(x,m_ind) \n \n return f", "def get_f1(self, predictions):\n preds = []\n ground_truth = []\n for index in range(len(self.test_data)):\n preds.append(predictions[index])\n ground_truth.append(self.test_data[index][self.target_attribute])\n f1 = 
metrics.f1_score(ground_truth,preds,labels=['win','loss','draw'],average='macro')\n return f1", "def macro_f1(pred_cnts, gold_cnts, intr_cnts):\n ps = []\n rs = []\n fs = []\n for pred_cnt, gold_cnt, intr_cnt in zip(pred_cnts, gold_cnts, intr_cnts):\n numerator = intr_cnt\n p = pr(numerator, pred_cnt)\n r = rc(numerator, gold_cnt)\n f = f1(p, r)\n ps.append(p)\n rs.append(r)\n fs.append(f)\n\n macro_f, macro_p, macro_r = map(np.average, [fs, ps, rs])\n ret_dict = {\"f1\": macro_f,\n \"p\": macro_p,\n \"r\": macro_r}\n return ret_dict", "def f1_score(self):", "def auxmaxf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmax_f1_part_i(x,m_ind) \n \n return f", "def F1_score(y, model):\n\tp = precision(y, model)\n\tr = recall(y, model)\n\tf = 2*((p*r)/(p+r))\n\treturn f", "def stats_f1score_per_class(cm):\n # defined as 2 * recall * prec / recall + prec\n sums = (np.sum(cm, axis=1) + np.sum(cm, axis=0))\n mask = (sums>0)\n sums[sums==0] = 1\n f1score_per_class = 2 * np.diag(cm) / sums\n f1score_per_class[np.logical_not(mask)] = -1\n average_f1_score = f1score_per_class[mask].mean()\n return average_f1_score, f1score_per_class", "def f1_score(self):\n self.overall_f1_score = f1_score(\n self.y_true, self.y_pred, average = self.average_type).round(self.digits_count_fp)\n self.classes_f1_score = f1_score(\n self.y_true, self.y_pred, average = None).round(self.digits_count_fp)", "def f1_post_mean(self,f1,n_samp=5000):\n assert len(f1.shape)==1, 'input must be 1d ndarray'\n n_trial = len(f1)\n # Takes 1d array as input\n # sensory noise\n f1_ = np.tile(f1,(n_samp,1)) + self.s_s*np.random.randn(n_samp,n_trial)\n s = (1./self.s_g**2 + 1./self.s_s**2)**(-1./2.)\n mu = s**2*(self.mu_g/self.s_g**2 + f1_/self.s_s**2)\n if self.h == 0:\n return np.mean(mu,axis=0)\n else:\n # posterior inference\n #k = 1./np.sqrt(2.*np.pi)*s/(self.s_g*self.s_s)\\\n # *np.exp(-0.5*(self.mu_g**2/self.s_g**2+f1_**2/self.s_s**2-mu**2/s**2))\n k_g = s/self.s_s\\\n *np.exp(-0.5*(self.mu_g**2/self.s_g**2+f1_**2/self.s_s**2-mu**2/s**2)) # for unnormalized gaussian prior\n pi_u = self.h/(self.h+k_g)\n pi_g = 1.-pi_u\n\n return np.mean(pi_u*f1_+pi_g*mu,axis=0)", "def _compute_f1(self, tp: torch.Tensor, fp: torch.Tensor,\n fn: torch.Tensor) -> float:\n precision = tp / (tp + fp).clamp(min=1e-8)\n recall = tp / (tp + fn).clamp(min=1e-8)\n f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)\n return float(f1.mean())", "def get_f1score(conf_matrix1, conf_matrix2, conf_matrix3):\r\n p = get_precision(conf_matrix1, conf_matrix2, conf_matrix3)\r\n r = get_recall(conf_matrix1, conf_matrix2, conf_matrix3)\r\n\r\n if p + r > 0:\r\n return 2 * p * r / (p + r)\r\n else:\r\n return 0", "def calculate_f1_score(predictions, actuals):\n predictions = predictions > 0.5\n # fbeta_score throws a confusing error if inputs are not numpy arrays\n predictions, actuals, = np.array(predictions), np.array(actuals)\n # We need to use average='samples' here, any other average method will generate bogus results\n return fbeta_score(actuals, predictions, beta=1, average='samples')", "def get_f1_score(actual_labels, preds_labels, binary_classifcation, pos_label=\"malaria\", confusion_matrix_title=\"\"):\n # demonstration of calculating metrics for a neural network model using sklearn\n if not binary_classifcation:\n # For multiclass classification.\n accuracy = accuracy_score(actual_labels, preds_labels)\n precision = precision_score(actual_labels, preds_labels, average=\"macro\")\n recall = 
recall_score(actual_labels, preds_labels, average=\"macro\")\n f1 = f1_score(actual_labels, preds_labels, average=\"macro\")\n print('Accuracy: %f' % accuracy)\n print('Precision: %f' % precision)\n print('Recall: %f' % recall)\n print('F1 score: %f' % f1)\n\n else:\n accuracy = accuracy_score(actual_labels, preds_labels)\n print('Accuracy: %f' % accuracy)\n # # precision tp / (tp + fp)\n precision = precision_score(actual_labels, preds_labels, pos_label=pos_label)\n print('Precision: %f' % precision)\n # recall: tp / (tp + fn)\n recall = recall_score(actual_labels, preds_labels, pos_label=pos_label)\n print('Recall: %f' % recall)\n # f1: 2 tp / (2 tp + fp + fn)\n f1 = f1_score(actual_labels, preds_labels, pos_label=pos_label)\n print('F1 score: %f' % f1)\n # ROC AUC\n # auc = roc_auc_score(test_labels, basic_cnn_preds_labels)\n # print('ROC AUC: %f' % auc)\n\n # confusion matrix\n disp = plot_confusion_matrix(y_true=actual_labels, y_pred=preds_labels,\n display_labels=list(np.unique(actual_labels)),\n cmap=plt.cm.Blues,\n normalize=None)\n disp.ax_.set_title(confusion_matrix_title)\n plt.show()\n matrix = confusion_matrix(actual_labels, preds_labels)\n print(matrix)\n # if plot_confusion_matrix:\n # show_confusion_matrix(matrix=matrix, labels=list(np.unique(actual_labels)))", "def computeF1_macro(confusion_matrix, matching, num_clusters):\r\n # Permute the matrix columns\r\n permuted_confusion_matrix = np.zeros([num_clusters, num_clusters])\r\n for cluster in range(num_clusters):\r\n matched_cluster = matching[cluster]\r\n permuted_confusion_matrix[:, cluster] = confusion_matrix[:, matched_cluster]\r\n# Compute the F1 score for every cluster\r\n F1_score = 0\r\n for cluster in range(num_clusters):\r\n TP = permuted_confusion_matrix[cluster,cluster]\r\n FP = np.sum(permuted_confusion_matrix[:,cluster]) - TP\r\n FN = np.sum(permuted_confusion_matrix[cluster,:]) - TP\r\n precision = TP/(TP + FP)\r\n recall = TP/(TP + FN)\r\n f1 = stats.hmean([precision,recall])\r\n F1_score += f1\r\n F1_score /= num_clusters\r\n return F1_score", "def f1_score(confusion):\n s = np.power(sensitivity(confusion), -1)\n p = np.power(precision(confusion), -1)\n return 2 / (s + p)", "def get_M(self, Y, K0, K1, prec):\n\n # # Use low precision\n dold = mpmath.mp.dps\n # mpmath.mp.dps=int(mpmath.ceil(abs(mpmath.log10(eps))))+5\n mpmath.mp.dps = max(dold, prec) + 5\n twopi = 2 * mpmath.pi()\n twopiy = twopi * mpmath.mpf(Y)\n # an extra two for the accumulation of errors\n eps = mpmath.mpf(10)**mpmath.mpf(-prec)\n minm = max(10, abs(int(1 - self._weight) + 1) / (2 * mpmath.pi() * Y))\n # print \"K0=\",K0\n # print \"K1=\",K1\n [Cp0, Cp1] = self.get_Cp(K0)\n Cm = self.get_Cm(K0, K1)\n # print \"Cp0,Cp1,Cm=\",mppr(Cp0),mppr(Cp1),mppr(Cm)\n fak = len(self.multiplier().D())\n try:\n for m in range(minm, minm + 10000):\n errest1 = fak * self.err_est_vv_hwmf_pos(Y, m, Cp0, Cp1)\n errest2 = fak * self.err_est_vv_hwmf_neg(Y, m, Cm)\n # print \"er1+(\",m,\")=\",mppr(errest1)\n # print \"er2-(\",m,\")=\",mppr(errest2)\n if(max(abs(errest1), abs(errest2)) < eps):\n raise StopIteration()\n raise ArithmeticError(\"Could not find M<%s such that error bound in truncation is <{0}! 
and Y,K0,K1={1},{2},{3} \\n err+={4} \\n err-={5}\".format(\n m, eps, mppr(Y), K0, K1, mppr(errest1), mppr(errest2)))\n except StopIteration:\n if(self._verbose > 2):\n print(\"er +={0}\".format(errest1))\n print(\"er -={0}\".format(errest2))\n print(\"m={0}\".format(m))\n print(\"Y={0}\".format(Y))\n mpmath.mp.dps = dold\n return m", "def f1_score(confusion):\n p = precision(confusion)\n r = sensitivity(confusion)\n F1 = (2 * p * r) / (p + r)\n return F1", "def f1_score(confusion):\n sens = sensitivity(confusion)\n prec = precision(confusion)\n return 2 * sens * prec / (sens + prec)", "def f1_score(y_true, y_pred, threshold, macro = False, eps = 1e-9):\n\n y_pred = torch.ge(y_pred.float(), threshold).float()\n\n y_true = y_true.float()\n\n tp_l = (y_pred * y_true).sum(0).float()\n\n fp_l = (y_pred * (1 - y_true)).sum(0).float()\n\n fn_l = ((1 - y_pred) * y_true).sum(0).float()\n\n precision_label = tp_l.div(tp_l + fp_l + eps)\n\n recall_label = tp_l.div(tp_l + fn_l + eps)\n\n if macro:\n\n f1_macro = torch.mean((precision_label * recall_label).div(precision_label + recall_label + eps) * 2)\n\n return f1_macro.item(), torch.mean(precision_label).item(), torch.mean(recall_label).item()\n\n else: \n\n tp = tp_l.sum()\n\n fp = fp_l.sum()\n\n fn = fn_l.sum()\n\n precision = tp / (tp + fp + eps)\n\n recall = tp / (tp + fn + eps)\n\n f1_micro = (precision * recall).div(precision + recall + eps) * 2\n\n return f1_micro.item(), precision.item(), recall.item()", "def f1_macro(y_true, y_pred):\n tn, fp, fn, tp = confusion_matrix(y_pred, y_true).ravel()\n p = tp / (tp + fp) # Precision\n r = tp / (tp + fn) # Recall\n # Harmonic Mean of Precision and Recall\n f1 = 2 / (p**-1 + r**-1)\n return f1", "def cal_F1(loader, model):\n correct = 0\n total = 0\n model.eval()\n cof_mat = np.zeros ((4,4))\n Ns = np.zeros(4)\n ns = np.zeros(4)\n for data, labels in loader:\n data_batch, label_batch = data.to(device), labels.to(device)\n outputs = F.softmax(model(data_batch), dim=1)\n predicted = outputs.max(1, keepdim=True)[1]\n total += label_batch.size(0)\n correct += predicted.eq(label_batch.view_as(predicted)).sum().item()\n acc = label_batch.view_as(predicted)\n for (a,p) in zip(acc, predicted):\n cof_mat[a][p] += 1\n Ns[a] += 1\n ns[p] += 1\n F1 = 0.0\n for i in range(len(Ns)):\n tempF = cof_mat[i][i]*2.0 /(Ns[i] + ns[i])\n F1 = F1+ tempF\n print('F1'+str(i)+':',tempF)\n F1 = F1/4.0\n print('cofmat',cof_mat)\n return 100 * correct / total, F1", "def _himf(LATENTDIM, REG, EXPERIMENTNUM, gamma,\n nmfflag=None, lr=0.001, esflag=True):\n fn_hi = '../H3N2_HIdata/H3N2_integrated_/H3N2_HI_data_minority.csv'\n virusindex = readdata.readvirusindex(fn_hi)\n serumindex = readdata.readserumindex(fn_hi)\n ratings = np.load('ratings_minority.npy')\n\n\n \"\"\"\n Cache date check and get simtx from cache\n \"\"\"\n seq_date = os.stat(\"./realdata_minority.fa\").st_mtime\n simtx_date = os.stat(\"./simtx_minority.npy\").st_mtime\n if simtx_date <= seq_date:\n fsim = open(\"./realdata_minority.fa\")\n print(\"making simtx_minority.npy..\")\n simtx = simseq.simseq_parallel(virusindex, fsim)\n np.save(\"simtx_minority.npy\", simtx)\n else:\n simtx = np.load(\"simtx_minority.npy\")\n print(\"simtx_minority ready!\")\n\n\n # create train, validation and test sets.\n n = int(ratings.shape[0] * 0.8)\n train = ratings[:n]\n test = ratings[n:]\n v = int(train.shape[0] * 0.9)\n # split train to 1(validate) : 9(training)\n val = train[v:]\n train = train[:v]\n from rsvd import RSVD\n dims = (len(virusindex), len(serumindex))\n\n 
\"\"\"\n get the average score\n MF\n \"\"\"\n\n model = RSVD.train(LATENTDIM, train, dims, simtx,\n probeArray=val, esflag=esflag, maxEpochs=1000,\n learnRate=lr,\n regularization=REG,\n nmfflag=nmfflag,\n randomNoise=0.1,\n gamma=gamma)\n\n sqerr = 0.0\n\n reslist = []\n for strainID, serumID, rating in test:\n err = rating - model(strainID, serumID)\n reslist.append([rating, model(strainID, serumID)])\n sqerr += err * err\n sqerr /= test.shape[0]\n\n modelpath = \"./experiment{0}/model-ldim-{1}-reg-{2}\".format(\n EXPERIMENTNUM, LATENTDIM, REG)\n rmsepath = \"./experiment{0}/rmse-ldim-{1}-reg-{2}\".format(\n EXPERIMENTNUM, LATENTDIM, REG)\n if nmfflag:\n modelpath = modelpath + \"-nmf\"\n rmsepath = rmsepath + \"-nmf\"\n modelpath = modelpath + \"-gamma-{0}\".format(gamma)\n rmsepath = rmsepath + \"-gamma-{0}\".format(gamma)\n modelpath = modelpath + \"/\"\n\n if not os.path.exists(os.path.dirname(modelpath)):\n try:\n os.makedirs(os.path.dirname(modelpath))\n model.save(modelpath)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n f = open(rmsepath, 'a+')\n print \"Test RMSE: {0}\\n\".format(np.sqrt(sqerr))\n f.write(\"Test RMSE: {0}\\n\".format(np.sqrt(sqerr)))\n f.close()\n\n np.save(modelpath + 'true_vs_prediction.npy',\n np.array(reslist))\n\n return reslist", "def record_f1_score(record_examples: List[RecordNestedExample]):\n if not record_examples:\n return 0.\n f1_scores = []\n for example in record_examples:\n example_f1s = []\n for answer in example.answers:\n example_f1s.append(exact_match_score(example.prediction, answer))\n if example_f1s:\n f1_scores.append(max(example_f1s))\n return np.mean(f1_scores)", "def f1score(prediction,ytest):\n truepos = np.sum(ytest[prediction == 1])\n predpos = np.sum(prediction)\n actpos = np.sum(ytest)\n precision = truepos/predpos\n recall = truepos/actpos\n return 2*precision*recall/(precision + recall)" ]
[ "0.65318006", "0.6385157", "0.63657004", "0.6327502", "0.6247419", "0.6231388", "0.61840904", "0.6177114", "0.6110904", "0.6081985", "0.6065075", "0.6055157", "0.6046821", "0.6025461", "0.60203606", "0.60167265", "0.5993932", "0.58769673", "0.58766234", "0.5874598", "0.5863062", "0.5857586", "0.5856431", "0.584316", "0.5836099", "0.5778988", "0.57776433", "0.57774675", "0.57476056", "0.5710723" ]
0.69825727
0
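The record above stores a Keras-backend version of the MFoM micro-F1 error. As a cross-check, here is a minimal NumPy sketch of the same pooled computation; the `(n_samples, n_classes)` shapes and the convention that `y_pred` is a loss-like score (low values indicate a positive, hence `1 - y_pred`) are taken from the record, everything else is illustrative.

```python
import numpy as np

def smooth_micro_f1_error(y_true, y_pred):
    """Micro-averaged smooth F1 error, mirroring the Keras math above.

    y_true: binary targets, shape (n_samples, n_classes)
    y_pred: scores in [0, 1] where LOW values indicate a positive,
            so (1 - y_pred) acts as a soft positive indicator
    """
    p = 1.0 - y_pred
    numer = 2.0 * np.sum(p * y_true)   # pooled soft true positives, doubled
    denom = np.sum(p + y_true)         # pooled soft predicted + actual positives
    return 1.0 - numer / denom         # 1 - smooth micro F1

# quick check: well-separated scores give a small error
y_true = np.array([[1., 0.], [0., 1.]])
y_pred = np.array([[0.1, 0.9], [0.8, 0.2]])
print(smooth_micro_f1_error(y_true, y_pred))   # ~0.15
```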
MFoM macro F1, i.e. macroaveraging F1 approximation (calculate errors per class)
def mfom_macrof1(y_true, y_pred): s = K.shape(y_true) y_true = K.reshape(y_true, (-1, s[-1])) y_pred = K.reshape(y_pred, (-1, s[-1])) y_neg = 1 - y_true # smooth counters per class tp = K.sum((1. - y_pred) * y_true, axis=0) fn = K.sum(y_pred * y_true, axis=0) fp = K.sum((1. - y_pred) * y_neg, axis=0) numen = 2. * tp denum = fp + fn + 2. * tp smooth_f1 = K.exp(K.log(numen + 1.) - K.log(denum + 1.)) error_f1 = 1. - K.mean(smooth_f1) # debug output # tp = K.print_tensor(tp, message='TP is: ') # fn = K.print_tensor(fn, message='FN is: ') # fp = K.print_tensor(fp, message='FP is: ') # error_f1 = K.print_tensor(error_f1, message='error_f1: ') return error_f1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f1_macro(y_true, y_pred):\n tn, fp, fn, tp = confusion_matrix(y_pred, y_true).ravel()\n p = tp / (tp + fp) # Precision\n r = tp / (tp + fn) # Recall\n # Harmonic Mean of Precision and Recall\n f1 = 2 / (p**-1 + r**-1)\n return f1", "def macro_f1(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=0)\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)\n\n \"\"\"Macro_F1 metric.\n \"\"\"\n precision = true_positives / (predicted_positives + K.epsilon())\n recall = true_positives / (possible_positives + K.epsilon())\n macro_f1 = K.mean(2 * precision * recall / (precision + recall + K.epsilon()))\n return macro_f1", "def macro_f1(pred_cnts, gold_cnts, intr_cnts):\n ps = []\n rs = []\n fs = []\n for pred_cnt, gold_cnt, intr_cnt in zip(pred_cnts, gold_cnts, intr_cnts):\n numerator = intr_cnt\n p = pr(numerator, pred_cnt)\n r = rc(numerator, gold_cnt)\n f = f1(p, r)\n ps.append(p)\n rs.append(r)\n fs.append(f)\n\n macro_f, macro_p, macro_r = map(np.average, [fs, ps, rs])\n ret_dict = {\"f1\": macro_f,\n \"p\": macro_p,\n \"r\": macro_r}\n return ret_dict", "def f1(self, option='average'):\n\t\tif option == 'classwise':\treturn self.class_f1_\n\t\telif option == 'average':\treturn self.avg_f1_", "def stats_f1score_per_class(cm):\n # defined as 2 * recall * prec / recall + prec\n sums = (np.sum(cm, axis=1) + np.sum(cm, axis=0))\n mask = (sums>0)\n sums[sums==0] = 1\n f1score_per_class = 2 * np.diag(cm) / sums\n f1score_per_class[np.logical_not(mask)] = -1\n average_f1_score = f1score_per_class[mask].mean()\n return average_f1_score, f1score_per_class", "def get_f1(self, predictions):\n preds = []\n ground_truth = []\n for index in range(len(self.test_data)):\n preds.append(predictions[index])\n ground_truth.append(self.test_data[index][self.target_attribute])\n f1 = metrics.f1_score(ground_truth,preds,labels=['win','loss','draw'],average='macro')\n return f1", "def mfom_microf1(y_true, y_pred):\n p = 1. - y_pred\n numen = 2. 
* K.sum(p * y_true)\n denum = K.sum(p + y_true)\n smooth_f1 = numen / denum\n return 1.0 - smooth_f1", "def f1_score(self):\n self.overall_f1_score = f1_score(\n self.y_true, self.y_pred, average = self.average_type).round(self.digits_count_fp)\n self.classes_f1_score = f1_score(\n self.y_true, self.y_pred, average = None).round(self.digits_count_fp)", "def f1_score(confusion):\n p = precision(confusion)\n r = sensitivity(confusion)\n F1 = (2 * p * r) / (p + r)\n return F1", "def f1_score_model(self, model, X, y):\n\n prediction = model.predict_classes(X)\n f1_macro = f1_score(y, prediction, average='macro')\n f1_micro = f1_score(y, prediction, average='macro')\n print(\"f1_macro: \", f1_score(y, prediction, average='macro'))\n print(\"f1_micro: \", f1_score(y, prediction, average=\"micro\"))\n print(\"f1_weighted: \", f1_score(y, prediction, average=\"weighted\"))\n return f1_macro, f1_micro", "def micro_f1(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=0)\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)\n\n \"\"\"Micro_F1 metric.\n \"\"\"\n precision = K.sum(true_positives) / (K.sum(predicted_positives)+K.epsilon())\n recall = K.sum(true_positives) / (K.sum(possible_positives)+K.epsilon())\n micro_f1 = 2 * precision * recall / (precision + recall + K.epsilon())\n return micro_f1", "def evaluate_f1_ml(predict, truth):\n label_same = []\n label_predict = []\n label_truth = []\n label_f1 = []\n\n division = lambda x, y: (x * 1.0 / y) if y else 0\n f1 = lambda p, r: 2 * p * r / (p + r) if p + r else 0\n\n batch, label_size = predict.size()\n for i in range(label_size):\n cur_predict = predict[:, i]\n cur_truth = truth[:, i]\n\n predict_max = cur_predict.gt(0.5).long()\n cur_eq_num = (predict_max * cur_truth).sum().item()\n\n cur_predict_num = predict_max.sum().item()\n cur_truth_num = cur_truth.sum().item()\n\n cur_precision = division(cur_eq_num, cur_predict_num)\n cur_recall = division(cur_eq_num, cur_truth_num)\n cur_f1 = f1(cur_precision, cur_recall)\n\n label_same.append(cur_eq_num)\n label_predict.append(cur_predict_num)\n label_truth.append(cur_truth_num)\n label_f1.append(cur_f1)\n\n macro_f1 = sum(label_f1) / len(label_f1)\n micro_precision = division(sum(label_same), sum(label_predict))\n micro_recall = division(sum(label_same), sum(label_truth))\n micro_f1 = f1(micro_precision, micro_recall)\n\n return macro_f1, micro_f1, micro_precision, micro_recall, label_f1", "def get_f1score(conf_matrix1, conf_matrix2, conf_matrix3):\r\n p = get_precision(conf_matrix1, conf_matrix2, conf_matrix3)\r\n r = get_recall(conf_matrix1, conf_matrix2, conf_matrix3)\r\n\r\n if p + r > 0:\r\n return 2 * p * r / (p + r)\r\n else:\r\n return 0", "def F1_score(y, model):\n\tp = precision(y, model)\n\tr = recall(y, model)\n\tf = 2*((p*r)/(p+r))\n\treturn f", "def f1_score(y_true, y_pred, threshold, macro = False, eps = 1e-9):\n\n y_pred = torch.ge(y_pred.float(), threshold).float()\n\n y_true = y_true.float()\n\n tp_l = (y_pred * y_true).sum(0).float()\n\n fp_l = (y_pred * (1 - y_true)).sum(0).float()\n\n fn_l = ((1 - y_pred) * y_true).sum(0).float()\n\n precision_label = tp_l.div(tp_l + fp_l + eps)\n\n recall_label = tp_l.div(tp_l + fn_l + eps)\n\n if macro:\n\n f1_macro = torch.mean((precision_label * recall_label).div(precision_label + recall_label + eps) * 2)\n\n return f1_macro.item(), torch.mean(precision_label).item(), torch.mean(recall_label).item()\n\n else: \n\n tp = 
tp_l.sum()\n\n fp = fp_l.sum()\n\n fn = fn_l.sum()\n\n precision = tp / (tp + fp + eps)\n\n recall = tp / (tp + fn + eps)\n\n f1_micro = (precision * recall).div(precision + recall + eps) * 2\n\n return f1_micro.item(), precision.item(), recall.item()", "def f1_score(confusion):\n s = np.power(sensitivity(confusion), -1)\n p = np.power(precision(confusion), -1)\n return 2 / (s + p)", "def f1(self) -> float:\n return self._matrix.f1", "def calculate_f1_score(y_test, y_pred):\n print('# Running precision, recall and F1-score')\n print('# F1-Score:\\t\\t%.2f' % (f1_score(y_test, y_pred, average=\"macro\") * 100))\n print('# Precision:\\t\\t%.2f' % (precision_score(y_test, y_pred, average=\"macro\") * 100))\n print('# Recall:\\t\\t%.2f' % (recall_score(y_test, y_pred, average=\"macro\") * 100))", "def f1_class(target, prediction, params):\n f1_array = f1_score(target, prediction, labels=np.arange(params['classnum']), average=None)\n return np.round(f1_array, 4)", "def _compute_f1(self, tp: torch.Tensor, fp: torch.Tensor,\n fn: torch.Tensor) -> float:\n precision = tp / (tp + fp).clamp(min=1e-8)\n recall = tp / (tp + fn).clamp(min=1e-8)\n f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)\n return float(f1.mean())", "def get_f1_score(y_true, y_pred):\n cf_m = confusion_matrix(y_true, y_pred)\n print(\"tn:\",cf_m[0,0])\n print(\"fp:\",cf_m[0,1])\n print(\"fn:\",cf_m[1,0])\n print(\"tp:\",cf_m[1,1])\n precision = cf_m[1,1] / (cf_m[1,1] + cf_m[0,1])\n recall = cf_m[1,1] / (cf_m[1,1] + cf_m[1,0])\n f1 = 2 * (precision * recall) / (precision + recall)\n\n\n return precision, recall, f1", "def f1_score(confusion):\n sens = sensitivity(confusion)\n prec = precision(confusion)\n return 2 * sens * prec / (sens + prec)", "def report_F1(self, annotated_image_set, annotation_type='Bodies',\n m_samples=100, sample_ratio=None,\n annotation_border_ratio=None,\n channel_order=None, normalize_samples=False,\n morph_annotations=False, rotation_list=None,\n scale_list_x=None, scale_list_y=None,\n noise_level_list=None, show_figure='Off'):\n # Get m samples and labels from the AnnotatedImageSet\n samples,labels,annotations = annotated_image_set.data_sample(\n zoom_size=(self.y_res,self.x_res), annotation_type=annotation_type,\n m_samples=m_samples,\n return_annotations=False, sample_ratio=sample_ratio,\n annotation_border_ratio=annotation_border_ratio,\n normalize_samples=normalize_samples,\n morph_annotations=morph_annotations,\n rotation_list=rotation_list, scale_list_x=scale_list_x,\n scale_list_y=scale_list_y, noise_level_list=noise_level_list )\n\n # Calculate network accuracy\n result = self.sess.run( [self.network_prediction], feed_dict={\n self.x: samples, self.y_trgt: labels,\n self.fc1_keep_prob: 1.0 })\n pred = result[0]\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Calculate true/false pos/neg\n true_pos = np.sum( pred[labels[:,c]==1]==c )\n false_pos = np.sum( pred[labels[:,c]==0]==c )\n false_neg = np.sum( pred[labels[:,c]==1]!=c )\n true_neg = np.sum( pred[labels[:,c]==0]!=c )\n\n # Calculate accuracy, precision, recall, F1\n final_accuracy = (true_pos+true_neg) / len(pred)\n final_precision = true_pos / (true_pos+false_pos)\n final_recall = true_pos / (true_pos+false_neg)\n final_F1 = \\\n 2 * ((final_precision*final_recall)/(final_precision+final_recall))\n self.log('Labeled image set of size m={}, class {} :'.format(m_samples,c))\n self.log(' - # true positives = {:6.0f}'.format( true_pos ))\n self.log(' - # false positives = {:6.0f}'.format( false_pos 
))\n self.log(' - # false negatives = {:6.0f}'.format( false_neg ))\n self.log(' - # true negatives = {:6.0f}'.format( true_neg ))\n self.log(' - Accuracy = {:6.4f}'.format( final_accuracy ))\n self.log(' - Precision = {:6.4f}'.format( final_precision ))\n self.log(' - Recall = {:6.4f}'.format( final_recall ))\n self.log(' - F1-score = {:6.4f}'.format( final_F1 ))\n\n # Display figure with examples if necessary\n if show_figure.lower() == 'on':\n titles = [\"true positives\",\"false positives\",\\\n \"false negatives\",\"true negatives\"]\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n samples_mat = []\n samples_mat.append(\n samples[ np.logical_and(pred[:]==c,labels[:,c]==1), : ])\n samples_mat.append(\n samples[ np.logical_and(pred[:]==c,labels[:,c]==0), : ])\n samples_mat.append(\n samples[ np.logical_and(pred[:]!=c,labels[:,c]==1), : ])\n samples_mat.append(\n samples[ np.logical_and(pred[:]!=c,labels[:,c]==0), : ])\n\n # Handle RGB channel order\n if channel_order == None:\n chan_order = []\n for ch in range(3):\n if ch < self.n_input_channels:\n chan_order.append(ch)\n else:\n chan_order = channel_order\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n for cnt in range(4):\n grid,_,brdr = ia.image_grid_RGB( samples_mat[cnt],\n n_channels=annotated_image_set.n_channels,\n image_size=(self.y_res,self.x_res), n_x=10, n_y=10,\n channel_order=chan_order, amplitude_scaling=(1.33,1.33,1),\n line_color=1, auto_scale=True, return_borders=True )\n if self.n_input_channels > 2:\n grid[:,:,2] = 0 # only show red and green channel\n grid[brdr==1] = 1 # make borders white\n with sns.axes_style(\"white\"):\n ax1 = plt.subplot2grid( (2,2), plot_positions[cnt] )\n ax1.imshow(\n grid, interpolation='nearest', vmax=grid.max()*0.8 )\n ax1.set_title(titles[cnt]+\", class={}\".format(c))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def calf_f1(annotated_Y, predicted_Y):\n\n POSITIVE = ADR_MENTION_CLASS_LABEL\n NEGATIVE = NON_ADR_MENTION_CLASS_LABEL\n\n tp = 0\n fp = 0\n fn = 0\n tn = 0\n\n total_actual_positives = 0\n total_actual_negatives = 0\n\n for index, actual in enumerate(annotated_Y):\n predicted = predicted_Y[index]\n\n if actual == POSITIVE:\n total_actual_positives += 1\n\n if predicted == POSITIVE:\n tp += 1\n elif predicted == NEGATIVE:\n fn += 1\n\n elif actual == NEGATIVE:\n total_actual_negatives += 1\n\n if predicted == POSITIVE:\n fp += 1\n elif predicted == NEGATIVE:\n tn += 1\n\n if (tp+fp) == 0:\n precision = 0\n else:\n precision = tp/(tp+fp)\n\n if (tp+fn) == 0:\n recall = 0\n else:\n recall = tp/(tp+fn)\n\n if (precision+recall) == 0:\n f1 = 0\n else:\n f1 = 2*precision*recall/(precision+recall)\n\n # print(\"Total labels: {}, total actual positives: {}, total_actual_negatives: {}\".format(len(predicted_Y), total_actual_positives, total_actual_negatives))\n # print(\"tp: {}, tn: {}, fp: {}, fn: {}\".format(tp, tn, fp, fn))\n # print(\" Accuracy: {}\".format((tp+tn)/(len(test_Y))))\n print(\" Precision: {}\".format(precision))\n print(\" Recall: {}\".format(recall))\n print(\" F1: {}\".format(f1))", "def calculate_f1(fx, y):\n pred_idxs = fx.max(1, keepdim=True)[1]\n pred_names = [idx2target[i.item()] for i in pred_idxs]\n original_names = [idx2target[i.item()] for i in y]\n true_positive, false_positive, false_negative = 0, 0, 0\n for p, o in zip(pred_names, original_names):\n predicted_subtokens = p.split('|')\n original_subtokens = o.split('|')\n for subtok in predicted_subtokens:\n if subtok in original_subtokens:\n true_positive += 1\n else:\n 
false_positive += 1\n for subtok in original_subtokens:\n if not subtok in predicted_subtokens:\n false_negative += 1\n try:\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n f1 = 2 * precision * recall / (precision + recall)\n except ZeroDivisionError:\n precision, recall, f1 = 0, 0, 0\n return precision, recall, f1", "def get_f1_score(actual_labels, preds_labels, binary_classifcation, pos_label=\"malaria\", confusion_matrix_title=\"\"):\n # demonstration of calculating metrics for a neural network model using sklearn\n if not binary_classifcation:\n # For multiclass classification.\n accuracy = accuracy_score(actual_labels, preds_labels)\n precision = precision_score(actual_labels, preds_labels, average=\"macro\")\n recall = recall_score(actual_labels, preds_labels, average=\"macro\")\n f1 = f1_score(actual_labels, preds_labels, average=\"macro\")\n print('Accuracy: %f' % accuracy)\n print('Precision: %f' % precision)\n print('Recall: %f' % recall)\n print('F1 score: %f' % f1)\n\n else:\n accuracy = accuracy_score(actual_labels, preds_labels)\n print('Accuracy: %f' % accuracy)\n # # precision tp / (tp + fp)\n precision = precision_score(actual_labels, preds_labels, pos_label=pos_label)\n print('Precision: %f' % precision)\n # recall: tp / (tp + fn)\n recall = recall_score(actual_labels, preds_labels, pos_label=pos_label)\n print('Recall: %f' % recall)\n # f1: 2 tp / (2 tp + fp + fn)\n f1 = f1_score(actual_labels, preds_labels, pos_label=pos_label)\n print('F1 score: %f' % f1)\n # ROC AUC\n # auc = roc_auc_score(test_labels, basic_cnn_preds_labels)\n # print('ROC AUC: %f' % auc)\n\n # confusion matrix\n disp = plot_confusion_matrix(y_true=actual_labels, y_pred=preds_labels,\n display_labels=list(np.unique(actual_labels)),\n cmap=plt.cm.Blues,\n normalize=None)\n disp.ax_.set_title(confusion_matrix_title)\n plt.show()\n matrix = confusion_matrix(actual_labels, preds_labels)\n print(matrix)\n # if plot_confusion_matrix:\n # show_confusion_matrix(matrix=matrix, labels=list(np.unique(actual_labels)))", "def micro_f1(pred_cnts, gold_cnts, intr_cnts):\n numerator = sum(intr_cnts)\n micro_p = pr(numerator, sum(pred_cnts))\n micro_r = rc(numerator, sum(gold_cnts))\n micro_f = f1(micro_p, micro_r)\n\n ret_dict = {\"f1\": micro_f,\n \"p\": micro_p,\n \"r\": micro_r}\n return ret_dict", "def get_f1(self,common_len,RTSummary,SystemSummary):\n recall = self.get_recall(common_len,RTSummary)\n precision = self.get_precision(common_len,SystemSummary)\n if precision<=0 or recall <=0:\n return 0.0\n else:\n return ((2 * recall * precision)/(recall + precision))", "def f1score(prediction,ytest):\n truepos = np.sum(ytest[prediction == 1])\n predpos = np.sum(prediction)\n actpos = np.sum(ytest)\n precision = truepos/predpos\n recall = truepos/actpos\n return 2*precision*recall/(precision + recall)", "def computeF1_macro(confusion_matrix, matching, num_clusters):\r\n # Permute the matrix columns\r\n permuted_confusion_matrix = np.zeros([num_clusters, num_clusters])\r\n for cluster in range(num_clusters):\r\n matched_cluster = matching[cluster]\r\n permuted_confusion_matrix[:, cluster] = confusion_matrix[:, matched_cluster]\r\n# Compute the F1 score for every cluster\r\n F1_score = 0\r\n for cluster in range(num_clusters):\r\n TP = permuted_confusion_matrix[cluster,cluster]\r\n FP = np.sum(permuted_confusion_matrix[:,cluster]) - TP\r\n FN = np.sum(permuted_confusion_matrix[cluster,:]) - TP\r\n precision = TP/(TP + FP)\r\n recall = TP/(TP 
+ FN)\r\n f1 = stats.hmean([precision,recall])\r\n F1_score += f1\r\n F1_score /= num_clusters\r\n return F1_score" ]
[ "0.70378786", "0.69138384", "0.68874615", "0.6594724", "0.6549159", "0.65351105", "0.64717937", "0.6450765", "0.6422856", "0.641389", "0.6364266", "0.6336707", "0.6302828", "0.6294392", "0.6287796", "0.627425", "0.6270049", "0.62664056", "0.6256716", "0.62228644", "0.62122804", "0.6187433", "0.61744803", "0.6134901", "0.60924584", "0.608958", "0.6074332", "0.60616094", "0.6060171", "0.6040252" ]
0.7380835
0
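The macro variant above smooths its per-class counters in log space; since `exp(log(n + 1) - log(d + 1)) == (n + 1) / (d + 1)`, the same quantity can be written directly as a smoothed ratio. A NumPy sketch of that equivalent form, with shapes assumed as in the record:

```python
import numpy as np

def smooth_macro_f1_error(y_true, y_pred):
    """Macro-averaged smooth F1 error with the record's +1 smoothing.

    Expects ndarrays; the last axis is the class axis, and any leading
    axes are flattened, matching the K.reshape(..., (-1, s[-1])) step above.
    """
    y_true = y_true.reshape(-1, y_true.shape[-1]).astype(float)
    y_pred = y_pred.reshape(-1, y_pred.shape[-1]).astype(float)
    y_neg = 1.0 - y_true
    tp = np.sum((1.0 - y_pred) * y_true, axis=0)   # soft true positives per class
    fn = np.sum(y_pred * y_true, axis=0)           # soft false negatives per class
    fp = np.sum((1.0 - y_pred) * y_neg, axis=0)    # soft false positives per class
    # the +1 terms keep each per-class ratio finite even for empty classes
    smooth_f1 = (2.0 * tp + 1.0) / (fp + fn + 2.0 * tp + 1.0)
    return 1.0 - np.mean(smooth_f1)
```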
match is an abstract method which must be overwritten by all inheriting classes. This is run prior to applying a modifier, to ensure that it's being applied to the correct object. Match must return something truthy or falsy.
def match(self) -> bool:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleMatch(self, m):\r\n pass", "def match(self) -> \"MatchResult\":\n raise NotImplementedError", "def match(self, ctx):\n pass", "def matches(self):\n pass", "def null_match():\n return Self._.match(\n lambda e=Example: e\n )", "def matches(self):\n return False", "def match(self, *args): \n if self.fall or not args: \n return True \n elif self.value in args: # changed for v1.5, see below \n self.fall = True \n return True \n else: \n return False \n\n pass", "def test(self, parent, block):\n\n self.match = self.pattern.match(block) if self.pattern is not None else None\n return self.match is not None", "def has_explicit_match(self) -> bool:\n if self.matching is not None:\n return True\n else:\n return False", "def matches(self, accession):\n pass", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5\n self.fall = True\n return True\n else:\n return False", "def do_match(self, context):\n\t\treturn self.extract(context) is not None", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def interpret(self, match):\n raise NotImplementedError()", "def matches(self, request_info: RequestInfo):\n if self.url_pattern.search(request_info.request.url) is None:\n return MatchResult.NO_MATCH\n\n if len(self.modifiers) == 0:\n return MatchResult.MATCH\n\n domain_modifier = self.modifiers.get(\"domain\")\n if domain_modifier is not None:\n domain_modifier_result = domain_modifier.matches(request_info)\n if domain_modifier_result == MatchResult.NO_MATCH:\n return MatchResult.NO_MATCH\n\n third_party_modifier = self.modifiers.get(\"3p\")\n if third_party_modifier is not None:\n third_party_modifier_result = third_party_modifier.matches(request_info)\n if third_party_modifier_result == MatchResult.NO_MATCH:\n return MatchResult.NO_MATCH\n\n first_party_modifier = self.modifiers.get(\"1p\")\n if first_party_modifier is not None:\n first_party_modifier_result = first_party_modifier.matches(request_info)\n if first_party_modifier_result == MatchResult.NO_MATCH:\n return MatchResult.NO_MATCH\n\n modifier_match_results = [\n modifier.matches(request_info)\n for name, modifier in self.modifiers.items()\n if name not in [\"domain\", \"1p\", \"3p\"]\n ]\n\n if len(modifier_match_results) == 0:\n return MatchResult.MATCH\n\n has_override = any(result == MatchResult.OVERRIDE for result in modifier_match_results)\n if has_override:\n return MatchResult.OVERRIDE\n\n active_match_results = [result for result in modifier_match_results if result != MatchResult.IGNORE]\n if len(active_match_results) == 0:\n return MatchResult.NO_MATCH\n\n return MatchResult.from_bool(any(result == MatchResult.MATCH for result in active_match_results))", "def __matches__(component, dispatch_key):\n # Override in subclasses.\n return False", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def match(self, *args):\n if self.fall or not args:\n return 
True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def matches(self, target):\n raise NotImplementedError()", "def match(self, *args):\n if self.fall or not args:\n return True\n if self.value in args:\n self.fall = True\n return True\n return False", "def match(self, *args):\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False", "def run(self):\n # If the change type doesn't match, do nothing.\n if not self.regex.match(self.chgtype): return 0\n\n # Perform the child actions.\n return super(FilterChgType, self).run()", "def match(self, cls):\n return isinstance(self, cls)", "def run(self):\n # If the capabilities don't match, do nothing.\n if not self.regex.search(self.capabilities): return 0\n\n # Perform the child actions.\n return super(FilterCapabilities, self).run()", "def match(self, other):", "def on_match_start(self, *args, **kwargs):\n self._match = list()", "def process_match_result(self, match):\n raise NotImplementedError()" ]
[ "0.6360904", "0.6255357", "0.6133291", "0.5979016", "0.58295554", "0.5766493", "0.57539165", "0.5730551", "0.56727016", "0.5654043", "0.565271", "0.563074", "0.5622233", "0.5607786", "0.5598791", "0.55559194", "0.55452245", "0.55452245", "0.55452245", "0.55452245", "0.55452245", "0.5539223", "0.55325735", "0.55270517", "0.5523136", "0.5512395", "0.55053556", "0.54729986", "0.5462782", "0.54581785" ]
0.67145634
0
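This record's document is only the abstract signature; the contract lives in the query text. A minimal sketch of how that contract is typically realised — the `Modifier`/`NameMatchModifier` class names, the `apply`/`_modify` helpers, and the `name` attribute are all invented for illustration:

```python
from abc import ABC, abstractmethod

class Modifier(ABC):
    """Hypothetical base class; subclasses decide what they apply to."""

    def __init__(self, obj):
        self.obj = obj

    @abstractmethod
    def match(self) -> bool:
        """Return something truthy iff self.obj is a valid target."""

    def apply(self):
        # match() is consulted before any modification, per the contract
        if not self.match():
            raise ValueError("modifier does not match this object")
        self._modify()

    def _modify(self):
        raise NotImplementedError

class NameMatchModifier(Modifier):
    expected_name = "widget"   # illustrative value

    def match(self) -> bool:
        return getattr(self.obj, "name", None) == self.expected_name

    def _modify(self):
        self.obj.name = self.obj.name.upper()
```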
Override job success method to publish job status.
def handle_job_success(self, job): super().handle_job_success(job) self._handle_job_status(job, "finished")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mark(self, job, status='succeeded'):\n pass", "def put_job_success(job, message):\n print('Putting job success')\n print(message)\n code_pipeline.put_job_success_result(jobId=job)", "def _handle_success(self, result_ttl: int, pipeline: 'Pipeline'):\n # self.log.debug('Setting job %s status to finished', job.id)\n self.set_status(JobStatus.FINISHED, pipeline=pipeline)\n # Result should be saved in job hash only if server\n # doesn't support Redis streams\n include_result = not self.supports_redis_streams\n # Don't clobber user's meta dictionary!\n self.save(pipeline=pipeline, include_meta=False, include_result=include_result)\n # Result creation should eventually be moved to job.save() after support\n # for Redis < 5.0 is dropped. job.save(include_result=...) is used to test\n # for backward compatibility\n if self.supports_redis_streams:\n from .results import Result\n\n Result.create(self, Result.Type.SUCCESSFUL, return_value=self._result, ttl=result_ttl, pipeline=pipeline)\n\n if result_ttl != 0:\n finished_job_registry = self.finished_job_registry\n finished_job_registry.add(self, result_ttl, pipeline)", "def indicate_success(self):\n pass", "def success(self, success):\n self._success = success", "def success(self):\n self.succeeded = True", "def success(self, message, *args, **kwargs):\n self.counters[\"success\"] += 1\n self._write(message.format(*args, **kwargs), SUCCESS)", "def success(self, result):\r\n raise NotImplementedError", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def job_completed(self,event):\n if event.exception:\n logger.worker.warning('The job crashed :(')\n else:\n logger.worker.warning(self.task_id+'The job finished ')\n # set job complete to true, will display complete in web interface \n self.job_complete_status[self.task_id] = True", "def OnSuccess(self):\n pass", "def on_success(self):\n pass", "def _job_was_successful(self, status):\n success = True\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event\n for event in status[\"metadata\"][\"events\"]:\n\n logger.debug(event[\"description\"])\n\n # Does it always result in fail for other failure reasons?\n if \"failed\" in event:\n success = False\n action = event.get(\"failed\")\n logger.debug(\"{}: {}\".format(action[\"code\"], action[\"cause\"]))\n\n elif \"unexpectedExitStatus\" in event:\n action = event.get(\"unexpectedExitStatus\")\n\n if action[\"exitStatus\"] != 0:\n success = False\n\n # Provide reason for the failure (desc includes exit code)\n msg = \"%s\" % event[\"description\"]\n if \"stderr\" in action:\n msg += \": %s\" % action[\"stderr\"]\n logger.debug(msg)\n\n return success", "def test_successful_job(self):\n\n successful_job = json.loads(TREEHERDER_JOB % (\"success\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def success_message_addon(self, queue, result):\n updated_issues_count, delay = result\n return ' [updated=%d]' % updated_issues_count", "def on_success(self, task_progress, task_id, args, kwargs):\r\n TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)\r\n # We should be able to find the InstructorTask object to update\r\n # based on the task_id here, without having to dig into the\r\n 
# original args to the task. On the other hand, the entry_id\r\n # is the first value passed to all such args, so we'll use that.\r\n # And we assume that it exists, else we would already have had a failure.\r\n entry_id = args[0]\r\n entry = InstructorTask.objects.get(pk=entry_id)\r\n # Check to see if any subtasks had been defined as part of this task.\r\n # If not, then we know that we're done. (If so, let the subtasks\r\n # handle updating task_state themselves.)\r\n if len(entry.subtasks) == 0:\r\n entry.task_output = InstructorTask.create_output_for_success(task_progress)\r\n entry.task_state = SUCCESS\r\n entry.save_now()", "def codepipeline_success(job_id):\n try:\n codepipeline = boto3.client('codepipeline')\n codepipeline.put_job_success_result(jobId=job_id)\n LOGGER.info('===SUCCESS===')\n return True\n except ClientError as err:\n LOGGER.error(\"Failed to PutJobSuccessResult for CodePipeline!\\n%s\", err)\n return False", "def job_done(self, success):\n run_usage = self._attempt.get_usage()\n self._usage.append(run_usage)\n\n log.debug(\"job_done job_id=%s success=%s (last attempt %s\", self.job_id, success, self._attempt_ids[-1])\n self._attempt = None", "def jobSuccess(self, jobReport):\n\n\n\n jobName = None\n try:\n\n #// Invoke job report handler with jobReport location and flag to enable/disable merge job report handling\n\n handler = ReportHandler(jobReport, int(self.args['MaxInputAccessFailures']), enableMergeHandling=self.enabled)\n jobName = handler()\n logging.info('this is jobname'+ str(jobName))\n except Exception, ex:\n msg = \"Failed to handle job report from job:\\n\"\n msg += \"%s\\n\" % jobReport\n msg += str(ex)\n msg += \"\\n\"\n msg += traceback.format_exc()\n logging.error(msg)\n\n #// Failed to read job report\n if jobName is None:\n return\n\n # files can be cleaned up now\n logging.info(\"trigger cleanup for: %s\" % jobName)\n\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing success event\")\n\n\n\n\n return #// END jobSuccess", "def success(self):\n return self._success", "def job_status(self, job_status):\n\n self._job_status = job_status", "def markSuccess(self, *args):\n self.add(True)", "def success(self, message=''):\n print(colored(message, 'green'))", "def jobComplete(self):\n self._Finished = True\n return", "def on_success(self) -> None:", "def submit(self, data):\n self.update_current_data(data)\n self.job_status = \"COMPLETED\"\n return None", "def store(self, job, result):\n pass", "def __set_job_status(self, job: Job):\n\n self.redis_client.set(f'jobstatus:{job.id}:{str(job.status)}', f'job:{job.id}')", "def mark_success(self):\n LOGGER.debug('Marking current_state as: %s', self.States.SUCCEEDED)\n self.current_state = self.States.SUCCEEDED", "def save(self, *args, **kwargs):\n result = super().save(*args, **kwargs)\n self.send_status_update()\n return result" ]
[ "0.7619745", "0.7176098", "0.7133502", "0.69943136", "0.69115824", "0.6872546", "0.6774765", "0.66946805", "0.66273", "0.6592162", "0.6588424", "0.6554816", "0.6483753", "0.6463557", "0.6463099", "0.6450274", "0.64069796", "0.63930815", "0.63866657", "0.6320475", "0.63036287", "0.62860143", "0.62799287", "0.6272197", "0.6271282", "0.62313145", "0.6221477", "0.6200489", "0.6183567", "0.6177429" ]
0.80101943
0
Override job error method to publish job status.
def handle_job_error(self, job):
    super().handle_job_error(job)
    self._handle_job_status(job, "failed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_job_error(\n self,\n scheduler: plugin_jobs.Scheduler,\n job: tools_jobs.Job,\n exc: BaseException,\n ):\n self.error(exception=exc)", "def mark_error(self):\r\n self.status = ERROR", "def error(self):\n ...", "def handle_api_error(self, err, job_name):\n print(err)\n print('Exiting script execution.')\n self.jobs_collection.update_one({'job_name': job_name},\n {'$currentDate': {'updated': True},\n '$set': {'status': 'ERROR'}})\n exit(1)", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def _message_failed_job(self):\n self.ensure_one()\n return _(\"Something bad happened during the execution of the job. \"\n \"More details in the 'Exception Information' section.\")", "def error(self):\n pass", "def error(self, error):\n pass", "def error(self, *args, **kwargs):", "def indicate_error(self):\n pass", "def handle_job_exception(job):\n exception_sink = StringIO()\n exception_sink_ref = log.add(exception_sink)\n log.exception(f'Error running job '\n f'{box2json(job)}')\n job.worker_error = exception_sink.getvalue()\n log.remove(exception_sink_ref)\n exception_sink.close()\n # TODO: Some form of retry if it's a network or other\n # transient error", "def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)", "def get_error(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetError', self.handle))", "def put_job_failure(job, message):\n print('Putting job failure')\n print(message)\n code_pipeline.put_job_failure_result(jobId=job, failureDetails={'message': message, 'type': 'JobFailed'})", "def mark(self, job, status='succeeded'):\n pass", "def jobFail(job):\n\tif 'a' in job.proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying job fails')\n\t\tEMAIL.send('job', job, 'abort')", "def job_completed(self,event):\n if event.exception:\n logger.worker.warning('The job crashed :(')\n else:\n logger.worker.warning(self.task_id+'The job finished ')\n # set job complete to true, will display complete in web interface \n self.job_complete_status[self.task_id] = True", "def on_failure(self):\n pass", "def error(self):\n return self._decorator_wrapper(EventName.error)", "def error(self, handler):\n pass", "def job_step_error(self, job_request_payload, message):\n payload = JobStepErrorPayload(job_request_payload, message)\n self.send(job_request_payload.error_command, payload)", "def job_status(self, job_status):\n\n self._job_status = job_status", "def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)", "def error(self, code, msg):\r\n self.status = code\r\n self.status_message = str(msg)", "def _validate_error(cls, item):\n if item.error and item.status_code not in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error: %s for job is not empty but '\n 'job status is %s' % (item.id, item.error, item.status_code))\n\n if not item.error and item.status_code in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error for job is empty but '\n 'job status is %s' % (item.id, item.status_code))", "def task_failed(self, worker_name, error):\n self.status = 'failed'\n self.modification_time = current_millis()\n self.message = '{} (worker): {}'.format(worker_name, error)\n return self", "def error(self, msg, *args, **kwargs):\n pass", "def write_error(self, status_code, **kwargs):\n 
self.finish(\"Error %d - %s\" % (status_code, kwargs['message']))", "def error(self, message):\n return self.log(\"ERROR\", message)", "def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)" ]
[ "0.7221201", "0.66594404", "0.6498647", "0.6491245", "0.64659506", "0.635241", "0.6345156", "0.6336844", "0.6297702", "0.62516487", "0.6205033", "0.6202307", "0.6143262", "0.61112696", "0.60876584", "0.6080668", "0.60524887", "0.6030096", "0.60265785", "0.6003622", "0.6003541", "0.6002835", "0.5983537", "0.5971724", "0.5966028", "0.5958336", "0.5949478", "0.59458524", "0.5901036", "0.5898713" ]
0.7954076
0
Function that plots the football field for viewing plays.
def create_football_field(figsize=(12*2, 6.33*2), goals=True):
    #pitch outline & centre line
    pitch = patches.Rectangle((-52.5, -35), 105, 70, linewidth=2, capstyle='round', edgecolor='w', facecolor='darkgreen')
    fig, ax = plt.subplots(1, figsize=figsize)
    fig.patch.set_facecolor('green')
    fig.patch.set_alpha(0.7)
    ## goals
    if goals:
        plt.plot([-52.5, -55, -55, -52.5], [-5, -5, 5, 5], c='w', linewidth=2)
        plt.plot([52.5, 55, 55, 52.5], [-5, -5, 5, 5], c='w', linewidth=2)
    ## middle line
    midline = patches.ConnectionPatch([0, -35], [0, 35], "data", "data", color='white')
    #center circle
    centreCircle = plt.Circle((0, 0), 10, color="white", fill=False, linewidth=2)
    centreSpot = plt.Circle((0, 0), 0.3, color="white", linewidth=2)
    #left, right penalty area
    leftPenalty = patches.Rectangle([-52.5, -15], width=14.5, height=30, fill=False, color='white', linewidth=2)
    rightPenalty = patches.Rectangle([38.0, -15], width=14.5, height=30, fill=False, color='white', linewidth=2)
    #left, right 6-yard box
    leftSixYard = patches.Rectangle([-52.5, -8], width=4.5, height=16, fill=False, color='white', linewidth=2)
    rightSixYard = patches.Rectangle([48, -8], width=4.5, height=16, fill=False, color='white', linewidth=2)
    #penalty spots
    leftPenSpot = plt.Circle((-43.5, 0), 0.3, color="white", linewidth=2)
    rightPenSpot = plt.Circle((43.5, 0), 0.3, color="white", linewidth=2)
    element = [pitch, midline, centreCircle, centreSpot, leftPenalty, rightPenalty, leftSixYard, rightSixYard, rightPenSpot, leftPenSpot]
    for i in element:
        ax.add_patch(i)
    plt.xlim(-56, 56)
    plt.ylim(-37, 37)
    plt.axis('off')
    return fig, ax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(self):\n\t\tself.plotOfSpect()", "def plots(x_bef,y_bef,z_bef):\r\n # Makes a 3-D plot of the x, y and z axes representing the ball's total trajectory\r\n plt.figure(3)\r\n plot3 = plt.axes(projection=\"3d\")\r\n plot3.plot3D(x_bef,y_bef,z_bef,'blue')\r\n plot3.set_xlabel('x (ft)')\r\n plot3.set_ylabel('y (ft)')\r\n plot3.set_zlabel('z (ft)')\r\n plot3.set_title('Total Trajectory')\r\n \r\n # Makes a 2-D plot of the x, and z axes representing the ball's total 2-D trajectory\r\n plt.figure(4)\r\n plt.plot(x_bef,z_bef)\r\n plt.xlabel('x (ft)')\r\n plt.ylabel('z (ft)')\r\n plt.title('z (ft) vs x (ft)')\r\n plt.show()", "def display_game(self):\n display = plt.figure()\n\n # Plots dots.\n for dot in self.dots:\n plt.scatter(dot.x + .5, dot.y + .5, color=dot.color, s=1000)\n\n # Makes a uniform grid,\n axes = display.gca()\n axes.set_aspect('equal', adjustable='box')\n axes.set_xticks(np.arange(0, self.dimension + 1, 1))\n axes.set_yticks(np.arange(0, self.dimension + 1, 1))\n plt.grid(True, color=\"black\", linestyle=\"-\")\n axes.set_xticklabels([])\n axes.set_yticklabels([])\n for tic in axes.xaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n for tic in axes.yaxis.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_victories_tournament(team, tour, spec=None, name=None):\n df = matches.copy()\n\n # These maps aren't played recently, matches on them aren't reflective of\n # current performance\n df = df[(df[\"map\"] != \"Cobblestone\") & (df[\"map\"] != \"Cache\")]\n\n # Ensure that we only look the specified team\n isTeam = (df[\"team1\"] == team) | (df[\"team2\"] == team)\n\n # Boolean series for whether or not the match was a win for the team\n winner = df[\"winner\"] == team\n loser = df[\"winner\"] != team\n\n # If spec is defined, we use spec as the \"away\" group\n if spec is None:\n at = df[\"tournament\"].isin(tour)\n away = ~df[\"tournament\"].isin(tour)\n else:\n at = df[\"tournament\"].isin(tour)\n away = df[\"tournament\"].isin(spec)\n\n # Mask and groupby to get counts of won and lost matches at home\n won_home = df[at & isTeam & winner]\n loss_home = df[at & isTeam & loser]\n won_home = (won_home.groupby(\"map\")['winner'].count())\n loss_home = (loss_home.groupby(\"map\")['winner'].count())\n\n # Mask and groupby to get counts of won and lost matches away from home\n won_away = df[away & isTeam & winner]\n loss_away = df[away & isTeam & loser]\n won_away = (won_away.groupby(\"map\")['winner'].count())\n loss_away = (loss_away.groupby(\"map\")['winner'].count())\n\n # Create subplots for plotting\n fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(2, figsize=(20, 12), ncols=2)\n if spec is None:\n ax1.set_title(team + \": At Home\")\n ax2.set_title(team + \": Away From\")\n ax3.set_title(team + \": Winrate At Home\")\n ax4.set_title(team + \": Winrate Away From\")\n else:\n ax1.set_title(team + \": At \" + name)\n ax2.set_title(team + \": Away From \" + name)\n ax3.set_title(team + \": Winrate At \" + name)\n ax4.set_title(team + \": Winrate After \" + name)\n\n # Plot win/loss per map at home\n total_home = pd.concat([won_home, loss_home], axis=1, sort=True)\n total_home.columns = [\"won\", \"lost\"]\n total_home.plot.bar(stacked=True, ax=ax1)\n\n # Plot winrate per map at home\n total_home = total_home.fillna(0)\n total_home[\"winrate\"] = total_home[\"won\"] / \\\n (total_home[\"won\"] + total_home[\"lost\"])\n total_home = 
total_home[\"winrate\"]\n total_home.plot.bar(ax=ax3, ylim=(0, 1), color=\"#43bc43\")\n\n # Plot win/loss away from home\n total_away = pd.concat([won_away, loss_away], axis=1, sort=True)\n total_away.columns = [\"won\", \"lost\"]\n total_away.plot.bar(stacked=True, ax=ax2)\n\n # Plot winrate away from home\n total_away = total_away.fillna(0)\n total_away[\"winrate\"] = total_away[\"won\"] / \\\n (total_away[\"won\"] + total_away[\"lost\"])\n total_away = total_away[\"winrate\"]\n total_away.plot.bar(ax=ax4, ylim=(0, 1), color=\"#43bc43\")\n\n plt.subplots_adjust(hspace=0.5)\n fig.savefig(\"tournament_winrates/\" + team + \".png\")", "def plot(self):\n pass", "def render(self, mode='human', action = None, num_col = 1, save_video = False):\n xmin = min(min(self.start_xpoints), min(self.goal_xpoints)) - 10.0\n xmax = max(max(self.start_xpoints), max(self.goal_xpoints)) + 10.0\n ymin = min(min(self.start_ypoints), min(self.goal_ypoints)) - 10.0\n ymax = max(max(self.start_ypoints), max(self.goal_ypoints)) + 10.0\n\n if self.fig is None:\n if not save_video:\n plt.ion()\n fig = plt.figure(figsize = (5*num_col, 5))\n def handle_close(evt):\n exit()\n\n fig.canvas.mpl_connect('close_event', handle_close)\n if not save_video:\n plt.show()\n\n ax = fig.add_subplot(1, num_col, 1)\n\n colors = self.task.robot_colors()# cm.rainbow(np.linspace(0, 1, len(self.x[:, 0])))\n scatter1 = ax.scatter(self.x[:, 0], self.x[:, 1], c=colors)\n scatter2 = ax.scatter(self.goal_xpoints, self.goal_ypoints, c='k', marker=\"x\")\n\n plt.title('%d Robots Formation'%len(self.x))\n #plt.gca().legend(('Robots'))\n\n self.task.plot()\n\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n a = gca()\n a.set_xticklabels(a.get_xticks(), font)\n a.set_yticklabels(a.get_yticks(), font)\n self.fig = fig\n self.scatter1 = scatter1\n self.scatter2 = scatter2\n\n X = self.x[:, 0]\n Y = self.x[:, 1]\n\n self.scatter1.set_offsets(np.c_[X, Y])\n\n ax = self.fig.add_subplot(1, num_col, 1)\n for arrow in self.arrows:\n ax.patches.remove(arrow) \n\n self.arrows = []\n if action != None:\n _, max_per_agent = torch.max(action, dim = 1)\n #print(max_per_agent)\n print(set(max_per_agent.data.cpu().numpy()))\n \n for i in range(self.n_agents):\n x = self.x[i, 0]\n y = self.x[i, 1]\n goal = self.goals[ max_per_agent[i]]\n dx = goal[0] - x\n dy = goal[1] - y\n arrow = plt.Arrow(x, y, dx, dy )\n self.arrows.append(arrow)\n ax.add_patch(arrow)\n\n self.fig.canvas.draw()\n if not save_video:\n self.fig.canvas.flush_events()\n if action != None:\n plt.pause(0.01)\n else:\n plt.pause(0.01)\n\n return self.fig, self.scatter1", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def _plot_camera_view(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-0.003, 0.003, -0.003, 0.003])\n axs.grid()\n axs.plot([0], [0], 'r+')\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 250):\n axs.plot(\n self._feat_vec[t_step, 0, 0],\n self._feat_vec[t_step, 1, 0], 'ro')\n axs.plot(\n self._feat_vec[t_step, 0, 1],\n self._feat_vec[t_step, 1, 1], 'bo')\n axs.plot(\n self._feat_vec[t_step, 0, 2],\n self._feat_vec[t_step, 1, 2], 'yo')\n axs.plot(\n self._feat_vec[t_step, 0, 3],\n self._feat_vec[t_step, 1, 3], 'go')\n axs.plot(\n self._feat_vec[t_step, 0, 4],\n self._feat_vec[t_step, 1, 4], 'ro')\n plt.pause(1 / self._plot_fps)", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = 
_plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def display(self):\n scatter_plot(self.points, self.hull_points, self.color, self.title)", "def plotMatchups(fantasyTeams, week, show, save):\n\tpos = ['qb','wr','rb','te','flx','k','def']\n\tmatchups, matchupsNicknames, teamsPoints = makeMatchupData(fantasyTeams, week)\n\t#print(matchups, teamsPoints)\n\tfor i in range(0,len(matchups),2):\n\t\tpoints1 = teamsPoints[i]\n\t\tpoints2 = teamsPoints[i+1]\n\t\tpos.append('')\n\t\tpoints1.append(points1[0]) # to make line connect to beginning\n\t\tpoints2.append(points2[0])\n\t\t#print(points1, points2)\n\t\tax = plt.subplot(polar=True)\n\t\tangles = [(2*pi)*(n/7) for n in range(8)] # size of angle slices\n\t\tplt.xticks(angles, pos)\n\t\tax.set_theta_offset(pi/2) # puts the first index on top\n\t\tax.set_theta_direction(-1) # set radial direction\n\t\tax.set_rlabel_position(0)\n\t\tplt.yticks([-20,0,20,40,60,80,100], [\"-20\",\"0\",\"20\",\"40\",\"60\",\"80\",\"100\"], color=\"grey\", size=10)\n\t\tplt.ylim(-20,100)\n\n\t\tax.plot(angles,points1,'b',linewidth=.75,label=str(matchupsNicknames[i])+': '\n\t\t\t\t\t+str(fantasyTeams[matchups[i]].points[week])+' pts')\n\t\tax.fill(angles,points1,'b',alpha=.25)\n\t\tax.plot(angles,points2,'r',linewidth=.75,label=str(matchupsNicknames[i+1])+': '\n\t\t\t\t\t+str(fantasyTeams[matchups[i+1]].points[week])+' pts')\n\t\tax.fill(angles,points2,'r',alpha=.25)\n\t\tax.legend(loc=(-0.15,0.9))\n\t\tax.set_title(matchupsNicknames[i]+' VS '+matchupsNicknames[i+1], fontsize=20, fontweight='bold', position=(.5,1))\n\n\t\tif fantasyTeams[matchups[i]].points[week]>fantasyTeams[matchups[i+1]].points[week]:\n\t\t\tplayer = fantasyTeams[matchups[i]].roster[week][fantasyTeams[matchups[i]].mvp[week]]\n\t\telse:\n\t\t\tplayer = fantasyTeams[matchups[i+1]].roster[week][fantasyTeams[matchups[i+1]].mvp[week]]\n\t\tstatsString = ''\n\t\tfor stat in player.statsFormatted[week]:\n\t\t\tstatsString += str(stat['statValue'])+' '+stat['stat']+': '+str(round(stat['statPoints'],2))+' pts\\n'\n\t\ttext = 'MVP: '+player.name+', '+player.position+'\\n'+statsString+'TOTAL: '+str(round(player.points[week],2))+' pts'\n\t\t#print(text)\n\t\tplt.text(1.3*pi,220,text,bbox={'facecolor': 'yellow', 'alpha': 0.5, 'pad': 5}) # for polar chart x=angle y=radius \n\n\t\tif save:\n\t\t\tplt.savefig('C:/Users/NeilS/Desktop/FantasyBoyzUSA/plots/plotMatchups/plotMatchupsWeek'+str(week)+'_'+str(int(i/2+1))+'.png')\n\t\tif show:\n\t\t\tplt.show()\n\t\tplt.close()", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n 
plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def hotspot_fields_plot(self, results_dict, tas_bound=None, pr_bound=None):\n sorted_keys = [(f\"{period}_{season}_{variable}\"\n f\"_{project}_{results_dict['scenario']}\")\n for variable in self.variables\n for period in self.cfg[\"future_periods\"]\n for project in self.projects for season in self.seasons]\n sorted_keys = [\n sorted_keys[:len(sorted_keys) // 2],\n sorted_keys[len(sorted_keys) // 2:]\n ]\n ancestor_files_var = [[\n ancestor_file for ancestor_file in results_dict[\"ancestors\"]\n if f\"/{var}_\" in ancestor_file\n ] for var in self.variables]\n for ancestor_files, keys, variable in zip(ancestor_files_var,\n sorted_keys, self.variables):\n fig = plt.figure(figsize=(14.4, 3.4),\n constrained_layout=True,\n dpi=300)\n plt.gcf().subplots_adjust()\n # bound colorbar to abs(max) value on the map\n style = self.cb_bounds(variable, results_dict, keys,\n [tas_bound, pr_bound])\n # plot each panel\n fill, frame = self._hotspot_fields_plot_panels(\n results_dict, fig, keys, style)\n # plot figtexts\n self._hotspot_fields_plot_figtexts(results_dict['scenario'], frame)\n # plot line\n self._hotspot_fields_plot_line(fig, frame)\n # plot colorbar\n cbar = plt.colorbar(fill,\n plt.gcf().add_axes([0.25, 0.125, 0.5, 0.04]),\n orientation=\"horizontal\",\n extend=\"both\")\n if variable == \"pr\":\n cbar.set_label(\"%\")\n against_region = (\n f\"{self.cfg['region'][2]}$^o$ N-\"\n f\"{self.cfg['region'][3]}$^o$ N latitudinal belt\")\n else:\n cbar.set_label(\n self.formatter(str(results_dict[keys[-1]].units)))\n against_region = \"global\"\n\n # plot title and save\n self._hotspot_fields_plot_save(against_region, variable,\n results_dict['scenario'],\n ancestor_files)", "def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float],\n ):\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def grid_40(player):\n plot = {\"Walls\": [\"N\", \"E\"], \"TARDIS\": True, \"Transmat\": False,\n \"Plot\": return_tardis() + f\"\\n\\nThat blue police box has never looked so \"\n f\"beautiful. {player['Name'][0]} ran as fast as \"\n f\"\\nthey could into the TARDIS. 
Hopefully the DOCTOR is already in there.\\n\"}\n return plot", "def plotTeamsPoints(fantasyTeams, weekStart, weekEnd, show, save):\n\t\"\"\"\n\tprojectedWins = makeProjectedWins(fantasyTeams, weekStart, weekEnd)\n\tranksList = []\n\trank = 1\n\twhile len(ranksList) < len(fantasyTeams):\n\t\tmaxWins = -1\n\t\tfor team in projectedWins:\n\t\t\tif projectedWins[team] > maxWins and team not in ranksList:\n\t\t\t\tmaxWins = projectedWins[team]\n\t\t\t\tteamToRank = team\n\t\tranksList.append(teamToRank)\n\t\"\"\"\n\tweeks = [n for n in range(weekStart, weekEnd+1)]\n\tweeksShift = [n+.25 for n in weeks]\n\tfig, axs = plt.subplots(len(fantasyTeams), 1, sharex=False, sharey=True) # figure with numTeams subplots, same x and y axes for all.\n\tfig.suptitle('Projected and Actual Scoring', fontweight='bold')\n\tfig.subplots_adjust(hspace = 1) # set horizontal space between axes\n\tfig.set_size_inches(5,10) # set size of figure\n\tyticks = [50,150]\n\tplt.yticks(yticks) # set labels for y axis\n\tplt.xlabel('week')\n\tplt.ylabel('Projected points for(blue) and against(yellow). Actual points for (green) and against(red)',position=(0,8))\n\tplotNum = 0\n\tfor team in fantasyTeams:\n\t\tprojectedPointsList = []\n\t\tprojectedPointsAgainstList = []\n\t\tpointsList = []\n\t\tpointsAgainstList = []\n\t\tfor i in range(weekStart, weekEnd+1):\n\t\t\tprojectedPointsList.append(fantasyTeams[team].projectedPoints[i])\n\t\t\tprojectedPointsAgainstList.append(fantasyTeams[team].projectedPointsAgainst[i])\n\t\t\tpointsList.append(fantasyTeams[team].points[i])\n\t\t\tpointsAgainstList.append(fantasyTeams[team].pointsAgainst[i])\n\t\taxs[plotNum].plot(weeks, projectedPointsAgainstList, 'y*')\n\t\taxs[plotNum].plot(weeks, projectedPointsList, 'b*')\n\t\taxs[plotNum].plot(weeksShift, pointsAgainstList, 'r.')\n\t\taxs[plotNum].plot(weeksShift, pointsList, 'g.')\n\t\taxs[plotNum].set_xticks(weeks)\n\t\t\"\"\"\n\t\tspacer1 = ' '*(30-len(team))\n\t\taxisTitle = team+spacer1+'Projected Wins: '+str(projectedWins[team])\n\t\tspacer2 = ' '*(50-len(axisTitle))\n\t\taxisTitle = fantasyTeams[team].teamName\n\t\t\"\"\"\n\t\taxs[plotNum].set_title(fantasyTeams[team].nickname)\n\t\tplotNum += 1\n\n\tif save:\n\t\tplt.savefig('C:/Users/NeilS/Desktop/FantasyBoyzUSA/plots/plotLeague/plotTeamsPoints.png')\n\tif show:\n\t\tplt.show()\n\tplt.close()", "def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()", "def BeamStat_plot(item, n):\n file_dir = diagnostics_dir +'/cubestats-'+ field \n basename = '/cubeStats-image.restored.' + imagebase + field \n\n # use different basename for the Milky Way range\n if not glob.glob(file_dir + basename +'*.txt'):\n basename = '/cubeStats-image.restored.' + imagebase + 'MilkyWay.' + field\n \n params = {'axes.labelsize': 10,\n 'axes.titlesize':10,\n 'font.size':10}\n\n pylab.rcParams.update(params)\n\n if item == 'MADMFD':\n vmin = 0.0 # vmax = 3.0 is a conservative cut off based on M83 field. 
\n vmax = 3.0\n title = 'MAD Max Flux Density'\n plot_name = 'beamStat_MADMFD.png'\n saved_fig = fig_dir+'/'+plot_name\n \n\n if item == 'Avg_RMS': # this is not used \n vmin = 2.0\n vmax = 4.0\n title = 'Mean RMS'\n plot_name = 'beamStat_AvgRMS.png'\n saved_fig = fig_dir+'/'+plot_name\n \n beamXPOS, beamYPOS = BeamPosition()\n \n for i in range(36):\n bnum = n[i]\n infile = file_dir + basename +'.beam%02d.contsub.txt'%(bnum)\n if os.path.isfile(infile):\n if item == 'MADMFD': \n beamstat = cal_beam_MADMFD(infile)\n if item == 'Avg_RMS':\n beamstat = cal_beam_AvgRMS(infile)\n\n plt.scatter([beamXPOS[i]], [beamYPOS[i]], s=1300, c=[beamstat], cmap='RdYlGn_r', edgecolors='black', vmin=vmin, vmax=vmax)\n plt.text(beamXPOS[i], beamYPOS[i], n[i], va='center', ha='center')\n\n plt.xlim(0,0.7)\n plt.ylim(0,1.4)\n plt.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n plt.title(title)\n cb = plt.colorbar()\n cb.set_label('mJy / beam')\n plt.savefig(saved_fig)\n plt.close()\n\n return saved_fig, plot_name", "def grid_13(player):\n plot = {\"Walls\": [\"E\"], \"TARDIS\": False, \"Transmat\": False,\n \"Plot\": f\"\\nFeeling their way through the dim corridor, {player['Name'][0]} \"\n f\"finds a secret door to the WEST. \\nThe corridor continues to the NORTH, \"\n f\"and there is a door to the SOUTH.\\n\"}\n return plot", "def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()", "def show_grid(frame, episode_nr):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.imshow(frame, cmap='binary')\n ax.set_title(\"Episode {}\".format(episode_nr))\n plt.pause(0.01)\n plt.clf()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def show():\n setup()\n plt.show()", "def scatter_plot_players(dict_of_players):\n seasons = [x[2:4] for x in dict_of_players.keys()]\n plt.plot(seasons, dict_of_players.values())\n plt.show()", "def plotWinsDistribution(fantasyTeams, weekStart, weekEnd, show, save):\n\tfig, axs = plt.subplots(len(fantasyTeams), 1, sharex=False, sharey=True) # figure with numTeams subplots, same x and y axes for all.\n\tfig.subplots_adjust(hspace = 1) # set horizontal space between axes\n\tfig.set_size_inches(5,10) # set size of figure\n\tfig.suptitle('Wins Distribution', fontweight='bold')\n\tyticks = [0,30]\n\tplt.ylim(0,30)\n\tplt.yticks(yticks) # set labels for y axis\n\tplt.xlabel('wins')\n\tplt.ylabel('Probability (%)',position=(0,8))\n\n\twins = [i for i in range(weekEnd+1)] # a team can win between 0 and 13 games\n\tplotNum = 0\n\tfor team in 
fantasyTeams:\n\t\tprobList = []\n\t\tfor i in range(weekStart,weekEnd+1):\n\t\t\tprobList.append(fantasyTeams[team].winProbability[i])\n\t\tprobDistList = probDist(probList)\n\t\tscaledProbDistList = [i*100 for i in probDistList]\n\t\t#print(team,probList,probDistList)\n\t\tmaxIx = -1\n\t\tmaxProb = -1\n\t\tfor i in range(len(probDistList)):\n\t\t\tif probDistList[i]>maxProb:\n\t\t\t\tmaxIx = i\n\t\t\t\tmaxProb = probDistList[i]\n\t\taxs[plotNum].plot(wins, scaledProbDistList, 'bo')\n\t\taxs[plotNum].set_title(fantasyTeams[team].nickname)\n\t\taxs[plotNum].set_xticks(wins)\n\t\taxs[plotNum].annotate('', xy=(maxIx, 0), xycoords='data', xytext=(-15, 25), \n\t\t\t\t\t\t\t\ttextcoords='offset points', arrowprops=dict(arrowstyle = '->'),\n\t\t\t\t\t horizontalalignment='right', verticalalignment='top')\n\t\tplotNum += 1\n\n\tif save:\n\t\tplt.savefig('C:/Users/NeilS/Desktop/FantasyBoyzUSA/plots/plotLeague/plotWinsDistribution.png')\n\tif show:\n\t\tplt.show()\n\tplt.close()", "def grid_33(player):\n plot = {\"Walls\": [\"S\"], \"TARDIS\": False, \"Transmat\": False,\n \"Plot\": f\"\\nThere\\'s a faint hum of machinery through the SOUTH wall. Feeling their\\n\"\n f\"way through the smoke, {player['Name'][0]} finds a switch to a hidden \"\n f\"door to the WEST.\\n\"}\n return plot" ]
[ "0.61282665", "0.6110966", "0.60680497", "0.6056641", "0.59679395", "0.591012", "0.58574474", "0.58120966", "0.5806717", "0.5806647", "0.5776799", "0.57236886", "0.5697711", "0.56511635", "0.56275743", "0.5585723", "0.55778956", "0.5569716", "0.55657756", "0.5563268", "0.5558526", "0.5543476", "0.553152", "0.5522555", "0.5513149", "0.549282", "0.5490882", "0.5480972", "0.54639757", "0.54548407" ]
0.6909711
0
Test for validating an email is valid.
def test_is_valid_email(self):
    self.assertTrue(is_valid_email('[email protected]'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_invalid_email(self):\n self.assertFalse(is_valid_email('helloworld'))", "def is_valid_email(email):\n assert email is not None\n return validate_email(str(email))", "def is_valid_email_address (email):\n return valid_email.search(email)", "def is_valid_email(self, email):\n rex = \"^[\\w]+[\\d]?@[\\w]+\\.[\\w]+$\"\n return re.match(rex, email)", "def IsEmailValid(email):\n return email and EMAIL_RE.search(email)", "def _email_is_valid(email):\n return VALID_EMAIL_REGEXP.match(email) is not None", "def is_email_valid(e_mail):\n pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n result = False\n if pattern.match(e_mail):\n result = True\n return result", "def is_email(address):\n try:\n validate_email(address)\n return True\n except:\n return False", "def IsValidEmail(s):\n return RE_EMAIL_ONLY.match(s)", "def valid_email(self, email):\n # uses regular expressions\n reg_email = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n return re.match(reg_email, email)", "def is_email_address_valid(email):\n if not re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$\", email):\n return False\n return True", "def is_valid_email(email):\n if re.search(EMAIL_REGEX, email):\n return True\n else:\n return False", "def validate_email(self):\n # source: https://docs.python.org/2/howto/regex.html\n if not re.match(r\"[^@.]+@[A-Za-z]+\\.[a-z]+\", self.email):\n return 'Invalid email address!'\n return self.email", "def verify_email(entered_email):\n return EMAIL_RE.match(entered_email)", "def isvalid(email):\n pattern = re.compile(r\"^([a-zA-Z0-9_\\-]+)@([a-zA-Z0-9]+)\\.\\w{,3}$\")\n return bool(pattern.match(email))", "def __validate_email(email):\n pattern = r\"\\\"?([-a-zA-Z0-9.`?{}]+@\\w+\\.\\w+)\\\"?\"\n pattern = re.compile(pattern)\n if not re.match(pattern, email):\n logging.critical(\"Incorrect email entered, email entered is -->{}\"\n .format(email))\n raise ValueError(\"You failed to match %s\" % email)\n return True", "def is_valid_email(email):\n return \"@\" in email and \".\" in email", "def is_valid_email(email):\n try:\n split = email.split('@')\n assert len(split) == 2\n domain = split[1]\n assert '.' in domain\n except AssertionError:\n return False\n return True", "def verify_email(email):\n email_reg_exp = re.compile(r\"^[\\S]+@[\\S]+.[\\S]+$\")\n return not email or email_reg_exp.match(email)", "def email_validator(email):\n if len(email) > 6:\n if re.match(REGEX_EXP, email) != None:\n return True\n return False", "def test_valid_email():\n assert_equals(is_valid_email(\"[email protected]\") is None, False)", "def emailValidate(form, field):\n\n if ' ' in field.data:\n raise ValidationError(message='Invalid e-mail address')\n\n if field.data.count('.') < 1:\n raise ValidationError(message='Invalid e-mail address')\n\n if field.data.count('@') < 1:\n raise ValidationError(message='Invalid e-mail address')", "def check_email_validity(email):\n if email.count('@') != 1:\n return False\n if len(email.split('@')[0]) == 0:\n return False\n if '.' 
not in email.split('@')[1]:\n return False\n return True", "def valid_email(email):\n email_regex = re.compile(r\"^[\\S]+@[\\S]+.[\\S]+$\")\n return email and email_regex.match(email)", "def is_valid_email(email):\n return re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\").fullmatch(email)", "def email_is_valid(email: Optional[str]) -> bool:\n if email is None:\n return True\n\n if re.match(r\"^[a-zA-Z0-9]+[\\.]?[a-zA-Z0-9]+[@]\\w+[.]\\w{2,3}$\", email):\n return True\n\n return False", "def validate_email(value):\n if not EMAIL_REGEX.match(value):\n raise ValidationError('Invalid email address')\n return value", "def is_valid_email(form, value):\n if '@' not in value or len(value) > 200:\n raise forms.ValidationError(_('Invalid email address'))", "def valid_email(email):\n # Ensure email is a string\n if not type(email) == str:\n return False\n\n # Find @ and . in the email address\n if re.match(\"[^@]+@[^@]+.[^@]+\", email):\n return True\n\n else:\n return False", "def validate_email(input_email: str) -> bool:\n regex = r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b\"\n if re.fullmatch(regex, input_email):\n return True\n return False" ]
[ "0.7966595", "0.79205877", "0.79137427", "0.7898664", "0.78882504", "0.78553706", "0.78213567", "0.7798287", "0.7772676", "0.7767416", "0.77120966", "0.7700251", "0.7692308", "0.7688552", "0.7682156", "0.7664193", "0.7636919", "0.75773346", "0.7545542", "0.75453466", "0.7525421", "0.74995", "0.74974376", "0.74889237", "0.74842376", "0.7479472", "0.7478909", "0.74757814", "0.74647987", "0.74639726" ]
0.84519744
0
Test for validating an email is invalid.
def test_is_invalid_email(self):
    self.assertFalse(is_valid_email('helloworld'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_email(self):\n # source: https://docs.python.org/2/howto/regex.html\n if not re.match(r\"[^@.]+@[A-Za-z]+\\.[a-z]+\", self.email):\n return 'Invalid email address!'\n return self.email", "def test_invalid_email(self):\n rv = self.login('[email protected]', 'Bo1995')\n self.assertIn(b'Invalid email! Please try again', rv.data)", "def test_is_valid_email(self):\n self.assertTrue(is_valid_email('[email protected]'))", "def invalid_email(email):\n email_pattern = re.compile(r\"[^@]+@[^@]+\\.[^@]+\")\n if email_pattern.match(email):\n return False\n return True", "def test_registeration_invalid_email(self):\n response = self.signup_a_user(self.user_invalid_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"Enter a valid email address.\"]\n )\n self.assertNotIn(\"token\", response.data)", "def __validate_email(email):\n pattern = r\"\\\"?([-a-zA-Z0-9.`?{}]+@\\w+\\.\\w+)\\\"?\"\n pattern = re.compile(pattern)\n if not re.match(pattern, email):\n logging.critical(\"Incorrect email entered, email entered is -->{}\"\n .format(email))\n raise ValueError(\"You failed to match %s\" % email)\n return True", "def test_invalid_email_when_logging_in(self):\n pass", "def validate_email(value):\n if not EMAIL_REGEX.match(value):\n raise ValidationError('Invalid email address')\n return value", "def IsEmailValid(email):\n return email and EMAIL_RE.search(email)", "def is_valid_email(self, email):\n rex = \"^[\\w]+[\\d]?@[\\w]+\\.[\\w]+$\"\n return re.match(rex, email)", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "def is_valid_email(email):\n assert email is not None\n return validate_email(str(email))", "def _email_is_valid(email):\n return VALID_EMAIL_REGEXP.match(email) is not None", "def test_invalid_emails_cause_error(self):\n form = forms.GroupInviteForm({'emails': 'abcd123123'})\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['emails'],\n [u'No Valid Addresses Found'])", "def is_valid_email_address (email):\n return valid_email.search(email)", "def validate_email(form, field):\n if not User.query.filter_by(email = field.data).first():\n raise ValidationError(\"Email is incorrect.\")", "def is_email_valid(e_mail):\n pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n result = False\n if pattern.match(e_mail):\n result = True\n return result", "def emailValidate(form, field):\n\n if ' ' in field.data:\n raise ValidationError(message='Invalid e-mail address')\n\n if field.data.count('.') < 1:\n raise ValidationError(message='Invalid e-mail address')\n\n if field.data.count('@') < 1:\n raise ValidationError(message='Invalid e-mail address')", "def validate_email(email):\n try:\n user, domain = str(email).split(\"@\")\n if not re.match(\"^[A-Za-z]*$\", user):\n abort(make_response(jsonify({\n \"status\": 400, \"Error\": \"Email is Invalid\"}), 400))\n except ValueError:\n abort(make_response(jsonify(\n status=400,\n error=\"Email is Invalid\"), 400))\n if not user or not domain:\n abort(make_response(jsonify(error=\"Email is Invalid\"), 400))\n\n # Check that domain is valid\n try:\n domain_1, domain_2 = domain.split(\".\")\n if not re.match(\"^[A-Za-z]*$\", domain_1):\n abort(make_response(jsonify({\n \"status\": 400, \"Error\": \"Email is Invalid\"}), 400))\n if not re.match(\"^[A-Za-z]*$\", domain_2):\n 
abort(make_response(jsonify({\n \"status\": 400, \"Error\": \"Email is Invalid\"}), 400))\n except ValueError:\n abort(make_response(jsonify(\n status=400,\n error=\"Email is Invalid\"), 400))\n if not domain_1 or not domain_2:\n abort(make_response(jsonify(\n status=400,\n error=\"Email is Invalid\"), 400))\n\n return email", "def email_valid(email_string):\n if not email_string:\n raise WrongInput(\"Input cannot be blank\")\n if not isinstance(email_string, str):\n raise WrongInput(\"Invalid email address\")\n\n if '@' not in email_string or '.' not in email_string:\n raise WrongInput('Invalid email address. Example of a valid address: [email protected].')\n else:\n return email_string", "def is_email_address_valid(email):\n if not re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$\", email):\n return False\n return True", "def is_allowed_email(email):\n if email and not is_regex_email(email):\n return \"That is not a valid email.\"\n else:\n return \"\"", "def is_valid_email(email):\n return \"@\" in email and \".\" in email", "def validate_email( email ):\n message = ''\n if not( VALID_EMAIL_RE.match( email ) ):\n message = \"Please enter a real email address.\"\n elif len( email ) > 255:\n message = \"Email address exceeds maximum allowable length.\"\n return message", "def is_valid_email(form, value):\n if '@' not in value or len(value) > 200:\n raise forms.ValidationError(_('Invalid email address'))", "def IsValidEmail(s):\n return RE_EMAIL_ONLY.match(s)", "def test_valid_email_invalid(mock_db):\n database = sqlite3.connect(mock_db)\n # check for a invalid new unique email\n assert not valid_email(database, \"\")\n assert not valid_email(database, \"[email protected]\")\n assert not valid_email(database, \"[email protected]\")\n assert not valid_email(database, \"foobar\")", "def email_validator(email):\n if len(email) > 6:\n if re.match(REGEX_EXP, email) != None:\n return True\n return False", "def check_email_validity(email):\n if email.count('@') != 1:\n return False\n if len(email.split('@')[0]) == 0:\n return False\n if '.' not in email.split('@')[1]:\n return False\n return True", "def is_valid_email(email):\n if re.search(EMAIL_REGEX, email):\n return True\n else:\n return False" ]
[ "0.7875082", "0.7867147", "0.78601694", "0.78475493", "0.7844588", "0.78331923", "0.7718214", "0.77108777", "0.7701428", "0.7693918", "0.766842", "0.7642582", "0.7625827", "0.76145154", "0.76109374", "0.7607006", "0.7587825", "0.75767756", "0.7554816", "0.75215554", "0.7516638", "0.7501331", "0.7487622", "0.74871296", "0.748515", "0.7470946", "0.74533826", "0.7449794", "0.7438901", "0.7437151" ]
0.81805927
0
Test for successful password strength validation.
def test_password_strength_validator(self):
    self.assertIsNone(validate_password_strength('abcd123'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def password_strength(self, password_info):\n\n # Check for digits in the password\n digit_error = re.search(r\"\\d\", password_info) is None\n\n # Check for uppercase characters in the password\n uppercase_error = re.search(r\"[A-Z]\", password_info) is None\n\n # Check for lowercase characters in the password\n lowercase_error = re.search(r\"[a-z]\", password_info) is None\n\n # Check the condition of the password\n password_condition = not(\n digit_error or\n uppercase_error or\n lowercase_error\n )\n\n return password_condition # return the condition of the password", "def test_password_strength_validator_missing_letter(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('1234567')", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def test_password_strength_validator_missing_digit(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('abcdefg')", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should not be less than four characters.\")", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def test_invalid_password(self):\n pass", "def is_password_valid(password):\n #TODO : This should also be handled by the front_end\n pass", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def validate(self, data):\n\n password = data['password']\n password2 = data['password2']\n if password != password2:\n raise serializers.ValidationError(\"Passwords do not match.\")\n\n pw_results = zxcvbn.password_strength(password)\n strength = pw_results['score']\n\n if strength < settings.PASSWORD_STRENGTH_MIN:\n raise serializers.ValidationError(\"Insufficient password strength. Scored {}/4. 
\"\n \"Estimated time to crack: {}\"\n .format(strength,\n pw_results['crack_time_display']))\n\n return data", "def valid_password(password: Text):\n results = Utility.password_policy.test(password)\n if results:\n response = []\n for result in results:\n if isinstance(result, Length):\n response.append(\"Password length must be \" + str(result.length))\n elif isinstance(result, Special):\n response.append(\"Missing \" + str(result.count) + \" special letter\")\n elif isinstance(result, Uppercase):\n response.append(\"Missing \" + str(result.count) + \" uppercase letter\")\n elif isinstance(result, Numbers):\n response.append(\"Missing \" + str(result.count) + \"number\")\n\n if response:\n raise AppException(\"\\n\".join(response))", "async def password_strength(self, ctx, password: str):\n conv = PasswordStats(password)\n converter = conv.strength()\n if converter < 0.250:\n emoji = RED_CIRCLE\n text = \"This is a **weak** password.\"\n elif converter > 0.250 and converter < 0.500:\n emoji = ORANGE_CIRCLE\n text = \"This is an **okay** password.\"\n elif converter > 0.500 and converter < 0.750:\n emoji = YELLOW_CIRCLE\n text = \"This is a **good** password!\"\n else:\n emoji = GREEN_CIRCLE\n text = \"This is an **excellent** password!\"\n await ctx.maybe_send_embed(\n f\"**Strength rating: {round(converter * 100)}%** {emoji}\\n{cf.quote(text)}\"\n )", "def check(self, password):\n\n if len(password) < self.min_length:\n return False\n\n digits = len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n return False\n\n return True", "def test_password_validator(self):\n # Test with bad passwords\n pass_validator = self.validator.password_validator\n pass_list = mock_data['bad_pass']\n is_valid = pass_validator(pass_list[0])\n self.assertEqual(is_valid, 'Password must have eight characters')\n is_valid = pass_validator(pass_list[1])\n self.assertEqual(is_valid, 'Password must have a lowercase character')\n is_valid = pass_validator(pass_list[2])\n self.assertEqual(is_valid, 'Password must have an uppercase character')\n is_valid = pass_validator(pass_list[3])\n self.assertEqual(is_valid, 'Password must have a number')\n is_valid = pass_validator(pass_list[4])\n self.assertEqual(is_valid, 'Password must have one of this: _@*%!&$')\n is_valid = pass_validator(pass_list[5])\n self.assertEqual(is_valid, 'Password cannot have spaces')\n # Test with good password\n is_valid = pass_validator(mock_data['good_pass'])\n self.assertEqual(is_valid, True)", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def test_password_is_valid():\n\twith pytest.raises(Exception):\n\t\tassert password_is_valid('qqqqqqqq') == Exception('Password has to be longer than 8 characters')\n\t\tassert password_is_valid('') == Exception('Password cannot be empty')\n\t\tassert 
password_is_valid('QQQQQQQQQ') == Exception('Password has to contain lowercase')\n\t\tassert password_is_valid('qqqqqqqqq') == Exception('Password has to contain uppercase')\n\t\tassert password_is_valid('Qqqqqqqqq') == Exception('Password has to contain a digit')\n\n\t\"\"\"test that valid passwords work\"\"\"\t\n\tassert password_is_valid('Q8qqqqqqqq') == True", "def reviewPassword():\n\t#Collect data\n\tpasswordData=genReviewEntry.get()\n\n\tstrength=calculatePasswordStrength(passwordData)\n\tresults=strength[3]\n\t#Clear tree\n\tgenReviewTree.delete(*genReviewTree.get_children())\n\t#Add the data to the tree\n\t\"\"\"\n\tFalse = Passed Test\n\tTrue = Failed Test\n\t\"\"\"\n\t#Iterate through the Tree\n\tfor item in results:\n\t\t#Get the result in the dictionary\n\t\tvalue=results[item]\n\t\t#If the value is true the test failed\n\t\tif value:\n\t\t\ttag=\"Fail\"\n\t\t\tmessage=\"Incomplete\"\n\t\t#Test passed\n\t\telse:\n\t\t\ttag=\"Pass\"\n\t\t\tmessage=\"Complete\"\n\t\t#Add data to tree\n\t\tgenReviewTree.insertData((item, message), tag)", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def test_password_strength(self):\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'anothergmail.com', 'asdfasdf')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)", "def test_password_length(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"kake\",\n \"email\": 'kakegmail.com',\n \"password\": \"12345l\",\n }\n },\n format=\"json\"\n )\n self.assertEqual(self.response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual('Ensure this field has at least 8 characters.',\n self.response.json()['errors']['password'][0])", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_validate_password_length_without_default_value(self):\n for _ in range(3):\n with self.assertRaises(ValidationError):\n test_password = os.urandom(random.randint(0, self.min_password_length - 1)).decode('latin1')\n validate_password_length(test_password)\n try:\n for _ in range(3):\n test_password = os.urandom(random.randint(self.min_password_length, 30)).decode('latin1')\n validate_password_length(test_password)\n except ValidationError:\n self.fail('ValidationError must not have been raised')", "def clean_password(self):\n password = self.cleaned_data['password']\n\n if CHECK_STRENGTH:\n if len(password) < MIN_PASSWORD_LEN:\n raise forms.ValidationError('Password must have at least %i characters.' % MIN_PASSWORD_LEN)\n\n symbols = set(password)\n\n if not ((_digit & symbols and _upper & symbols) or \\\n (_digit & symbols and _lower & symbols) or \\\n (_lower & symbols and _upper & symbols)):\n raise forms.ValidationError('Password is too week. Invent better one.')\n\n return password", "def verify_password(self, password):\n return self.PASS == password", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def validate_password(self, value):\n policy = PasswordPolicy.from_names(\n length=8, # min length: 8\n uppercase=1, # need min. 
1 uppercase letter\n numbers=1, # need min. 1 digit\n special=1, # need min. 1 special characters\n nonletters=1,\n )\n errors = [PASSWORD_ERRORS[p.name()] for p in policy.test(value)]\n if errors:\n raise ValidationError(errors)", "def pw_is_viable(password: str) -> bool:\n logging.debug(\"called\")\n if not any([\n not password,\n len(password) < 8,\n not any(map(lambda x: x.isdigit(), password)),\n not any(map(lambda x: x.isupper(), password)),\n not any(map(lambda x: x.islower(), password)),\n not any(map(lambda x: x in SPECIAL_CHARACTERS, password)),\n ]):\n return True\n else:\n raise PasswordError(\"Password should contain at least a digit, an uppercase, a lower case, and special \"\n \"characters and should be at least 8 digits in total.\", password)" ]
[ "0.811253", "0.7808661", "0.7498085", "0.7460592", "0.7393634", "0.73335767", "0.73282", "0.71243334", "0.70749134", "0.69481057", "0.6916921", "0.6855777", "0.6833095", "0.6819363", "0.68002194", "0.67896336", "0.6761693", "0.6758369", "0.6751114", "0.6736633", "0.6694997", "0.6689554", "0.66838115", "0.6672677", "0.6666205", "0.6643637", "0.6630013", "0.6629973", "0.6611562", "0.6600513" ]
0.85219926
0
Test for length fail password strength validation.
def test_password_strength_validator_length_fail(self):
    with self.assertRaises(ValidationError):
        validate_password_strength('hi')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def test_password_strength_validator_missing_letter(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('1234567')", "def test_password_strength_validator_missing_digit(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('abcdefg')", "def test_more_than_max_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']) + 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def test_validate_password_length_without_default_value(self):\n for _ in range(3):\n with self.assertRaises(ValidationError):\n test_password = os.urandom(random.randint(0, self.min_password_length - 1)).decode('latin1')\n validate_password_length(test_password)\n try:\n for _ in range(3):\n test_password = os.urandom(random.randint(self.min_password_length, 30)).decode('latin1')\n validate_password_length(test_password)\n except ValidationError:\n self.fail('ValidationError must not have been raised')", "def test_less_than_min_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MIN_PWD_LEN']) - 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should not be less than four characters.\")", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_invalid_length_for_new_password():\n user = User(email=\"[email protected]\", user_type=0)\n user_password = \"ILoveHTML\"\n user.SetPassword(user_password)\n\n new_password1 = \"pwd\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password1)\n assert not user.VerifyPassword(new_password1)\n assert user.VerifyPassword(user_password)\n\n new_password2 = \"I love meatball and tuna.\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password2)\n assert not user.VerifyPassword(new_password2)\n assert user.VerifyPassword(user_password)", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password 
should be between 8 and 25 letters\")\r\n\r\n return False", "def valid_password(password: Text):\n results = Utility.password_policy.test(password)\n if results:\n response = []\n for result in results:\n if isinstance(result, Length):\n response.append(\"Password length must be \" + str(result.length))\n elif isinstance(result, Special):\n response.append(\"Missing \" + str(result.count) + \" special letter\")\n elif isinstance(result, Uppercase):\n response.append(\"Missing \" + str(result.count) + \" uppercase letter\")\n elif isinstance(result, Numbers):\n response.append(\"Missing \" + str(result.count) + \"number\")\n\n if response:\n raise AppException(\"\\n\".join(response))", "def password_strength(self, password_info):\n\n # Check for digits in the password\n digit_error = re.search(r\"\\d\", password_info) is None\n\n # Check for uppercase characters in the password\n uppercase_error = re.search(r\"[A-Z]\", password_info) is None\n\n # Check for lowercase characters in the password\n lowercase_error = re.search(r\"[a-z]\", password_info) is None\n\n # Check the condition of the password\n password_condition = not(\n digit_error or\n uppercase_error or\n lowercase_error\n )\n\n return password_condition # return the condition of the password", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def test_password_length(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"kake\",\n \"email\": 'kakegmail.com',\n \"password\": \"12345l\",\n }\n },\n format=\"json\"\n )\n self.assertEqual(self.response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual('Ensure this field has at least 8 characters.',\n self.response.json()['errors']['password'][0])", "def test_password_is_valid():\n\twith pytest.raises(Exception):\n\t\tassert password_is_valid('qqqqqqqq') == Exception('Password has to be longer than 8 characters')\n\t\tassert password_is_valid('') == Exception('Password cannot be empty')\n\t\tassert password_is_valid('QQQQQQQQQ') == Exception('Password has to contain lowercase')\n\t\tassert password_is_valid('qqqqqqqqq') == Exception('Password has to contain uppercase')\n\t\tassert password_is_valid('Qqqqqqqqq') == Exception('Password has to contain a digit')\n\n\t\"\"\"test that valid passwords work\"\"\"\t\n\tassert password_is_valid('Q8qqqqqqqq') == True", "def validatePassword(self, password):\n if len(password) < self.min_length:\n raise exceptions.LengthPasswordError(\n _('Password should be at least ${count} characters.',\n mapping={'count': self.min_length}))\n elif self.letters_digits and \\\n (password.isalpha() or password.isdigit()):\n raise exceptions.LettersDigitsPasswordError()\n elif self.letters_mixed_case and \\\n (password.isupper() or password.islower()):\n raise exceptions.LettersCasePasswordError()", "def password_validation(pass1,pass2):\n errors = []\n if(pass1 != pass2):\n errors.append(\"Lösenorden matchade inte.\")\n if(len(pass1) < 3):\n errors.append(\"Lösenordet måste vara längre än 3 bokstöver.\")\n \n return errors", "def 
validate_password_length(value):\r\n message = _(\"Invalid Length ({0})\")\r\n code = \"length\"\r\n\r\n min_length = getattr(settings, 'PASSWORD_MIN_LENGTH', None)\r\n max_length = getattr(settings, 'PASSWORD_MAX_LENGTH', None)\r\n\r\n if min_length and len(value) < min_length:\r\n raise ValidationError(message.format(_(\"must be {0} characters or more\").format(min_length)), code=code)\r\n elif max_length and len(value) > max_length:\r\n raise ValidationError(message.format(_(\"must be {0} characters or less\").format(max_length)), code=code)", "def test_invalid_password(self):\n pass", "def is_valid_password(password):\n if len(password) < MIN_LENGTH:\n return False\n return True", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)", "def validate_password(self, value):\n policy = PasswordPolicy.from_names(\n length=8, # min length: 8\n uppercase=1, # need min. 1 uppercase letter\n numbers=1, # need min. 1 digit\n special=1, # need min. 1 special characters\n nonletters=1,\n )\n errors = [PASSWORD_ERRORS[p.name()] for p in policy.test(value)]\n if errors:\n raise ValidationError(errors)", "def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")" ]
[ "0.8374114", "0.80661535", "0.79859805", "0.7951157", "0.7858637", "0.77230054", "0.76463336", "0.7581791", "0.7435477", "0.73920065", "0.73624104", "0.73472536", "0.7245728", "0.7207087", "0.7175876", "0.7170261", "0.71615016", "0.7102726", "0.71006024", "0.7093672", "0.70468783", "0.70124507", "0.7008484", "0.6976596", "0.69347477", "0.69156814", "0.6907637", "0.6877643", "0.68544537", "0.68290144" ]
0.8685554
0
Test that the password strength validator rejects a password missing a digit.
def test_password_strength_validator_missing_digit(self):
    with self.assertRaises(ValidationError):
        validate_password_strength('abcdefg')
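The validator under test, validate_password_strength, does not appear in this excerpt. A minimal sketch consistent with the assertions in this and the surrounding rows (valid 'abcd123', missing-letter '1234567', too-short 'hi') is given below; the 7-character minimum and the Django-style ValidationError are assumptions inferred from the test inputs, not confirmed by the source.

import re

from django.core.exceptions import ValidationError


def validate_password_strength(value):
    """Raise ValidationError unless the password has a letter, a digit,
    and a plausible minimum length; return None on success."""
    # Assumed threshold: 'abcd123' (7 chars) must pass, 'hi' must fail.
    if len(value) < 7:
        raise ValidationError('Password must be at least 7 characters long.')
    if not re.search(r'[A-Za-z]', value):
        raise ValidationError('Password must contain at least one letter.')
    if not re.search(r'\d', value):
        raise ValidationError('Password must contain at least one digit.')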
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test_password_strength_validator_missing_letter(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('1234567')", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def password_strength(self, password_info):\n\n # Check for digits in the password\n digit_error = re.search(r\"\\d\", password_info) is None\n\n # Check for uppercase characters in the password\n uppercase_error = re.search(r\"[A-Z]\", password_info) is None\n\n # Check for lowercase characters in the password\n lowercase_error = re.search(r\"[a-z]\", password_info) is None\n\n # Check the condition of the password\n password_condition = not(\n digit_error or\n uppercase_error or\n lowercase_error\n )\n\n return password_condition # return the condition of the password", "def test02_password_numeric(self):\n self.set_complexity(length=0, numeric=4, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n \"1abcd2efghij3\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"1correct2horse3battery4staple\",\n \"1234abc\",\n \"abc1234\",\n \"0000\",\n \"Password1234!___\",\n \"Test1Split2Numerics3In4Password\",\n )\n self.set_passwords(valid)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def test05_password_special(self):\n self.set_complexity(length=0, numeric=0, upper=0, lower=0, special=5)\n\n invalid = (\n \"A\",\n \"!!!!\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"_____\",\n \"_!@£$\",\n \"A!B@C£D$F%\",\n \"Tr0ub4dor&3!@£$\",\n \"1234;.,/]1234\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'password1234\\'\"\"\"\"\"',\n \"p@$$w@*d\",\n )\n self.set_passwords(valid)", "def valida_digito(password):\n for s in password:\n if s.isdigit() == True: \n return True\n return False", "def test_less_than_min_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MIN_PWD_LEN']) - 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def test_validate_password_length_without_default_value(self):\n for _ in range(3):\n with self.assertRaises(ValidationError):\n test_password = os.urandom(random.randint(0, self.min_password_length - 
1)).decode('latin1')\n validate_password_length(test_password)\n try:\n for _ in range(3):\n test_password = os.urandom(random.randint(self.min_password_length, 30)).decode('latin1')\n validate_password_length(test_password)\n except ValidationError:\n self.fail('ValidationError must not have been raised')", "def pw_is_viable(password: str) -> bool:\n logging.debug(\"called\")\n if not any([\n not password,\n len(password) < 8,\n not any(map(lambda x: x.isdigit(), password)),\n not any(map(lambda x: x.isupper(), password)),\n not any(map(lambda x: x.islower(), password)),\n not any(map(lambda x: x in SPECIAL_CHARACTERS, password)),\n ]):\n return True\n else:\n raise PasswordError(\"Password should contain at least a digit, an uppercase, a lower case, and special \"\n \"characters and should be at least 8 digits in total.\", password)", "def test07_no_raise(self):\n self.set_complexity(length=14, numeric=1, upper=1, lower=1, special=1)\n\n invalid = (\n \"A\",\n \"ACBDEabcde!!!!\",\n \"Tr0ub4dor&3\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n \"1234;.,/]1234\",\n )\n for password in invalid:\n with self.subTest(password=password):\n self.assertFalse(\n self.user1._check_password_complexity(password, raise_on_failure=False)\n )\n\n valid = (\n \"Sixteenchars12@_\",\n \"thisis4reallybadPassword!\",\n \"C0rrecthorsebatteryst@ple\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'Password1234\\'\"\"\"\"\"',\n )\n for password in valid:\n with self.subTest(password=password):\n self.assertTrue(\n self.user1._check_password_complexity(password, raise_on_failure=False)\n )", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def has_usable_password(self):\n return True", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)", "def acceptable_password(password):\r\n LOG.debug(\"PASS\")\r\n LOG.debug(password)\r\n\r\n if password is not None:\r\n LOG.debug(len(password))\r\n\r\n if password is None:\r\n return False\r\n\r\n if len(password) < 3:\r\n return False\r\n\r\n return True", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def test_invalid_password(self):\n pass", "def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True", "def _validatePassword(password):\n\n 
uppercaseChars = re.search('[A-Z]', password)\n lowercaseChars = re.search('[a-z]', password)\n\n if len(password) < 8:\n raise Exception(\"Password must be at lest 8 letters\")\n elif re.search('[0-9]', password) is None:\n raise Exception(\"Password must contain atleast one number\")\n elif uppercaseChars is None or lowercaseChars is None:\n raise Exception(\"Password must contain upper and lowercase letters\")\n else:\n return password", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def valid_password(password: Text):\n results = Utility.password_policy.test(password)\n if results:\n response = []\n for result in results:\n if isinstance(result, Length):\n response.append(\"Password length must be \" + str(result.length))\n elif isinstance(result, Special):\n response.append(\"Missing \" + str(result.count) + \" special letter\")\n elif isinstance(result, Uppercase):\n response.append(\"Missing \" + str(result.count) + \" uppercase letter\")\n elif isinstance(result, Numbers):\n response.append(\"Missing \" + str(result.count) + \"number\")\n\n if response:\n raise AppException(\"\\n\".join(response))", "def valid_password(password):\n val = True\n\n if len(password) < 8:\n val = False\n return val\n\n if not any(char.isdigit() for char in password):\n val = False\n return val\n\n if not any(char.isupper() for char in password):\n val = False\n return val\n\n if not any(char.islower() for char in password):\n val = False\n return val\n\n if val:\n return val", "def contains_only_double_digit(password: int) -> bool:\n word = str(password)\n\n if word[0] == word[1] and word[0] != word[2]:\n return True\n if word[-2] == word[-1] and word[-2] != word[-3]:\n return True\n\n for i in range(1, len(word)-2):\n if word[i] == word[i+1] and word[i] != word[i+2] and word[i] != word[i-1]:\n return True\n\n return False" ]
[ "0.76214445", "0.7615998", "0.7283949", "0.7047271", "0.70288146", "0.68686", "0.68568677", "0.6758803", "0.6694275", "0.6566324", "0.6553022", "0.64874214", "0.6469547", "0.64618", "0.6447736", "0.6439617", "0.6412507", "0.641235", "0.63657725", "0.6363227", "0.6354931", "0.6336242", "0.63301253", "0.6304031", "0.6303931", "0.6258414", "0.6256333", "0.62521285", "0.6247638", "0.62439984" ]
0.80876046
0
Test that the password strength validator rejects a password missing a letter.
def test_password_strength_validator_missing_letter(self):
    with self.assertRaises(ValidationError):
        validate_password_strength('1234567')
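Assuming the sketch above, a quick check of the three behaviours these rows assert:

validate_password_strength('abcd123')     # valid: returns None
for bad in ('1234567', 'abcdefg', 'hi'):  # missing letter / missing digit / too short
    try:
        validate_password_strength(bad)
    except ValidationError as exc:
        print(bad, '->', exc.messages)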
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test_password_strength_validator_missing_digit(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('abcdefg')", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def password_strength(self, password_info):\n\n # Check for digits in the password\n digit_error = re.search(r\"\\d\", password_info) is None\n\n # Check for uppercase characters in the password\n uppercase_error = re.search(r\"[A-Z]\", password_info) is None\n\n # Check for lowercase characters in the password\n lowercase_error = re.search(r\"[a-z]\", password_info) is None\n\n # Check the condition of the password\n password_condition = not(\n digit_error or\n uppercase_error or\n lowercase_error\n )\n\n return password_condition # return the condition of the password", "def test05_password_special(self):\n self.set_complexity(length=0, numeric=0, upper=0, lower=0, special=5)\n\n invalid = (\n \"A\",\n \"!!!!\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"_____\",\n \"_!@£$\",\n \"A!B@C£D$F%\",\n \"Tr0ub4dor&3!@£$\",\n \"1234;.,/]1234\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'password1234\\'\"\"\"\"\"',\n \"p@$$w@*d\",\n )\n self.set_passwords(valid)", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def has_usable_password(self):\n return True", "def is_valid_password_v1(password):\n letter_count = sum([x == password[\"letter\"] for x in list(password[\"password\"])])\n return password[\"low\"] <= letter_count <= password[\"high\"]", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_letters(self):\n self.assertFalse(validate_measure_input('a', self.measures))\n self.assertFalse(validate_measure_input('1a', self.measures))", "def pw_is_viable(password: str) -> bool:\n logging.debug(\"called\")\n if not any([\n not password,\n len(password) < 8,\n not any(map(lambda x: x.isdigit(), password)),\n not any(map(lambda x: x.isupper(), 
password)),\n not any(map(lambda x: x.islower(), password)),\n not any(map(lambda x: x in SPECIAL_CHARACTERS, password)),\n ]):\n return True\n else:\n raise PasswordError(\"Password should contain at least a digit, an uppercase, a lower case, and special \"\n \"characters and should be at least 8 digits in total.\", password)", "def test07_no_raise(self):\n self.set_complexity(length=14, numeric=1, upper=1, lower=1, special=1)\n\n invalid = (\n \"A\",\n \"ACBDEabcde!!!!\",\n \"Tr0ub4dor&3\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n \"1234;.,/]1234\",\n )\n for password in invalid:\n with self.subTest(password=password):\n self.assertFalse(\n self.user1._check_password_complexity(password, raise_on_failure=False)\n )\n\n valid = (\n \"Sixteenchars12@_\",\n \"thisis4reallybadPassword!\",\n \"C0rrecthorsebatteryst@ple\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'Password1234\\'\"\"\"\"\"',\n )\n for password in valid:\n with self.subTest(password=password):\n self.assertTrue(\n self.user1._check_password_complexity(password, raise_on_failure=False)\n )", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def test_invalid_password(self):\n pass", "def valid_password(password: Text):\n results = Utility.password_policy.test(password)\n if results:\n response = []\n for result in results:\n if isinstance(result, Length):\n response.append(\"Password length must be \" + str(result.length))\n elif isinstance(result, Special):\n response.append(\"Missing \" + str(result.count) + \" special letter\")\n elif isinstance(result, Uppercase):\n response.append(\"Missing \" + str(result.count) + \" uppercase letter\")\n elif isinstance(result, Numbers):\n response.append(\"Missing \" + str(result.count) + \"number\")\n\n if response:\n raise AppException(\"\\n\".join(response))", "def acceptable_password(password):\r\n LOG.debug(\"PASS\")\r\n LOG.debug(password)\r\n\r\n if password is not None:\r\n LOG.debug(len(password))\r\n\r\n if password is None:\r\n return False\r\n\r\n if len(password) < 3:\r\n return False\r\n\r\n return True", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def valid_password(lower, upper, letter, password):\n # Note the -1 to turn 1 indexing into 0 indexing\n matches = [idx for idx in (lower, upper) if password[idx - 1] == letter]\n return len(matches) == 1", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def invalid_password(password):\n special_characters = ['$', '#', '@']\n password = 
password.replace(\" \", \"\")\n test_conditions = [\n (len(password) >= 8 and len(password) <= 12),\n (any(x.isupper() for x in password) and any(x.islower()\n for x in password)),\n (any(y in password for y in special_characters)\n and any(y.isdigit() for y in password))\n ]\n if all(condition is True for condition in test_conditions):\n return False\n return True", "def test02_password_numeric(self):\n self.set_complexity(length=0, numeric=4, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n \"1abcd2efghij3\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"1correct2horse3battery4staple\",\n \"1234abc\",\n \"abc1234\",\n \"0000\",\n \"Password1234!___\",\n \"Test1Split2Numerics3In4Password\",\n )\n self.set_passwords(valid)", "def _validatePassword(password):\n\n uppercaseChars = re.search('[A-Z]', password)\n lowercaseChars = re.search('[a-z]', password)\n\n if len(password) < 8:\n raise Exception(\"Password must be at lest 8 letters\")\n elif re.search('[0-9]', password) is None:\n raise Exception(\"Password must contain atleast one number\")\n elif uppercaseChars is None or lowercaseChars is None:\n raise Exception(\"Password must contain upper and lowercase letters\")\n else:\n return password", "def test_less_than_min_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MIN_PWD_LEN']) - 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)" ]
[ "0.7635881", "0.7497957", "0.7032502", "0.6873941", "0.679898", "0.67746353", "0.6773938", "0.67620707", "0.6718229", "0.6624808", "0.6562677", "0.65404505", "0.6515814", "0.6507858", "0.643209", "0.6394227", "0.6375082", "0.63566536", "0.6346577", "0.63382757", "0.63098824", "0.63038737", "0.6298486", "0.62941265", "0.6291365", "0.6268107", "0.625951", "0.624491", "0.6242733", "0.62416166" ]
0.82154936
0
Test that the is_valid_color method returns the correct boolean for valid and invalid colors.
def test_is_valid_color(self):
    self.assertTrue(is_valid_color('black'))
    self.assertTrue(is_valid_color('#aabb11'))
    self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))
    self.assertFalse(is_valid_color('bl(ack'))
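is_valid_color itself is not shown in this excerpt. Given the name, hex, and rgb validators exercised by the neighbouring rows, a plausible sketch is a simple dispatcher; this is an illustrative assumption, not the dataset's actual implementation:

def is_valid_color(value):
    """Accept a CSS color name, a hex code, or an rgb()/rgba() string."""
    return (is_valid_color_name(value)
            or is_valid_hex(value)
            or is_valid_rgb_color(value))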
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))", "def validColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):\n return False\n else:\n return True", "def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True", "def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def _validate_color(color):\n if not isinstance(color, (list, tuple)):\n raise ValueError(\"Color has to be list, or tuple\")\n if len(color) != 3:\n raise ValueError(\"Color have to contain exactly 3 values: [R, G, B]\")\n for channel in color:\n validate_channel_value(channel)", "def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)", "def isColor(self,color):\n return self.color==color", "def is_rgb_color(v):\n if hasattr(v, \"r\") and hasattr(v, \"g\") and hasattr(v, \"b\"):\n v = [v.r, v.g, v.b]\n if not isiterable(v) or 
len(v) < 3:\n return False\n try:\n return all([0 <= int(x) <= 255 for x in v[:3]])\n except (TypeError, ValueError):\n return False", "def test_color(self):\n color = pygame.Color(0, 0, 0, 0)\n\n self.assertIsInstance(color, pygame.Color)", "def isRGB(color):\n if not(isinstance(color, list) or isinstance(color, tuple)):\n raise pgUIException(str(color) + ' is not a valid color',\n code = 20)\n if len(color) != 3:\n raise pgUIException(str(color) + ' color has to have three components',\n code = 21)\n if not(isinstance(color[0], int))\\\n or not(isinstance(color[1], int))\\\n or not(isinstance(color[2], int)):\n raise pgUIException(str(color) + ' color components have to be integers',\n code = 23)\n for c in color:\n if c < 0 or c > 255:\n raise pgUIException(str(color) +\n ' color components are to be in between 0 and 255',\n code = 22)\n return True", "def invalid_colour(colour):\n error_message = f\"`{colour}` is not a valid RGB colour\"\n\n if not isinstance(colour, list):\n return error_message\n\n if not all([0 <= component <= 255 for component in colour]):\n return error_message\n\n return False", "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "def IsOk(*args, **kwargs):\n return _gdi_.Colour_IsOk(*args, **kwargs)", "def __isValidRgbaColor(self, color):\n rgba = []\n \n parts = color.split(\",\")\n if len(parts) not in [3, 4]:\n return False, []\n \n for part in parts:\n try:\n c = int(part)\n except ValueError:\n return False, []\n \n if c < 0 or c > 255:\n return False, []\n \n rgba.append(c)\n \n return True, rgba", "def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"", "def colorOK(colorStr):\n tkWdg = _getTkWdg()\n\n try:\n tkWdg.winfo_rgb(colorStr)\n except tkinter.TclError:\n return False\n return True", "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False", "def test_color(self):\n self._calibration_test(\"color_full\")", "def is_red(self):\n return \"red\" == self.color", "def is_valid_hair_color(hair_color: str) -> bool:\n return re.match(r'^#[a-f|0-9]{5}', hair_color)", "def CheckProperColoring(G):\r\n coloring_proper = True\r\n\r\n for vertex in G._color:\r\n #print('Vertex',vertex)\r\n #print('G._color',G._color[vertex])\r\n #print('G._adj[vertex]', G._adj[vertex])\r\n for adj_vertex in G._adj[vertex]:\r\n if G._color[vertex] == G._color[adj_vertex]:\r\n coloring_proper = False\r\n #end\r\n #end\r\n #end\r\n\r\n return coloring_proper", "def has_valid_channel_values(rgb_coll):\n return all([is_0to255(c) and is_int(c) for c in rgb_coll])", "def is_color(self, color: ColorLike) -> bool:\n\n if isinstance(color, Color):\n return self.color == color\n elif isinstance(color, str):\n return str(self.color) == color\n elif isinstance(color, int):\n return 
int(self.color) == color\n return False", "def test_color__rgba_int_args_invalid_value(self):\n self.assertRaises(ValueError, pygame.Color, 257, 10, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 257, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 257, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 44, 257)", "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))" ]
[ "0.82800925", "0.8025948", "0.7942473", "0.78476757", "0.76929873", "0.7682319", "0.76152545", "0.7574005", "0.7034168", "0.6930264", "0.69240725", "0.68971056", "0.68955773", "0.68178314", "0.68153834", "0.68081856", "0.67790467", "0.676682", "0.67347234", "0.6709806", "0.6688668", "0.66696703", "0.6669528", "0.66413695", "0.6635767", "0.65985733", "0.6576507", "0.6552019", "0.6550531", "0.65474826" ]
0.85718346
0
Test that the is_valid_hex method returns the correct boolean for valid and invalid hex values.
def test_is_valid_hex(self):
    self.assertTrue(is_valid_hex('#aabb11'))
    self.assertTrue(is_valid_hex('#000'))
    self.assertTrue(is_valid_hex('#aaa'))
    self.assertFalse(is_valid_hex('black'))
    self.assertFalse(is_valid_hex('bl(ack'))
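A regex sketch of is_valid_hex that satisfies all five cases above (assumed, since the implementation is not in this excerpt):

import re

_HEX_RE = re.compile(r'^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$')


def is_valid_hex(value):
    # Accepts 3- or 6-digit CSS hex codes such as '#aaa' or '#aabb11'.
    return bool(_HEX_RE.match(value))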
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_hex(hex_code: str) -> bool:\n\n match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', hex_code)\n\n if match:\n return True\n else:\n return False", "def ishex(data: str) -> bool:\n return bool(re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)) or bool(re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data))", "def is_hex(s): \n # if it can be converted to a base 16 int then it is hex\n try:\n int(s, 16)\n return True\n \n except ValueError:\n # it could not be converted therefore is not hex\n return False\n # end try", "def is_hex(n):\n hex_test = (1 + sqrt(1 + 8*n))/4\n if hex_test == int(hex_test):\n return True\n return False", "def __isHexString(self, text):\n return all(map(lambda c: c in \"0123456789abcdefABCDEF\", text))", "def comparehex(hex1: str, hex2: str) -> bool:\n if int(str(hex1), 16) == int(str(hex2), 16):\n return True\n return False", "def ishex(char: chr) -> bool:\n return char.isdigit() or char in \"abcdef\"", "def validate_hash(h):\n if len(h) not in (32, 40, 64, 128):\n return False\n\n return bool(re.match(\"[0-9a-fA-F]*$\", h))", "def isHex(string, needHexPrefix):\n return (True)", "def is_hex_bytes(hex_string):\n try:\n bytearray.fromhex(hex_string)\n except ValueError:\n return False\n\n return True", "def is_hex_color(color, verbose='info'):\n # Set the logger\n set_logger(verbose=verbose)\n\n if not isinstance(color, str):\n logger.info('Hex [%s] should be of type string' %(str(color)))\n\n return False\n\n if color.startswith('#'):\n color = color[1:]\n else:\n logger.info('Hex [%s] should start with \"#\"' %(str(color)))\n return False\n\n if len(color) != 6:\n logger.info('Hex [%s] should be of length 7 incl \"#\"' %(str(color)))\n return False\n\n try:\n int(color, 16)\n return True\n except ValueError:\n return False", "def test_hex_helpers(self, number, expected):\n self.assertEqual(positional.from_hex(expected), number)\n self.assertEqual(positional.to_hex(number), expected)", "def test_save_hex_no_hex():\n with pytest.raises(ValueError) as ex:\n uflash.save_hex('', 'foo')\n assert ex.value.args[0] == 'Cannot flash an empty .hex file.'", "def test_color__hex_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"0x1a2B3c4D\")\n\n self.assertEqual(color.r, 0x1A)\n self.assertEqual(color.g, 0x2B)\n self.assertEqual(color.b, 0x3C)\n self.assertEqual(color.a, 0x4D)", "def test_extract_not_valid_hex():\n assert uflash.extract_script('invalid input') == ''", "def valid_for(obj):\n\n if not obj.filedata:\n return False\n\n #hexstring = \"cffaedfe07000001030000800200\"\n return True", "def test_bytes_to_pretty_hexinvalid_data():\n try:\n cmds._bytes_to_pretty_hex(data=[1, 2, 3, 4, \"500\"])\n except Exception:\n # The exception that bubbles up from IntelHex is implementation detail\n # from that library, so it could be anything\n assert True, \"Exception raised\"\n else:\n raise AssertionError(\"Exception NOT raised\")", "def test_toHex(self):\r\n self.assertEqual(self.black.toHex(), '#000000')\r\n self.assertEqual(self.red.toHex(), '#ff0000')\r\n self.assertEqual(self.pink.toHex(), '#640000')", "def ascii_hexchar(s: str) -> bool:\n return frozenset(s).issubset(_ascii_h)", "def _validate_hash(data, shasum):\n from hashlib import sha1\n digest = sha1(data).hexdigest()\n if digest == shasum:\n return True\n else:\n print('Invalid shasum, got: {} , expected: {}'.format(digest, shasum))\n return False", "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n 
self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def test_hex_bytes_empty():\n assert hex_bytes(bytes([]), truncate=1) == '(0 bytes)'\n assert hex_bytes(bytes([]), truncate=0) == '(0 bytes)'", "def test_int_to_hex():\n hex_values = ['61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',\n '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f']\n index = 0\n for x in range(97, 123):\n assert pi_finder.int_to_hex(x, hex_dict) == hex_values[index]\n index += 1", "def is_hashed_base58_valid(base58):\n try:\n a2b_hashed_base58(base58)\n except EncodingError:\n return False\n return True", "def test_bytes_to_intel_hex_invalid_data():\n data = [1, 2, 3, 4, \"500\"]\n\n try:\n cmds._bytes_to_intel_hex(data=data)\n except Exception:\n # The exception that bubbles up from IntelHex is implementation detail\n # from that library, so it could be anything\n assert True, \"Exception raised\"\n else:\n raise AssertionError(\"Exception NOT raised\")", "def is_valid_address(address) -> bool:\n if not address.startswith('one1'):\n return False\n hrp, _ = bech32_decode(address)\n if not hrp:\n return False\n return True", "def test_hex():\n assert hex(Quantity(1, unit('m'))) == hex(1)", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def __set_has_hexadecimal(text=str):\n reg_ex = constants.HEXADECIMAL_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_HEXADECIMAL_KEY, text)" ]
[ "0.788637", "0.7450202", "0.7236668", "0.72257215", "0.6911687", "0.6770873", "0.67440605", "0.66821134", "0.66669804", "0.6646565", "0.6417729", "0.6403952", "0.6289003", "0.6203054", "0.6192361", "0.61916524", "0.613651", "0.6113249", "0.6108503", "0.60962427", "0.60597426", "0.60060316", "0.5962146", "0.59475464", "0.59234226", "0.5914586", "0.58622736", "0.5830874", "0.58216894", "0.5818839" ]
0.88187057
0
Test that the is_valid_color_name method returns the correct boolean for valid and invalid color names.
def test_is_valid_color_name(self):
    self.assertTrue(is_valid_color_name('black'))
    self.assertTrue(is_valid_color_name('red'))
    self.assertFalse(is_valid_color_name('#aabb11'))
    self.assertFalse(is_valid_color_name('bl(ack'))
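A name validator only needs membership in the CSS named-color table; the subset below is a hypothetical stand-in for the real table, for illustration only:

# Hypothetical subset of the CSS named-color table.
_CSS_COLOR_NAMES = frozenset({'black', 'white', 'red', 'green', 'blue', 'yellow'})


def is_valid_color_name(value):
    return value.lower() in _CSS_COLOR_NAMES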
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()", "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))", "def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))", "def test_color__name_str_arg(self):\n for name in (\"aquamarine3\", \"AQUAMARINE3\", \"AqUAmArIne3\"):\n color = pygame.Color(name)\n\n self.assertEqual(color.r, 102)\n self.assertEqual(color.g, 205)\n self.assertEqual(color.b, 170)\n self.assertEqual(color.a, 255)", "def validColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):\n return False\n else:\n return True", "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "def the_name_should_not_reflect_in_the_state_of_the_device(color):\n assert (web_app.check_value_in_state(\"color\",color),False)", "def is_valid_hair_color(hair_color: str) -> bool:\n return re.match(r'^#[a-f|0-9]{5}', hair_color)", "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False", "def the_name_should_reflect_in_the_state_of_the_device(color):\n assert web_app.check_value_in_state(\"color\",color)", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def EyeColorTest(str):\n\n\tvalidcolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\treturn str in validcolors", "def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in 
color:\n if c < MIN or c > MAX:\n return False\n return True", "def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)", "def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid", "def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)", "def is_valid(name):\n return bool(name)", "def check_dog_color(dog):\n colors = [\"White\", \"Black\", \"Brown\", \"Sable\", \"Gray\", \"Fawn\", \"Cream\"]\n\n if isinstance(dog.color, str):\n if dog.color not in colors:\n raise InvalidColorError(\"Dog color is not in the accepted list of colors\")\n else:\n raise NotStringError(\"Dog color entered is not a string\")", "def test_name(self):\n\n for name in TEST_NAMES:\n self.colorspace.setName(name)\n self.assertEqual(name, self.colorspace.getName())", "def is_valid_eye_color(eye_color: str) -> str:\n return eye_color in [\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"]", "def check_gs_name(self, name):\n if name in self.currentconfig.list_greyscales():\n QtWidgets.QMessageBox.warning(self, \"Name error\", \"Greyscale name\" + name + \" clashes with existing one\")\n return True\n return False", "def colorOK(colorStr):\n tkWdg = _getTkWdg()\n\n try:\n tkWdg.winfo_rgb(colorStr)\n except tkinter.TclError:\n return False\n return True", "def test_color__html_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"#a1B2c3D4\")\n\n self.assertEqual(color.r, 0xA1)\n self.assertEqual(color.g, 0xB2)\n self.assertEqual(color.b, 0xC3)\n self.assertEqual(color.a, 0xD4)", "def name_valid(name):\n return name.isalpha()", "def test_color__hex_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"0x1a2B3c4D\")\n\n self.assertEqual(color.r, 0x1A)\n self.assertEqual(color.g, 0x2B)\n self.assertEqual(color.b, 0x3C)\n self.assertEqual(color.a, 0x4D)", "def check_colormap(cmap):\n names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',\n 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',\n 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',\n 'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])\n if cmap not in names:\n raise Exception(\"Invalid cmap '%s', must be one of %s\" % (cmap, names))\n else:\n return cmap", "def validate_hair_color(passport: map) -> bool:\n if passport.get('hcl'):\n regex = re.compile('#[0-9a-f]{6}')\n match = regex.match(passport['hcl'])\n return bool(match)\n\n return False", "def test_color__name_str_arg_from_colordict(self):\n for name, values in THECOLORS.items():\n color = pygame.Color(name)\n\n self.assertEqual(color.r, values[0])\n self.assertEqual(color.g, values[1])\n self.assertEqual(color.b, values[2])\n self.assertEqual(color.a, values[3])", "def is_dev_name_valid(self):\n return 
self._name_re.match(self.dev_name) is not None" ]
[ "0.83200604", "0.81965154", "0.7396032", "0.73364866", "0.7164896", "0.6983881", "0.6934124", "0.6925244", "0.6899707", "0.6870832", "0.68350774", "0.6824639", "0.6812794", "0.6721971", "0.6672961", "0.6653183", "0.66404563", "0.66331595", "0.6629673", "0.655786", "0.65213215", "0.64724964", "0.6468047", "0.6447766", "0.63809776", "0.6322157", "0.630576", "0.629333", "0.62895733", "0.62833184" ]
0.9264244
0
Test that the is_valid_rgb_color method returns the correct boolean for valid and invalid RGB and RGBA colors.
def test_is_valid_rgb_color(self):
    self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))
    self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))
    self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))
    self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))
    self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))
    self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))
    self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))

    # invalid cases
    self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))
    self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))
    self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))
    self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))
    self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))
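A shape-checking sketch of is_valid_rgb_color that reproduces all twelve cases above. One hedge: it limits channels to 1-3 digits but does not range-check them to 0-255 (none of the shown cases require it); the alpha pattern accepts 0, 1, 1.0, and fractional values like .3 while rejecting 5 and 1.1:

import re

_RGB_RE = re.compile(r'^rgb\(\s*\d{1,3}\s*,\s*\d{1,3}\s*,\s*\d{1,3}\s*\)$')
_RGBA_RE = re.compile(
    r'^rgba\(\s*\d{1,3}\s*,\s*\d{1,3}\s*,\s*\d{1,3}\s*,\s*(?:0|1|1\.0|0?\.\d+)\s*\)$'
)


def is_valid_rgb_color(value):
    # rgb() takes exactly three integer channels; rgba() adds an alpha in [0, 1].
    return bool(_RGB_RE.match(value) or _RGBA_RE.match(value))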
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))", "def is_rgb_color(v):\n if hasattr(v, \"r\") and hasattr(v, \"g\") and hasattr(v, \"b\"):\n v = [v.r, v.g, v.b]\n if not isiterable(v) or len(v) < 3:\n return False\n try:\n return all([0 <= int(x) <= 255 for x in v[:3]])\n except (TypeError, ValueError):\n return False", "def __isValidRgbaColor(self, color):\n rgba = []\n \n parts = color.split(\",\")\n if len(parts) not in [3, 4]:\n return False, []\n \n for part in parts:\n try:\n c = int(part)\n except ValueError:\n return False, []\n \n if c < 0 or c > 255:\n return False, []\n \n rgba.append(c)\n \n return True, rgba", "def is_rgba_color(v):\n rgb = is_rgb_color(v)\n if not rgb:\n return False\n\n try:\n if hasattr(v, \"a\") and 0 <= int(v.a) <= 255:\n return True\n if len(v) >= 4 and 0 <= int(v[3]) <= 255:\n return True\n return False\n except (TypeError, ValueError):\n return False", "def isRGB(color):\n try:\n if color[0:4] != 'rgb(':\n return False\n if color[-1:] != ')':\n return False\n if len(color[4:-1].split(',')) != 3:\n return False\n for i in color[4:-1].split(','):\n if i.replace(' ', '').isdigit() == False:\n return False\n if int(i.replace(' ', '')) < 0 or int(i.replace(' ', '')) > 255:\n return False\n return True\n except TypeError:\n return False", "def isRGB(color):\n if not(isinstance(color, list) or isinstance(color, tuple)):\n raise pgUIException(str(color) + ' is not a valid color',\n code = 20)\n if len(color) != 3:\n raise pgUIException(str(color) + ' color has to have three components',\n code = 21)\n if not(isinstance(color[0], int))\\\n or not(isinstance(color[1], int))\\\n or not(isinstance(color[2], int)):\n raise pgUIException(str(color) + ' color components have to be integers',\n code = 23)\n for c in color:\n if c < 0 or c > 255:\n raise pgUIException(str(color) +\n ' color components are to be in between 0 and 255',\n code = 22)\n return True", "def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True", "def validColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):\n return False\n else:\n return True", "def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()", "def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def validPlayerColor(color):\n if color not in (RED, 
GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def has_valid_channel_values(rgb_coll):\n return all([is_0to255(c) and is_int(c) for c in rgb_coll])", "def _validate_color(color):\n if not isinstance(color, (list, tuple)):\n raise ValueError(\"Color has to be list, or tuple\")\n if len(color) != 3:\n raise ValueError(\"Color have to contain exactly 3 values: [R, G, B]\")\n for channel in color:\n validate_channel_value(channel)", "def test_color__rgba_int_args(self):\n color = pygame.Color(10, 20, 30, 40)\n\n self.assertEqual(color.r, 10)\n self.assertEqual(color.g, 20)\n self.assertEqual(color.b, 30)\n self.assertEqual(color.a, 40)", "def test_color__rgba_int_args_without_alpha(self):\n color = pygame.Color(10, 20, 30)\n\n self.assertEqual(color.r, 10)\n self.assertEqual(color.g, 20)\n self.assertEqual(color.b, 30)\n self.assertEqual(color.a, 255)", "def constrain_rgb(rgb: ndarray) -> bool:\n w = - min(0, *rgb) # Amount of white needed\n if w > 0:\n rgb += w # Add just enough white to make r, g, b all positive\n return True # Colour modified to fit RGB gamut\n return False # Colour within RGB gamut", "def test_color__rgba_int_args_invalid_value_without_alpha(self):\n self.assertRaises(ValueError, pygame.Color, 256, 10, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 256, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 256)", "def is_rgb(img: np.ndarray) -> bool:\n\n return len(img.shape) >= 1 and img.shape[-1] == 3", "def test_conversion_through_rgb(self):\r\n\r\n xyz = convert_color(self.color, XYZColor)\r\n hsl = convert_color(xyz, HSLColor, through_rgb_type=AdobeRGBColor)\r\n # Notice how we don't have to pass through_rgb_type explicitly.\r\n xyz2 = convert_color(hsl, XYZColor)\r\n self.assertColorMatch(xyz, xyz2)", "def test_color__rgba_int_args_invalid_value(self):\n self.assertRaises(ValueError, pygame.Color, 257, 10, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 257, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 257, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 44, 257)", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def invalid_colour(colour):\n error_message = f\"`{colour}` is not a valid RGB colour\"\n\n if not isinstance(colour, list):\n return error_message\n\n if not all([0 <= component <= 255 for component in colour]):\n return error_message\n\n return False", "def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")", "def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)", "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "def _verify_rgba_value(self, 
val):\n e = \"All RGBA color values must be integers between 0 and 255 (got {0})\"\n try:\n float(val)\n except (ValueError, TypeError):\n raise TypeError(e.format(val))\n if val < 0 or val > 255:\n raise ValueError(e.format(val))", "def valid_color_tuple(rgb_tuple, fix=False) -> (bool, tuple):\n if not isinstance(rgb_tuple, tuple):\n raise ValueError(\"valid_color_tuple(rgb_tuple) must be type(tuple)\")\n\n elif len(rgb_tuple) < 3 or len(rgb_tuple) > 4:\n raise ValueError(\n \"valid_color_tuple(rgb_tuple) should contain values for (R,G,B, or R,G,B,A)\")\n\n valid = True\n rgb_list = list(rgb_tuple)\n for i in range(len(rgb_list)):\n c = rgb_list[i]\n if not isinstance(c, int):\n raise ValueError(f\"A non-int value was passed as a color value. Received: {c}\")\n if c > 255 or c < 0:\n valid = False\n if fix:\n rgb_list[i] = 255 if c > 255 else 0\n\n if valid:\n return True, tuple(rgb_list)\n else:\n return False, tuple(rgb_list)", "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True" ]
[ "0.77912307", "0.77550155", "0.75135416", "0.73164743", "0.72870094", "0.72755575", "0.72710174", "0.7164871", "0.7113623", "0.70725626", "0.6977921", "0.6952593", "0.67974997", "0.6797254", "0.67732257", "0.6749611", "0.6695688", "0.6689227", "0.6681818", "0.66803116", "0.66342443", "0.6619944", "0.66014254", "0.6478207", "0.64068294", "0.6388932", "0.6385347", "0.6314828", "0.6281298", "0.6279357" ]
0.8792777
0
Cast all args to a common type using numpy promotion logic
def numeric_normalize_types(*args): dtype = np.result_type(*[a.dtype for a in args]) return [a.astype(dtype) for a in args]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args", "def _numpy_to_builtin(obj):\n if isinstance(obj, np.generic) and np.isscalar(obj):\n return np.asscalar(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n if isinstance(obj, dict):\n for key in obj.keys():\n value = obj[key]\n obj[key] = _numpy_to_builtin(value)\n return obj\n if isinstance(obj, (list, tuple)):\n return [_numpy_to_builtin(o) for o in obj]\n else:\n return obj", "def test_cast_array(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1.0, 2.0]))", "def test_upcast(self):\r\n if config.cast_policy == 'custom':\r\n assert arange(iscalar()).dtype == iscalar().dtype\r\n assert arange(fscalar()).dtype == fscalar().dtype\r\n assert arange(dscalar()).dtype == dscalar().dtype\r\n\r\n # int32 + float32 -> float64\r\n assert arange(iscalar(), fscalar()).dtype == dscalar().dtype\r\n assert arange(iscalar(), dscalar()).dtype == dscalar().dtype\r\n assert arange(fscalar(), dscalar()).dtype == dscalar().dtype\r\n\r\n assert arange(iscalar(), fscalar(), dscalar()).dtype == \\\r\n dscalar().dtype\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n for dtype in get_numeric_types():\r\n # Test with a single argument.\r\n arange_dtype = arange(scalar(dtype=str(dtype))).dtype\r\n numpy_dtype = numpy.arange(numpy.array(1, dtype=dtype)).dtype\r\n if (dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with two arguments.\r\n for stop_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with three arguments.\r\n for step_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype)),\r\n step=scalar(dtype=str(step_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype),\r\n step=numpy.array(1, dtype=step_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n step_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n else:\r\n raise NotImplementedError(config.cast_policy)", "def test_cast_array(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5)\n 
assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1, 2]))", "def __convert_dtype(self, x, y):\n return x.astype(self.dtype), y.astype(self.dtype)", "def convertToPrimitiveArray(objectArray: typing.List[typing.Any]) -> typing.Any:\n ...", "def upcast(fn):\n\n def upcasted_fn(a, b):\n if a.dtype == b.dtype:\n return fn(a, b)\n else:\n common = common_type(a, b)\n return fn(a.astype(common), b.astype(common))\n\n return upcasted_fn", "def pack(*args):\n result = np.empty(len(args), dtype=object)\n for i, arg in enumerate(args):\n result[i] = arg\n return result", "def promote_shapes(*args):\n if len(args) < 2:\n return args\n else:\n shapes = [jnp.shape(arg) for arg in args]\n batch_shape = lax.broadcast_shapes(*shapes)\n num_dims = len(batch_shape)\n return [\n jnp.reshape(arg, (1,) * (num_dims - len(s)) + s)\n if len(s) < num_dims\n else arg\n for arg, s in zip(args, shapes)\n ]", "def cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def _align_np_datatype_for_array(array):\n return np.asarray(array, _numpy_datatype_from_nd4j_context())", "def test_reduce_default_dtype(self):\r\n # We try multiple axis combinations even though axis should not matter.\r\n for method in self.methods:\r\n for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):\r\n axis = self.axes[idx % len(self.axes)]\r\n x = tensor.matrix(dtype=dtype)\r\n s = getattr(x, method)(axis=axis)\r\n assert s.dtype == dict(\r\n int8='int64',\r\n int16='int64',\r\n int32='int64',\r\n uint8='uint64',\r\n uint16='uint64',\r\n uint32='uint64',\r\n ).get(dtype, dtype)\r\n f = theano.function([x], s, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [n for n in topo if isinstance(n.op, self.op)], (topo, dtype)\r\n data = numpy.random.rand(3, 4) * 10\r\n data = data.astype(dtype)\r\n f(data)", "def cast(*args):\n return _ITKCostFunctionsPython.itkShapePriorMAPCostFunctionBaseID2D_cast(*args)", "def normalize_value(value: Any) -> Optional[Union[np.ndarray, List[Any], Tuple[Any]]]:\n if value is None:\n # Exclude None from falling through to blanket np.asarray conversion.\n return value\n\n if isinstance(value, (list, tuple, dict)):\n return value\n\n array = np.asarray(value)\n # TODO(#5359): Move into the function abi.\n if isinstance(value, (bool, int, float)):\n # Manually convert ints and floats to 32 bits.\n if array.dtype == np.float64:\n array = array.astype(np.float32)\n elif array.dtype == np.int64:\n array = array.astype(np.int32)\n\n return array", "def itkMultipleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def __call__(self, *args):\n return self.dtype(*args)", "def convert(self, *a, **kw):\n assert False,'Unimplemented'", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def check_type(a, b):\n\n if isinstance(a, np.ndarray):\n a = np.array(a, dtype=\"uint8\")\n if isinstance(b, np.ndarray):\n b = np.array(b, dtype=\"uint8\")\n\n if a.dtype != \"uint8\":\n a = a.astype(\"uint8\")\n\n if b.dtype != \"uint8\":\n b = b.astype(\"uint8\")\n\n return a, b", "def test_cast_array_multidim(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\"yolo\", categories, shape=2)\n sample = np.array([\"asdfa\", \"1\"], dtype=object)\n assert np.all(dim.cast(sample) == np.array([\"asdfa\", 1], dtype=object))", "def test_as_float_array():\n X = np.ones((3, 10), dtype=np.int32)\n X = X + np.arange(10, 
dtype=np.int32)\n # Checks that the return type is ok\n X2 = as_float_array(X, copy=False)\n np.testing.assert_equal(X2.dtype, np.float32)\n # Another test\n X = X.astype(np.int64)\n X2 = as_float_array(X, copy=True)\n # Checking that the array wasn't overwritten\n assert as_float_array(X, False) is not X\n # Checking that the new type is ok\n np.testing.assert_equal(X2.dtype, np.float64)\n # Here, X is of the right type, it shouldn't be modified\n X = np.ones((3, 2), dtype=np.float32)\n assert as_float_array(X, copy=False) is X", "def __array_function__(self, func, types, args, kwargs):\n try:\n if not func.__module__.startswith(\"numpy\"):\n return NotImplemented\n except AttributeError:\n return NotImplemented\n _args = list(map(MetaTensor._convert, args))\n _kwargs = {k: MetaTensor._convert(v) for k, v in kwargs.items()}\n return func(*_args, **_kwargs)", "def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,\n convert_timedeltas=True, copy=True):\n\n # if we have passed in a list or scalar\n if isinstance(values, (list, tuple)):\n values = np.array(values, dtype=np.object_)\n if not hasattr(values, 'dtype'):\n values = np.array([values], dtype=np.object_)\n\n # convert dates\n if convert_dates and values.dtype == np.object_:\n\n # we take an aggressive stance and convert to datetime64[ns]\n if convert_dates == 'coerce':\n new_values = _possibly_cast_to_datetime(values, 'M8[ns]',\n errors='coerce')\n\n # if we are all nans then leave me alone\n if not isnull(new_values).all():\n values = new_values\n\n else:\n values = lib.maybe_convert_objects(values,\n convert_datetime=convert_dates)\n\n # convert timedeltas\n if convert_timedeltas and values.dtype == np.object_:\n\n if convert_timedeltas == 'coerce':\n from pandas.tseries.timedeltas import to_timedelta\n new_values = to_timedelta(values, coerce=True)\n\n # if we are all nans then leave me alone\n if not isnull(new_values).all():\n values = new_values\n\n else:\n values = lib.maybe_convert_objects(\n values, convert_timedelta=convert_timedeltas)\n\n # convert to numeric\n if values.dtype == np.object_:\n if convert_numeric:\n try:\n new_values = lib.maybe_convert_numeric(values, set(),\n coerce_numeric=True)\n\n # if we are all nans then leave me alone\n if not isnull(new_values).all():\n values = new_values\n\n except:\n pass\n else:\n # soft-conversion\n values = lib.maybe_convert_objects(values)\n\n values = values.copy() if copy else values\n\n return values", "def _cast_forward_inputs(self, *args, **kwargs):\n # TODO: Do not use the side stream for tensor copies for now;\n # investigate the perf with/without it\n # TODO: For mixed precision, move the inputs to the compute device and\n # cast to reduced-precision in a single `to()` call\n args, kwargs = _to_kwargs(args, kwargs, self.compute_device.index, False)\n args = args[0]\n kwargs = kwargs[0]\n if self._mixed_precision_enabled_for_params():\n input_dtype = self.mixed_precision.param_dtype\n args, kwargs = self._cast_fp_inputs_to_dtype(\n input_dtype, *args, **kwargs,\n )\n return args, kwargs", "def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def _unify_targets(inputs, targets):\n if isinstance(targets, int):\n return np.array([[targets] for _ in inputs]).astype(np.int)\n if isinstance(targets, Tensor):\n if not targets.shape:\n return np.array([[targets.asnumpy()] for _ in inputs]).astype(np.int)\n if 
len(targets.shape) == 1:\n return np.array([[t.asnumpy()] for t in targets]).astype(np.int)\n if len(targets.shape) == 2:\n return np.array([t.asnumpy() for t in targets]).astype(np.int)\n return targets", "def make_constant(args):\r\n def conv(a):\r\n if a is None:\r\n return a\r\n elif isinstance(a, slice):\r\n return slice(conv(a.start),\r\n conv(a.stop),\r\n conv(a.step))\r\n elif isinstance(a, (int, long, numpy.integer)):\r\n return scal.ScalarConstant(scal.int64, a)\r\n else:\r\n return a\r\n return tuple(map(conv, args))", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data" ]
[ "0.70481193", "0.6366465", "0.63539773", "0.63233066", "0.6275716", "0.614064", "0.61271524", "0.61202806", "0.59463507", "0.5895006", "0.58899707", "0.58613306", "0.5814436", "0.58100325", "0.5803434", "0.5773968", "0.5766643", "0.57581913", "0.57475054", "0.57052386", "0.56879306", "0.5680731", "0.56788933", "0.5676022", "0.56466454", "0.5623894", "0.5622826", "0.56201285", "0.5591321", "0.5590698" ]
0.6803261
1
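A minimal runnable sketch of the `numeric_normalize_types` document in the row above, re-declared locally so it needs only NumPy; the sample arrays and the printed result are illustrative assumptions, not part of the dataset.

```python
import numpy as np

def numeric_normalize_types(*args):
    # Promote every input to the one common dtype NumPy's promotion
    # rules would pick for the whole set of arguments.
    dtype = np.result_type(*[a.dtype for a in args])
    return [a.astype(dtype) for a in args]

a = np.array([1, 2, 3], dtype=np.int32)
b = np.array([0.5, 1.5], dtype=np.float64)
# int32 combined with float64 promotes to float64 for both outputs.
print([x.dtype for x in numeric_normalize_types(a, b)])
# -> [dtype('float64'), dtype('float64')]
```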
Given a numpy or pandas dtype, converts it into the equivalent cuDF Python dtype.
def cudf_dtype_from_pydata_dtype(dtype): if cudf.api.types.is_categorical_dtype(dtype): return cudf.core.dtypes.CategoricalDtype elif cudf.api.types.is_decimal32_dtype(dtype): return cudf.core.dtypes.Decimal32Dtype elif cudf.api.types.is_decimal64_dtype(dtype): return cudf.core.dtypes.Decimal64Dtype elif cudf.api.types.is_decimal128_dtype(dtype): return cudf.core.dtypes.Decimal128Dtype elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES: return dtype.type return infer_dtype_from_object(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dtype(x, dtype):\n return x.type(dtype)", "def np_dtype(dali_dtype):\n return numpy.dtype(dali_dtype)", "def cudf_dtype_from_pa_type(typ):\n if pa.types.is_list(typ):\n return cudf.core.dtypes.ListDtype.from_arrow(typ)\n elif pa.types.is_struct(typ):\n return cudf.core.dtypes.StructDtype.from_arrow(typ)\n elif pa.types.is_decimal(typ):\n return cudf.core.dtypes.Decimal128Dtype.from_arrow(typ)\n else:\n return cudf.api.types.pandas_dtype(typ.to_pandas_dtype())", "def dtype(a):\n return a.dtype", "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def cudf_dtype_to_pa_type(dtype):\n if cudf.api.types.is_categorical_dtype(dtype):\n raise NotImplementedError()\n elif (\n cudf.api.types.is_list_dtype(dtype)\n or cudf.api.types.is_struct_dtype(dtype)\n or cudf.api.types.is_decimal_dtype(dtype)\n ):\n return dtype.to_arrow()\n else:\n return np_to_pa_dtype(cudf.dtype(dtype))", "def _getconv(dtype):\n\n\n\n\ttyp = dtype.type\n\tif issubclass(typ, np.bool_):\n\t\treturn lambda x: bool(int(x))\n\tif issubclass(typ, np.uint64):\n\t\treturn np.uint64\n\tif issubclass(typ, np.int64):\n\t\treturn np.int64\n\tif issubclass(typ, np.int32):\n\t\treturn np.int32\n\telif issubclass(typ, np.longdouble):\n\t\treturn np.longdouble\n\telif issubclass(typ, np.floating):\n\t\treturn np.float32\n\telse:\n\t\traise SystemExit(\"Incorrect data type\")", "def to_torch_dtype(numpy_dtype):\n x = np.zeros([1], dtype=numpy_dtype)\n t = torch.from_numpy(x)\n return t.dtype", "def col(name=None, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n type = CudaNdarrayType(dtype=dtype, broadcastable=(False, True))\r\n return type(name)", "def float2dtype(float_type):\n if float_type == 'single' or float_type is None:\n return numpy.float32\n if float_type == 'double':\n return numpy.float64\n raise NotImplementedError (`float_type`)", "def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj", "def dtype_to_type(dtype) -> Type:\n if dtype == np.object:\n return str\n else:\n return type(np.zeros(1, dtype).item())", "def convert_numpy_type(cls, dtype):\n\n import numpy as np\n\n m = {\n 'int64': cls.DATATYPE_INTEGER64,\n 'float64': cls.DATATYPE_FLOAT,\n 'object': cls.DATATYPE_TEXT # Hack. 
Pandas makes strings into object.\n\n }\n\n t = m.get(dtype.name, None)\n\n if not t:\n raise TypeError(\n \"Failed to convert numpy type: '{}' \".format(\n dtype.name))\n\n return t", "def infer_dtype(self):\n raise NotImplementedError", "def dtype():\n return RaggedDtype()", "def pytype_to_dtype(obj):\n\n if isinstance(obj, np.dtype):\n obj = obj.type\n if isinstance(obj, typing.Type):\n return obj\n if isinstance(obj, type) and obj in _simple_types:\n return _simple_types[obj]\n raise NotImplementedError()", "def dtype(self) -> tf.dtypes.DType:", "def dtype(basis: Basis) -> FDataBasisDType:\n return FDataBasisDType(basis=basis)", "def dtype(self) -> np.dtype:\n ...", "def dtype_float(dtype: DType):\n return promote_dtypes(dtype, np.float16)", "def map_dtype(dtype):\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass # half floats not supported yet\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_", "def to_ctype(self, array, name_cvalue):\n if not isinstance(array, np.ndarray):\n array = np.asarray(array)\n if array.ndim != len(self.shape):\n raise ValueError(f\"Expected a {len(self.shape)}D array, got {array.ndim}D\")\n expected_dtype = np.dtype(self.ctype)\n if array.dtype != expected_dtype:\n raise ValueError(f\"Expected a {expected_dtype} array, got {array.dtype}\")\n expected_shape = tuple(\n size if isinstance(size, int) else name_cvalue.setdefault(size, IdxRepTy(real_size)).value\n for size, real_size in zip(self.shape, array.shape))\n if expected_shape != array.shape:\n raise ValueError(f\"Shape mismatch: expected {expected_shape}, but got {array.shape}\")\n if not array.flags['C_CONTIGUOUS']:\n raise ValueError(\"Only contiguous arrays supported as arguments at the moment\")\n return self.unsafe_array_ptr(array)", "def canonical_numeric_dtype(dtype):\n if dtype.is_floating:\n return tf.float32\n elif dtype.is_integer:\n return tf.int64\n else:\n raise ValueError('Bad dtype {}'.format(dtype))", "def np_to_pa_dtype(dtype):\n # special case when dtype is np.datetime64\n if dtype.kind == \"M\":\n time_unit, _ = np.datetime_data(dtype)\n if time_unit in (\"s\", \"ms\", \"us\", \"ns\"):\n # return a pa.Timestamp of the appropriate unit\n return pa.timestamp(time_unit)\n # default is int64_t UNIX ms\n return pa.date64()\n elif dtype.kind == \"m\":\n time_unit, _ = np.datetime_data(dtype)\n if time_unit in (\"s\", \"ms\", \"us\", \"ns\"):\n # return a pa.Duration of the appropriate unit\n return pa.duration(time_unit)\n # default fallback unit is ns\n return pa.duration(\"ns\")\n return _np_pa_dtypes[cudf.dtype(dtype).type]", "def get_dtype(col):\n dtype = col.dtype\n\n if isinstance(dtype, CategoricalDtype):\n col = col.astype(type(col.values[0]))\n out = get_dtype(col)\n elif np.issubdtype(dtype, np.floating):\n out = 'float32'\n elif np.issubdtype(dtype, np.integer):\n if col.max() < 32767:\n out = 'int16'\n else:\n out = 'int32'\n elif np.issubdtype(dtype, np.object_):\n size = int(col.astype(str).str.len().max())\n out = 'S{:}'.format(size)\n else:\n 
out = dtype\n\n return out", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def _maybe_convert_to_default_type(dtype):\n if cudf.get_option(\"default_integer_bitwidth\"):\n if cudf.api.types.is_signed_integer_dtype(dtype):\n return cudf.dtype(\n f'i{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n elif cudf.api.types.is_unsigned_integer_dtype(dtype):\n return cudf.dtype(\n f'u{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n if cudf.get_option(\n \"default_float_bitwidth\"\n ) and cudf.api.types.is_float_dtype(dtype):\n return cudf.dtype(f'f{cudf.get_option(\"default_float_bitwidth\")//8}')\n\n return dtype", "def to_cudf_compatible_scalar(val, dtype=None):\n\n if cudf._lib.scalar._is_null_host_scalar(val) or isinstance(\n val, cudf.Scalar\n ):\n return val\n\n if not cudf.api.types._is_scalar_or_zero_d_array(val):\n raise ValueError(\n f\"Cannot convert value of type {type(val).__name__} \"\n \"to cudf scalar\"\n )\n\n if isinstance(val, Decimal):\n return val\n\n if isinstance(val, (np.ndarray, cp.ndarray)) and val.ndim == 0:\n val = val.item()\n\n if (\n (dtype is None) and isinstance(val, str)\n ) or cudf.api.types.is_string_dtype(dtype):\n dtype = \"str\"\n\n if isinstance(val, str) and val.endswith(\"\\x00\"):\n # Numpy string dtypes are fixed width and use NULL to\n # indicate the end of the string, so they cannot\n # distinguish between \"abc\\x00\" and \"abc\".\n # https://github.com/numpy/numpy/issues/20118\n # In this case, don't try going through numpy and just use\n # the string value directly (cudf.DeviceScalar will DTRT)\n return val\n\n tz_error_msg = (\n \"Cannot covert a timezone-aware timestamp to timezone-naive scalar.\"\n )\n if isinstance(val, pd.Timestamp):\n if val.tz is not None:\n raise NotImplementedError(tz_error_msg)\n val = val.to_datetime64()\n elif isinstance(val, pd.Timedelta):\n val = val.to_timedelta64()\n elif isinstance(val, datetime.datetime):\n if val.tzinfo is not None:\n raise NotImplementedError(tz_error_msg)\n val = np.datetime64(val)\n elif isinstance(val, datetime.timedelta):\n val = np.timedelta64(val)\n\n val = _maybe_convert_to_default_type(\n cudf.api.types.pandas_dtype(type(val))\n ).type(val)\n\n if dtype is not None:\n if isinstance(val, str) and np.dtype(dtype).kind == \"M\":\n # pd.Timestamp can handle str, but not np.str_\n val = pd.Timestamp(str(val)).to_datetime64().astype(dtype)\n else:\n val = val.astype(dtype)\n\n if val.dtype.type is np.datetime64:\n time_unit, _ = np.datetime_data(val.dtype)\n if time_unit in (\"D\", \"W\", \"M\", \"Y\"):\n val = val.astype(\"datetime64[s]\")\n elif val.dtype.type is np.timedelta64:\n time_unit, _ = np.datetime_data(val.dtype)\n if time_unit in (\"D\", \"W\", \"M\", \"Y\"):\n val = val.astype(\"timedelta64[ns]\")\n\n return val", 
"def cast(self, dtype):\n self.dtype = np.dtype(dtype)\n self.preprocess = False\n self.set_data(self.data)", "def tensor_dtype_to_np_dtype(tensor_dtype: int) -> np.dtype:\n return mapping.TENSOR_TYPE_MAP[tensor_dtype].np_dtype" ]
[ "0.7102082", "0.6950359", "0.68659675", "0.6791954", "0.67600906", "0.6706449", "0.6687098", "0.6632583", "0.66178405", "0.6519006", "0.6518103", "0.6425718", "0.63941944", "0.63792527", "0.63391143", "0.6304734", "0.62857044", "0.6259856", "0.6246659", "0.6194909", "0.61477524", "0.6141035", "0.613633", "0.6120402", "0.6104377", "0.6103292", "0.60877943", "0.6074882", "0.6073866", "0.6059163" ]
0.75923187
0
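A simplified stand-in for the `cudf_dtype_from_pydata_dtype` document above, covering only its final supported-NumPy-dtype branch (`return dtype.type`); the categorical and decimal branches require cuDF and are omitted, so this sketch runs with NumPy alone.

```python
import numpy as np

def dtype_to_scalar_type(dtype):
    # Mirrors the fall-through branch of the documented helper: a
    # supported NumPy dtype maps to its scalar type object.
    return np.dtype(dtype).type

print(dtype_to_scalar_type("int64"))     # <class 'numpy.int64'>
print(dtype_to_scalar_type(np.float32))  # <class 'numpy.float32'>
```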
Given a cuDF pyarrow dtype, converts it into the equivalent cuDF/pandas dtype.
def cudf_dtype_from_pa_type(typ): if pa.types.is_list(typ): return cudf.core.dtypes.ListDtype.from_arrow(typ) elif pa.types.is_struct(typ): return cudf.core.dtypes.StructDtype.from_arrow(typ) elif pa.types.is_decimal(typ): return cudf.core.dtypes.Decimal128Dtype.from_arrow(typ) else: return cudf.api.types.pandas_dtype(typ.to_pandas_dtype())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cudf_dtype_from_pydata_dtype(dtype):\n\n if cudf.api.types.is_categorical_dtype(dtype):\n return cudf.core.dtypes.CategoricalDtype\n elif cudf.api.types.is_decimal32_dtype(dtype):\n return cudf.core.dtypes.Decimal32Dtype\n elif cudf.api.types.is_decimal64_dtype(dtype):\n return cudf.core.dtypes.Decimal64Dtype\n elif cudf.api.types.is_decimal128_dtype(dtype):\n return cudf.core.dtypes.Decimal128Dtype\n elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:\n return dtype.type\n\n return infer_dtype_from_object(dtype)", "def cudf_dtype_to_pa_type(dtype):\n if cudf.api.types.is_categorical_dtype(dtype):\n raise NotImplementedError()\n elif (\n cudf.api.types.is_list_dtype(dtype)\n or cudf.api.types.is_struct_dtype(dtype)\n or cudf.api.types.is_decimal_dtype(dtype)\n ):\n return dtype.to_arrow()\n else:\n return np_to_pa_dtype(cudf.dtype(dtype))", "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def to_dtype(x, dtype):\n return x.type(dtype)", "def to_cudf_compatible_scalar(val, dtype=None):\n\n if cudf._lib.scalar._is_null_host_scalar(val) or isinstance(\n val, cudf.Scalar\n ):\n return val\n\n if not cudf.api.types._is_scalar_or_zero_d_array(val):\n raise ValueError(\n f\"Cannot convert value of type {type(val).__name__} \"\n \"to cudf scalar\"\n )\n\n if isinstance(val, Decimal):\n return val\n\n if isinstance(val, (np.ndarray, cp.ndarray)) and val.ndim == 0:\n val = val.item()\n\n if (\n (dtype is None) and isinstance(val, str)\n ) or cudf.api.types.is_string_dtype(dtype):\n dtype = \"str\"\n\n if isinstance(val, str) and val.endswith(\"\\x00\"):\n # Numpy string dtypes are fixed width and use NULL to\n # indicate the end of the string, so they cannot\n # distinguish between \"abc\\x00\" and \"abc\".\n # https://github.com/numpy/numpy/issues/20118\n # In this case, don't try going through numpy and just use\n # the string value directly (cudf.DeviceScalar will DTRT)\n return val\n\n tz_error_msg = (\n \"Cannot covert a timezone-aware timestamp to timezone-naive scalar.\"\n )\n if isinstance(val, pd.Timestamp):\n if val.tz is not None:\n raise NotImplementedError(tz_error_msg)\n val = val.to_datetime64()\n elif isinstance(val, pd.Timedelta):\n val = val.to_timedelta64()\n elif isinstance(val, datetime.datetime):\n if val.tzinfo is not None:\n raise NotImplementedError(tz_error_msg)\n val = np.datetime64(val)\n elif isinstance(val, datetime.timedelta):\n val = np.timedelta64(val)\n\n val = _maybe_convert_to_default_type(\n cudf.api.types.pandas_dtype(type(val))\n ).type(val)\n\n if dtype is not None:\n if isinstance(val, str) and np.dtype(dtype).kind == \"M\":\n # pd.Timestamp can handle str, but not np.str_\n val = pd.Timestamp(str(val)).to_datetime64().astype(dtype)\n else:\n val = val.astype(dtype)\n\n if val.dtype.type is np.datetime64:\n time_unit, _ = np.datetime_data(val.dtype)\n if time_unit in (\"D\", \"W\", \"M\", \"Y\"):\n val = val.astype(\"datetime64[s]\")\n elif val.dtype.type is np.timedelta64:\n time_unit, _ = np.datetime_data(val.dtype)\n if time_unit in (\"D\", \"W\", \"M\", \"Y\"):\n val = val.astype(\"timedelta64[ns]\")\n\n return val", "def _can_cast(from_dtype, to_dtype):\n if cudf.utils.utils.is_na_like(from_dtype):\n return True\n if isinstance(from_dtype, type):\n from_dtype = cudf.dtype(from_dtype)\n if isinstance(to_dtype, type):\n to_dtype = cudf.dtype(to_dtype)\n\n # TODO : Add precision & scale checking for\n # decimal types in future\n\n if isinstance(from_dtype, 
cudf.core.dtypes.DecimalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n if to_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(from_dtype, np.dtype):\n if isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype, to_dtype)\n elif isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n if from_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(to_dtype, cudf.core.types.CategoricalDtype):\n return True\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.ListDtype):\n # TODO: Add level based checks too once casting of\n # list columns is supported\n if isinstance(to_dtype, cudf.core.dtypes.ListDtype):\n return np.can_cast(from_dtype.leaf_type, to_dtype.leaf_type)\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.CategoricalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.CategoricalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype._categories.dtype, to_dtype)\n else:\n return False\n else:\n return np.can_cast(from_dtype, to_dtype)", "def np_to_pa_dtype(dtype):\n # special case when dtype is np.datetime64\n if dtype.kind == \"M\":\n time_unit, _ = np.datetime_data(dtype)\n if time_unit in (\"s\", \"ms\", \"us\", \"ns\"):\n # return a pa.Timestamp of the appropriate unit\n return pa.timestamp(time_unit)\n # default is int64_t UNIX ms\n return pa.date64()\n elif dtype.kind == \"m\":\n time_unit, _ = np.datetime_data(dtype)\n if time_unit in (\"s\", \"ms\", \"us\", \"ns\"):\n # return a pa.Duration of the appropriate unit\n return pa.duration(time_unit)\n # default fallback unit is ns\n return pa.duration(\"ns\")\n return _np_pa_dtypes[cudf.dtype(dtype).type]", "def convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n dtype_backend: DtypeBackend = \"numpy_nullable\",\n ):\n return DataFrameDefault.register(pandas.DataFrame.convert_dtypes)(\n self,\n infer_objects=infer_objects,\n convert_string=convert_string,\n convert_integer=convert_integer,\n convert_boolean=convert_boolean,\n convert_floating=convert_floating,\n dtype_backend=dtype_backend,\n )", "def _maybe_convert_to_default_type(dtype):\n if cudf.get_option(\"default_integer_bitwidth\"):\n if cudf.api.types.is_signed_integer_dtype(dtype):\n return cudf.dtype(\n f'i{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n elif cudf.api.types.is_unsigned_integer_dtype(dtype):\n return cudf.dtype(\n f'u{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n if cudf.get_option(\n \"default_float_bitwidth\"\n ) and cudf.api.types.is_float_dtype(dtype):\n return cudf.dtype(f'f{cudf.get_option(\"default_float_bitwidth\")//8}')\n\n return dtype", "def _getconv(dtype):\n\n\n\n\ttyp = dtype.type\n\tif issubclass(typ, np.bool_):\n\t\treturn lambda x: bool(int(x))\n\tif issubclass(typ, np.uint64):\n\t\treturn np.uint64\n\tif issubclass(typ, np.int64):\n\t\treturn np.int64\n\tif issubclass(typ, np.int32):\n\t\treturn np.int32\n\telif issubclass(typ, np.longdouble):\n\t\treturn np.longdouble\n\telif issubclass(typ, np.floating):\n\t\treturn np.float32\n\telse:\n\t\traise SystemExit(\"Incorrect data type\")", "def test_roundtrip_from_dataframe2(self):\n import pandas as pd\n df = pd.DataFrame(data={\n 'a': np.arange(3),\n 'b': np.arange(3)[::-1]\n })\n 
ca = carray(df, dtype=np.dtype(np.float))\n assert_array_equal(df, ca)\n self.assertEqual(ca.dtype, np.dtype(np.float),\n msg='carray has been created with invalid dtype')", "def np_dtype(dali_dtype):\n return numpy.dtype(dali_dtype)", "def col(name=None, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n type = CudaNdarrayType(dtype=dtype, broadcastable=(False, True))\r\n return type(name)", "def infer_dtype(self):\n raise NotImplementedError", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def dtype(a):\n return a.dtype", "def _convert_dtype_value(val):\n\n convert_dtype_map = {\n 21: \"int8\",\n 20: \"uint8\",\n 6: \"float64\",\n 5: \"float32\",\n 4: \"float16\",\n 3: \"int64\",\n 2: \"int32\",\n 1: \"int16\",\n 0: \"bool\",\n }\n if val not in convert_dtype_map:\n msg = f\"Paddle data type value {val} is not handled yet.\"\n raise NotImplementedError(msg)\n return convert_dtype_map[val]", "def dtype(basis: Basis) -> FDataBasisDType:\n return FDataBasisDType(basis=basis)", "def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res", "def float2dtype(float_type):\n if float_type == 'single' or float_type is None:\n return numpy.float32\n if float_type == 'double':\n return numpy.float64\n raise NotImplementedError (`float_type`)", "def convert_types(df):\n \n # Iterate through each column\n for c in df:\n \n # Convert ids and booleans to integers\n if ('SK_ID' in c):\n df[c] = df[c].fillna(0).astype(np.int32)\n \n # Convert objects to category\n elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n df[c] = df[c].astype('category')\n \n # Booleans mapped to integers\n elif list(df[c].unique()) == [1, 0]:\n df[c] = df[c].astype(bool)\n \n # Float64 to float32\n elif df[c].dtype == float:\n df[c] = df[c].astype(np.float32)\n \n # Int64 to int32\n elif df[c].dtype == int:\n df[c] = df[c].astype(np.int32)\n \n return df", "def pytype_to_dtype(obj):\n\n if isinstance(obj, np.dtype):\n obj = obj.type\n if isinstance(obj, typing.Type):\n return obj\n if isinstance(obj, type) and obj in _simple_types:\n return _simple_types[obj]\n raise NotImplementedError()", "def change_col_type(df,schema):\n d = {'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}\n \n for c,t in schema.items():\n df = df.withColumn(c,col(c).cast(d[t]))\n return df", "def encode_dtypes(df):\n\n global catn, cato\n\n # Nominal categories\n for name in catn:\n df[name] = df[name].astype(\"category\")\n # Add a None category for missing values\n if \"None\" not in df[name].cat.categories:\n df[name].cat.add_categories(\"None\", inplace=True)\n # Ordinal categories\n for name, levels in cato.items():\n df[name] = 
df[name].astype(CategoricalDtype(levels,\n ordered=True))\n return df", "def to_torch_dtype(numpy_dtype):\n x = np.zeros([1], dtype=numpy_dtype)\n t = torch.from_numpy(x)\n return t.dtype", "def ibis_dtype_to_pandas(ibis_dtype):\n assert isinstance(ibis_dtype, dt.DataType)\n\n if isinstance(ibis_dtype, dt.Timestamp) and ibis_dtype.timezone:\n return DatetimeTZDtype('ns', ibis_dtype.timezone)\n elif isinstance(ibis_dtype, dt.Interval):\n return 'timedelta64[{}]'.format(ibis_dtype.unit)\n elif type(ibis_dtype) in _ibis_dtypes:\n return _ibis_dtypes[type(ibis_dtype)]\n else:\n return 'object'", "def cast_type(cdm_column_type, value):\n if cdm_column_type in ('integer', 'int64'):\n # Regex check only relevant if submission dtype is 'object'\n if not re.match(SCIENTIFIC_NOTATION_REGEX, str(value)):\n return int(value)\n if cdm_column_type in ('character varying', 'text', 'string'):\n return str(value)\n if cdm_column_type == 'numeric':\n return float(value)\n if cdm_column_type == 'float' and isinstance(value, float):\n return value\n if cdm_column_type == 'date' and isinstance(value, datetime.date):\n return value\n if cdm_column_type == 'timestamp' and isinstance(\n value, datetime.datetime): # do not do datetime.datetime\n return value", "def dtype(self) -> tf.dtypes.DType:", "def map_dtype(dtype):\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass # half floats not supported yet\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_", "def dtype_float(dtype: DType):\n return promote_dtypes(dtype, np.float16)" ]
[ "0.757576", "0.7128742", "0.6667662", "0.63560766", "0.6272239", "0.6179554", "0.61016786", "0.60577655", "0.60316265", "0.59823334", "0.5971781", "0.59381866", "0.58385044", "0.5755033", "0.5734548", "0.5718757", "0.5669331", "0.5662042", "0.5653364", "0.56298435", "0.56290746", "0.5628144", "0.5577407", "0.55760187", "0.55497295", "0.55259436", "0.5504739", "0.5475907", "0.54612553", "0.54311657" ]
0.73913157
1
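An illustration of the default branch of the `cudf_dtype_from_pa_type` document above: for non-list, non-struct, non-decimal arrow types it resolves a pandas-compatible dtype via `to_pandas_dtype()`. Requires only pyarrow; the sample types are chosen for illustration.

```python
import pyarrow as pa

# Plain arrow types fall through to a pandas-compatible dtype.
print(pa.int64().to_pandas_dtype())    # <class 'numpy.int64'>
print(pa.float32().to_pandas_dtype())  # <class 'numpy.float32'>
print(pa.string().to_pandas_dtype())   # object dtype for strings
```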
Converts the value `val` to a numpy/Pandas scalar, optionally casting to `dtype`. If `val` is None, returns None.
def to_cudf_compatible_scalar(val, dtype=None): if cudf._lib.scalar._is_null_host_scalar(val) or isinstance( val, cudf.Scalar ): return val if not cudf.api.types._is_scalar_or_zero_d_array(val): raise ValueError( f"Cannot convert value of type {type(val).__name__} " "to cudf scalar" ) if isinstance(val, Decimal): return val if isinstance(val, (np.ndarray, cp.ndarray)) and val.ndim == 0: val = val.item() if ( (dtype is None) and isinstance(val, str) ) or cudf.api.types.is_string_dtype(dtype): dtype = "str" if isinstance(val, str) and val.endswith("\x00"): # Numpy string dtypes are fixed width and use NULL to # indicate the end of the string, so they cannot # distinguish between "abc\x00" and "abc". # https://github.com/numpy/numpy/issues/20118 # In this case, don't try going through numpy and just use # the string value directly (cudf.DeviceScalar will DTRT) return val tz_error_msg = ( "Cannot covert a timezone-aware timestamp to timezone-naive scalar." ) if isinstance(val, pd.Timestamp): if val.tz is not None: raise NotImplementedError(tz_error_msg) val = val.to_datetime64() elif isinstance(val, pd.Timedelta): val = val.to_timedelta64() elif isinstance(val, datetime.datetime): if val.tzinfo is not None: raise NotImplementedError(tz_error_msg) val = np.datetime64(val) elif isinstance(val, datetime.timedelta): val = np.timedelta64(val) val = _maybe_convert_to_default_type( cudf.api.types.pandas_dtype(type(val)) ).type(val) if dtype is not None: if isinstance(val, str) and np.dtype(dtype).kind == "M": # pd.Timestamp can handle str, but not np.str_ val = pd.Timestamp(str(val)).to_datetime64().astype(dtype) else: val = val.astype(dtype) if val.dtype.type is np.datetime64: time_unit, _ = np.datetime_data(val.dtype) if time_unit in ("D", "W", "M", "Y"): val = val.astype("datetime64[s]") elif val.dtype.type is np.timedelta64: time_unit, _ = np.datetime_data(val.dtype) if time_unit in ("D", "W", "M", "Y"): val = val.astype("timedelta64[ns]") return val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def typedValue(val, dtype=None):\n\ttVal = None\n\t\n\tif dtype is not None:\n\t\tif dtype == \"num\":\n\t\t\tdtype = \"int\" if dtype.find(\".\") == -1 else \"float\"\n\t\t\t\n\t\tif dtype == \"int\":\n\t\t\ttVal = int(val)\n\t\telif dtype == \"float\":\n\t\t\ttVal = float(val)\n\t\telif dtype == \"bool\":\n\t\t\ttVal = bool(val)\n\t\telse:\n\t\t\ttVal = val\n\telse:\n\t\tif type(val) == str:\n\t\t\tlVal = val.lower()\n\t\t\n\t\t\t#int\n\t\t\tdone = True\n\t\t\ttry:\n\t\t\t\ttVal = int(val)\n\t\t\texcept ValueError:\n\t\t\t\tdone = False\n\t\t\n\t\t\t#float\n\t\t\tif not done:\t\n\t\t\t\tdone = True\n\t\t\t\ttry:\n\t\t\t\t\ttVal = float(val)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tdone = False\n\t\t\t\t\n\t\t\t#boolean\n\t\t\tif not done:\n\t\t\t\tdone = True\n\t\t\t\tif lVal == \"true\":\n\t\t\t\t\ttVal = True\n\t\t\t\telif lVal == \"false\":\n\t\t\t\t\ttVal = False\n\t\t\t\telse:\n\t\t\t\t\tdone = False\n\t\t\t#None\t\t\n\t\t\tif not done:\n\t\t\t\tif lVal == \"none\":\n\t\t\t\t\ttVal = None\n\t\t\t\telse:\n\t\t\t\t\ttVal = val\n\t\telse:\n\t\t\ttVal = val\t\t\n\t\n\treturn tVal", "def nonull(val):\n return val if not pd.isnull(val) else None", "def asarray(val, dtype=np.float64):\n # val is a list, tuple etc\n if not np.isscalar(val) and np.ndim(val) > 0:\n np_val = np.asarray(val, dtype=dtype)\n else:\n # val is a scalar number\n np_val = np.asarray([val], dtype=dtype)\n\n return np_val", "def safe_cast(value, multiplier=1, func=float):\n res = None\n try:\n res = func(value)\n except:\n pass\n\n if res is not None and math.isnan(res):\n res = None\n\n if res is not None:\n res = res * multiplier\n\n return res", "def _infer_fill_value(val):\n\n if not is_list_like(val):\n val = [val]\n val = np.array(val, copy=False)\n if is_datetimelike(val):\n return np.array('NaT', dtype=val.dtype)\n elif is_object_dtype(val.dtype):\n dtype = lib.infer_dtype(_ensure_object(val))\n if dtype in ['datetime', 'datetime64']:\n return np.array('NaT', dtype=_NS_DTYPE)\n elif dtype in ['timedelta', 'timedelta64']:\n return np.array('NaT', dtype=_TD_DTYPE)\n return np.nan", "def _as_scalar(res, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n if numpy.all(res.type.broadcastable):\r\n while res.owner and isinstance(res.owner.op, T.DimShuffle):\r\n res = res.owner.inputs[0]\r\n # may still have some number of True's\r\n if res.type.broadcastable:\r\n rval = res.dimshuffle()\r\n else:\r\n rval = res\r\n if rval.type.dtype[:3] in ('int', 'uin'):\r\n # We check that the upcast of res and dtype won't change dtype.\r\n # If dtype is float64, we will cast int64 to float64.\r\n # This is valid when res is a scalar used as input to a dot22\r\n # as the cast of the scalar can be done before or after the dot22\r\n # and this will give the same result.\r\n if theano.scalar.upcast(res.dtype, dtype) == dtype:\r\n return T.cast(rval, dtype)\r\n else:\r\n return None\r\n\r\n return rval", "def makeOned (self, val):\n if isinstance(val, pd.DataFrame):\n sh = val.shape\n if sh[1] > 1:\n self.valid = False\n return None\n val = pd.Series(val.values[:,0])\n return val", "def from_scalar(value, dtype=None):\n return TensorValue(const(value, dtype).data)", "def _convert_dtype_value(val):\n\n convert_dtype_map = {\n 21: \"int8\",\n 20: \"uint8\",\n 6: \"float64\",\n 5: \"float32\",\n 4: \"float16\",\n 3: \"int64\",\n 2: \"int32\",\n 1: \"int16\",\n 0: \"bool\",\n }\n if val not in convert_dtype_map:\n msg = f\"Paddle data type value {val} is not handled yet.\"\n raise NotImplementedError(msg)\n return 
convert_dtype_map[val]", "def _floatOrCall(val):\n try:\n return float(val)\n except TypeError:\n pass\n try:\n return float(val())\n except TypeError:\n pass\n try:\n return val.value\n except AttributeError:\n # likely a complex\n return val", "def to_scalar(obj):\n if isinstance(obj, np.generic):\n return obj.item()\n else:\n return obj", "def cast(self, value):\n if value is None:\n return None\n return self.type(value)", "def convert_scalar(self, v, t):\n return v.asnumpy().item()", "def _infer_dtype(val):\n if re.match(r'\\d{4}-\\d{2}(?:-\\d{2})?', val):\n return 'date'\n elif re.match(r'[+-]?\\d+$', val):\n return 'int'\n elif re.match(r'[+-]?\\d+%$', val):\n return 'pct'\n elif re.match(r'[a-zA-Z ]+', val):\n return 'text'\n else:\n msg = \"val={0} dtype not recognized\".format(val)\n raise ValueError(msg)", "def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value", "def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value", "def __get_default_dtype(val_type):\n\n if val_type == int:\n dtype = q_consts.int64\n elif val_type == float:\n dtype = q_consts.float64\n else:\n raise Exception(\"input element type %s is not supported\" % val_type)\n return dtype", "def get_value_helper(series_data, datatype, index, null_format):\n if DataType.Name(datatype) == \"STRING\":\n return StachUtilities.null_value_handler(datatype, series_data.string_array.values[index], null_format)\n elif DataType.Name(datatype) == \"DOUBLE\":\n return StachUtilities.null_value_handler(datatype, series_data.double_array.values[index], null_format)\n elif DataType.Name(datatype) == \"FLOAT\":\n return StachUtilities.null_value_handler(datatype, series_data.float_array.values[index], null_format)\n elif DataType.Name(datatype) == \"INT32\":\n return StachUtilities.null_value_handler(datatype, series_data.int32_array.values[index], null_format)\n elif DataType.Name(datatype) == \"INT64\":\n return 
StachUtilities.null_value_handler(datatype, series_data.int64_array.values[index], null_format)\n elif DataType.Name(datatype) == \"BOOL\":\n return StachUtilities.null_value_handler(datatype, series_data.bool_array.values[index], null_format)\n elif DataType.Name(datatype) == \"DURATION\":\n return StachUtilities.null_value_handler(datatype, series_data.duration_array.values[index], null_format)\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n return StachUtilities.null_value_handler(datatype, series_data.timestamp_array.values[index], null_format)\n else:\n ValueError(\"The datatype is not implemented\")", "def to_float(val):\n if val is None:\n return None\n if str(val) == \"null\":\n return None\n return float(val)", "def enforce_param_datatype(cls, name, value, dtype: DataType):\n if value is None:\n return\n\n if dtype == DataType.datetime:\n try:\n datetime_value = np.datetime64(value).item()\n if isinstance(datetime_value, int):\n raise MlflowException.invalid_parameter_value(\n f\"Invalid value for param {name}, it should \"\n f\"be convertible to datetime.date/datetime, got {value}\"\n )\n return datetime_value\n except ValueError as e:\n raise MlflowException.invalid_parameter_value(\n f\"Failed to convert value {value} from type {type(value).__name__} \"\n f\"to {dtype} for param {name}\"\n ) from e\n\n # Note that np.isscalar(datetime.date(...)) is False\n if not np.isscalar(value):\n raise MlflowException.invalid_parameter_value(\n f\"Value should be a scalar for param {name}, got {value}\"\n )\n\n # Always convert to python native type for params\n if getattr(DataType, f\"is_{dtype.name}\")(value):\n return DataType[dtype.name].to_python()(value)\n\n if (\n (\n DataType.is_integer(value)\n and dtype in (DataType.long, DataType.float, DataType.double)\n )\n or (DataType.is_long(value) and dtype in (DataType.float, DataType.double))\n or (DataType.is_float(value) and dtype == DataType.double)\n ):\n try:\n return DataType[dtype.name].to_python()(value)\n except ValueError as e:\n raise MlflowException.invalid_parameter_value(\n f\"Failed to convert value {value} from type {type(value).__name__} \"\n f\"to {dtype} for param {name}\"\n ) from e\n\n raise MlflowException.invalid_parameter_value(\n f\"Incompatible types for param {name}. Can not safely convert {type(value).__name__} \"\n f\"to {dtype}.\",\n )", "def numpy_val_to_pg_val(item, dtype):\n import math\n if item is None:\n return 'null'\n\n if dtype == 'text':\n if \"'\" in str(item):\n return \"'{}'\".format(item.replace(\"'\", \"\\'\\'\"))\n return \"'{}'\".format(item)\n elif dtype == 'numeric' or dtype == 'int':\n if math.isnan(item):\n return 'null'\n return str(item)\n return \"'{}'\".format(str(item).replace(\"'\", \"\\'\\'\"))", "def test_scalar_null(self):\n dset = self.f.create_dataset('x', shape=(), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.int8)", "def _conversion(self, val):\n if (self.__set_type == \"str\"):\n return val\n else:\n try:\n return ast.literal_eval(val)\n except ValueError:\n return None", "def get_dtype_and_shape(self, val):\n # get type of object as string\n val_type = str(type(val))\n matchObj = re.match(r\"<(type|class) '([^']+)'>\", val_type)\n if not matchObj:\n raise SystemError(\"** Error: Unable to find type in %s\" % val_type)\n val_type = matchObj.group(2)\n # check for \"value_info\" passed in through calling script (e.g. 
Matlab)\n # if so, then type and shape is given in val (it does not contain the actual data\n # to store.\n if val_type == 'str' and self.file.options['storage_method'] == 'none':\n # value_info string looks like the following:\n # value_info: type=\"float\", shape=\"[5]\" *OR*\n # value_info: type=\"float\", shape=\"[scalar]\"\n matchObj = re.match(r'^value_info: type=\"([^\"]+)\", shape=\"\\[([^\\]]+)\\]\"$', val)\n if matchObj:\n dtype = matchObj.group(1)\n shape = matchObj.group(2)\n if shape != 'scalar':\n # convert dimensions from string (like '4 5') to integer list\n shape = map(int, shape.split())\n return (dtype, shape)\n # check data shape and type \n if val_type in ('str', 'int', 'float', 'long', 'unicode', 'bool'):\n shape = \"scalar\"\n dtype = val_type\n elif val_type == 'list':\n # convert from list to np array to get shape\n a = np.array(val)\n shape = a.shape\n dtype = str(a.dtype)\n # print \"found list, dtype is %s, shape is:\" % dtype\n # pp.pprint (shape)\n elif 'numpy' in val_type or type(val) is h5py._hl.dataset.Dataset: \n shape = val.shape\n dtype = str(val.dtype)\n # print \"found numpy or h5py dataset, dtype is %s\", dtype\n else:\n print \"** Error, unable to determine shape of value assiged to dataset\"\n print \"value type is '%s'\" % val_type\n traceback.print_stack()\n sys.exit(1)\n return (dtype, shape)", "def to_scalar(self, v):\n raise NotImplementedError('to_scalar')", "def get_value_helper(series_data, datatype, index, null_format):\n if DataType.Name(datatype) == \"STRING\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.string_array.values[index], null_format)\n elif DataType.Name(datatype) == \"DOUBLE\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.double_array.values[index], null_format)\n elif DataType.Name(datatype) == \"FLOAT\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.float_array.values[index], null_format)\n elif DataType.Name(datatype) == \"INT32\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.int32_array.values[index], null_format)\n elif DataType.Name(datatype) == \"INT64\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.int64_array.values[index], null_format)\n elif DataType.Name(datatype) == \"BOOL\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.bool_array.values[index], null_format)\n elif DataType.Name(datatype) == \"DURATION\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.duration_array.values[index], null_format)\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n return _SeriesDataHelper.null_value_handler(datatype, series_data.timestamp_array.values[index], null_format)\n else:\n ValueError(\"The datatype is not implemented\")", "def normalize_value(value: Any) -> Optional[Union[np.ndarray, List[Any], Tuple[Any]]]:\n if value is None:\n # Exclude None from falling through to blanket np.asarray conversion.\n return value\n\n if isinstance(value, (list, tuple, dict)):\n return value\n\n array = np.asarray(value)\n # TODO(#5359): Move into the function abi.\n if isinstance(value, (bool, int, float)):\n # Manually convert ints and floats to 32 bits.\n if array.dtype == np.float64:\n array = array.astype(np.float32)\n elif array.dtype == np.int64:\n array = array.astype(np.int32)\n\n return array", "def ts_float32(val):\n return np.float64(val)", "def scalar(name=None, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n type = CudaNdarrayType(dtype=dtype, 
broadcastable=())\r\n return type(name)", "def numpy_scalar(data):\r\n\r\n # handle case where data is numpy.array([])\r\n if data.ndim > 0 and (len(data.shape) == 0 or\r\n __builtins__['max'](data.shape) == 0):\r\n assert numpy.all(numpy.array([]) == data)\r\n raise EmptyConstantError()\r\n try:\r\n numpy.complex(data) # works for all numeric scalars\r\n return data\r\n except Exception:\r\n raise NotScalarConstantError(\r\n 'v.data is non-numeric, non-scalar, or has more than one'\r\n ' unique value', data)" ]
[ "0.64111996", "0.58769083", "0.58739436", "0.58730346", "0.58598644", "0.5829844", "0.57802534", "0.5682709", "0.56748736", "0.5644178", "0.5639977", "0.55624807", "0.55353075", "0.5530968", "0.55256444", "0.55256444", "0.55067986", "0.54407966", "0.5431665", "0.5428653", "0.5427526", "0.5422156", "0.54142576", "0.5408735", "0.53725654", "0.5362629", "0.5360838", "0.53457654", "0.5345639", "0.5308875" ]
0.7483308
0
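The host-side normalization steps of the `to_cudf_compatible_scalar` document above, reproduced without cuDF: pandas timestamps and timedeltas become their NumPy counterparts, and coarse datetime units are widened. The sample values are assumptions for illustration.

```python
import numpy as np
import pandas as pd

# pandas scalars are converted to their NumPy equivalents first.
ts = pd.Timestamp("2020-01-01").to_datetime64()
td = pd.Timedelta("1 day").to_timedelta64()
print(ts.dtype, td.dtype)  # datetime64[ns] timedelta64[ns]

# Coarse units (D/W/M/Y) are widened, e.g. day resolution -> seconds,
# matching the final datetime64 branch of the documented function.
d = np.datetime64("2020-01-01")   # stored with unit 'D'
print(d.astype("datetime64[s]"))  # 2020-01-01T00:00:00
```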
This function checks whether the given `obj` is a column-like type (Series, Index, ...).
def is_column_like(obj): return ( isinstance( obj, ( cudf.core.column.ColumnBase, cudf.Series, cudf.Index, pd.Series, pd.Index, ), ) or ( hasattr(obj, "__cuda_array_interface__") and len(obj.__cuda_array_interface__["shape"]) == 1 ) or ( hasattr(obj, "__array_interface__") and len(obj.__array_interface__["shape"]) == 1 ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_object_type(df, field):\n return df[field].dtype.name == 'object'", "def can_convert_to_column(obj):\n return is_column_like(obj) or cudf.api.types.is_list_like(obj)", "def predicate(obj):\n return inspect.isclass(obj) and issubclass(obj, MafColumnRecord)", "def is_arraylike(obj):\n if isinstance(obj, list):\n return True\n elif isinstance(obj, np.ndarray):\n return True\n elif isinstance(obj, pd.Series):\n return True\n elif isinstance(obj, pd.DataFrame):\n return True\n return False", "def obj_df(df):\n mask = np.array(df.dtypes == 'object')\n df_obj = df.iloc[:, mask]\n return df_obj", "def null_checker(obj):\n if (isinstance(obj, pd.DataFrame) or\n isinstance(obj, pd.Series)):\n if np.any(pd.isnull(obj)):\n raise ValueError('Data object contains NaN values', obj)\n elif np.isscalar(obj):\n if np.isnan(obj):\n raise ValueError('Data object contains NaN values', obj)\n else:\n raise TypeError('Data object can only be scalar or Pandas.')", "def _is_user_class(obj):\n type_dict = type(obj).__dict__\n is_user_class = '_pandas_type' in type_dict\n return is_user_class", "def is_orm_value(obj):\n return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute,\n sqlalchemy.sql.expression.ColumnElement))", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def is_model_type(obj: Any) -> bool: # pragma: no cover\n pass", "def has_vector_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.vector3", "def check_is_pandas_dataframe(dataframe):\n # type: (Any) -> Optional[bool]\n try:\n from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\n except ImportError:\n return None\n\n return isinstance(dataframe, (ABCDataFrame, ABCSeries))", "def _validate(self, obj):\n assert (self._confidence in obj.columns and self._predicted in obj.columns\n and self._groundtruth in obj.columns), \\\n \"Must at least have '%s', '%s' and '%s' columns.\" \\\n % (self._confidence, self._predicted, self._groundtruth)\n assert len(obj['groundtruth']) == len(obj['predicted']) == len(obj['confidence']), \\\n \"Dataframe columns are inconsistent \"\n\n if len(obj.index) < 2:\n self._logger.fatal(\"Stored procedure returned empty dataframe\")\n raise RuntimeError(\"Stored procedure returned empty dataframe\")\n\n self._logger.debug(obj.head)", "def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)", "def is_protected_type(obj):\r\n return isinstance(obj, (\r\n types.NoneType,\r\n int, long,\r\n datetime.datetime, datetime.date, datetime.time,\r\n float, Decimal)\r\n )", "def is_protected_type(obj):\r\n return isinstance(obj, (\r\n types.NoneType,\r\n int, long,\r\n datetime.datetime, datetime.date, datetime.time,\r\n float, Decimal)\r\n )", "def applies(cls, obj):\n return type(obj) in cls.types", "def check_data_type_column_data(X):\n if type(X) is not numpy.ndarray:\n raise TypeError(\"X should be type numpy.ndarray\")\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n raise TypeError(\"X should have a single column.\")", "def is_appropriate_data_instance(self, df) -> bool:\n return isinstance(df, pl.DataFrame) or isinstance(df, pl.LazyFrame)", "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def check_type(df: pd.DataFrame, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be DataFrame: The input should be a Pandas DataFrame\"\n \" representing a matrix, where every cell is one entry of the matrix.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n if not isinstance(df, pd.DataFrame):\n return False, error_string\n else:\n return True, \"\"", "def has_numeric_type(obj: _std_typing.Any) -> bool:\n return (not has_vector_type(obj)) and (not has_string_type(obj))", "def check_is_dataframe(log):\r\n if pkgutil.find_loader(\"pandas\"):\r\n import pandas as pd\r\n return type(log) is pd.DataFrame\r\n return False", "def _is_all_int(df_list: List[Union[dd.DataFrame, pd.DataFrame]], col: str) -> bool:\n for df in df_list:\n if col in df.columns:\n srs = df[col]\n if isinstance(srs, (dd.DataFrame, pd.DataFrame)):\n for dtype in srs.dtypes:\n if not is_integer_dtype(dtype):\n return False\n elif isinstance(srs, (dd.Series, pd.Series)):\n if not is_integer_dtype(srs.dtype):\n return False\n else:\n raise ValueError(f\"unprocessed type of data:{type(srs)}\")\n return True", "def is_date_dtype(df, col_name):\n dtype = df.dtypes[col_name]\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)", "def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def obj_lister(df):\n obj_list = []\n for col in df.select_dtypes([np.object]):\n obj_list.append(col)\n return obj_list" ]
[ "0.7964477", "0.78527325", "0.70655334", "0.7048928", "0.65591943", "0.6523134", "0.6514152", "0.6495595", "0.63864714", "0.6190344", "0.6165299", "0.6131327", "0.61073923", "0.6092305", "0.6044205", "0.60420626", "0.60420626", "0.6040725", "0.60335004", "0.59948176", "0.5915746", "0.58881354", "0.5886296", "0.5881742", "0.5854421", "0.58530384", "0.5852378", "0.58508587", "0.58483636", "0.58234453" ]
0.8504088
0
This function checks if the given `obj` can be used to create a column or not.
def can_convert_to_column(obj): return is_column_like(obj) or cudf.api.types.is_list_like(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_column_like(obj):\n return (\n isinstance(\n obj,\n (\n cudf.core.column.ColumnBase,\n cudf.Series,\n cudf.Index,\n pd.Series,\n pd.Index,\n ),\n )\n or (\n hasattr(obj, \"__cuda_array_interface__\")\n and len(obj.__cuda_array_interface__[\"shape\"]) == 1\n )\n or (\n hasattr(obj, \"__array_interface__\")\n and len(obj.__array_interface__[\"shape\"]) == 1\n )\n )", "def predicate(obj):\n return inspect.isclass(obj) and issubclass(obj, MafColumnRecord)", "def _is_object_type(df, field):\n return df[field].dtype.name == 'object'", "def _is_sqlalchemy_object(obj):\n # TODO: better way?\n return hasattr(obj, \"_sa_instance_state\")", "def is_orm_value(obj):\n return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute,\n sqlalchemy.sql.expression.ColumnElement))", "def _validate(self, obj):\n assert (self._confidence in obj.columns and self._predicted in obj.columns\n and self._groundtruth in obj.columns), \\\n \"Must at least have '%s', '%s' and '%s' columns.\" \\\n % (self._confidence, self._predicted, self._groundtruth)\n assert len(obj['groundtruth']) == len(obj['predicted']) == len(obj['confidence']), \\\n \"Dataframe columns are inconsistent \"\n\n if len(obj.index) < 2:\n self._logger.fatal(\"Stored procedure returned empty dataframe\")\n raise RuntimeError(\"Stored procedure returned empty dataframe\")\n\n self._logger.debug(obj.head)", "def test_unsupported_object(self):\n\n self.assertRaises(\n (TypeError, ValueError), self.table.where, '[]'\n )\n self.assertRaises(TypeError, self.table.where, 'obj', {'obj': {}})\n self.assertRaises(\n (TypeError, ValueError), self.table.where, 'c_bool < []'\n )", "def is_rule(self, obj):\n if '_rule_type' in obj and obj['_rule_type'] in self.sql_generators:\n return True\n return False", "def is_model_type(obj: Any) -> bool: # pragma: no cover\n pass", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def applies(cls, obj):\n return type(obj) in cls.types", "def has_required(self, obj: T) -> bool:\n if (ufp_required_field := self.ufp_required_field) is None:\n return True\n if TYPE_CHECKING:\n # `ufp_required_field` is defined as a `str` in the dataclass, but\n # `__post_init__` converts it to a `tuple[str, ...]` to avoid\n # doing it at run time in `get_nested_attr` which is usually called\n # millions of times per day. This tells mypy that it's a tuple.\n assert isinstance(ufp_required_field, tuple)\n return bool(get_nested_attr(obj, ufp_required_field))", "def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'", "def exists(self, obj):\n return False", "def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False", "def is_object(space, w_obj):\n return space.wrap(space.is_object(w_obj))", "def callable(obj): # pylint: disable=redefined-builtin\n return bool(PyCallable_Check(py_object(obj)))", "def null_checker(obj):\n if (isinstance(obj, pd.DataFrame) or\n isinstance(obj, pd.Series)):\n if np.any(pd.isnull(obj)):\n raise ValueError('Data object contains NaN values', obj)\n elif np.isscalar(obj):\n if np.isnan(obj):\n raise ValueError('Data object contains NaN values', obj)\n else:\n raise TypeError('Data object can only be scalar or Pandas.')", "def determine_obj(self, obj):\n if type(obj) is Ohm:\n self._ohm_exists = self._ohm_exists ^ True\n if type(obj) is Amp:\n self._amp_exists = self._amp_exists ^ True\n if type(obj) is Volt:\n self._volt_exists = self._volt_exists ^ True\n if type(obj) is Power:\n self._power_exists = self._power_exists ^ True", "def objExists(*args, **kwargs)->bool:\n pass", "def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if request.user.profile.role == UserRole.CLIENT and obj.owner != request.user:\n return False\n if request.user.profile.role == UserRole.EXECUTOR and obj.executor != request.user:\n return False\n return True", "def _valid_typable_object_with_name(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return (ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys() and\n _valid_object_with_name(ui_object))\n else:\n assert False, 'Wrong Platform'", "def valid_for(obj):\n\n if not obj.filedata:\n return False\n\n #hexstring = \"cffaedfe07000001030000800200\"\n return True", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def healthy_test(obj: np.ndarray) -> bool:\n nb_rows, nb_cols = obj.shape\n return nb_rows == nb_cols > 1 and np.array_equal(obj, colony(nb_rows))", "def _column_exists(self, tbname, colname):\n self._check_file(tbname)\n tb = tbtool()\n tb.open(tbname)\n cols = tb.colnames()\n tb.close()\n return (colname in cols)", "def has_object_permission(self,request,view,obj):\n\n\t\tif request.method in permissions.SAFE_METHODS:\n\t\t\treturn True\n\n\t\treturn obj.id == request.user.id", "def _check_columns_with_table(table: Table, columns: Sequence[str]) -> Optional[bool]:\n for column in columns:\n if column not in table.c.keys():\n raise TypeError(f\"Specified column {column} did not exist on table {table}\")\n return True" ]
[ "0.7094436", "0.6623961", "0.6605608", "0.6200926", "0.60841525", "0.60360515", "0.59904623", "0.589983", "0.57284397", "0.5697302", "0.5693625", "0.56490564", "0.56466", "0.5643506", "0.56173486", "0.56114477", "0.5589786", "0.5583199", "0.5549619", "0.5520467", "0.5517214", "0.55030394", "0.55029976", "0.5497607", "0.5466878", "0.54609525", "0.54538405", "0.5452232", "0.544591", "0.5435402" ]
0.7146523
0
Return the smallest signed integer dtype that can represent the integer ``x``
def min_signed_type(x, min_size=8): for int_dtype in np.sctypes["int"]: if (cudf.dtype(int_dtype).itemsize * 8) >= min_size: if np.iinfo(int_dtype).min <= x <= np.iinfo(int_dtype).max: return int_dtype # resort to using `int64` and let numpy raise appropriate exception: return np.int64(x).dtype
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_unsigned_type(x, min_size=8):\n for int_dtype in np.sctypes[\"uint\"]:\n if (cudf.dtype(int_dtype).itemsize * 8) >= min_size:\n if 0 <= x <= np.iinfo(int_dtype).max:\n return int_dtype\n # resort to using `uint64` and let numpy raise appropriate exception:\n return np.uint64(x).dtype", "def minimal_dtype(val: int):\n if val < 250:\n return np.uint8\n return np.uint16 if val < 2**16 - 5 else np.uint32", "def get_smallest_dtype(arr: ndarray, uint: bool = True) -> int:\n possible_dtypes = (2**x for x in range(3, 8))\n max_number = numpy.amax(arr)\n if not uint:\n max_number = max_number * 2\n if max_number == 0:\n max_number = 1\n return next(dtype for dtype in possible_dtypes if dtype > math.log(max_number, 2))", "def min(x, axis=None, keepdims=False):\r\n x = as_tensor_variable(x)\r\n str_x_type = str(x.dtype)\r\n if str_x_type.startswith('float') or str_x_type in int_dtypes:\r\n return -max(-x, axis=axis, keepdims=keepdims)\r\n else:\r\n # Be careful about unsigned integers, complex\r\n raise NotImplementedError()", "def min_value(dtype):\n return _api_internal._min_value(dtype)", "def as_int(x, check=True):\n x = np.array(x)\n if x.dtype.kind in 'iu':\n # This works around a nasty numpy 1.4.1 bug such that:\n # >>> int(np.uint32(2**32-1)\n # -1\n return int(str(x))\n ix = int(x)\n if ix == x:\n return ix\n fx = np.floor(x)\n if check and fx != x:\n raise FloatingError('Not an integer: %s' % x)\n if not fx.dtype.type == np.longdouble:\n return int(x)\n # Subtract float64 chunks until we have all of the number. If the int is\n # too large, it will overflow\n ret = 0\n while fx != 0:\n f64 = np.float64(fx)\n fx -= f64\n ret += int(f64)\n return ret", "def smallest_int_type_for_range(minimum, maximum):\n signed = minimum < 0\n abs_max = max(maximum, abs(minimum))\n if signed:\n if abs_max < 1 << 7:\n return numpy.int8\n elif abs_max < 1 << 15:\n return numpy.int16\n elif abs_max < 1 << 31:\n return numpy.int32\n else:\n if abs_max < 1 << 8:\n return numpy.uint8\n elif abs_max < 1 << 16:\n return numpy.uint16\n elif abs_max < 1 << 32:\n return numpy.uint32\n # Return default integer type (other than in the exceptional case that the\n # value is too big to store in a signed 64-bit int)\n if not signed and abs_max > 1 << 63:\n return numpy.uint64\n else:\n return numpy.int64", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def get_lowest_one_mask(x):\n return x & (~(x-1))", "def numpy_most_compact_int_dtype(arr):\n if np.any(arr < 0):\n dtypes = int_dtypes\n else:\n dtypes = uint_dtypes\n\n arr_max = arr.max() ## FIXME: max ABS value\n for t, ii in dtypes:\n if arr_max <= ii.max:\n return arr.astype(t)\n\n raise ValueError(\"Unable to find a suitable datatype\")", "def get_lowest_set_bit(x):\n\n return x & -x", "def _safely_castable_to_int(dt):\n int_size = np.dtype(int).itemsize\n safe = (np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or (\n np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size\n )\n return safe", "def able_int_type(values):\n if any([v % 1 for v in values]):\n return None\n mn = min(values)\n mx = max(values)\n if mn >= 0:\n for ityp in np.sctypes['uint']:\n if mx <= np.iinfo(ityp).max:\n return ityp\n for ityp in np.sctypes['int']:\n info = np.iinfo(ityp)\n if mn >= info.min and mx <= info.max:\n return ityp\n return None", "def min_column_type(x, expected_type):\n\n if not isinstance(x, cudf.core.column.NumericalColumn):\n raise TypeError(\"Argument x must be of type column.NumericalColumn\")\n if x.valid_count == 0:\n return x.dtype\n\n if np.issubdtype(x.dtype, np.floating):\n return get_min_float_dtype(x)\n\n elif np.issubdtype(expected_type, np.integer):\n max_bound_dtype = np.min_scalar_type(x.max())\n min_bound_dtype = np.min_scalar_type(x.min())\n result_type = np.promote_types(max_bound_dtype, min_bound_dtype)\n else:\n result_type = x.dtype\n\n return cudf.dtype(result_type)", "def localmin(x):\r\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def MinimumValue(self):\n datatype = self.NumPyDataType\n if issubclass(datatype, numpy.integer):\n return numpy.iinfo(datatype).min\n elif issubclass(datatype, numpy.floating):\n return -numpy.inf\n else:\n raise TypeError(\"Cannot handle DataType: {0}\".format(datatype))", "def dtype_int(dtype: DType):\n # TODO: Is there a better way of doing this?\n name = list(convert(dtype, NPDType).__name__)\n while name and name[0] not in set([str(i) for i in range(10)]):\n name.pop(0)\n return _convert_back(_name_to_numpy_dtype(\"int\" + \"\".join(name)), dtype)", "def convert_to_smallest_int_type(matrix):\n target_type = smallest_int_type_for_range(matrix.min(), matrix.max())\n if target_type != matrix.dtype:\n matrix = matrix.astype(target_type, copy=False)\n return matrix", "def argmin(x, axis=None, keepdims=False):\r\n x = as_tensor_variable(x)\r\n str_x_type = str(x.dtype)\r\n if str_x_type.startswith('float') or str_x_type in int_dtypes:\r\n return argmax(-x, axis=axis, keepdims=keepdims)\r\n else:\r\n # Be careful about unsigned integers, complex\r\n raise NotImplementedError()", "def localmin(x):\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def signum ( x ) :\n ### for integers\n from ostap.core.ostap_types import is_integer as _is_integer \n if _is_integer ( x ) : return 0 if 0 == x else +1 if 0<x else -1\n ## for floating numbers\n return 0 if iszero ( x ) else +1 if 0 < x else -1", "def get_lowest_unset_bit(x):\n\n return ~x & (x + 1)", "def float_to_int_64(x):\n return np.float64(x).view(np.int64)", "def _int_overflow(x, msg=None):\n if x > iinfo(dfitpack_int).max:\n if msg is None:\n msg = f'{x!r} cannot fit into an {dfitpack_int!r}'\n raise OverflowError(msg)\n return dfitpack_int.type(x)", "def to_signed(x, bits):\n if x >= 0:\n return x\n else:\n return int('1{:0{}b}'.format(2**(bits-1)+x, bits-1), 2)", "def min(x: pd.Series, d: int or float) -> pd.Series:\n return ts_min(x, d)", "def float_to_int_32(x):\n return np.float32(x).view(np.int32)", "def tiny(x):\n\n # Make sure we have an array view\n x = np.asarray(x)\n\n # Only floating types generate a tiny\n if np.issubdtype(x.dtype, np.floating) or np.issubdtype(\n x.dtype, np.complexfloating\n ):\n dtype = x.dtype\n else:\n dtype = np.float32\n\n return np.finfo(dtype).tiny", "def floor_and_clip_to_unsigned_int(x,\n *,\n prec,\n dtype,\n half_shift):\n assert not half_shift\n x = floor_with_gradient(x)\n # TODO(lew): should be (a_max=2**prec - epsilon) for a better gradient.\n x = jnp.clip(x, a_min=0, a_max=2**prec - 1).astype(dtype)\n return x" ]
[ "0.76024413", "0.668726", "0.65399", "0.64511424", "0.6399774", "0.6128072", "0.6065688", "0.60427266", "0.60427266", "0.60032904", "0.60012406", "0.5924025", "0.5894979", "0.5814804", "0.5789021", "0.57721806", "0.57598513", "0.57583123", "0.5745024", "0.57367784", "0.57268727", "0.5723271", "0.568113", "0.5644811", "0.5545144", "0.5530499", "0.5514461", "0.54975176", "0.54863703", "0.54724556" ]
0.80808735
0
Return the smallest unsigned integer dtype that can represent the integer ``x``
def min_unsigned_type(x, min_size=8): for int_dtype in np.sctypes["uint"]: if (cudf.dtype(int_dtype).itemsize * 8) >= min_size: if 0 <= x <= np.iinfo(int_dtype).max: return int_dtype # resort to using `uint64` and let numpy raise appropriate exception: return np.uint64(x).dtype
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_signed_type(x, min_size=8):\n for int_dtype in np.sctypes[\"int\"]:\n if (cudf.dtype(int_dtype).itemsize * 8) >= min_size:\n if np.iinfo(int_dtype).min <= x <= np.iinfo(int_dtype).max:\n return int_dtype\n # resort to using `int64` and let numpy raise appropriate exception:\n return np.int64(x).dtype", "def minimal_dtype(val: int):\n if val < 250:\n return np.uint8\n return np.uint16 if val < 2**16 - 5 else np.uint32", "def get_smallest_dtype(arr: ndarray, uint: bool = True) -> int:\n possible_dtypes = (2**x for x in range(3, 8))\n max_number = numpy.amax(arr)\n if not uint:\n max_number = max_number * 2\n if max_number == 0:\n max_number = 1\n return next(dtype for dtype in possible_dtypes if dtype > math.log(max_number, 2))", "def numpy_most_compact_int_dtype(arr):\n if np.any(arr < 0):\n dtypes = int_dtypes\n else:\n dtypes = uint_dtypes\n\n arr_max = arr.max() ## FIXME: max ABS value\n for t, ii in dtypes:\n if arr_max <= ii.max:\n return arr.astype(t)\n\n raise ValueError(\"Unable to find a suitable datatype\")", "def get_lowest_unset_bit(x):\n\n return ~x & (x + 1)", "def min(x, axis=None, keepdims=False):\r\n x = as_tensor_variable(x)\r\n str_x_type = str(x.dtype)\r\n if str_x_type.startswith('float') or str_x_type in int_dtypes:\r\n return -max(-x, axis=axis, keepdims=keepdims)\r\n else:\r\n # Be careful about unsigned integers, complex\r\n raise NotImplementedError()", "def get_lowest_set_bit(x):\n\n return x & -x", "def tiny(x):\n\n # Make sure we have an array view\n x = np.asarray(x)\n\n # Only floating types generate a tiny\n if np.issubdtype(x.dtype, np.floating) or np.issubdtype(\n x.dtype, np.complexfloating\n ):\n dtype = x.dtype\n else:\n dtype = np.float32\n\n return np.finfo(dtype).tiny", "def min_value(dtype):\n return _api_internal._min_value(dtype)", "def floor_and_clip_to_unsigned_int(x,\n *,\n prec,\n dtype,\n half_shift):\n assert not half_shift\n x = floor_with_gradient(x)\n # TODO(lew): should be (a_max=2**prec - epsilon) for a better gradient.\n x = jnp.clip(x, a_min=0, a_max=2**prec - 1).astype(dtype)\n return x", "def min_column_type(x, expected_type):\n\n if not isinstance(x, cudf.core.column.NumericalColumn):\n raise TypeError(\"Argument x must be of type column.NumericalColumn\")\n if x.valid_count == 0:\n return x.dtype\n\n if np.issubdtype(x.dtype, np.floating):\n return get_min_float_dtype(x)\n\n elif np.issubdtype(expected_type, np.integer):\n max_bound_dtype = np.min_scalar_type(x.max())\n min_bound_dtype = np.min_scalar_type(x.min())\n result_type = np.promote_types(max_bound_dtype, min_bound_dtype)\n else:\n result_type = x.dtype\n\n return cudf.dtype(result_type)", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def _safely_castable_to_int(dt):\n int_size = np.dtype(int).itemsize\n safe = (np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or (\n np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size\n )\n return safe", "def _flexible_bincount(x: Tensor) ->Tensor:\n x = x - x.min()\n unique_x = torch.unique(x)\n output = _bincount(x, minlength=torch.max(unique_x) + 1)\n return output[unique_x]", "def get_lowest_one_mask(x):\n return x & (~(x-1))", "def able_int_type(values):\n if any([v % 1 for v in values]):\n return None\n mn = min(values)\n mx = max(values)\n if mn >= 0:\n for ityp in np.sctypes['uint']:\n if mx <= np.iinfo(ityp).max:\n return ityp\n for ityp in np.sctypes['int']:\n info = np.iinfo(ityp)\n if mn >= info.min and mx <= info.max:\n return ityp\n return None", "def as_int(x, check=True):\n x = np.array(x)\n if x.dtype.kind in 'iu':\n # This works around a nasty numpy 1.4.1 bug such that:\n # >>> int(np.uint32(2**32-1)\n # -1\n return int(str(x))\n ix = int(x)\n if ix == x:\n return ix\n fx = np.floor(x)\n if check and fx != x:\n raise FloatingError('Not an integer: %s' % x)\n if not fx.dtype.type == np.longdouble:\n return int(x)\n # Subtract float64 chunks until we have all of the number. If the int is\n # too large, it will overflow\n ret = 0\n while fx != 0:\n f64 = np.float64(fx)\n fx -= f64\n ret += int(f64)\n return ret", "def dtype_int(dtype: DType):\n # TODO: Is there a better way of doing this?\n name = list(convert(dtype, NPDType).__name__)\n while name and name[0] not in set([str(i) for i in range(10)]):\n name.pop(0)\n return _convert_back(_name_to_numpy_dtype(\"int\" + \"\".join(name)), dtype)", "def smallest_int_type_for_range(minimum, maximum):\n signed = minimum < 0\n abs_max = max(maximum, abs(minimum))\n if signed:\n if abs_max < 1 << 7:\n return numpy.int8\n elif abs_max < 1 << 15:\n return numpy.int16\n elif abs_max < 1 << 31:\n return numpy.int32\n else:\n if abs_max < 1 << 8:\n return numpy.uint8\n elif abs_max < 1 << 16:\n return numpy.uint16\n elif abs_max < 1 << 32:\n return numpy.uint32\n # Return default integer type (other than in the exceptional case that the\n # value is too big to store in a signed 64-bit int)\n if not signed and abs_max > 1 << 63:\n return numpy.uint64\n else:\n return numpy.int64", "def drop_lowest_set_bit(x):\n\n return x & (x - 1)", "def cat2int(x):\n s = set(x)\n v2int = {}\n for i, v in enumerate(unique):\n v2int[v] = i\n assert len(s) >= 2, 'Kind of values is smaller than 2'\n return np.array([v2int[e] for e in x], dtype=np.int32), v2int", "def _rank(x):\n shift = int64(63)\n mask = int64((1 << 63) - 1)\n i64 = x.view(int64)\n value = i64 >> shift\n value &= mask\n value ^= i64\n return value", "def float_to_int_64(x):\n return np.float64(x).view(np.int64)", "def _int_overflow(x, msg=None):\n if x > iinfo(dfitpack_int).max:\n if msg is None:\n msg = f'{x!r} cannot fit into an {dfitpack_int!r}'\n raise OverflowError(msg)\n return dfitpack_int.type(x)", "def theano_type(x):\r\n if type(x) is int:\r\n return tensor.lscalar\r\n else:\r\n raise NotImplementedError()", "def _rankf(x):\n shift = int32(31)\n mask = int32((1 << 31) - 1)\n i32 = x.view(int32)\n value = i32 >> shift\n value &= mask\n value ^= i32\n return value", "def parse_number_auto_dtype(x):\n\n value = float(x)\n\n if value.is_integer():\n value = int(value)\n\n return value", "def drop_lowest_unset_bit(x):\n\n return x | (x + 1)", "def _maybe_convert_to_default_type(dtype):\n if cudf.get_option(\"default_integer_bitwidth\"):\n if cudf.api.types.is_signed_integer_dtype(dtype):\n return cudf.dtype(\n f'i{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n elif cudf.api.types.is_unsigned_integer_dtype(dtype):\n return cudf.dtype(\n f'u{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n if cudf.get_option(\n \"default_float_bitwidth\"\n ) and cudf.api.types.is_float_dtype(dtype):\n return cudf.dtype(f'f{cudf.get_option(\"default_float_bitwidth\")//8}')\n\n return dtype" ]
[ "0.73269904", "0.70547926", "0.66033536", "0.59406954", "0.58949196", "0.58196664", "0.5806322", "0.57088995", "0.5687075", "0.5664583", "0.5655909", "0.564999", "0.564999", "0.56453305", "0.5612093", "0.5579257", "0.5549137", "0.55284584", "0.54975426", "0.5454876", "0.5427949", "0.5423741", "0.5411688", "0.5389739", "0.5374773", "0.53651965", "0.53375435", "0.52746135", "0.5260689", "0.52351" ]
0.81060517
0
Return the smallest dtype which can represent all elements of the `NumericalColumn` `x` If the column is not a subtype of `np.signedinteger` or `np.floating` returns the same dtype as the dtype of `x` without modification
def min_column_type(x, expected_type): if not isinstance(x, cudf.core.column.NumericalColumn): raise TypeError("Argument x must be of type column.NumericalColumn") if x.valid_count == 0: return x.dtype if np.issubdtype(x.dtype, np.floating): return get_min_float_dtype(x) elif np.issubdtype(expected_type, np.integer): max_bound_dtype = np.min_scalar_type(x.max()) min_bound_dtype = np.min_scalar_type(x.min()) result_type = np.promote_types(max_bound_dtype, min_bound_dtype) else: result_type = x.dtype return cudf.dtype(result_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_signed_type(x, min_size=8):\n for int_dtype in np.sctypes[\"int\"]:\n if (cudf.dtype(int_dtype).itemsize * 8) >= min_size:\n if np.iinfo(int_dtype).min <= x <= np.iinfo(int_dtype).max:\n return int_dtype\n # resort to using `int64` and let numpy raise appropriate exception:\n return np.int64(x).dtype", "def get_smallest_dtype(arr: ndarray, uint: bool = True) -> int:\n possible_dtypes = (2**x for x in range(3, 8))\n max_number = numpy.amax(arr)\n if not uint:\n max_number = max_number * 2\n if max_number == 0:\n max_number = 1\n return next(dtype for dtype in possible_dtypes if dtype > math.log(max_number, 2))", "def min_unsigned_type(x, min_size=8):\n for int_dtype in np.sctypes[\"uint\"]:\n if (cudf.dtype(int_dtype).itemsize * 8) >= min_size:\n if 0 <= x <= np.iinfo(int_dtype).max:\n return int_dtype\n # resort to using `uint64` and let numpy raise appropriate exception:\n return np.uint64(x).dtype", "def get_data_type(col_val):\n dtype = \"\"\n\n original_col_val = col_val\n digits_only = col_val.replace('-', '',1).replace(',', '', -1).replace(\".\", \"\")\n if digits_only.isdigit():\n try:\n int(original_col_val)\n dtype = TYPE_INT\n except ValueError:\n dtype = TYPE_FLOAT\n \n return dtype", "def get_dtype(col):\n dtype = col.dtype\n\n if isinstance(dtype, CategoricalDtype):\n col = col.astype(type(col.values[0]))\n out = get_dtype(col)\n elif np.issubdtype(dtype, np.floating):\n out = 'float32'\n elif np.issubdtype(dtype, np.integer):\n if col.max() < 32767:\n out = 'int16'\n else:\n out = 'int32'\n elif np.issubdtype(dtype, np.object_):\n size = int(col.astype(str).str.len().max())\n out = 'S{:}'.format(size)\n else:\n out = dtype\n\n return out", "def _infer_pa_column_type(self, column: pa.lib.ChunkedArray):\n # Validates the column to ensure that value types are consistent\n column.validate()\n return pa_to_feast_value_type(column)", "def inspect_dtype_object(self, column: str) -> str:\n\n series = self.df[column].dropna()\n\n # check for bool\n try:\n conv = pd.to_numeric(series)\n return self.inspect_dtype(conv)\n except ValueError:\n pass\n\n # check for mixed dtypes\n dtypes = {type(x) for x in series}\n if len(dtypes) > 1:\n raise TypeError(\"Column `{}` has mixed dtypes: {}. Currently, \"\n \"this is not supported.\"\n .format(column, dtypes))\n\n # check for string\n if isinstance(series[0], str):\n return \"str\"\n\n # raise if unsupported dtype is encountered\n raise TypeError(\"Column `{}` has dtype `{}` which is currently \"\n \"not supported.\"\n .format(column, type(series[0])))", "def get_column_type(\n self,\n table: exp.Table | str,\n column: exp.Column,\n dialect: DialectType = None,\n normalize: t.Optional[bool] = None,\n ) -> exp.DataType:", "def minimal_dtype(val: int):\n if val < 250:\n return np.uint8\n return np.uint16 if val < 2**16 - 5 else np.uint32", "def numpy_most_compact_int_dtype(arr):\n if np.any(arr < 0):\n dtypes = int_dtypes\n else:\n dtypes = uint_dtypes\n\n arr_max = arr.max() ## FIXME: max ABS value\n for t, ii in dtypes:\n if arr_max <= ii.max:\n return arr.astype(t)\n\n raise ValueError(\"Unable to find a suitable datatype\")", "def min_value(dtype):\n return _api_internal._min_value(dtype)", "def parse_number_auto_dtype(x):\n\n value = float(x)\n\n if value.is_integer():\n value = int(value)\n\n return value", "def dtype(a):\n return a.dtype", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.SmallInteger()", "def identify_numeric_columns(dataset):\n return identify_columns_by_type(dataset, include=['int64', 'float64'])", "def convert_to_smallest_int_type(matrix):\n target_type = smallest_int_type_for_range(matrix.min(), matrix.max())\n if target_type != matrix.dtype:\n matrix = matrix.astype(target_type, copy=False)\n return matrix", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.BigInteger()", "def simulation_column_dtype(fname):\n data_types = []\n with open(fname, 'r') as f:\n for raw_line in f:\n line = tuple(s for s in raw_line.strip().split())\n data_types.append(line)\n\n return np.dtype(data_types)", "def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)", "def to_dtype(x, dtype):\n return x.type(dtype)", "def get_numeric(X):\n return X.dtypes[X.dtypes.apply(lambda x: str(x).startswith((\"float\", \"int\", \"bool\")))].index.tolist()", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Float()", "def infer_dtype(self):\n raise NotImplementedError", "def get_floating_dtype(A):\n dtype = A.dtype\n if dtype in (torch.float16, torch.float32, torch.float64):\n return dtype\n return torch.float32", "def MinimumValue(self):\n datatype = self.NumPyDataType\n if issubclass(datatype, numpy.integer):\n return numpy.iinfo(datatype).min\n elif issubclass(datatype, numpy.floating):\n return -numpy.inf\n else:\n raise TypeError(\"Cannot handle DataType: {0}\".format(datatype))", "def get_field_dtype(self, field=None):\n\n if field in self._fields_dtypes:\n return self._fields_dtypes[field]\n\n # initialize dbtypes for all fields\n field_type = pd.read_sql(\n 'select distinct column_name, type '\n 'from fields',\n self._get_db_engine())\n\n for row in field_type.itertuples():\n self._fields_dtypes[row.column_name] = row.type\n\n return self._fields_dtypes[field] if field in self._fields_dtypes else None", "def numerical(df):\r\n numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()\r\n return numerical_var", "def _maybe_convert_to_default_type(dtype):\n if cudf.get_option(\"default_integer_bitwidth\"):\n if cudf.api.types.is_signed_integer_dtype(dtype):\n return cudf.dtype(\n f'i{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n elif cudf.api.types.is_unsigned_integer_dtype(dtype):\n return cudf.dtype(\n f'u{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n if cudf.get_option(\n \"default_float_bitwidth\"\n ) and cudf.api.types.is_float_dtype(dtype):\n return cudf.dtype(f'f{cudf.get_option(\"default_float_bitwidth\")//8}')\n\n return dtype", "def get_col_dtype(col):\n if col.dtype == \"object\":\n try:\n col_new = pd.to_datetime(col.dropna().unique())\n return ['timestamp', 'datetime']\n except:\n return [\"text\", 'string']\n elif col.dtype == 'float64':\n return ['float', 'float64']\n elif col.dtype == 'int64':\n return ['int', 'int64']\n elif col.dtype == 'datetime64[ns]':\n return ['timestamp', 'datetime']\n else:\n return ['text', 'string']", "def __get_default_dtype(val_type):\n\n if val_type == int:\n dtype = q_consts.int64\n elif val_type == float:\n dtype = q_consts.float64\n else:\n raise Exception(\"input element type %s is not supported\" % val_type)\n return dtype" ]
[ "0.6888512", "0.67179626", "0.6486889", "0.64012885", "0.6289411", "0.6284615", "0.62319744", "0.6221949", "0.614388", "0.6117546", "0.60550815", "0.6052811", "0.60378283", "0.6026932", "0.595357", "0.589386", "0.5893674", "0.5880051", "0.5877823", "0.5874977", "0.58729416", "0.58687156", "0.5838525", "0.57908225", "0.57876784", "0.5780315", "0.5774758", "0.5752214", "0.5743747", "0.5675602" ]
0.7677199
0
Scale a Decimal such that the result is the integer that would result from removing the decimal point. Examples >>> _decimal_to_int64(Decimal('1.42')) 142 >>> _decimal_to_int64(Decimal('0.0042')) 42 >>> _decimal_to_int64(Decimal('1.004201')) 1004201
def _decimal_to_int64(decimal: Decimal) -> int: return int(f"{decimal:0f}".replace(".", ""))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_integer_price(decimal_price):\n return int(float(decimal_price) * 100)", "def decimal_to_digits(decimal, min_digits=None):\n digits = abs(int(np.log10(decimal)))\n if min_digits is not None:\n digits = np.clip(digits, min_digits, 20)\n return digits", "def float_to_int_64(x):\n return np.float64(x).view(np.int64)", "def normalize_decimal(d):\n return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()", "def decimal_precision_from_scientific_notation(decimal_value: Decimal) -> int:\n return strict_integer_validator(\n None,\n -decimal_value.log10()\n )", "def round_int(dec):\n\n return int(decimal.Decimal(dec).quantize(decimal.Decimal('0'), decimal.ROUND_HALF_UP))", "def round_decimal(x, digits=0):\n x = decimal.Decimal(str(x))\n if digits == 0:\n return int(x.quantize(decimal.Decimal(\"1\"), rounding='ROUND_HALF_UP'))\n if digits > 1:\n string = '1e' + str(-1 * digits)\n else:\n string = '1e' + str(-1 * digits)\n return float(x.quantize(decimal.Decimal(string), rounding='ROUND_HALF_UP'))", "def _convert_int_to_i64(val):\n if val > 0x7FFFFFFFFFFFFFFF:\n val -= 0x10000000000000000\n return val", "def _float2int(x: float) -> int:\n return round(x * 100)", "def intify(x):\n return int(x) if almost_equal(x, round(x)) else x", "def float_to_int(x, prec=64):\n if prec == 16: return float_to_int_16(x)\n elif prec == 32: return float_to_int_32(x)\n elif prec == 64: return float_to_int_64(x)\n else: raise ValueError", "def toint(number):\n if isinstance(number, float):\n if number > 1:\n number = round(number, 0)\n else:\n # The following solves when image has small dimensions (like 1x54)\n # then scale factor 1 * 0.296296 and `number` will store `0`\n # that will later raise ZeroDivisionError.\n number = round(math.ceil(number), 0)\n return int(number)", "def decimal_ceil(x):\n int_x = int(x)\n if x - int_x == 0:\n return int_x\n return int_x + 1", "def distance_to_int(mi):\n return round(change_precision(mi, 2))", "def base2int(self, float_number):\r\n return int(round(float_number * self.mult_base))", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret", "def convert_decimal(value: t.Any) -> Decimal:\n return Decimal(value)", "def round_down(x):\n return int(math.floor(x / 10.0)) * 10", "def INT(val):\n return math.floor(val)", "def scaled_float(int_val, scale):\n assert isinstance(int_val, int)\n unscaled = decimal.Decimal(int_val)\n scaled = unscaled.scaleb(-scale)\n float_val = float(scaled)\n return float_val", "def round_down(amount: Decimal) -> Decimal:\n return Decimal(amount.quantize(Decimal('.01'), rounding=ROUND_FLOOR))", "def decimal(inte, c1):\n\n\tvector = list(str(inte))\n\tvec = []\n\tfor n in range(len(vector)):\n\t\tvector[n] = int(vector[n])\n\n\treturn dec(vector,c1)", "def lat_to_int(lat):\n lat = int((Decimal(lat) * 10000000).quantize(Decimal('1'), rounding=ROUND_HALF_UP))\n return min(900000000, max(-900000000, lat))", "def disc(x):\n return int(round(x))", "def _calculate_precision(interval_value: int) -> int:\n # log10(interval_value) + 1 is equivalent to len(str(interval_value)), but is significantly\n # faster and more memory-efficient\n if interval_value == 0:\n return 0\n if interval_value < 0:\n raise ValueError(\n f\"Expecting value to be a non-negative integer, got {interval_value}\"\n )\n return int(math.log10(interval_value)) + 1", "def _real_to_int(d):\n\n if d < 0:\n sign = 0x8000000000000000\n else:\n sign = 0\n\n exponent = log(d, 16)\n if (exponent < 0):\n exponent = ceil(exponent)\n else: # exponent > 0\n exponent = floor(exponent) + 1\n d = d / (16 ** exponent)\n\n mantissa = getMantissa(d)\n\n return sign | (int(exponent) + 64) << 56 | mantissa #updated for Python2 compatibility\n #return sign | (exponent + 64) << 56 | mantissa", "def round_down(n, decimals=0):\r\n multiplier = 10 ** decimals\r\n return int(n * multiplier) / multiplier", "def round_decimal(cls, value: Dec) -> Dec:\n # This check for numbers which are smaller than the precision allows will\n # be commented out for now as it seems to kill economic activity.\n # if value < Dec('1E-8'):\n # return Dec(0)\n return round(value, cls.currency_precision)", "def round_decimals_down(self, number, decimals: int = 2):\n if not isinstance(decimals, int):\n raise TypeError(\"decimal places must be an integer\")\n elif decimals < 0:\n raise ValueError(\"decimal places has to be 0 or more\")\n elif decimals == 0:\n return math.floor(number)\n\n factor = 10 ** decimals\n return math.floor(number * factor) / factor", "def lon_to_int(lon):\n lon = int((Decimal(lon) * 10000000).quantize(Decimal('1'), rounding=ROUND_HALF_UP))\n return (lon + 1800000000) % 3600000000 - 1800000000" ]
[ "0.6094313", "0.5868377", "0.57986164", "0.5793088", "0.57803726", "0.573587", "0.5701622", "0.55719393", "0.54925525", "0.54124594", "0.5406755", "0.53769886", "0.5276925", "0.52483565", "0.5238759", "0.5227422", "0.5219708", "0.5164578", "0.51548785", "0.50831896", "0.5032707", "0.50281215", "0.49764055", "0.4968833", "0.49365392", "0.49342513", "0.4921151", "0.48923448", "0.48477417", "0.4843297" ]
0.8136278
0
Wrapper over np.find_common_type to handle special cases
def find_common_type(dtypes): if len(dtypes) == 0: return None # Early exit for categoricals since they're not hashable and therefore # can't be put in a set. if any(cudf.api.types.is_categorical_dtype(dtype) for dtype in dtypes): if all( ( cudf.api.types.is_categorical_dtype(dtype) and (not dtype.ordered if hasattr(dtype, "ordered") else True) ) for dtype in dtypes ): if len({dtype._categories.dtype for dtype in dtypes}) == 1: return cudf.CategoricalDtype( cudf.core.column.concat_columns( [dtype._categories for dtype in dtypes] ).unique() ) else: raise ValueError( "Only unordered categories of the same underlying type " "may be coerced to a common type." ) else: # TODO: Should this be an error case (mixing categorical with other # dtypes) or should this return object? Unclear if we have enough # information to decide right now, may have to come back to this as # usage of find_common_type increases. return cudf.dtype("O") # Aggregate same types dtypes = set(dtypes) if any(cudf.api.types.is_decimal_dtype(dtype) for dtype in dtypes): if all( cudf.api.types.is_decimal_dtype(dtype) or cudf.api.types.is_numeric_dtype(dtype) for dtype in dtypes ): return _find_common_type_decimal( [ dtype for dtype in dtypes if cudf.api.types.is_decimal_dtype(dtype) ] ) else: return cudf.dtype("O") if any(cudf.api.types.is_list_dtype(dtype) for dtype in dtypes): if len(dtypes) == 1: return dtypes.get(0) else: # TODO: As list dtypes allow casting # to identical types, improve this logic of returning a # common dtype, for example: # ListDtype(int64) & ListDtype(int32) common # dtype could be ListDtype(int64). raise NotImplementedError( "Finding a common type for `ListDtype` is currently " "not supported" ) if any(cudf.api.types.is_struct_dtype(dtype) for dtype in dtypes): if len(dtypes) == 1: return dtypes.get(0) else: raise NotImplementedError( "Finding a common type for `StructDtype` is currently " "not supported" ) # Corner case 1: # Resort to np.result_type to handle "M" and "m" types separately dt_dtypes = set( filter(lambda t: cudf.api.types.is_datetime_dtype(t), dtypes) ) if len(dt_dtypes) > 0: dtypes = dtypes - dt_dtypes dtypes.add(np.result_type(*dt_dtypes)) td_dtypes = set( filter(lambda t: pd.api.types.is_timedelta64_dtype(t), dtypes) ) if len(td_dtypes) > 0: dtypes = dtypes - td_dtypes dtypes.add(np.result_type(*td_dtypes)) common_dtype = np.find_common_type(list(dtypes), []) if common_dtype == np.dtype("float16"): return cudf.dtype("float32") return cudf.dtype(common_dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def common_type(*arrays):\n dtypes = {array.dtype.name for array in arrays}\n has_complex = not _COMPLEX_DTYPES.isdisjoint(dtypes)\n has_double = not _DOUBLE_DTYPES.isdisjoint(dtypes)\n return _DTYPE_MAP[has_complex, has_double]", "def common_type(*arrays):\n arrays = [numpoly.aspolynomial(array) for array in arrays]\n arrays = [array[array.keys[0]] for array in arrays]\n return numpy.common_type(*arrays)", "def validate_common(ndarray, name):\n\tvalidate_ndarray(ndarray,(np.float, np.int), (2,) , name)", "def get_smallest_dtype(arr: ndarray, uint: bool = True) -> int:\n possible_dtypes = (2**x for x in range(3, 8))\n max_number = numpy.amax(arr)\n if not uint:\n max_number = max_number * 2\n if max_number == 0:\n max_number = 1\n return next(dtype for dtype in possible_dtypes if dtype > math.log(max_number, 2))", "def find_distance_in_same_type(self):\n pass", "def numpy_most_compact_int_dtype(arr):\n if np.any(arr < 0):\n dtypes = int_dtypes\n else:\n dtypes = uint_dtypes\n\n arr_max = arr.max() ## FIXME: max ABS value\n for t, ii in dtypes:\n if arr_max <= ii.max:\n return arr.astype(t)\n\n raise ValueError(\"Unable to find a suitable datatype\")", "def check_type(a, b):\n\n if isinstance(a, np.ndarray):\n a = np.array(a, dtype=\"uint8\")\n if isinstance(b, np.ndarray):\n b = np.array(b, dtype=\"uint8\")\n\n if a.dtype != \"uint8\":\n a = a.astype(\"uint8\")\n\n if b.dtype != \"uint8\":\n b = b.astype(\"uint8\")\n\n return a, b", "def check_iterable_item_type(first_item,iter_obj):\n\n if (\n operator.length_hint(first_item) > 1 or\n ( operator.length_hint(first_item) == 1 and not isinstance(first_item,(str,bytes)) ) or\n np.ndim(first_item) != 0\n ):\n return None\n\n dtype = np.dtype(first_item.__class__)\n if dtype.name == 'object' or 'str' in dtype.name or ( 'bytes' in dtype.name and len(first_item) > 1):\n return None\n for item in iter_obj:\n if np.ndim(item) != 0:\n return None\n common_dtype = np.result_type(np.dtype(item.__class__),dtype)\n if ( \n common_dtype.name == 'object' or\n 'str' in common_dtype.name or\n ( 'bytes' in common_dtype.name and len(item) > 1 )\n ):\n return None\n if dtype != common_dtype:\n dtype = common_dtype\n return dtype", "def common_shape(arrays):\n arrays = iter(arrays)\n shape = next(arrays).shape\n for array in arrays:\n shape = tuple(a if a == b else None\n for a, b in zip(shape, array.shape))\n return shape", "def check_argument_type(dtype, kernel_argument, i):\n types_map = {\"uint8\": [\"uchar\", \"unsigned char\", \"uint8_t\"],\n \"int8\": [\"char\", \"int8_t\"],\n \"uint16\": [\"ushort\", \"unsigned short\", \"uint16_t\"],\n \"int16\": [\"short\", \"int16_t\"],\n \"uint32\": [\"uint\", \"unsigned int\", \"uint32_t\"],\n \"int32\": [\"int\", \"int32_t\"], #discrepancy between OpenCL and C here, long may be 32bits in C\n \"uint64\": [\"ulong\", \"unsigned long\", \"uint64_t\"],\n \"int64\": [\"long\", \"int64_t\"],\n \"float16\": [\"half\"],\n \"float32\": [\"float\"],\n \"float64\": [\"double\"]}\n if dtype in types_map:\n return any([substr in kernel_argument for substr in types_map[dtype]])\n else:\n return False # unknown dtype. do not throw exception to still allow kernel to run.", "def compare_types(typ_1, typ_2):\n i_1, i_2 = map(available_location_types().index, (typ_1, typ_2))\n return i_1 - i_2", "def get_type_index(self, mask_all, col_type_dict):\n where_target = np.argwhere(mask_all == True)\n imp_categorical_index = []\n imp_continuous_index = []\n for index in where_target:\n col_type = col_type_dict[index[1]]\n if col_type is 'categotical':\n imp_categorical_index.append(index)\n elif col_type is 'continuous':\n imp_continuous_index.append(index)\n\n return imp_continuous_index, imp_categorical_index", "def shapeCompare(*args, **kwargs)->int:\n pass", "def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')", "def _find_common_cells(rows_or_columns, cell_value=1):\n\n T = np.array(rows_or_columns)\n if cell_value == 0:\n return np.where(np.logical_not(np.logical_or.reduce(T)))[0]\n else:\n return np.where(np.logical_and.reduce(T))[0]", "def sametype(variable1, variable2):\n\n # Return the result\n return isinstance(variable1, type(variable2))", "def most_specific_common_supertype(self, others):\n # pylint: disable=protected-access\n if not all(type(self) is type(other) for other in others):\n return None\n\n try:\n for other in others:\n tf.nest.assert_same_structure((self._specs, self._unique_id_params),\n (other._specs, other._unique_id_params))\n except (TypeError, ValueError):\n return None\n\n self_elements = tf.nest.flatten((self._specs, self._unique_id_params))\n others_elements = [\n tf.nest.flatten((other._specs, other._unique_id_params))\n for other in others\n ]\n\n def common_supertype_or_equal(a, bs):\n try:\n return a.most_specific_common_supertype(bs)\n except AttributeError:\n return a if all(a == b for b in bs) else None\n\n common_elements = [None] * len(self_elements)\n for i, self_element in enumerate(self_elements):\n common_elements[i] = common_supertype_or_equal(\n self_element,\n [other_elements[i] for other_elements in others_elements])\n if self_element is not None and common_elements[i] is None:\n return None\n specs, params = tf.nest.pack_sequence_as(\n (self._specs, self._unique_id_params), common_elements)\n\n kwargs = dict(specs, **params)\n if not self._transform_is_composite:\n if not all(self.transform_or_spec == other.transform_or_spec\n for other in others):\n return None\n kwargs['transform_or_spec'] = self.transform_or_spec\n return type(self)(**kwargs, name=None)", "def percentage_common_types (corpus): \n total = sum([t[1] for t in most_frequent(corpus)])\n return percentage(total, corpus_length(corpus))", "def type_info(np_type):\n dt = np.dtype(np_type)\n np_type = dt.type\n width = dt.itemsize\n try: # integer type\n info = np.iinfo(dt)\n except ValueError:\n pass\n else:\n return dict(min=np_type(info.min), max=np_type(info.max), minexp=None,\n maxexp=None, nmant=None, nexp=None, width=width)\n info = np.finfo(dt)\n # Trust the standard IEEE types\n nmant, nexp = info.nmant, info.nexp\n ret = dict(min=np_type(info.min),\n max=np_type(info.max),\n nmant=nmant,\n nexp=nexp,\n minexp=info.minexp,\n maxexp=info.maxexp,\n width=width)\n if np_type in (_float16, np.float32, np.float64,\n np.complex64, np.complex128):\n return ret\n info_64 = np.finfo(np.float64)\n if dt.kind == 'c':\n assert np_type is np.longcomplex\n vals = (nmant, nexp, width / 2)\n else:\n assert np_type is np.longdouble\n vals = (nmant, nexp, width)\n if vals in ((112, 15, 16), # binary128\n (info_64.nmant, info_64.nexp, 8), # float64\n (63, 15, 12), (63, 15, 16)): # Intel extended 80\n return ret # these are OK without modification\n # The remaining types are longdoubles with bad finfo values. Some we\n # correct, others we wait to hear of errors.\n # We start with float64 as basis\n ret = type_info(np.float64)\n if vals in ((52, 15, 12), # windows float96\n (52, 15, 16)): # windows float128?\n # On windows 32 bit at least, float96 is Intel 80 storage but operating\n # at float64 precision. The finfo values give nexp == 15 (as for intel\n # 80) but in calculations nexp in fact appears to be 11 as for float64\n ret.update(dict(width=width))\n return ret\n if vals == (105, 11, 16): # correctly detected double double\n ret.update(dict(nmant=nmant, nexp=nexp, width=width))\n return ret\n # Oh dear, we don't recognize the type information. Try some known types\n # and then give up. At this stage we're expecting exotic longdouble or\n # their complex equivalent.\n if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):\n raise FloatingError('We had not expected type %s' % np_type)\n if (vals == (1, 1, 16) and on_powerpc() and\n _check_maxexp(np.longdouble, 1024)):\n # double pair on PPC. The _check_nmant routine does not work for this\n # type, hence the powerpc platform check instead\n ret.update(dict(nmant=106, width=width))\n elif (_check_nmant(np.longdouble, 52) and\n _check_maxexp(np.longdouble, 11)):\n # Got float64 despite everything\n pass\n elif (_check_nmant(np.longdouble, 112) and\n _check_maxexp(np.longdouble, 16384)):\n # binary 128, but with some busted type information. np.longcomplex\n # seems to break here too, so we need to use np.longdouble and\n # complexify\n two = np.longdouble(2)\n # See: https://matthew-brett.github.io/pydagogue/floating_point.html\n max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383\n if np_type is np.longcomplex:\n max_val += 0j\n ret = dict(min=-max_val,\n max=max_val,\n nmant=112,\n nexp=15,\n minexp=-16382,\n maxexp=16384,\n width=width)\n else: # don't recognize the type\n raise FloatingError('We had not expected long double type %s '\n 'with info %s' % (np_type, info))\n return ret", "def sa_intersection(arr1: StaticArray, arr2: StaticArray, arr3: StaticArray) \\\n -> StaticArray:\n # creates an array to keep track of common elements in arr1 and arr2\n working_array_1 = StaticArray(arr1.size())\n # creates a temp array so that seen elements can be \"removed\"\n working_array_2 = StaticArray(arr2.size())\n for index in range(arr2.size()):\n working_array_2.set(index, arr2[index])\n length = 0\n\n # finds elements that match in both arr1 and working_array_2(arr2)\n for index1 in range(arr1.size()):\n for index2 in range(working_array_2.size()):\n if arr1[index1] == working_array_2[index2]:\n working_array_1.set(length, arr1[index1])\n working_array_2[index2] = None\n length += 1\n break # if element is found, discontinue search\n\n # if no matches, returns None\n if length == 0:\n results_array = StaticArray(1)\n\n else:\n # creates an array that truncates working array to just the results\n # gets rid of None values that might be compared\n working_array_3 = StaticArray(length)\n for index in range(working_array_3.size()):\n working_array_3.set(index, working_array_1[index])\n working_array_4 = StaticArray(arr3.size())\n length = 0\n\n # finds elements that match in both working_array_3(arr2) and arr3\n for index3 in range(arr3.size()):\n for index4 in range(working_array_3.size()):\n if working_array_3[index4] == arr3[index3]:\n working_array_4.set(length, arr3[index3])\n working_array_3.set(index4, None)\n length += 1\n break # if element is found, discontinue search\n\n # if no matches, returns None\n if length == 0:\n results_array = StaticArray(1)\n # creates an array to display the results\n else:\n results_array = StaticArray(length)\n results_array_index = 0\n for index5 in range(working_array_4.size()):\n if working_array_4[index5] is not None:\n results_array.set(results_array_index, working_array_4[index5])\n results_array_index += 1\n\n return results_array", "def take_common_features(feat1,feat2):\n common=np.intersect1d(feat1,feat2) # sorted\n ind1=find_indices(common,feat1)\n ind2=find_indices(common,feat2)\n return common,ind1,ind2", "def has_common(self, *args):\n return _ida_hexrays.lvar_t_has_common(self, *args)", "def check_global_attr_type(ds, attr, attr_type):\n if attr not in ds.ncattrs():\n return 0\n\n global_attr = getattr(ds, attr)\n\n if attr_type == 'int':\n attr_type_class = int\n elif attr_type == 'float':\n attr_type_class = float\n elif attr_type == 'str':\n attr_type_class = str\n else:\n return 1\n\n if len(str(global_attr)) == 0:\n return 2\n\n if np.dtype(type(global_attr)) != np.dtype(attr_type_class):\n return 3\n\n return 4", "def infer_array_type(self, elements):\n if not elements:\n raise Exception('Empty array, should never happen here.')\n\n candidate_type = ''\n for e in elements:\n etype = self.infer_value_type(e)\n if candidate_type == '':\n candidate_type = etype\n continue\n candidate_type = convert_type(candidate_type, etype)\n if not candidate_type:\n return None\n\n return candidate_type", "def check_type_compat(input_a, input_b):\n return return_family_type(input_a) is return_family_type(input_b)", "def _query(self, feature: str) -> np.ndarray:\n return np.flatnonzero(np.core.defchararray.find(self.internal_types, feature) != -1)", "def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical", "def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical", "def _determine_dtype(fields):\n # Check whether the required fields are there\n for field in _NRRD_REQUIRED_FIELDS:\n if field not in fields:\n raise NrrdError('Nrrd header misses required field: \"%s\".' % (field))\n\n # Process the data type\n np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]\n # Endianness is not necessary for ASCII encoding type\n if np.dtype(np_typestring).itemsize > 1 and fields['encoding'] not in ['ascii', 'text', 'txt']:\n if 'endian' not in fields:\n raise NrrdError('Nrrd header misses required field: \"endian\".')\n if fields['endian'] == 'big':\n np_typestring = '>' + np_typestring\n elif fields['endian'] == 'little':\n np_typestring = '<' + np_typestring\n\n return np.dtype(np_typestring)", "def seek_types(dataframe: pd.DataFrame) -> Dict[str, List[str]]:\r\n\r\n def _get_global_type(t):\r\n if \"obj\" in str(t):\r\n return \"cat\"\r\n elif \"float\" in str(t):\r\n return \"float\"\r\n elif \"int\" in str(t):\r\n return \"int\"\r\n elif \"date\" in str(t):\r\n return \"date\"\r\n else:\r\n return \"other\"\r\n\r\n found_types = (\r\n dataframe.dtypes.apply(_get_global_type)\r\n .reset_index()\r\n .groupby(0)\r\n .agg(lambda x: list(x))\r\n )\r\n found_types = {k: v for k, v in zip(found_types.index, found_types[\"index\"])}\r\n return found_types" ]
[ "0.71931887", "0.67270565", "0.5763443", "0.55000544", "0.5499155", "0.515625", "0.5150497", "0.514426", "0.51265186", "0.5125046", "0.50527203", "0.49514917", "0.49380565", "0.49347687", "0.49293438", "0.4913139", "0.48950943", "0.48901024", "0.48788613", "0.48710108", "0.48527718", "0.48385555", "0.48378327", "0.482854", "0.48223004", "0.48189616", "0.48027945", "0.48027945", "0.47851375", "0.47668147" ]
0.7117615
1
Utility function to determine if we can cast from `from_dtype` to `to_dtype`. This function primarily calls `np.can_cast` but with some special handling around cudf specific dtypes.
def _can_cast(from_dtype, to_dtype): if cudf.utils.utils.is_na_like(from_dtype): return True if isinstance(from_dtype, type): from_dtype = cudf.dtype(from_dtype) if isinstance(to_dtype, type): to_dtype = cudf.dtype(to_dtype) # TODO : Add precision & scale checking for # decimal types in future if isinstance(from_dtype, cudf.core.dtypes.DecimalDtype): if isinstance(to_dtype, cudf.core.dtypes.DecimalDtype): return True elif isinstance(to_dtype, np.dtype): if to_dtype.kind in {"i", "f", "u", "U", "O"}: return True else: return False elif isinstance(from_dtype, np.dtype): if isinstance(to_dtype, np.dtype): return np.can_cast(from_dtype, to_dtype) elif isinstance(to_dtype, cudf.core.dtypes.DecimalDtype): if from_dtype.kind in {"i", "f", "u", "U", "O"}: return True else: return False elif isinstance(to_dtype, cudf.core.types.CategoricalDtype): return True else: return False elif isinstance(from_dtype, cudf.core.dtypes.ListDtype): # TODO: Add level based checks too once casting of # list columns is supported if isinstance(to_dtype, cudf.core.dtypes.ListDtype): return np.can_cast(from_dtype.leaf_type, to_dtype.leaf_type) else: return False elif isinstance(from_dtype, cudf.core.dtypes.CategoricalDtype): if isinstance(to_dtype, cudf.core.dtypes.CategoricalDtype): return True elif isinstance(to_dtype, np.dtype): return np.can_cast(from_dtype._categories.dtype, to_dtype) else: return False else: return np.can_cast(from_dtype, to_dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _can_cast_to(self, value, cast_type):\n try:\n _ = cast_type(value)\n return True\n except ValueError:\n return False", "def test_upcast(self):\r\n if config.cast_policy == 'custom':\r\n assert arange(iscalar()).dtype == iscalar().dtype\r\n assert arange(fscalar()).dtype == fscalar().dtype\r\n assert arange(dscalar()).dtype == dscalar().dtype\r\n\r\n # int32 + float32 -> float64\r\n assert arange(iscalar(), fscalar()).dtype == dscalar().dtype\r\n assert arange(iscalar(), dscalar()).dtype == dscalar().dtype\r\n assert arange(fscalar(), dscalar()).dtype == dscalar().dtype\r\n\r\n assert arange(iscalar(), fscalar(), dscalar()).dtype == \\\r\n dscalar().dtype\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n for dtype in get_numeric_types():\r\n # Test with a single argument.\r\n arange_dtype = arange(scalar(dtype=str(dtype))).dtype\r\n numpy_dtype = numpy.arange(numpy.array(1, dtype=dtype)).dtype\r\n if (dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with two arguments.\r\n for stop_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n\r\n # Test with three arguments.\r\n for step_dtype in get_numeric_types():\r\n arange_dtype = arange(\r\n start=scalar(dtype=str(dtype)),\r\n stop=scalar(dtype=str(stop_dtype)),\r\n step=scalar(dtype=str(step_dtype))).dtype\r\n numpy_dtype = numpy.arange(\r\n start=numpy.array(0, dtype=dtype),\r\n stop=numpy.array(1, dtype=stop_dtype),\r\n step=numpy.array(1, dtype=step_dtype)).dtype\r\n if (dtype != 'float64' and\r\n stop_dtype != 'float64' and\r\n step_dtype != 'float64' and\r\n numpy_dtype == 'float64' and\r\n config.cast_policy == 'numpy+floatX' and\r\n config.floatX == 'float32'):\r\n # We want a float32 arange.\r\n assert arange_dtype == 'float32'\r\n else:\r\n # Follow numpy.\r\n assert arange_dtype == numpy_dtype\r\n else:\r\n raise NotImplementedError(config.cast_policy)", "def _check_dtype(ds_in, dset_attrs):\n dtype = dset_attrs['dtype']\n attrs = dset_attrs['attrs']\n if ds_in.dtype.name != dtype:\n msg = ('Source dtype ({}) does not match specified dtype ({}), '\n .format(ds_in.dtype, dtype))\n logger.warning(msg)\n warn(msg)\n float_to_int = (np.issubdtype(ds_in.dtype, np.floating)\n and np.issubdtype(dtype, np.integer))\n int_to_float = (np.issubdtype(ds_in.dtype, np.integer)\n and np.issubdtype(dtype, np.floating))\n if float_to_int:\n if not any(c for c in attrs if 'scale_factor' in c):\n msg = ('Cannot downscale from {} to {} without a '\n 'scale_factor!'.format(ds_in.dtype, dtype))\n logger.error(msg)\n raise RuntimeError(msg)\n else:\n msg = 'Converting {} to {}'.format(ds_in.dtype, dtype)\n logger.warning(msg)\n warn(msg)\n elif int_to_float:\n msg = ('Cannot scale up an {} to a {}'\n .format(ds_in.dtype, dtype))\n logger.error(msg)\n raise RuntimeError(msg)\n elif not 
np.issubdtype(dtype, ds_in.dtype):\n msg = ('Output dtype ({}) has greater precision than input '\n 'dtype ({}), using input dtype'\n .format(dtype, ds_in.dtype))\n logger.warning(msg)\n warn(msg)\n\n dset_attrs['dtype'] = ds_in.dtype\n\n return dset_attrs", "def _check_dtype(input_dtype):\n\n product_version = tbe_platform.cce_conf.get_soc_spec(\"SOC_VERSION\")\n if product_version in (\"Hi3796CV300ES\"):\n if input_dtype == \"float32\":\n raise RuntimeError(\"float32 is not support in ES\")\n util.check_dtype_rule(input_dtype, (\"float16\",))\n else:\n util.check_dtype_rule(input_dtype, (\"float16\", \"float32\",))", "def cast(raw_tensor, dst_dtype):\n src_dtype = raw_tensor.dtype\n dst_dtype_lower = dst_dtype.lower()\n if dst_dtype_lower == src_dtype:\n return raw_tensor\n\n if not is_cast_support(src_dtype, dst_dtype_lower):\n if is_cast_support(src_dtype, \"float32\") and is_cast_support(\"float32\", dst_dtype_lower):\n raw_tensor = cast_op(raw_tensor, \"float32\", 'elewise_single_cast')\n elif is_cast_support(src_dtype, \"float16\") and is_cast_support(\"float16\", dst_dtype_lower):\n raw_tensor = cast_op(raw_tensor, \"float16\", 'elewise_single_cast')\n else:\n raise TypeError(\"Unsupported cast type!\")\n\n return cast_op(raw_tensor, dst_dtype_lower, 'elewise_single_cast')", "def is_convertible(source, target):\n return __is_convertible_t(source, target).is_convertible()", "def _cast_dtype(tik_instance, dst, src, cast_repeat_time,\n cast_remainder, cast_case):\n if cast_case == \"int8_2_float16\":\n tik_instance.vconv(MAX_MASK, 'none', dst, src, cast_repeat_time,\n 1, 1, 8, 4, None)\n with tik_instance.if_scope(cast_remainder != 0):\n tik_instance.vconv(cast_remainder, 'none',\n dst[cast_repeat_time * MAX_MASK],\n src[cast_repeat_time * MAX_MASK],\n 1, 1, 1, 8, 4, None)\n elif cast_case == \"float16_2_int8\":\n tik_instance.vconv(MAX_MASK, 'none', dst, src, cast_repeat_time,\n 1, 1, 4, 8, None)\n with tik_instance.if_scope(cast_remainder != 0):\n tik_instance.vconv(cast_remainder, 'none',\n dst[cast_repeat_time * MAX_MASK],\n src[cast_repeat_time * MAX_MASK],\n 1, 1, 1, 4, 8, None)", "def upcast(fn):\n\n def upcasted_fn(a, b):\n if a.dtype == b.dtype:\n return fn(a, b)\n else:\n common = common_type(a, b)\n return fn(a.astype(common), b.astype(common))\n\n return upcasted_fn", "def issubdtype(dtype1: DType, dtype2: DType):\n return np.issubdtype(convert(dtype1, NPDType), convert(dtype2, NPDType))", "def check_type_compat(input_a, input_b):\n return return_family_type(input_a) is return_family_type(input_b)", "def can_convert(self, from_type, to_type):\n #\n # Test if the glpsol executable is available\n #\n if not pyomo.common.Executable(\"pico_convert\"):\n return False\n #\n # Return True for specific from/to pairs\n #\n if from_type == ProblemFormat.nl and to_type == ProblemFormat.cpxlp:\n return True\n if from_type == ProblemFormat.nl and to_type == ProblemFormat.mps:\n return True\n if from_type == ProblemFormat.mps and to_type == ProblemFormat.cpxlp:\n return True\n if from_type == ProblemFormat.cpxlp and to_type == ProblemFormat.mps:\n return True\n return False", "def _cast_unsupported_dtypes(tensor):\n\n if tensor.dtype.__eq__(dtypes.int64):\n # outside-compilation doesn't support int64 input yet.\n return math_ops.cast(tensor, dtypes.int32)\n if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(\n dtypes.float16):\n # Since host can't handle bf16, convert tensor to f32.\n return math_ops.cast(tensor, dtypes.float32)\n return tensor", "def 
maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):\n if not hasattr(data, \"dtype\"):\n # e.g. collections.deque\n return data, copy\n\n if is_float_dtype(data.dtype):\n # pre-2.0 we treated these as wall-times, inconsistent with ints\n # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes.\n # Note: data.astype(np.int64) fails ARM tests, see\n # https://github.com/pandas-dev/pandas/issues/49468.\n data = data.astype(DT64NS_DTYPE).view(\"i8\")\n copy = False\n\n elif lib.is_np_dtype(data.dtype, \"m\") or is_bool_dtype(data.dtype):\n # GH#29794 enforcing deprecation introduced in GH#23539\n raise TypeError(f\"dtype {data.dtype} cannot be converted to datetime64[ns]\")\n elif isinstance(data.dtype, PeriodDtype):\n # Note: without explicitly raising here, PeriodIndex\n # test_setops.test_join_does_not_recur fails\n raise TypeError(\n \"Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead\"\n )\n\n elif isinstance(data.dtype, ExtensionDtype) and not isinstance(\n data.dtype, DatetimeTZDtype\n ):\n # TODO: We have no tests for these\n data = np.array(data, dtype=np.object_)\n copy = False\n\n return data, copy", "def _cast_types(self, input_dict):\n return cast_types(input_dict, self.params['dtype'])", "def test_canConvert(string, cast, expected):\n assert canConvert(string, cast) == expected", "def is_date_dtype(df, col_name):\n dtype = df.dtypes[col_name]\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)", "def _type_check(matrix_a, matrix_b=None, cast=False):\n\n if matrix_b is None and matrix_a.dtype in NUMPY_FLOAT_DTYPES:\n return matrix_a\n elif matrix_b is None and cast:\n return _cast_to_float64(matrix_a)\n elif matrix_b is None:\n err_msg = \"Matrix data type must be float32 or float64; {a} provided\".format(a=matrix_a.dtype)\n raise ValueError(err_msg)\n\n # Check dtypes\n if matrix_a.dtype == np.float32 and matrix_b.dtype == np.float32:\n return matrix_a, matrix_b\n\n elif matrix_a.dtype == np.float64 and matrix_b.dtype == np.float64:\n return matrix_a, matrix_b\n\n elif (matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64) and cast:\n debug_print(\"Recasting matrix data types {a} and {b} to np.float64\".format(a=matrix_a.dtype, b=matrix_b.dtype))\n return _cast_to_float64(matrix_a), _cast_to_float64(matrix_b)\n\n elif matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64:\n err_msg = \"Matrix data types must be in concordance; {a} and {b} provided\".format(a=matrix_a.dtype,\n b=matrix_b.dtype)\n raise ValueError(err_msg)", "def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs):\n with paddle_static_guard():\n for i, expect_dtype in enumerate(expect_dtypes):\n with paddle.static.program_guard(paddle.static.Program()):\n input_t = []\n for index, spec in enumerate(in_specs):\n if len(spec) == 1:\n shape = spec[0]\n dtype = (\n expect_dtype if target_index == index else 'float32'\n )\n elif len(spec) == 2:\n shape, dtype = spec\n else:\n raise ValueError(\n \"Value of in_specs[{}] should contains two elements: [shape, dtype]\".format(\n index\n )\n )\n input_t.append(\n paddle.static.data(\n name='data_%s' % index, shape=shape, dtype=dtype\n )\n )\n\n out = api_fn(*input_t, **configs)\n out_dtype = fluid.data_feeder.convert_dtype(out.dtype)\n\n if out_dtype != expect_dtype:\n raise ValueError(\n \"Expected out.dtype is {}, but got {} from {}.\".format(\n expect_dtype, out_dtype, api_fn.__name__\n )\n )", "def _resolve_target_dtypes(self, dyf: DynamicFrame) 
-> DynamicFrame:\n resolve_choice_specs = [\n (col, f\"cast:{col_type}\") for col, col_type in self.target_table.get_dyf().toDF().dtypes\n ]\n\n return dyf.resolveChoice(resolve_choice_specs)", "def cudf_dtype_from_pydata_dtype(dtype):\n\n if cudf.api.types.is_categorical_dtype(dtype):\n return cudf.core.dtypes.CategoricalDtype\n elif cudf.api.types.is_decimal32_dtype(dtype):\n return cudf.core.dtypes.Decimal32Dtype\n elif cudf.api.types.is_decimal64_dtype(dtype):\n return cudf.core.dtypes.Decimal64Dtype\n elif cudf.api.types.is_decimal128_dtype(dtype):\n return cudf.core.dtypes.Decimal128Dtype\n elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:\n return dtype.type\n\n return infer_dtype_from_object(dtype)", "def check_dtype_equal(input_dict,\n target_dtype = jnp.float32,\n exclude_list = ()):\n flat_input = traverse_util.flatten_dict(input_dict)\n for key, value in flat_input.items():\n if key[0] in exclude_list:\n continue\n\n key_name = '_'.join([str(sub_key) for sub_key in key])\n if isinstance(value, jnp.ndarray):\n if value.dtype != target_dtype:\n raise TypeError(f'Input {key_name} has inconsistent type:'\n f'{value.dtype} vs {target_dtype}')\n else:\n raise TypeError(f'Illegal input type found: {type(value)}.')", "def can_convert_to_column(obj):\n return is_column_like(obj) or cudf.api.types.is_list_like(obj)", "def has_same_base_dtype(df_1, df_2, columns=None):\n if columns is None:\n if any(set(df_1.columns).symmetric_difference(set(df_2.columns))):\n print(\n \"Cannot test all columns because they are not all shared across DataFrames\"\n )\n return False\n columns = df_1.columns\n\n if not (\n has_columns(df=df_1, columns=columns) and has_columns(df=df_2, columns=columns)\n ):\n return False\n\n result = True\n for column in columns:\n if df_1[column].dtype.type.__base__ != df_2[column].dtype.type.__base__:\n print(\"Columns {} do not have the same base datatype\".format(column))\n result = False\n\n return result", "def convert(data, to):\n converted = None\n if to == 'array':\n if isinstance(data, np.ndarray):\n converted = data\n elif isinstance(data, pd.Series):\n converted = data.values\n elif isinstance(data, list):\n converted = np.array(data)\n elif isinstance(data, pd.DataFrame):\n converted = data.as_matrix()\n elif to == 'list':\n if isinstance(data, list):\n converted = data\n elif isinstance(data, pd.Series):\n converted = data.values.tolist()\n elif isinstance(data, np.ndarray):\n converted = data.tolist()\n elif to == 'dataframe':\n if isinstance(data, pd.DataFrame):\n converted = data\n elif isinstance(data, np.ndarray):\n converted = pd.DataFrame(data)\n else:\n raise ValueError(\"Unknown data conversion: {}\".format(to))\n if converted is None:\n raise TypeError(\n 'cannot handle data conversion of type: {} to {}'.format(\n type(data), to))\n else:\n return converted", "def _safely_castable_to_int(dt):\n int_size = np.dtype(int).itemsize\n safe = (np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or (\n np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size\n )\n return safe", "def is_casting(self):\n # type: () -> bool\n return self._is_casting", "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def _getconv(dtype):\n\n\n\n\ttyp = dtype.type\n\tif issubclass(typ, np.bool_):\n\t\treturn lambda x: bool(int(x))\n\tif issubclass(typ, np.uint64):\n\t\treturn np.uint64\n\tif issubclass(typ, np.int64):\n\t\treturn np.int64\n\tif issubclass(typ, np.int32):\n\t\treturn 
np.int32\n\telif issubclass(typ, np.longdouble):\n\t\treturn np.longdouble\n\telif issubclass(typ, np.floating):\n\t\treturn np.float32\n\telse:\n\t\traise SystemExit(\"Incorrect data type\")", "def check_force_cast(FROM, TO, operations, value):\n import re\n r = re.compile('(\\w+) \\%i\\d, \\$(-?\\d+)')\n r2 = re.compile('(\\w+) \\%i\\d')\n #\n value = rffi.cast(FROM, value)\n value = rffi.cast(lltype.Signed, value)\n #\n expected_value = rffi.cast(TO, value)\n expected_value = rffi.cast(lltype.Signed, expected_value)\n #\n for op in operations:\n match = r.match(op)\n if match is None:\n match = r2.match(op)\n assert match, \"line %r does not match regexp\" % (op,)\n opname = match.group(1)\n if opname == 'int_and':\n value &= int(match.group(2))\n elif opname == 'int_signext':\n numbytes = int(match.group(2))\n value = int_signext(value, numbytes)\n elif opname == 'int_is_true':\n value = bool(value)\n else:\n assert 0, opname\n #\n assert rffi.cast(lltype.Signed, value) == expected_value", "def test_conversion(backend):\n\n x = np.random.rand(10, 10)\n x_b = backend.from_numpy(x)\n x_c = backend.to_numpy(x_b)\n\n assert np.all(np.isclose(x, x_c))" ]
[ "0.6512901", "0.6029256", "0.6008407", "0.5839921", "0.58141434", "0.5667138", "0.56541944", "0.5560597", "0.5534378", "0.5519287", "0.5447551", "0.5432719", "0.54149824", "0.5303196", "0.52843744", "0.5275316", "0.52229995", "0.51824373", "0.5181992", "0.5151185", "0.51425725", "0.50830895", "0.50797033", "0.50623214", "0.5061361", "0.5034422", "0.5032198", "0.502543", "0.5002837", "0.4975635" ]
0.917543
0
Convert `dtype` to the user-configured default bitwidth, if one is set; otherwise return it as is.
def _maybe_convert_to_default_type(dtype):
    if cudf.get_option("default_integer_bitwidth"):
        if cudf.api.types.is_signed_integer_dtype(dtype):
            return cudf.dtype(
                f'i{cudf.get_option("default_integer_bitwidth")//8}'
            )
        elif cudf.api.types.is_unsigned_integer_dtype(dtype):
            return cudf.dtype(
                f'u{cudf.get_option("default_integer_bitwidth")//8}'
            )
    if cudf.get_option(
        "default_float_bitwidth"
    ) and cudf.api.types.is_float_dtype(dtype):
        return cudf.dtype(f'f{cudf.get_option("default_float_bitwidth")//8}')

    return dtype
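A hedged usage sketch: `cudf.set_option` and the `default_integer_bitwidth` / `default_float_bitwidth` options are real cudf settings, and `i4` / `u4` denote 32-bit signed and unsigned integers.

import cudf

cudf.set_option("default_integer_bitwidth", 32)
_maybe_convert_to_default_type(cudf.dtype("int64"))    # -> dtype('int32'), i.e. 'i4'
_maybe_convert_to_default_type(cudf.dtype("uint64"))   # -> dtype('uint32'), i.e. 'u4'

# No float override configured, so floats pass through unchanged
_maybe_convert_to_default_type(cudf.dtype("float64"))  # -> dtype('float64')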
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_torch_default_dtype(dtype) -> torch.dtype:\n if dtype is None:\n return torch.get_default_dtype()\n if isinstance(dtype, str):\n if dtype not in dtype_mapping:\n raise ValueError(\n f\"Unknown torch dtype: {dtype}. \"\n f\"Choose from: {list(dtype_mapping.keys())}\"\n )\n dtype = dtype_mapping.get(dtype)\n logger.info(f\"Setting torch dtype to {dtype}\")\n torch.set_default_dtype(dtype)\n return dtype", "def __get_default_dtype(val_type):\n\n if val_type == int:\n dtype = q_consts.int64\n elif val_type == float:\n dtype = q_consts.float64\n else:\n raise Exception(\"input element type %s is not supported\" % val_type)\n return dtype", "def _normalize_default(value, dtype):\n\n # Create NumPy objects as defaults\n # This is better in order to serialize them as attributes\n if value is None:\n value = 0\n basedtype = dtype.base\n try:\n default = np.array(value, dtype=basedtype)\n except ValueError:\n array = np.array(value)\n if array.shape != basedtype.shape:\n raise\n # Maybe nested dtype with \"scalar\" value.\n default = np.array(value, dtype=basedtype.base)\n # 0-dim arrays will be representented as NumPy scalars\n # (PyTables attribute convention)\n if default.shape == ():\n default = default[()]\n return default", "def datatype_map(dtype):\n # TODO: add datetype conversion\n if 'float' in dtype:\n return 'numeric'\n elif 'int' in dtype:\n return 'int'\n elif 'bool' in dtype:\n return 'boolean'\n else:\n return 'text'", "def cudf_dtype_from_pydata_dtype(dtype):\n\n if cudf.api.types.is_categorical_dtype(dtype):\n return cudf.core.dtypes.CategoricalDtype\n elif cudf.api.types.is_decimal32_dtype(dtype):\n return cudf.core.dtypes.Decimal32Dtype\n elif cudf.api.types.is_decimal64_dtype(dtype):\n return cudf.core.dtypes.Decimal64Dtype\n elif cudf.api.types.is_decimal128_dtype(dtype):\n return cudf.core.dtypes.Decimal128Dtype\n elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:\n return dtype.type\n\n return infer_dtype_from_object(dtype)", "def _infer_dtype(val):\n if re.match(r'\\d{4}-\\d{2}(?:-\\d{2})?', val):\n return 'date'\n elif re.match(r'[+-]?\\d+$', val):\n return 'int'\n elif re.match(r'[+-]?\\d+%$', val):\n return 'pct'\n elif re.match(r'[a-zA-Z ]+', val):\n return 'text'\n else:\n msg = \"val={0} dtype not recognized\".format(val)\n raise ValueError(msg)", "def _convert_dtype_value(val):\n\n convert_dtype_map = {\n 21: \"int8\",\n 20: \"uint8\",\n 6: \"float64\",\n 5: \"float32\",\n 4: \"float16\",\n 3: \"int64\",\n 2: \"int32\",\n 1: \"int16\",\n 0: \"bool\",\n }\n if val not in convert_dtype_map:\n msg = f\"Paddle data type value {val} is not handled yet.\"\n raise NotImplementedError(msg)\n return convert_dtype_map[val]", "def test_default_dtype(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.dtype == np.dtype('float32')", "def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj", "def _get_scalar_default_value(dtype, default_value):\n if dtype == tf.string:\n return default_value or \"\"\n elif default_value is None:\n return 0\n elif isinstance(default_value, int) or isinstance(default_value, float):\n return default_value\n elif (isinstance(default_value, list) or\n isinstance(default_value, tuple)) and len(default_value) == 1:\n return default_value[0]\n else:\n raise ValueError(\"Only scalar or equivalent is allowed in default_value.\")", "def typecast(dtype: 
Any) -> str:\n if dtype is int:\n return \"Int64\"\n elif dtype is float:\n return \"Float64\"\n elif dtype is bool:\n return \"bool\"\n return \"string\"", "def _binary_op_dtype_override(a: str, b: str) -> Optional[np.dtype]:\n overrides = {\n ('float16', 'int16'): np.float16,\n ('float16', 'int32'): np.float16,\n ('float16', 'int64'): np.float16,\n ('float16', 'uint16'): np.float16,\n ('float16', 'uint32'): np.float16,\n ('float16', 'uint64'): np.float16,\n ('int8', 'uint8'): np.uint8,\n ('int8', 'uint16'): np.uint16,\n ('int8', 'uint32'): np.uint32,\n ('int8', 'uint64'): np.uint64,\n ('int16', 'uint16'): np.uint16,\n ('int16', 'uint32'): np.uint32,\n ('int16', 'uint64'): np.uint64,\n ('int32', 'uint32'): np.uint32,\n ('int32', 'uint64'): np.uint64,\n ('int64', 'uint64'): np.uint64,\n }\n key = (a, b) if a < b else (b, a)\n return overrides.get(key)", "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def na_value_for_dtype(dtype):\n dtype = pandas_dtype(dtype)\n\n if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or\n is_timedelta64_dtype(dtype)):\n return NaT\n elif is_float_dtype(dtype):\n return np.nan\n elif is_integer_dtype(dtype):\n return 0\n elif is_bool_dtype(dtype):\n return False\n return np.nan", "def _map_data_types(dtype):\n return _data_type_map[dtype]", "def np_dtype(dali_dtype):\n return numpy.dtype(dali_dtype)", "def dtype_to_type(dtype) -> Type:\n if dtype == np.object:\n return str\n else:\n return type(np.zeros(1, dtype).item())", "def safe_cast(val, to_type, default=None):\n try:\n if to_type == int:\n return to_type(double(val))\n return to_type(val)\n except (ValueError, TypeError):\n return default", "def data_type(self, col_name: str, pa_dtype: pa.DataType) -> Optional[str]:\n set_type = self._type_dict.get(col_name)\n if set_type:\n return set_type\n\n mapped_type = self.convert_pa_dtype(pa_dtype)\n if mapped_type:\n return mapped_type\n\n return None", "def to_dtype(x, dtype):\n return x.type(dtype)", "def _getconv(dtype):\n\n\n\n\ttyp = dtype.type\n\tif issubclass(typ, np.bool_):\n\t\treturn lambda x: bool(int(x))\n\tif issubclass(typ, np.uint64):\n\t\treturn np.uint64\n\tif issubclass(typ, np.int64):\n\t\treturn np.int64\n\tif issubclass(typ, np.int32):\n\t\treturn np.int32\n\telif issubclass(typ, np.longdouble):\n\t\treturn np.longdouble\n\telif issubclass(typ, np.floating):\n\t\treturn np.float32\n\telse:\n\t\traise SystemExit(\"Incorrect data type\")", "def default_na_value(self):\n dkind = self.dtype.kind\n if dkind == \"M\":\n return np.datetime64(\"nat\", self.time_unit)\n else:\n raise TypeError(\n \"datetime column of {} has no NaN value\".format(self.dtype)\n )", "def make_convertor(name, dtype):\n\n # The spaces may be important in the strings, but don't think\n # they are for my use case, so remove them.\n #\n if dtype == 'char':\n if name.endswith('_flag'):\n return convert_to_bool\n else:\n return lambda v: v.strip()\n elif dtype == 'int':\n return convert_to_int\n elif dtype == 'double':\n return convert_to_float\n elif dtype == 'boolean':\n return convert_to_bool\n else:\n raise ValueError(dtype)", "def infer_dtype(self):\n raise NotImplementedError", "def _check_dtype(ds_in, dset_attrs):\n dtype = dset_attrs['dtype']\n attrs = dset_attrs['attrs']\n if ds_in.dtype.name != dtype:\n msg = ('Source dtype ({}) does not match specified dtype ({}), '\n .format(ds_in.dtype, dtype))\n logger.warning(msg)\n warn(msg)\n float_to_int = (np.issubdtype(ds_in.dtype, np.floating)\n and 
np.issubdtype(dtype, np.integer))\n int_to_float = (np.issubdtype(ds_in.dtype, np.integer)\n and np.issubdtype(dtype, np.floating))\n if float_to_int:\n if not any(c for c in attrs if 'scale_factor' in c):\n msg = ('Cannot downscale from {} to {} without a '\n 'scale_factor!'.format(ds_in.dtype, dtype))\n logger.error(msg)\n raise RuntimeError(msg)\n else:\n msg = 'Converting {} to {}'.format(ds_in.dtype, dtype)\n logger.warning(msg)\n warn(msg)\n elif int_to_float:\n msg = ('Cannot scale up an {} to a {}'\n .format(ds_in.dtype, dtype))\n logger.error(msg)\n raise RuntimeError(msg)\n elif not np.issubdtype(dtype, ds_in.dtype):\n msg = ('Output dtype ({}) has greater precision than input '\n 'dtype ({}), using input dtype'\n .format(dtype, ds_in.dtype))\n logger.warning(msg)\n warn(msg)\n\n dset_attrs['dtype'] = ds_in.dtype\n\n return dset_attrs", "def cast(self, dtype):\n self.dtype = np.dtype(dtype)\n self.preprocess = False\n self.set_data(self.data)", "def dtype(a):\n return a.dtype", "def set_dtype(self, dtype):\n _d = dtype.lower()\n if \"phot\" in _d:\n self.dtype = \"photon\"\n elif \"ener\" in _d:\n self.dtype = \"energy\"\n else:\n raise ValueError('Unknown detector type {0}'.format(dtype))", "def map_dtype(dtype):\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass # half floats not supported yet\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_", "def _astype_internal(self, column: str, numpy_dtype: str) -> None:\n new_kind: str = utils.convert_numpy_to_kind(numpy_dtype)\n dtype, loc, order = self._get_col_dtype_loc_order(column) # type: str, int, int\n\n srm = []\n\n if dtype == new_kind:\n return None\n col_data: ndarray = self._data[dtype][:, loc]\n nulls = utils.isna_array(col_data, dtype)\n\n if numpy_dtype == 'S':\n col_data = col_data.astype('U')\n col_data, _, srm = _va.convert_str_to_cat(col_data)\n col_data[nulls] = 0\n elif numpy_dtype == 'b':\n col_data = col_data.astype('bool').astype('int8')\n col_data[nulls] = -1\n elif numpy_dtype == 'i':\n col_data = col_data.astype('int64')\n col_data[nulls] = MIN_INT\n elif numpy_dtype == 'f':\n col_data = col_data.astype('int64')\n col_data[nulls] = np.nan\n elif col_data.dtype.kind == 'M':\n col_data = col_data.astype('datetime64[ns]')\n col_data[nulls] = NaT\n elif col_data.dtype.kind == 'm':\n col_data = col_data.astype('timedelta64[ns]')\n col_data[nulls] = NaT\n\n self._remove_column(column)\n self._write_new_column_data(column, new_kind, col_data, srm, order)" ]
[ "0.7363833", "0.6817029", "0.68166256", "0.64516294", "0.6188759", "0.6085092", "0.60710496", "0.60582787", "0.60573584", "0.6052707", "0.59538263", "0.5938294", "0.58810425", "0.58723915", "0.57957613", "0.5775995", "0.57383156", "0.57367325", "0.5732564", "0.5722388", "0.57158124", "0.57080686", "0.568792", "0.56669533", "0.56497735", "0.56180304", "0.5605198", "0.5589434", "0.55625206", "0.5551092" ]
0.8073433
0
Checks if the unit is a snapshot, i.e. it is shown at its last known state under the fog of war
def is_snapshot(self):
    return self.proto.display_type == DISPLAY_TYPE.Snapshot.value
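A self-contained sketch of the comparison this property performs. The enum stub mirrors the SC2 protocol's DisplayType values (Visible=1, Snapshot=2, Hidden=3), which is an assumption here; the SimpleNamespace stands in for the real protobuf unit.

from enum import Enum
from types import SimpleNamespace

class DISPLAY_TYPE(Enum):  # illustrative stub of the library enum
    Visible = 1
    Snapshot = 2
    Hidden = 3

proto = SimpleNamespace(display_type=DISPLAY_TYPE.Snapshot.value)
# Inlining the property body: a fog-of-war "ghost" reports as a snapshot
print(proto.display_type == DISPLAY_TYPE.Snapshot.value)  # True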
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True", "def is_visible(self):", "def is_visible(self):\n return self.real > 0", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def is_visible(self):\n return self.visible_date < timezone.now()", "def is_displayed(self, unit):\n try:\n field_is_displayed = getattr(self.unit.get_model_name()+'_is_displayed')\n if field_is_displayed:\n return field_is_displayed(unit)\n except AttributeError:\n pass\n if not self.displayed and not self.excluded:\n return True\n elif self.displayed and self.excluded:\n return unit.get_model_name() in self.displayed \\\n and unit.get_model_name() not in self.excluded\n elif self.excluded:\n return unit.get_model_name() not in self.excluded\n elif self.displayed:\n return unit.get_model_name() in self.displayed\n else:\n return True", "def is_visible(self):\n return self.container['is_visible']", "def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value", "def is_visible(self):\n return self.window.active_panel() == self.full_name", "def can_snapshot(self):\n return self.shard_build_desc.get('can_snapshot', False)", "def isVisible(self):\n\t\treturn True", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def is_alive(self):\r\n return self.visible", "def outOfScreen(self):\n x,y = self.currentLevel.transformToScreenCoordinate(self.position)\n w,h = cblocals.GAME_SCREEN_SIZE\n if x<0 or y<0 or x>x or y>h:\n return True\n return False", "def test_visible_ramp(self):\n total_number = 100000\n expected_percentage = .10\n self.feature_test.set_percentage(expected_percentage * 100)\n # Generate a range of user ids and map these ids to the feature\n # test result.\n user_ids = list(range(1, total_number + 1))\n visibility_map = [\n self.feature_test.is_visible(user_id)\n for user_id\n in user_ids\n ]\n # Count the number of success conditions.\n visibility_count = visibility_map.count(True)\n # This should match 10%.\n actual_percentage = visibility_count / float(total_number)\n self.assertAlmostEqual(\n actual_percentage, expected_percentage, delta=.012\n )", "def has_visible_entity(self):\n ret = False\n for e in self:\n if e.is_visible() == True:\n ret = True\n break\n return ret", "def is_viewed(self):\n return self.has_label(VIEWED_LABEL)", "def isVisible( self ):\n layer = self.layer()\n if ( layer and not layer.isVisible() ):\n return False\n# \n# if ( self.isIsolateHidden() ):\n# return False\n# \n return self._visible", "def issnapshot_available(self, item_name):\n return False", "def stats_change(self):\n return True if self.board.prev_state != self.board.shot_count else False", "def is_visible(self):\n return self._visible", "def is_snap_enabled(self) -> bool:\r\n ...", "def check_in_screen(self):\n if self.rect.colliderect(screen_rect) and not self.moving:\n return True\n return False", "def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True", "def check(self):\n if self.widget:\n self.widget.update()\n\n self.check_virtual_display()\n\n return self.runner.check()", "def test_draft_story_must_be_visible_to_owner(self):\n self.assertEqual(self.ds.is_visible_for(self.au), False)\n\n \"\"\" Draft story must not be visible for another. 
\"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u2), False)\n\n \"\"\" Draft story must be visible for story owner. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)", "def exists(self):\n if self.attributes[AT.GARBAGE]:\n return False\n if get_ticks() < self.attributes[AT.TIME_TO_BE_SHOWN]:\n return False\n return True", "def visible(self):\n return self._turtle.isvisible()", "def visible(self):\n return self._turtle.isvisible()", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False" ]
[ "0.6635701", "0.65887165", "0.6557644", "0.6523512", "0.6309742", "0.6167063", "0.6114277", "0.6098455", "0.60661596", "0.6048041", "0.6021813", "0.6008146", "0.5979416", "0.5978506", "0.59772563", "0.59762603", "0.5956536", "0.59140164", "0.589002", "0.5868256", "0.58513856", "0.5849977", "0.58286345", "0.581225", "0.5810084", "0.5801053", "0.57976407", "0.57901525", "0.57901525", "0.57892334" ]
0.6746657
0
Returns the unit's alliance
def alliance(self) -> ALLIANCE:
    return self.proto.alliance
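The property surfaces the raw alliance field unchanged; a minimal stub showing the values it can take (the numbering mirrors the SC2 protocol's Alliance enum and is an assumption here):

from enum import Enum
from types import SimpleNamespace

class ALLIANCE(Enum):  # illustrative stub
    Self = 1
    Ally = 2
    Neutral = 3
    Enemy = 4

proto = SimpleNamespace(alliance=ALLIANCE.Enemy.value)
print(ALLIANCE(proto.alliance))  # ALLIANCE.Enemy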
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weapon_holding():\n return unrealsdk.GetEngine().GamePlayers[0].Actor.Pawn.Weapon", "def get_unit(self):\n return self.unit", "def getVarUnit( self, name, adbOut ):\n\n if name not in _adbUnit: return None\n\n unit = None\n for item in _adbUnit[name]:\n if item[1] == 'all' or adbOut.lower() in item[1].split(','):\n if item[0] == \"None\":\n unit= \"nondim\"\n else:\n unit= acuUnit.getDefUnit( item[0] )\n \n break\n return unit", "def getArmor(self):\n return self.av", "def get_weapon(self):\n return self.__weapon", "def getUnit(self, *args):\n return _libsbml.UnitDefinition_getUnit(self, *args)", "def get_organization_unit(self):\n return self.reference[REF_ORGANIZATION_UNIT][REF_VALUE]", "def unit(self):\n return self._unit", "def aantalArmen(self):\n return self._aantalArmen.get_waarde()", "def aantalArmen(self):\n return self._aantalArmen.get_waarde()", "def unit_of_measurement(self):\n return self._tasmota_entity.unit", "def get_health(self):\n self.__health = sum([i.get_health for i in self.__units])\n return self.__health", "def get_total_health(self,obs):\n total_health = 0\n for unit in obs.observation.raw_units:\n if(unit.alliance == PlayerRelative.SELF):\n total_health += unit[FeatureUnit.health]\n return total_health", "def unit(self):\n return self._unit", "def unit(self):\n return self._unit", "def weapon(self):\n return self._weapon", "def unit(self):\n return self.__unit", "def getAdaptiveAmbient(self, channel, unitCode=0):\n resp = self.XAPCommand(\"AAMB\", channel, unitCode=unitCode)\n return int(resp)", "def get_total_appliance(self):\n total = 0\n for appliance in self.get_appliances():\n total += appliance.get_total()\n return total", "def a_realization(self):\n return self.UHP()", "def get_AIA(self):\n\n return self.get_POW().getAIA()", "def get_weapon(self):\n\n return self.suggestion_set[1]", "def name(self):\n return self.appliance_name", "def test_get_dealer_active_inventory(self):\n pass", "def organizational_unit_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organizational_unit_arn\")", "def unit(self) -> str:", "def unit_type(self) -> str:", "def aircraft_type(self) -> FlyingType:\n return self.units[0].unit_type", "def get_unit(self, unit_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from units\n WHERE unit_id = ?;\n \"\"\",\n (unit_id,),\n )\n results = c.fetchall()\n return results[0]", "def get_unit(self,tag):" ]
[ "0.618757", "0.60084546", "0.5974307", "0.58600867", "0.5816895", "0.57909936", "0.5775281", "0.5772687", "0.57205343", "0.57205343", "0.5619353", "0.559985", "0.55267227", "0.55230135", "0.55230135", "0.5519075", "0.55124843", "0.550832", "0.5463949", "0.54221255", "0.5421643", "0.5374799", "0.5371671", "0.5368864", "0.53639627", "0.53425854", "0.53422546", "0.5341189", "0.5327922", "0.5323732" ]
0.6642671
0
Checks if the unit is mine
def is_mine(self) -> bool:
    return self.proto.alliance == ALLIANCE.Self.value
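Minimal sketch of the test this property performs, with the property body inlined over a stubbed proto; Self=1 follows the SC2 protocol's Alliance numbering (assumed).

from enum import Enum
from types import SimpleNamespace

class ALLIANCE(Enum):  # illustrative stub
    Self = 1
    Enemy = 4

proto = SimpleNamespace(alliance=ALLIANCE.Self.value)
print(proto.alliance == ALLIANCE.Self.value)  # True: exactly what is_mine evaluates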
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_mine(self):\n return self.has_label(MINE_LABEL)", "def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True", "def is_mine(board, x, y):\n return board[x, y] == MINE", "def isMine(self):\n return self.mine", "def is_mine(self, coords):\n try:\n if coords[0] >= 0 and coords[1] >= 0:\n return self.grid[coords[1]][coords[0]] == self.mine\n else:\n return False\n except IndexError:\n return False", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def monster_check():\n player = get_locations()['player']\n monster = get_locations()['monster']\n if player == monster:\n if STATUS['weapon'] == 'armed':\n print(\"You killed the monster with the sword!\")\n play_again()\n else:\n if STATUS['hp'] > 0:\n STATUS['hp'] -= 5\n return \"The monster caught you! You barely manage to escape...\"\n elif STATUS['hp'] <= 0:\n print(\"The monster catachs you in its claws. Its not pretty.\")\n play_again()\n else:\n return \"Nothing in this room. Its around here somehwere though. \"", "def isWeber(self):\n return _libsbml.Unit_isWeber(self)", "def isWatt(self):\n return _libsbml.Unit_isWatt(self)", "def weapon_check():\n if get_locations()['player'] == get_locations()['weapon']:\n STATUS['weapon'] = 'armed'\n STATUS['locations']['weapon'] = None\n print(\"You found the weapon! Now go and kill the monster!\")", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def isMine(self, row, col):\n return self.board[row, col] == 1", "def test_regularUserCantIlluminate(self):\n objects.LocationLighting(thing=self.location,\n store=self.location.store,\n candelas=100)\n self._test(\n \"illuminate 0\",\n [\"You are insufficiently brilliant to do that directly.\"])\n self.assertEquals(self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location).candelas, 100)", "def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk", "def is_summon(self):\n return False", "def check_passing(self, tile):\n # Check if the creature can walk to the tile\n # Depends on creature and tile properties\n #cre = self.creature\n \n if self.creature:\n is_spirit = self.creature['spirit']\n else:\n is_spirit = False\n\n if is_spirit:\n passes = not tile.properties['electricity']\n else:\n passes = not tile.properties['solid']\n\n if (tile.properties['pass_small']):\n passes = self.creature['small']\n\n if tile.overtile:\n has_bridge = (tile.overtile.status == 'bridge')\n else:\n has_bridge = False\n\n if (tile.properties['water'] and not has_bridge):\n passes = (self.creature['swims'] or self.creature['flying'])\n\n if has_bridge: #tile.bridge:\n print(\"Bridge: {}\".format(tile.overtile))\n print(tile.overtile.x, tile.overtile.y)\n\n #print(passes)\n return(passes)", "def fe7_myrms_only(unit: ActiveUnit) -> None:\n if unit.template.unit_class.name not in [\"Myrmidon\", \"Swordmaster\", \"Blade Lord\"]:\n raise ValueError(\"Only Myrmidons and Swordmasters can equip this weapon\")\n return None", "def fountain_on_location(game, loc):\n my_fountains = game.get_my_mana_fountains()\n for fountain in my_fountains:\n if fountain.location.equals(loc):\n return True\n return False", "def 
isValid(self, game):\n if self.x == None or self.y == None or self.team == None:\n return False\n\n citytile = game.map.getCell(self.x, self.y).citytile\n if citytile == None:\n return False\n \n if not citytile.canBuildUnit():\n return False\n\n # TODO handle multiple units building workers in same turn\n if game.cartUnitCapReached(self.team):\n return False\n \n return True", "def isMole(self):\n return _libsbml.Unit_isMole(self)", "def is_monster_lord(self):\n return True", "def isValid(self, game):\n if self.x == None or self.y == None or self.team == None:\n return False\n \n if self.y < 0 or self.y >= game.map.height:\n return False\n if self.x < 0 or self.x >= game.map.height:\n return False\n\n citytile = game.map.getCell(self.x, self.y).citytile\n if citytile == None:\n return False\n \n if not citytile.canBuildUnit():\n return False\n\n # TODO handle multiple units building workers in same turn\n if game.workerUnitCapReached(self.team):\n return False\n \n return True", "def in_check(self):\r\n if self.turn_white:\r\n return self.square_under_attack(self.wKingPos[0], self.wKingPos[1])\r\n else:\r\n return self.square_under_attack(self.bKingPos[0], self.bKingPos[1])", "def test_is_mountain_in_range(self):\n self.assertTrue(self.user_location.is_mountain_in_range(self.mountain_one))\n self.assertFalse(self.user_location.is_mountain_in_range(self.mountain_two))", "def check_grue(self, tile):\n if tile[2] == 'grue':\n if self.lab.inventory > 0:\n self.lab.fire()\n print 'Lighted match'" ]
[ "0.683787", "0.6821652", "0.67176837", "0.6585365", "0.6462661", "0.6346446", "0.6329767", "0.6189364", "0.6164356", "0.6162114", "0.6154434", "0.6154434", "0.6154434", "0.6154434", "0.6154434", "0.6154434", "0.61218596", "0.60992813", "0.60683167", "0.60471827", "0.60274", "0.5974287", "0.5968742", "0.5942771", "0.59082854", "0.5891702", "0.58902556", "0.58793634", "0.580948", "0.579939" ]
0.69367707
0
Checks if the unit belongs to the enemy
def is_enemy(self) -> bool:
    return self.proto.alliance == ALLIANCE.Enemy.value
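The mirror image of `is_mine`; a typical use is filtering a unit list down to hostiles, sketched here with stubbed protos (the raw value Enemy=4 is an assumption taken from the SC2 protocol).

from types import SimpleNamespace

ENEMY = 4  # raw Alliance value for "Enemy" (assumed)
units = [SimpleNamespace(alliance=1), SimpleNamespace(alliance=4)]
enemies = [u for u in units if u.alliance == ENEMY]  # the per-unit is_enemy test
print(len(enemies))  # 1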
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attack(self, enemy: 'games.stardash.unit.Unit') -> bool:\n return self._run_on_server('attack', {\n 'enemy': enemy\n })", "def __is_will_belong_to_enemy(self, iceberg):\n simulation_data = self.__simulation_data\n last_turn_data = simulation_data.get(iceberg)[-1]\n owner_in_the_end = last_turn_data[OWNER]\n return utils.is_enemy(self.__game, owner_in_the_end)", "def enemyCaptured(self):\n return self.game.team.flag.carrier != None", "def IsGameOver(self):\n return any(c.cX + c.width >= self.end_location for c in self.enemies)", "def check_enemies(self):\n for enemy in self.pjs.enemies:\n for block in enemy.rects:\n if block.overlap(self.rects[0]):\n self.killer = enemy\n return", "def is_enemy(self, x, y, mycolor):\n piece = self.get_piece(x, y)\n if piece:\n return piece.color != mycolor\n return False", "def is_enemy_dead(self):\n enemy_rect = self.enemy.get_drawables()[0]\n return self.enemy.collided_with(player_rect)", "def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False", "def engageEnemyRobots(self, targetRobot):\n # self.log(\"engaging enemys\")\n enemyEngaged = False\n if SPECS.UNITS[self.me.unit].ATTACK_RADIUS[0] <= targetRobot['distance'] <= SPECS.UNITS[self.me.unit].ATTACK_RADIUS[1]: \n enemyEngaged = True\n return enemyEngaged", "def is_dead(self):\n if self.killer:\n if self.killer.stype == 'fire' and not (self.killer in self.pjs.fires):\n return True\n elif self.killer.stype == 'enemy' and self.timeout == 0:\n return True\n else:\n return False", "def detect_in_bounds(self):\n creature_x, creature_y = self.creature.current_location\n if creature_x < 0 or creature_x >= self.world_width\\\n or creature_y < 0 or creature_y >= self.world_height:\n print('The creature is out of bounds!')\n return False\n return True", "def iswalking(self):\r\n return self.model.coord!=self.model.targetcoord", "def _ensure_is_alive(self):\n if self._hit_points == 0:\n raise UnitIsDead('Unit is dead!')", "def check_teleport(self):\n if self.level.get_tile(self.nearest_node).teleport():\n self.nearest_node = self.level.get_tile(self.nearest_node).teleport_to_tile\n self.pos = self.node_to_pixel(self.nearest_node)", "def is_player_dead(self):\n player_rect = self.plane.get_drawables()[0].get_rect()\n for enemy in self.enemies:\n if enemy.collided_with(player_rect):\n return True", "def detect_collision(self, other_sprite):\n\n # collision detection in case enemy is above or below player position\n if self.y_pos > other_sprite.y_pos + other_sprite.height or \\\n self.y_pos + self.height < other_sprite.y_pos:\n return False\n # collision detection in case enemy at the same Y but left or right of the\n # player's position\n if self.x_pos > other_sprite.x_pos + other_sprite.width or \\\n self.x_pos + self.width < other_sprite.x_pos:\n return False\n return True", "def at_target(self):\n return self.location == self.target_location", "def check_collide_enemy(self):\n for enemy in pygame.sprite.spritecollide(self, enemy_group, False,\n collided=pygame.sprite.collide_mask):\n if self.attack_mode:\n enemy.death()\n elif not self.damage_mode and not self.death_mode:\n self.lives -= 1\n if self.lives > 0:\n self.damage()\n else:\n self.death()\n self.visible_hearts()\n if self.rotation == enemy.rotation:\n enemy.xvel = -enemy.xvel\n if enemy.rotation == ROTATION_LEFT:\n enemy.rotation = 
ROTATION_RIGHT\n else:\n enemy.rotation = ROTATION_LEFT\n enemy.attack()", "def attack(map_, unit, targets):\n attack_target = unit.find_first_adjacent_target(map_, targets)\n if attack_target is None:\n return False\n\n attack_target.hp -= unit.attack\n\n if attack_target.hp <= 0:\n attack_target.is_dead = True\n map_.open.add((attack_target.y, attack_target.x))\n map_.matrix[attack_target.y][attack_target.x] = \".\"\n return True", "def can_hit(self, target_unit):\n # If it's an air unit return false\n if isinstance(target_unit, unit.air_unit.AirUnit):\n return False\n \n # Not an air unit, return true\n return True", "def isNearTo(self, point):\n # BBB: I'm using a majored version of the collide rect to fix a problem with a charas-bouncing-effect on movement... :-|\n x, y = self.currentLevel.transformToScreenCoordinate(point)\n collide_rect = self.collide_rect\n collide_rect.height+=3\n return collide_rect.collidepoint(x, y)", "async def is_target_reached(self) -> bool: # type: ignore\n ...", "def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False", "def has_enemy_piece(self, piece) -> bool:\r\n if self.has_piece():\r\n if piece.get_color() != self.get_piece().get_color():\r\n return True\r\n \r\n return False", "def is_hero(self):\n return True", "def check_enemy_fleet(self):\n if len(self.enemyShips) > 0:\n response = False\n for ship in self.enemyShips:\n if ship.afloat == True:\n response = True\n return response", "def still_valid(self,board) -> bool:\r\n\r\n # Return False if any of the pieces are outside the board.\r\n if self.y_pos not in range(len(board)) or self.x_pos not in range(len(board)):\r\n return False\r\n if self.y_pos + self.y_dir not in range(len(board)) or self.x_pos + self.x_dir not in range(len(board)):\r\n return False\r\n # Return False if the spot behind the victim_piece is no longer empty.\r\n if board[self.y_pos+self.y_dir][self.x_pos+self.x_dir] != 0:\r\n return False\r\n\r\n killer_piece = board[self.y_pos][self.x_pos]\r\n victim_piece = board[self.y_pos+(self.y_dir//2)][self.x_pos+(self.x_dir//2)]\r\n\r\n # Return False if the pieces are the same type or if either piece has disappeared from the spot.\r\n if killer_piece == victim_piece or 0 in [killer_piece,victim_piece]:\r\n return False\r\n return True", "def isCombatOver(self):\n\t\treturn len(set([creature.type for creature in self.positionToCreature.values()])) <= 1", "def check_in_screen(self):\n if self.rect.colliderect(screen_rect) and not self.moving:\n return True\n return False", "def shooting(self):\r\n return not self.stopped" ]
[ "0.68174136", "0.675546", "0.66239154", "0.6620806", "0.6594355", "0.6586144", "0.6476883", "0.64507884", "0.63767195", "0.627667", "0.6238833", "0.6225941", "0.6219776", "0.6193665", "0.61795765", "0.610945", "0.6093099", "0.605545", "0.6041871", "0.6026772", "0.60141253", "0.60120934", "0.5974945", "0.5963112", "0.5956868", "0.594557", "0.5943624", "0.591874", "0.59116626", "0.59110475" ]
0.70494425
0
3D position of the unit.
def position3d(self) -> Point3:
    return Point3.from_proto(self.proto.pos)
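Sketch of what the conversion amounts to: `Point3.from_proto` essentially lifts the proto's (x, y, z) fields into the library's vector type, stubbed here with plain attributes.

from types import SimpleNamespace

proto = SimpleNamespace(pos=SimpleNamespace(x=24.5, y=31.0, z=8.9))
x, y, z = proto.pos.x, proto.pos.y, proto.pos.z  # the fields Point3.from_proto reads
print((x, y, z))  # (24.5, 31.0, 8.9)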
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def position(self, x, y, z):\n self.curr_position = Vector([x, y, z])\n self.ptr.position(x, y, z)", "def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def kinect_transform(self, x, y, z):\n xposition = x\n yposition = y\n zposition = z\n\n return zposition, xposition, yposition", "def __pos__(self):\r\n return vec4(+self.x, +self.y, +self.z, +self.w)", "def z(self):\r\n return self.position.z", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def getVerticePosition(self):\n #def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!\n return(self.X,self.Y,self.Z)", "def z(self):\n return self.coords[2]", "def __init__(self, unit_vector_3d):\n \n self.unit_vector = unit_vector_3d\n transposed_uv = np.transpose(self.unit_vector)\n self.x = transposed_uv[0] \n self.y = transposed_uv[1] \n self.z = transposed_uv[2]\n self.d = SkyCoord(self.x, self.y, self.z, \n unit = 'mpc', \n representation_type = 'cartesian', \n frame = 'icrs')\n self.d.representation_type = 'spherical'\n self.lons = self.d.galactic.l.wrap_at(360 * u.deg).deg\n self.lats = self.d.galactic.b.wrap_at(180 * u.deg).deg", "def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)", "def get_spawn_xyz(self):\n return self.X, self.Y, self.Z", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1", "def __init__(self, _x, _y, _z):\n self.position = Position3d(int(_x), int(_y), int(_z))\n self.velocity = Velocity3d(0, 0, 0)", "def get_position(self):\n ret = _pal.Vec3()\n _pal.lib.geometry_get_position(self._geometry, ret)\n return [x for x in ret]", "def _world_point(self, point_3d):\n return self.obj.matrix_world @ point_3d", "def xyz(self):\n return (self.x(), self.y(), self.z())", "def z(self):\n return self._coords[2]", "def unit(self):\r\n return 
Vector(self.x/self.length(), self.y/self.length())", "def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y", "def get_joystick3d(self, index=0):\r\n return (self.handler.absx[index], self.handler.absy[index], self.handler.absz[index])", "def __Us__(self, z):\r\n return pos(z - self.psi)", "def I3_u3(self) -> complex:\n return self.I3_u1() * cmath.rect(1, 120 / 180 * cmath.pi)", "def get_z(self):\n return self.coords[2]", "def position(self):\n return pm.datatypes.Point(self.transform.getTranslation(ws=True))", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n return (self.x, self.y, self.z)", "def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')" ]
[ "0.6906099", "0.687728", "0.6757202", "0.66627234", "0.66445655", "0.6620072", "0.6583574", "0.64869004", "0.64429545", "0.6421024", "0.64205307", "0.64181536", "0.641555", "0.641555", "0.6350701", "0.6316061", "0.6304115", "0.62886834", "0.6281074", "0.62697476", "0.6258691", "0.62532365", "0.6232527", "0.6230468", "0.62303454", "0.61872816", "0.61654884", "0.61654884", "0.61627054", "0.6159319" ]
0.7619963
0
Returns the unit's radar range
def radar_range(self) -> Union[int, float]:
    return self.proto.radar_range
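A hedged sketch of how the value is typically used: deciding whether a point falls inside a detector's radar coverage. The helper and the stub values are illustrative, not library API.

import math
from types import SimpleNamespace

tower = SimpleNamespace(radar_range=30.0, x=50.0, y=50.0)  # stubbed sensor-tower-like unit

def in_radar(unit, px, py):
    # covered when the 2D distance to the unit is within its radar range
    return math.hypot(px - unit.x, py - unit.y) <= unit.radar_range

print(in_radar(tower, 60.0, 60.0))  # True: distance ~14.14 <= 30.0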
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def polar_radius(self):\n return self.r * (1 - self.f)", "def radii(self):\n dim_half = (self.shape[0] + 1) // 2\n x = np.arange(dim_half)\n if self.step is None:\n return x\n else:\n xmax = x.max()\n x2 = list(x[x*(self.step-1) <= 1])\n v1 = x[len(x2)]\n while v1 < xmax:\n x2.append(v1)\n v1 *= self.step\n x2.append(xmax)\n return np.array(x2)", "def rad(area) :\n return sqrt(area/pi)", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def get_radius(self):\n return self.R", "def get_rho_range(self):\n return [self.rho_min,self.rho_max]", "def get_radius(self):\n return self.r", "def rad(x) :#en mm!\r\n return topdia(x)/2.0", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def inner_rad(self) -> Quantity:\n return self._inner_rad", "def get_radius(self):", "def radrad(rxn_class):\n return rxn_class[2]", "def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def fRad(self, fDu, NA, wLen):\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr", "def get_range(self):\n if self.battery_size == 70:\n r = 240\n elif self.battery_size == 85:\n r = 270\n\n message = \"This car can go approximately \" + str(r)\n message += \" miles on a full charge.\"\n print(message)", "def convert_rpm_to_rads(rpm_wheelR, rpm_wheelL):\n speed_wheelR = 2*pi*rpm_wheelR/60\n speed_wheelL = 2*pi*rpm_wheelL/60\n if speed_wheelR<0:\n speed_wheelR = -speed_wheelR\n if speed_wheelR<0:\n speed_wheelR = -speed_wheelR\n return [speed_wheelR, speed_wheelL]", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def radialInner(self):\n if self.radial in range(1, len(self.ThRZmesh.getPositions(label=\"R\"))):\n R = self.ThRZmesh.getUpper(label=\"R\", n=(self.radial - 1))\n else:\n runLog.warning(\n \"Error: Radial Index ({0}) location not INSIDE mesh \".format(\n self.radial\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")", "def get_radius(self):\r\n return 1", "def Rcoords(self):\n if self.radial > 0 and self.radial < len(self.ThRZmesh.getPositions(label=\"R\")):\n R = (self.radialInner() + self.radialOuter()) / 2.0\n else:\n # n = 0\n runLog.warning(\n \"Error: Radial Index ({}) location not INSIDE mesh \".format(self.radial)\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def get_van_Der_Waals_radius(self):\n return self.van_Der_Waals_radius", "def get_radius(self):\n return self.radius", "def get_radius(self):\n return self.radius", 
"def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f'This car can go about {range} miles on a full charge.')", "def Truncated_radius(self):\n r_trunc = fminbound(self.Mass_diff_005, -10., np.log10(self.scale_radius))\n return 10**float(r_trunc)", "def radius_to_annulus(r,annuli):\n if r < R_in:\n return -1\n for annulus in range(len(annuli)):\n if annuli[annulus] == r:\n annulus_smaller = annulus\n return annulus_smaller \n if annuli[annulus] > r:\n annulus_smaller = annulus-1\n return annulus_smaller\n return len(annuli)-1" ]
[ "0.65111315", "0.6504559", "0.65019053", "0.6387397", "0.6380159", "0.629702", "0.6281683", "0.6266965", "0.61659265", "0.60936135", "0.60886836", "0.6071368", "0.6068421", "0.6052135", "0.6042729", "0.60379237", "0.60167426", "0.6009", "0.5983171", "0.5955859", "0.59381574", "0.5911136", "0.5878273", "0.5874228", "0.5873673", "0.58175945", "0.58175945", "0.5802119", "0.57934225", "0.5779619" ]
0.7913477
0
Returns the structure building progress
def build_progress(self) -> Union[int, float]: return self.proto.build_progress
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_build_progress_info(self, build_id):\n pass", "def getProgress(self):", "def calc_progress(self):\n if self.is_prepared():\n self._sync_info_from_disk()\n self._num_sown_batches = len(\n glob.glob(\n os.path.join(self.location, \"batches\", BTCH_NM.format(\"*\"))\n )\n )\n self._num_results = len(\n glob.glob(\n os.path.join(self.location, \"results\", RSLT_NM.format(\"*\"))\n )\n )\n else:\n self._num_sown_batches = -1\n self._num_results = -1", "def report_build_progress(self, build_id, current, total, group_name='',\n status_line=''):\n pass", "def GetProgress(self):\n return self.objects_finished", "def progress(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.progress", "def progress(self):\n try:\n return 100.0 * (self.fields['sizeWhenDone'] - self.fields['leftUntilDone']) / float(self.fields['sizeWhenDone'])\n except ZeroDivisionError:\n return 0.0", "def fetch_progress(self):\n threads = len(opts.thread)\n files = len(self.files)\n t_width = len(str(threads))\n f_width = len(str(files))\n\n t_progress = f\"[{self.pos: >{t_width}}/{threads}]\"\n f_progress = f\"[{self.count: >{f_width}}/{files}]\"\n\n if self.count:\n progress = f\"{t_progress} {f_progress}\"\n else:\n progress = t_progress\n\n return progress", "def get_progress(self):\r\n return None", "def get_progress(self):\n ret = self.state + \"\\n\"\n self.reset_progress()\n return ret", "def progress(self):\n return self.runProgress", "def progress(self):\n return self.runProgress", "def GetProgress(self):\n return self.new_progress", "def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()", "def build_progress_report(self):\n\n report = {\n 'manifest': self._generate_manifest_section(),\n 'isos': self._generate_isos_section(),\n }\n return report", "def progress(self):\n return self.progressValue", "def build_progress_report(self):\n\n report = {\n 'packages' : self._packages_section(),\n 'metadata' : self._metadata_section(),\n 'publishing' : self._publishing_section(),\n }\n return report", "def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))", "def progress_data(self) -> Dict[str, Any]:\n result: Dict[str, Union[Dict[str, Union[str, int]], List[Tuple[str, str]], str]] = {\n 'progress': {\n 'state': 'error',\n 'step': -1\n },\n 'stages': TestStatus.stages(),\n 'start': '-',\n 'end': '-'\n }\n\n if len(self.progress) > 0:\n result['start'] = self.progress[0].timestamp\n last_status = self.progress[-1]\n\n if last_status.status in [TestStatus.completed, TestStatus.canceled]:\n result['end'] = last_status.timestamp\n\n if last_status.status == TestStatus.canceled:\n if len(self.progress) > 1:\n result['progress']['step'] = TestStatus.progress_step(self.progress[-2].status) # type: ignore\n\n else:\n result['progress']['state'] = 'ok' # type: ignore\n result['progress']['step'] = TestStatus.progress_step(last_status.status) # type: ignore\n\n return result", "def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()", "def get_progress(self):\n return self.cloudserver.progress", "def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)", "def status(self):\n if self.error_code:\n msg = self.error_code\n else:\n msg = 'C{cycle},P{seen},{progress:.0f}%'.format(\n cycle=self.cycle,\n seen=self.seen_per_cycle,\n progress=(self.step / float(self.count_points) * 100)\n )\n return '[W{worker_no}: {msg}]'.format(\n worker_no=self.worker_no,\n msg=msg\n )", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def getBuild():", "def get_info(self):\n if os.path.isfile(self.path):\n total_size = os.path.getsize(self.path)\n total_files = 1\n elif os.path.exists(self.path):\n total_size = 0\n total_files = 0\n for x in os.walk(self.path):\n for fn in x[2]:\n fpath = os.path.normpath(os.path.join(x[0], fn))\n rel_path = os.path.relpath(fpath, self.path)\n if any(fnmatch.fnmatch(rel_path, ext) for ext in self.exclude):\n continue\n fsize = os.path.getsize(fpath)\n if fsize and not is_hidden_file(fpath):\n total_size += fsize\n total_files += 1\n else:\n raise exceptions.InvalidInputException\n if not (total_files and total_size):\n raise exceptions.EmptyInputException\n if self.piece_size:\n ps = self.piece_size\n else:\n ps = 1 << max(0, math.ceil(math.log(total_size / 1500, 2)))\n if ps < MIN_PIECE_SIZE:\n ps = MIN_PIECE_SIZE\n if ps > MAX_PIECE_SIZE:\n ps = MAX_PIECE_SIZE\n return (total_size, total_files, ps, math.ceil(total_size / ps))", "def status(self):\n\n # check if all complete\n if io.job_complete(self.finaldir):\n return (\"complete\",None)\n\n # check status of relaxation runs\n self.update_rundir()\n\n # if not yet started\n if len(self.rundir) == 0:\n return (\"incomplete\", \"setup\")\n\n # if the latest run is complete:\n if io.job_complete(self.rundir[-1]):\n\n # if it is a final constant volume run\n if io.get_incar_tag(\"SYSTEM\", self.rundir[-1]) != None:\n if io.get_incar_tag(\"SYSTEM\", self.rundir[-1]).split()[-1].strip().lower() == \"final\":\n # if io.get_incar_tag(\"ISIF\", self.rundir[-1]) == 2 and \\\n # io.get_incar_tag(\"NSW\", self.rundir[-1]) == 0 and \\\n # io.get_incar_tag(\"ISMEAR\", self.rundir[-1]) == -5:\n return (\"complete\", None)\n\n # elif constant volume run (but not the final one)\n if io.get_incar_tag(\"ISIF\", self.rundir[-1]) in [0,1,2]:\n if io.get_incar_tag(\"NSW\", self.rundir[-1]) == len(io.Oszicar(os.path.join(self.rundir[-1],\"OSZICAR\")).E):\n return (\"incomplete\", \"relax\") # static run hit NSW limit and so isn't \"done\"\n else:\n return (\"incomplete\", \"constant\")\n\n # elif convergence criteria met\n if self.converged():\n return (\"incomplete\", \"constant\")\n\n # elif not converging, return 'not_converging' error\n if self.not_converging():\n return (\"not_converging\", None)\n\n # else continue relaxing\n else:\n return (\"incomplete\", \"relax\")\n\n # elif not converging, return 'not_converging' error\n elif self.not_converging():\n return (\"not_converging\", None)\n\n # else if the latest run is not complete, continue running it\n return (\"incomplete\", self.rundir[-1])", "def getFinalStatus():\n p = progressbar[:]\n p.insert(0, '[')\n p.insert(len(p), ']')\n return string.join(p, '')", "def _progress(self, walker):\n\n min_distance = self._calc_min_distance(walker)\n\n # test to see if the ligand is unbound\n unbound = False\n if min_distance >= self._cutoff_distance:\n unbound = True\n\n progress_data = {'min_distances' : min_distance}\n\n return unbound, progress_data" ]
[ "0.6612247", "0.6320412", "0.6279761", "0.6238136", "0.62358683", "0.62171316", "0.6205578", "0.6187908", "0.6183282", "0.61451906", "0.60300565", "0.60300565", "0.60299975", "0.6015985", "0.5934461", "0.5904014", "0.5863854", "0.5849668", "0.5822888", "0.58013105", "0.5792203", "0.57406014", "0.57213974", "0.56425005", "0.56425005", "0.5626768", "0.5594431", "0.5592802", "0.5589251", "0.55679274" ]
0.71646124
0
Checks if the unit is flying
def is_flying(self) -> bool: return self.proto.is_flying
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_fallen(self):\n orientation = self.minitaur_env.minitaur.GetBaseOrientation()\n rot_mat = self.minitaur_env._pybullet_client.getMatrixFromQuaternion(orientation)\n local_up = rot_mat[6:]\n return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.3)", "def check_fleet(self):\n if len(self.ships) > 0:\n response = False\n for ship in self.ships:\n if ship.afloat == True:\n response = True\n return response", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def check_enemy_fleet(self):\n if len(self.enemyShips) > 0:\n response = False\n for ship in self.enemyShips:\n if ship.afloat == True:\n response = True\n return response", "def shooting(self):\r\n return not self.stopped", "def fuel_ok(game, ship):\n fuelCost = game.game_map[ship.position].halite_amount * .1\n\n if round(fuelCost) > ship.halite_amount:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Ship {} has insuffient fuel. Have {}, need {}\".format(ship.id, ship.halite_amount, round(fuelCost, 2)))\n return False\n\n return True", "def is_food(self) -> bool:\n return self in (self.off, self.off_pro)", "def is_dead(self):\n if self.killer:\n if self.killer.stype == 'fire' and not (self.killer in self.pjs.fires):\n return True\n elif self.killer.stype == 'enemy' and self.timeout == 0:\n return True\n else:\n return False", "def is_penalty_event(self):\n if hasattr(self, \"fouls_to_give\"):\n team_ids = list(self.current_players.keys())\n offense_team_id = self.get_offense_team_id()\n defense_team_id = (\n team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]\n )\n if self.fouls_to_give[defense_team_id] == 0:\n if isinstance(self, (Foul, FreeThrow, Rebound)):\n # if foul or free throw or rebound on a missed ft\n # check foul event and should return false is foul\n # was shooting foul and team had a foul to give\n if isinstance(self, Foul):\n foul_event = self\n elif isinstance(self, FreeThrow):\n foul_event = self.foul_that_led_to_ft\n else:\n # if rebound is on missed ft, also need to look at foul that led to FT\n if not self.oreb and isinstance(self.missed_shot, FreeThrow):\n foul_event = self.missed_shot.foul_that_led_to_ft\n else:\n return True\n if foul_event is None:\n return True\n fouls_to_give_prior_to_foul = (\n foul_event.previous_event.fouls_to_give[defense_team_id]\n )\n if fouls_to_give_prior_to_foul > 0:\n return False\n return True\n return False", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def is_valid_flight(self) -> bool:\n flight_snapshot = self.flight()\n orbit_snapshot = self.orbit()\n direction_snapshot = np.array(self.direction())\n\n # zero altitude after x time condition\n if self.vessel.met > 10 and flight_snapshot.speed == 0:\n print('Rocket never left')\n return False\n\n # vessel not in ocean condition\n if self.vessel.met > 10 and (self.situation() == self.situation().docked\n or self.situation() == self.situation().landed\n or self.situation() == self.situation().splashed):\n print('Rocket not flying anymore')\n return False\n\n # zero fuel condition\n if min(self.liquid_fuel(), self.oxidizer()) == 0:\n print('Rocket out of fuel')\n return False\n\n # If rocket is ballistic. As in flying towards the ground\n horizontal_direction = np.array((0, direction_snapshot[1], direction_snapshot[2]))\n pitch = self.angle_between_vectors(direction_snapshot, horizontal_direction)\n if direction_snapshot[0] < 0:\n pitch = -pitch\n\n if pitch < -3 and flight_snapshot.mean_altitude < 70000:\n print(f'Went Ballistic with pitch{pitch} at altitude {flight_snapshot.mean_altitude}')\n return False\n\n return True", "def in_fire(self):\n Fire=False\n if self.state>0 and self.state<=5:\n Fire=True\n return Fire", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def fight(self, combat_item):\r\n print(self.name + \" doesn't want to fight with you\")\r\n return True", "def _ftolCheck(self):\n oldLoss = biggestRecentLoss(self.loss, self.lookback)\n newLoss = float(self.loss[-1])\n fracDiff = 2 * (oldLoss - newLoss)/(oldLoss + newLoss)\n \n if fracDiff < self.ftol:\n \n self.converged = True", "def can_flyover(self):\n return False", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def IsFloatable(self):\r\n\r\n return self.HasFlag(self.optionFloatable)", "def isFim(self):\r\n return", "def is_passable(self, tile, pos):\n #Check superclass to see if it's passable first\n if not super().is_passable(tile, pos):\n return False\n\n #This unit can't pass these specific terrains\n ttype = tile.type\n if (tile.type == 'forest'):\n return False\n \n #The tile is passable\n return True", "def isFim(self):\r\n return self.sair", "def time_to_fire(self):\n return(self.frequency < (time.time() - self.last_fired))", "def fusable(self) -> bool:\n obs_fusable = self._can_fuse_set_of_gridded_perms(self.obstruction_fuse_counter)\n req_fusable = all(\n self._can_fuse_set_of_gridded_perms(counter)\n for counter in self.requirements_fuse_counters\n )\n ass_fusable = all(\n self._can_fuse_assumption(assumption, counter)\n for assumption, counter in zip(\n self._tiling.assumptions, self.assumptions_fuse_counters\n )\n )\n return (\n obs_fusable\n and req_fusable\n and ass_fusable\n and self._check_isolation_level()\n )", "def fight(self, combat_item):\r\n if combat_item == self.weakness:\r\n print(\"You fend \" + self.name + \" off with the \" + combat_item )\r\n Character.victory_count +=1\r\n return True\r\n else:\r\n print(self.name + \" crushes you, puny adventurer\")\r\n return False", "def isFTS(issue):\n return (issue.isActive() and\n issue.isFTS())", "def check_falling(self, obstacles):\n self.rect.move_ip((0, 1))\n if not pygame.sprite.spritecollideany(self, obstacles):\n if not self.climb:\n\t self.fall = True\n\n self.rect.move_ip((0, -1))", "def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True", "def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True", "def is_gathering(self) -> bool:\n return self.orders and self.orders[0].ability.id is AbilityId.HARVEST_GATHER", "def run_checks(self, tile_model: TileModel) -> bool:\n\n # Doge cannot fire the deck gun\n if self.player.role == PlayerRoleEnum.DOGE:\n return False\n\n if not self.player == GameStateModel.instance().players_turn:\n return False\n\n ap_deduct = 2 if self.player.role == PlayerRoleEnum.DRIVER else 4\n\n if not TurnEvent.has_required_AP(self.player.ap, ap_deduct):\n return False\n\n # If the player is not located in the\n # same space as the engine, they cannot\n # fire the deck gun.\n engine_orient = self.engine.orientation\n if engine_orient == VehicleOrientationEnum.HORIZONTAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row and self.player.column == self.engine.column + 1\n if not on_first_spot and not on_second_spot:\n return False\n\n elif engine_orient == VehicleOrientationEnum.VERTICAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row + 1 and self.player.column == self.engine.column\n if not on_first_spot and not on_second_spot:\n return False\n\n engine_quadrant = self._determine_quadrant(self.engine.row, self.engine.column)\n tile_input_quadrant = self._determine_quadrant(tile_model.row, tile_model.column)\n # If there are players present in the\n # quadrant, the deck gun cannot be fired.\n # tile input gotta be on quadrant adjacent to engine\n if self._are_players_in_quadrant(engine_quadrant) or tile_input_quadrant != engine_quadrant:\n return False\n\n return True" ]
[ "0.6714423", "0.6327646", "0.6310387", "0.6241162", "0.62241924", "0.6197398", "0.61516845", "0.61313725", "0.60862607", "0.6074081", "0.6042982", "0.59909093", "0.5981111", "0.5970643", "0.59467703", "0.59375894", "0.5923914", "0.59213585", "0.59081155", "0.5905994", "0.5889967", "0.5852855", "0.5852529", "0.58451563", "0.5822611", "0.58124226", "0.5809103", "0.5809103", "0.5784877", "0.5734346" ]
0.6856984
0
Checks if the unit is a structure
def is_structure(self) -> bool: return ATTRIBUTE.Structure.value in self.type_data.attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_struct(self):\n return False", "def is_struct_type(self, objtype):\n return issubclass(objtype, self.get_real_ctypes_member('Structure'))", "def isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)", "def Unit_isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)", "def is_struct(schema_obj):\n\n return (isinstance(schema_obj, schema.Struct) or\n (isinstance(schema_obj, schema.Field) and schema_obj.struct_type))", "def is_structural(self):\n\n if self.depth > 1:\n\n if (self.path[0] == \"input\") and (self.path[1] in gs.all_elements):\n\n return True\n\n return False", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def hasUnit(val):\n return hasattr(val, 'unit') or hasattr(val, 'units')", "def is_noncomplex(obj):\n if type(obj) is time.struct_time:\n return True\n return False", "def testLengthsOfStructure(self):\n\t\tstruct_len = len(self.d3.structures)\n\t\tself.assertEqual(6, struct_len)", "def test_type(self):\n state1 = State()\n self.assertEqual(type(state1.name), str)\n self.assertNotEqual(type(state1.name), list)", "def is_unit(xblock):\r\n if xblock.category == 'vertical':\r\n parent_xblock = get_parent_xblock(xblock)\r\n parent_category = parent_xblock.category if parent_xblock else None\r\n return parent_category == 'sequential'\r\n return False", "def _is_valid_unit(unit: str, unit_type: str) -> bool:\n if unit_type == LENGTH:\n return unit in LENGTH_UNITS\n if unit_type == ACCUMULATED_PRECIPITATION:\n return unit in LENGTH_UNITS\n if unit_type == WIND_SPEED:\n return unit in WIND_SPEED_UNITS\n if unit_type == TEMPERATURE:\n return unit in TEMPERATURE_UNITS\n if unit_type == MASS:\n return unit in MASS_UNITS\n if unit_type == VOLUME:\n return unit in VOLUME_UNITS\n if unit_type == PRESSURE:\n return unit in PRESSURE_UNITS\n return False", "def isValidUnit(unit):\n\tfor i in Units:\n\t\tif (unit == i):\n\t\t\treturn True;\n\treturn False", "def test_empty_structure():\n empty = SME_Struct()\n\n assert isinstance(empty.version, str)\n assert empty.teff is not None\n assert empty.logg is not None\n assert empty.vmic == 0\n assert empty.vmac == 0\n assert empty.vsini == 0\n\n assert empty.nseg == 0\n assert empty.wave is None\n assert empty.spec is None\n assert empty.uncs is None\n assert empty.synth is None\n assert empty.cont is None\n assert empty.mask is None\n assert empty.mask_good is None\n assert empty.mask_bad is None\n # assert empty.mask_line is None\n # assert empty.mask_continuum is None\n\n assert empty.cscale.shape == (0, 1)\n assert empty.vrad.shape == (0,)\n assert empty.cscale_flag == \"none\"\n assert empty.vrad_flag == \"none\"\n assert empty.cscale_degree == 0\n\n assert empty.mu is not None\n assert empty.nmu == 7\n\n # assert empty.md5 is not None\n\n assert empty.linelist is not None\n assert empty.species is not None\n assert len(empty.species) == 0\n assert empty.atomic is not None\n\n assert empty.monh == 0\n assert not np.isnan(empty[\"abund Fe\"])\n assert empty.abund[\"H\"] == 12\n assert not np.isnan(empty.abund()[\"Mg\"])\n\n assert empty.system_info is not None\n assert empty.system_info.arch == \"\"\n\n assert len(empty.fitparameters) == 0\n assert empty.fitresults is not None\n assert empty.fitresults.covariance is None\n\n assert empty.atmo is not None\n assert empty.atmo.depth is None\n\n assert empty.nlte is not None\n assert empty.nlte.elements == []", "def _check_block_type():\n \n \n maxBlocks = blocks[0]['maxBlocks']\n freeEnd = blocks[0]['freeEnd']\n \n while(blkcounter in xrange(freeEnd + 1, maxBlocks, 1)) :\n if (type(bdata) == dict) :\n # directory, file inodes\n # won't touch the character device file\n if IS_DIR(blocks[blkcounter]['mode']):\n status = _check_dir_type(blocks[blkcounter], blkcounter)\n elif IS_REG(blocks[blkcounter]['mode']) :\n status = _check_reg_type(blocks[blkcounter], blkcounter)\n \n if(status != STATUS['OK']) :\n return status\n \n return STATUS['OK']", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def isSetKind(self):\n return _libsbml.Unit_isSetKind(self)", "def check_type(self):\n return True", "def structure_worth_investigating(left_struc, right_struc):\n if type(left_struc) is not type(right_struc):\n return False\n if type(left_struc) in TERMINALS:\n return False\n if len(left_struc) == 0 or len(right_struc) == 0:\n return False\n return True", "def test_get_structure_fmt(self):\n fmt, block_size = get_structure_fmt(SlotHeaderStructure)\n self.assertEqual(fmt, self.fmt_slot_header)\n self.assertEqual(block_size, self.block_size_slot_header)", "def test_unit_definitions(self):\n st = State(\"water\")\n props = st._all_props.union(st._read_only_props) - {\"phase\"} # type: ignore\n assert all([a in st._SI_units.keys() for a in props]) # type: ignore", "def intra_struct():\n\n return get('level') == 2", "def hasUnits(self):\n return _libsbml.ASTNode_hasUnits(self)", "def checkStruct(lst):\n obj = lst[0]\n if (isinstance(obj,tuple) or isinstance(obj,list)):\n dim = len(obj[0][0])\n elif isinstance(obj,Model): \n dim = obj.n \n elif isinstance(obj,Mat): \n dim = obj.shape[0]-1 \n elif isinstance(obj,Struct): \n dim = len(obj.box[0]) \n return dim", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def is_raw(self):\n return not self.has_structure", "def test_type_of_attributes(self):\n self.assertIsInstance(self.city.name, str)\n self.assertIsInstance(self.city.state_id, str)", "def isWeber(self):\n return _libsbml.Unit_isWeber(self)" ]
[ "0.71684635", "0.66890734", "0.65594214", "0.6516844", "0.6315124", "0.60989225", "0.6083501", "0.60701495", "0.6067753", "0.59412205", "0.5836151", "0.57489586", "0.5738008", "0.566084", "0.5591216", "0.5588741", "0.5575983", "0.5564178", "0.55461234", "0.5534568", "0.54774755", "0.5467258", "0.5466242", "0.5423045", "0.5413241", "0.5395809", "0.53938913", "0.53796965", "0.5370344", "0.53678215" ]
0.73056
1
Checks if the unit is from the light class
def is_light(self) -> bool: return ATTRIBUTE.Light.value in self.type_data.attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def have_light(self, light):\n if light > 1:\n return False\n return bool(self.light_array[light])", "def check_lighting_state_room1():\n if timer_lights_on_off_room1() == room1_lux():\n pass\n else:\n light_room1(timer_lights_on_off_room1())", "def is_light(game_object: GameObject) -> bool:\n return CommonObjectTagUtils.has_game_tags(game_object, (\n CommonGameTag.BUY_CAT_LD_WALL_LIGHT,\n CommonGameTag.BUY_CAT_LD_OUTDOOR_LIGHT,\n CommonGameTag.BUY_CAT_LD_CEILING_LIGHT,\n CommonGameTag.BUY_CAT_LD_NIGHT_LIGHT,\n CommonGameTag.BUY_CAT_LD_MISC_LIGHT,\n CommonGameTag.FUNC_LIGHT_NON_ELECTRIC,\n CommonGameTag.FUNC_POOL_LIGHT,\n CommonGameTag.FUNC_BUSINESS_LIGHT,\n CommonGameTag.FUNC_LASER_LIGHT,\n CommonGameTag.FUNC_RETAIL_NEON_LIGHT,\n CommonGameTag.STYLE_FESTIVAL_LIGHT,\n CommonGameTag.FUNC_HOLIDAY_FESTIVE_LIGHTING\n ))", "def check_light(light: pykulersky.Light):\n light.connect()\n light.get_color()", "def test_04_Light(self):\n l_xml = self.m_xml.light\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n # print(PrettyFormatAny.form(l_light, 'C4-04-A - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_0)", "def isLumen(self):\n return _libsbml.Unit_isLumen(self)", "def light_is_on(self):\r\n return self._light == \"ON\"", "def __get_light_state(self, light):\n rospy.logdebug(\"TLDetector.__get_light_state\")\n if not self.__has_image:\n self.__prev_light_loc = None\n return False\n\n cv_image = self.__bridge.imgmsg_to_cv2(self.__camera_image, \"bgr8\")\n\n rospy.logdebug(\"TLDetector: classifying light\")\n return self.__light_classifier.get_classification(cv_image)", "def light_is_on(self):\n return self._light == \"ON\"", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def test_02_Light(self):\n l_xml = self.m_xml.light_sect[1]\n print(PrettyFormatAny.form(l_xml, 'C3-02-A - XML'))\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n print(PrettyFormatAny.form(l_light, 'C3-02-B - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_1)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_1)\n self.assertEqual(l_light.UPBAddress, convert.dotted_hex2int(TESTING_INSTEON_ADDRESS_0))", "async def check_light(self, ip: str) -> bool:\n miner = self.miners[ip]\n return miner.lit", "def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False", "def check_lighting_state_room2():\n if timer_lights_on_off_room2() == room2_lux():\n pass\n else:\n light_room2(timer_lights_on_off_room1())", "def isWeber(self):\n return _libsbml.Unit_isWeber(self)", "def has_undercoordinated_metal(self):\n return self._has_low_metal_coordination()", "def check_engine_light(self):\n return self._check_engine_light", "def test_light_sensor_unit_lm(self):\n with patch.dict(TYPES, {'LightSensor': self.mock_type}):\n state = State('sensor.light', '900',\n {ATTR_UNIT_OF_MEASUREMENT: 'lm'})\n get_accessory(None, state, 2, {})", "def is_on(self):\n return self._light_on", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def test_light_no_data(self):\n light = Light({})\n\n assert light.warning is None\n assert light.off is None", "def get_light_state(self, light):\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #cv2.imshow('image',cv_image)\n #cv2.waitKey(0)\n\n # Get classification\n if self.init_ok == True:\n return self.light_classifier.get_classification(cv_image)\n else:\n return TrafficLight.UNKNOWN", "def check_ball_on_target():\n\n pass", "def class_is(cls: Class) -> bool:\n pass", "def hasSummon(self):\n if self.isClass(\"Sorcerer\") and self.minionList:\n return True\n return False", "def get_light_state(self, light):\n\treturn light.state \n\n\t#if(not self.has_image):\n # self.prev_light_loc = None\n # return False", "def isClass(self, className):\n return self.characterClass == className or self.baseClass == className", "def Unit_isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)", "def _get_light_state(self, light):\n if(not self.has_image):\n return False\n\n #\n # we want to convert the image from internal ROS format to\n # CV2 GBR8 format because we trained our classifier model using GBR8 images.\n # The original raining images were in PNG and JPG format and in folders\n # that make it look like they are RGB, but we must remember that the training\n # loads the images using cv2.imread() which loads the images into BGR8\n # format, NOT RGB8!\n #\n cv2_image_bgr = self.bridge.imgmsg_to_cv2(self.camera_image_msg, \"bgr8\")\n\n #\n #Get classification\n #\n return self.light_classifier.get_classification(cv2_image_bgr)", "def can_hit(self, target_unit):\n # If it's an air unit return false\n if isinstance(target_unit, unit.air_unit.AirUnit):\n return False\n \n # Not an air unit, return true\n return True" ]
[ "0.67619145", "0.62722176", "0.624471", "0.6222856", "0.61466455", "0.6130839", "0.61091214", "0.6041529", "0.59606564", "0.5940065", "0.5884954", "0.5875173", "0.5874196", "0.58629966", "0.5836105", "0.5811311", "0.56859607", "0.5681524", "0.56774807", "0.56640285", "0.5658129", "0.5638092", "0.56283855", "0.56130093", "0.5601861", "0.5583544", "0.55748165", "0.5563149", "0.5555986", "0.55486846" ]
0.70930135
1
Checks if the unit is from the armored class
def is_armored(self) -> bool: return ATTRIBUTE.Armored.value in self.type_data.attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_instance(self):\n self.assertIsInstance(self.amenity_1, amenity)\n self.assertIsInstance(self.amenity_2, amenity)", "def match(self, cls):\n return isinstance(self, cls)", "def is_actor():\n return False", "def is_not_subclass(self, cls, seconds=60):\n st = '('+') & ('.join(cls.axioms)+')'\n m = prover9(self.axioms, [st], seconds, 1, options=self.options)\n if type(m)==list:\n return True, m[0]\n else:\n return False, m", "def can_hit(self, target_unit):\n # If it's an air unit return false\n if isinstance(target_unit, unit.air_unit.AirUnit):\n return False\n \n # Not an air unit, return true\n return True", "def issub_class(self):\n insta = Amenity()\n self.assertIsInstance(insta, BaseModel)\n self.assertTrue(hasattr(insta, \"id\"))\n self.assertTrue(hasattr(insta, \"created_at\"))\n self.assertTrue(hasattr(insta, \"update_at\"))", "def class_is(cls: Class) -> bool:\n pass", "def is_actor(self):\n return True", "def test_airplane(self):\n try:\n self.test = oop1.Airplane()\n self.assertIsInstance(self.test, oop1.Airplane)\n print(\"\\nPASS : Airplane Class Exists\\n\")\n except NameError as e:\n print(e)", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def test_instantiation(self):\n self.assertIsInstance(self.amenity, Amenity)", "def is_icecube_class(obj: Any) -> bool:\n classname = str(type(obj))\n return \"icecube.\" in classname", "def is_reaction_class(rxn_class):\n return rxn_class in _values(ReactionClass.Typ)", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def isNodeAGizmo(aNode):\n if type(aNode) == 'Gizmo':\n return True\n else:\n return False", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def _isA(self, elementClass, category = ''):\n if not isinstance(self, elementClass):\n return False\n if category and self.getCategory() != category:\n return False\n return True", "def is_arming(self):\n return self == ArmingState.ARMING", "def isClass(self, className):\n return self.characterClass == className or self.baseClass == className", "def test_motorcycle(self):\n try:\n self.test = oop1.Motorcycle()\n self.assertIsInstance(self.test, oop1.Motorcycle)\n print(\"\\nPASS : Class Exists\\n\")\n except NameError as e:\n print(e)", "def is_applicable_to(cls, device_type: str,\n device_class: Type[gdm_test_base.DeviceType],\n device_name: str) -> bool:\n return issubclass(device_class, gazoo_device_base.GazooDeviceBase)", "def match(cls, kind: 'dsl.Any') -> bool:\n return isinstance(kind, cls)", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def has_class(self, name):\n return name in self._cached_class", "def test_class_attribute_equality(self):\n self.assertEqual(self.aldous, self.__class__.aldous)", "def is_a(self, t):\n return isinstance(self._, t)", "def hasSummon(self):\n if self.isClass(\"Sorcerer\") and self.minionList:\n return True\n return False" ]
[ "0.62438434", "0.61637175", "0.60709447", "0.60623026", "0.6050455", "0.6046287", "0.60447234", "0.5943034", "0.591435", "0.585211", "0.5851524", "0.58474976", "0.58188045", "0.5810405", "0.57621753", "0.5739988", "0.57276726", "0.57066995", "0.568539", "0.567616", "0.56665856", "0.5656026", "0.5646659", "0.56438535", "0.5632024", "0.56268793", "0.5560006", "0.55366987", "0.55333793", "0.5502958" ]
0.6683675
1
Checks if the unit is from the robotic class
def is_robotic(self) -> bool: return ATTRIBUTE.Robotic.value in self.type_data.attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_applicable_to(cls, device_type: str,\n device_class: Type[gdm_test_base.DeviceType],\n device_name: str) -> bool:\n return issubclass(device_class, gazoo_device_base.GazooDeviceBase)", "def test_ground_vehicle(self):\n try:\n self.test = oop1.GroundVehicle()\n self.assertIsInstance(self.test, oop1.GroundVehicle)\n print(\"\\nPASS : GroundVehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def is_simulation_robot(self):\r\n return self._arm.is_simulation_robot", "def class_is(cls: Class) -> bool:\n pass", "def is_hero(self):\n return True", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False", "def is_not_subclass(self, cls, seconds=60):\n st = '('+') & ('.join(cls.axioms)+')'\n m = prover9(self.axioms, [st], seconds, 1, options=self.options)\n if type(m)==list:\n return True, m[0]\n else:\n return False, m", "def is_icecube_class(obj: Any) -> bool:\n classname = str(type(obj))\n return \"icecube.\" in classname", "def is_actor():\n return False", "def test_vehicle(self):\n try:\n self.test = oop1.Vehicle()\n self.assertIsInstance(self.test, oop1.Vehicle)\n print(\"\\nPASS : Vehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def is_bot(self) -> undefined.UndefinedOr[bool]:", "def test_motorcycle(self):\n try:\n self.test = oop1.Motorcycle()\n self.assertIsInstance(self.test, oop1.Motorcycle)\n print(\"\\nPASS : Class Exists\\n\")\n except NameError as e:\n print(e)", "def is_actor(self):\n return True", "def hasSummon(self):\n if self.isClass(\"Sorcerer\") and self.minionList:\n return True\n return False", "def can_hit(self, target_unit):\n # If it's an air unit return false\n if isinstance(target_unit, unit.air_unit.AirUnit):\n return False\n \n # Not an air unit, return true\n return True", "def isClass(self, className):\n return self.characterClass == className or self.baseClass == className", "def is_active(self, physics):\n pass", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def is_bot(self) -> bool:", "def match(self, cls):\n return isinstance(self, cls)", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def is_system(self) -> undefined.UndefinedOr[bool]:", "def is_system(self) -> bool:", "def isWeber(self):\n return _libsbml.Unit_isWeber(self)", "def _is_valid_unit(unit: str, unit_type: str) -> bool:\n if unit_type == LENGTH:\n return unit in LENGTH_UNITS\n if unit_type == ACCUMULATED_PRECIPITATION:\n return unit in LENGTH_UNITS\n if unit_type == WIND_SPEED:\n return unit in WIND_SPEED_UNITS\n if unit_type == TEMPERATURE:\n return unit in TEMPERATURE_UNITS\n if unit_type == MASS:\n return unit in MASS_UNITS\n if unit_type == VOLUME:\n return unit in VOLUME_UNITS\n if unit_type == PRESSURE:\n return unit in PRESSURE_UNITS\n return False", "def class_is_interesting(name: str):\n if name.startswith('org.chromium.'):\n return True\n return False", "def detect(cls):\n return False" ]
[ "0.62810934", "0.6114108", "0.6111489", "0.60843873", "0.60417813", "0.60207677", "0.5918093", "0.59043795", "0.5873593", "0.5871166", "0.5870519", "0.5862491", "0.5849073", "0.58296394", "0.5796854", "0.57912356", "0.5782388", "0.5762797", "0.5751194", "0.5750995", "0.57483846", "0.57021254", "0.569495", "0.5687345", "0.56444293", "0.5641967", "0.56235534", "0.56223744", "0.5620562", "0.5594548" ]
0.661055
1
Checks if the unit is from the psionic class
def is_psionic(self) -> bool: return ATTRIBUTE.Psionic.value in self.type_data.attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)", "def Unit_isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def hasSummon(self):\n if self.isClass(\"Sorcerer\") and self.minionList:\n return True\n return False", "def hasUnit(val):\n return hasattr(val, 'unit') or hasattr(val, 'units')", "def _is_valid_unit(unit: str, unit_type: str) -> bool:\n if unit_type == LENGTH:\n return unit in LENGTH_UNITS\n if unit_type == ACCUMULATED_PRECIPITATION:\n return unit in LENGTH_UNITS\n if unit_type == WIND_SPEED:\n return unit in WIND_SPEED_UNITS\n if unit_type == TEMPERATURE:\n return unit in TEMPERATURE_UNITS\n if unit_type == MASS:\n return unit in MASS_UNITS\n if unit_type == VOLUME:\n return unit in VOLUME_UNITS\n if unit_type == PRESSURE:\n return unit in PRESSURE_UNITS\n return False", "def is_system(self) -> bool:", "def is_system(self) -> undefined.UndefinedOr[bool]:", "def is_not_subclass(self, cls, seconds=60):\n st = '('+') & ('.join(cls.axioms)+')'\n m = prover9(self.axioms, [st], seconds, 1, options=self.options)\n if type(m)==list:\n return True, m[0]\n else:\n return False, m", "def is_translation_unit(self):\r\n return conf.lib.clang_isTranslationUnit(self)", "def _assert_has_spc(subcase, fem):\n if 'SPC' not in subcase:\n has_ps = False\n for unused_nid, node in fem.nodes.items():\n if node.ps:\n has_ps = True\n break\n assert subcase.has_parameter('SPC', 'STATSUB') or has_ps, subcase", "def has_cooling_system(bpr):\n\n if bpr.hvac['type_cs'] in {'T1', 'T2', 'T3'}:\n return True\n elif bpr.hvac['type_cs'] in {'T0'}:\n return False\n else:\n raise", "def _IsAngstroem(units):\n if isinstance(units, Atom):\n check = units.store.get(\"units\")\n if not check:\n return False\n else:\n check = units\n return check == \"angstroem\" or check == \"angstroemd0\"", "def is_locust(tup):\n name, item = tup\n return bool(\n inspect.isclass(item)\n and issubclass(item, Locust)\n and hasattr(item, \"task_set\")\n and getattr(item, \"task_set\")\n and not name.startswith('_')\n )", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def isItem(self):\n return _libsbml.Unit_isItem(self)", "def has_apt(klass):\n return False", "def isValidUnit(unit):\n\tfor i in Units:\n\t\tif (unit == i):\n\t\t\treturn True;\n\treturn False", "def class_is(cls: Class) -> bool:\n pass", "def is_applicable_to(cls, device_type: str,\n device_class: Type[gdm_test_base.DeviceType],\n device_name: str) -> bool:\n return issubclass(device_class, gazoo_device_base.GazooDeviceBase)", "def has_classname(self):\n return self.unpack_word(0x4A) > 0", "def hasUnits(self):\n return _libsbml.ASTNode_hasUnits(self)", "def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False", "def test_stationarity_check(self, whp_pandas, not_station_pd):\n test_class = Slug_Forecasting(whp_pandas.copy()) # Instantiate class object\n test_class.stationarity_check()\n\n assert hasattr(test_class, \"station_result\"), \"Station_result attribute is created\"\n assert test_class.station_result[0] < 0.05, \"In this example, p-value should be less than 5%\"\n\n test_class.stationarity_check(diff=1)\n assert test_class.station_result[0] <= 0.0, \"In this example, p-value should be 0%\"\n\n test_class = Slug_Forecasting(not_station_pd.copy()) # Instantiate new object with non stationary data\n test_class.stationarity_check()\n assert test_class.station_result[0] > 0.05, \"In this example, p-value should be more than 5%\"", "def test_is_metric(self):\n self.assertTrue(METRIC_SYSTEM.is_metric)\n self.assertFalse(IMPERIAL_SYSTEM.is_metric)", "def is_dsp(self):\n return True", "def is_libpsp():\n return __is_libpsp__", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def is_P(self):\n return isinstance(self,P)", "def isStation(self) -> bool:\n return self.station" ]
[ "0.61035377", "0.6066632", "0.6008541", "0.5901321", "0.5820014", "0.5700515", "0.55786186", "0.5520976", "0.5505748", "0.5477234", "0.5328617", "0.53267735", "0.53158754", "0.5307552", "0.5296573", "0.5270201", "0.52660614", "0.5259549", "0.52338326", "0.52231985", "0.5217615", "0.52148145", "0.5212257", "0.5210289", "0.5190517", "0.51896846", "0.51728237", "0.5172474", "0.5155422", "0.51545423" ]
0.67083144
0
Building tech equality, e.g. OrbitalCommand is the same as CommandCenter For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair] For SCV, this returns None
def tech_alias(self) -> Optional[List[UnitTypeId]]: return self.type_data.tech_alias
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_type(self) -> str:", "def get_enduse_techs(fuel_tech_p_by):\n enduse_techs = []\n\n for tech_fueltype in fuel_tech_p_by.values():\n if 'placeholder_tech' in tech_fueltype.keys():\n return []\n else:\n enduse_techs += tech_fueltype.keys()\n\n return list(set(enduse_techs))", "def _get_set(cost):\n if any(i in cost for i in [\"_cap\", \"depreciation_rate\", \"purchase\", \"area\"]):\n return \"loc_techs_investment_cost\"\n elif any(i in cost for i in [\"om_\", \"export\"]):\n return \"loc_techs_om_cost\"\n else:\n return \"loc_techs\"", "def _get_set(constraint):\n if \"_area\" in constraint:\n return \"loc_techs_area\"\n elif any(\n i in constraint for i in [\"resource_cap\", \"parasitic\", \"resource_min_use\"]\n ):\n return \"loc_techs_supply_plus\"\n elif (\n \"resource\" in constraint\n ): # i.e. everything with 'resource' in the name that isn't resource_cap\n return \"loc_techs_finite_resource\"\n elif (\n \"storage\" in constraint\n or \"charge_rate\" in constraint\n or \"energy_cap_per_storage_cap\" in constraint\n ):\n return \"loc_techs_store\"\n elif \"purchase\" in constraint:\n return \"loc_techs_purchase\"\n elif \"units_\" in constraint:\n return \"loc_techs_milp\"\n elif \"export\" in constraint:\n return \"loc_techs_export\"\n else:\n return \"loc_techs\"", "def units_which_can_be_built(self):\n what_can_be_built = [Pikeman.kind]\n player = self.player\n if player.age in ('bronze age', 'iron age'):\n shields = BronzeShields\n swords = BronzeSwords\n if all(s.name in player.things_researched for s in (shields, swords)):\n what_can_be_built.append(Swordsman.kind)\n return what_can_be_built", "def get_ontology_set_of_type(self, function_type, go_slim_or_basic):\n #!!! potential speed up with \"|=\" instead of \".union()\"\n if function_type == \"all_GO\":\n if go_slim_or_basic == \"basic\":\n return self.type_2_association_dict[-21].union(self.type_2_association_dict[-22]).union(self.type_2_association_dict[-23])\n else: # slim\n return self.go_slim_set\n\n elif function_type == \"UPK\":\n return self.type_2_association_dict[-51]\n\n elif function_type == \"BP\":\n if go_slim_or_basic == \"basic\":\n return self.type_2_association_dict[-21]\n else:\n return self.type_2_association_dict[-21].intersection(self.go_slim_set)\n\n elif function_type == \"MF\":\n if go_slim_or_basic == \"basic\":\n return self.type_2_association_dict[-22]\n else:\n return self.type_2_association_dict[-22].intersection(self.go_slim_set)\n\n elif function_type == \"CP\":\n if go_slim_or_basic == \"basic\":\n return self.type_2_association_dict[-23]\n else:\n return self.type_2_association_dict[-23].intersection(self.go_slim_set)\n else:\n print(\"entity_type: '{}' does not exist\".format(function_type))\n raise StopIteration", "def buildUnits(self, obs, UnitName, Quantity):\n \n \"\"\"drones, overlords, zerglings, roaches, hydralisks, corrupters, queen(may need own function)\"\"\" \n actions.FUNCTIONS.select_larva(\"select\")\n if (UnitName == \"drone\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Drone_quick(\"now\")\n if (UnitName == \"overlord\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Overlord_quick(\"now\")\n if (UnitName == \"zergling\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Zergling_quick(\"now\")\n if (UnitName == \"Roach\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Roach_quick(\"now\")\n if (UnitName == \"hydralisks\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Hydralisk_quick(\"now\")\n if (UnitName == \"corruptor\"):\n if self.can_do(obs, actions.FUNCTIONS.Train_Zergling_quick.id):\n return actions.FUNCTIONS.Train_Corruptor_quick(\"now\")\n \"\"\"UnitsForControlGroup: [#drone, #zergling, #roaches, #hydralisks]\"\"\"", "def getUnitType(self, id):\n self.send(\"#5\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAP800\"\n self.send(\"#7\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAP400\"\n self.send(\"#4\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"PSR1212\"\n self.send(\"#6\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAPTH2\"\n return \"No Device Found\"", "def to_equivalent_for_robot_type(self, robot_type: RobotType) -> DeckSlotName:\n if robot_type == \"OT-2 Standard\":\n return self.to_ot2_equivalent()\n elif robot_type == \"OT-3 Standard\":\n return self.to_ot3_equivalent()", "def match_units(self, other):\n for key in self.photons.colnames:\n if key in other.photons.colnames:\n if other[key].unit:\n unit = other[key].unit\n if str(unit).lower() == 'none' and str(self[key].unit).lower() == 'none':\n continue\n self[key].convert_unit_to(unit)", "def ExpectedEnginesToBuild(self, run_params):\n return [\"TRTEngineOp_000\"]", "def _device_type_returner(self, symbol):\n if self.names.get_name_string(symbol.id) == \"AND\":\n return self.devices.AND\n if self.names.get_name_string(symbol.id) == \"OR\":\n return self.devices.OR\n if self.names.get_name_string(symbol.id) == \"NAND\":\n return self.devices.NAND\n if self.names.get_name_string(symbol.id) == \"NOR\":\n return self.devices.NOR\n if self.names.get_name_string(symbol.id) == \"XOR\":\n return self.devices.XOR\n if self.names.get_name_string(symbol.id) == \"CLOCK\":\n return self.devices.CLOCK\n if self.names.get_name_string(symbol.id) == \"SWITCH\":\n return self.devices.SWITCH\n if self.names.get_name_string(symbol.id) == \"DTYPE\":\n return self.devices.D_TYPE\n if self.names.get_name_string(symbol.id) == \"SIGGEN\":\n return self.devices.SIGGEN", "def _MatchType1(self, command_task, extra_required_attrs):\n run_target = command_task.run_targets[0]\n devices = self._run_target_index.get(run_target)\n for device in six.itervalues(devices):\n if self._MatchDeviceAttributes(\n (command_task.test_bench.host.groups[0].run_targets[0]\n .device_attributes) + extra_required_attrs,\n device.attributes):\n return [device]\n return None", "def vehicle_type():\n pass", "def find_all_ORFs_both_strands_unit_tests():\n print \"input: CTA, \"+\"output: \"+ \",\".join(find_all_ORFs_both_strands(\"CTA\"))+\", actual output: CTA,TA,A,AG,G\"\n print \"input: GTCACTTAGGGTTTT, \"+\"output: \"+\",\".join(find_all_ORFs_both_strands(\"GTCACTTAGGGTTTT\"))+\", actual output: GTCACT,GGTTTT,TCACTTAGGGTTTT,CACTTAGGGTTTT,AAAACCCTAAGTGAC,AAACCC,GTGAC,AACCCTAAG\"\n print \"input: AAATTTTATAATGGGTGAAGTTAG, \"+\"output: \"+\",\".join(find_all_ORFs_both_strands(\"AAATTTTATAATGGGTGAAGTTAG\"))+\", actual output: AAATTTTATAATGGG,AGT,AATTTTATAATGGGTGAAGTTAG,ATTTTA,TGGGTGAAGTTAG,CTAACTTCACCCATTATAAAATTT,CTTCACCCATTA,AATTT,AACTTCACCCATTATAAAATTT\"\n print \"input: TATATGGAGGATAATAGTTGATAATAG, \"+\"output: \"+ \",\".join(find_all_ORFs_both_strands(\"TATATGGAGGATAATAGTTGATAATAG\"))+\", actual output: TATATGGAGGATAATAGT,ATATGGAGGATAATAGTTGATAATAG,TATGGAGGA,TTGATAATAG,CTATTATCAACTATTATCCTCCATATA,TATTATCAACTATTATCCTCCATATA,ATTATCAACTATTATCCTCCATATA\"", "def sameHardware(self, other):\n\n return (self.vendorId == other.vendorId and \\\n self.deviceId == other.deviceId and \\\n self.physicalMemory == other.physicalMemory and \\\n self.osInfo == other.osInfo and \\\n self.cpuSpeed[0] == other.cpuSpeed[0])", "def test_closest_type_specified(self):\n m = mats.Materials(\"mats_test.json\", NoneVisited())\n self.assertEqual( '164 G. Canis Majoris', m.closest([0, 0, 0], ['Tungsten', 'Germanium'], types = ['Braintree'])[1]['system'])\n self.assertEqual( '2MASS J10433563-5945136', m.closest([8000, 0, 3000], ['Tungsten', 'Germanium'], types=['Thargoid'])[1]['system'])", "def _MatchType2(self, command_task, extra_required_attrs):\n for group in six.itervalues(self._groups):\n matched_devices = self._MatchGroup(\n group,\n command_task.test_bench.host.groups[0],\n extra_required_attrs)\n if matched_devices:\n return matched_devices\n return None", "def find_all_ORFs_both_strands_unit_tests():\n\n # YOUR IMPLEMENTATION HERE", "def get_platform_combinations():\n mapped_osname = platform_map(g_osname)\n mapped_osarch = g_osarch\n ret = [mapped_osname]\n while True:\n ret += [mapped_osarch, mapped_osname + \"-\" + mapped_osarch]\n mapped_osarch = platform_map_iterate(mapped_osarch)\n if not mapped_osarch:\n break\n return sorted(ret, reverse=True) + [\"default\"]", "def get_unit_info(config_dict):\n try:\n group_dict = config_dict['StdReport']['StandardReport']['Units']['Groups']\n # Look for a strict superset of the group settings:\n if all(group_dict[group] == us_group[group] for group in us_group):\n return 'us'\n elif all(group_dict[group] == metric_group[group] for group in metric_group):\n return 'metric'\n elif all(group_dict[group] == metricwx_group[group] for group in metricwx_group):\n return 'metricwx'\n except KeyError:\n return None", "def _custom_unit_or_undef(\n self, primary_key: str, secondary_key: str\n ) -> str | None | UndefinedType:\n assert self.registry_entry\n if (\n (sensor_options := self.registry_entry.options.get(primary_key))\n and secondary_key in sensor_options\n and (device_class := self.device_class) in UNIT_CONVERTERS\n and self.native_unit_of_measurement\n in UNIT_CONVERTERS[device_class].VALID_UNITS\n and (custom_unit := sensor_options[secondary_key])\n in UNIT_CONVERTERS[device_class].VALID_UNITS\n ):\n return cast(str, custom_unit)\n return UNDEFINED", "def getHardware(self):\n return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0])", "def _determine_conditions(self, prod_obj):\n\n # determine product type, initialize and build conditions list\n if hasattr(prod_obj, \"edp_list\") and hasattr(prod_obj, \"fdp_list\"): # For total products\n if self.instrument == \"wfc3\" and self.detector == \"uvis\":\n thresh_time = Time(\"2012-11-08T02:59:15\", format='isot', scale='utc').mjd\n # Get the MJDUTC of the first exposure in the filter exposure product list. While\n # each exposure will have its own MJDUTC (the EXPSTART keyword), this is probably\n # granular enough.\n mjdutc = prod_obj.edp_list[0].mjdutc\n if mjdutc >= thresh_time:\n self.conditions = [\"total_basic_post\"]\n else:\n self.conditions = ['total_basic_pre']\n else:\n self.conditions = [\"total_basic\"]\n if len(prod_obj.edp_list) == 1:\n self.conditions.append(\"any_n1\")\n elif hasattr(prod_obj, \"edp_list\") and not hasattr(prod_obj, \"fdp_list\"): # For filter products\n self.conditions = [\"filter_basic\"]\n n_exp = len(prod_obj.edp_list)\n if n_exp == 1:\n self.conditions.append(\"any_n1\")\n else:\n # Get the filter of the first exposure in the filter exposure product list. The filter\n # will be the same for all the exposures in the list.\n self.filters = prod_obj.edp_list[0].filters\n if self.instrument == \"acs\":\n if self.detector == \"hrc\":\n if n_exp in [2, 3]:\n self.conditions.append(\"acs_hrc_any_n2\")\n if n_exp in [4, 5]:\n self.conditions.append(\"acs_hrc_any_n4\")\n if n_exp >= 6:\n self.conditions.append(\"acs_hrc_any_n6\")\n elif self.detector == \"sbc\":\n if self.filters.lower() in [\"f115lp\", \"f122m\"]:\n if n_exp in [2, 3, 4, 5]:\n self.conditions.append(\"acs_sbc_blue_n2\")\n if n_exp >= 6:\n self.conditions.append(\"acs_sbc_blue_n6\")\n else:\n if n_exp in [2, 3, 4, 5]:\n self.conditions.append(\"acs_sbc_any_n2\")\n if n_exp >= 6:\n self.conditions.append(\"acs_sbc_any_n6\")\n elif self.detector == \"wfc\":\n if n_exp in [2, 3]:\n self.conditions.append(\"acs_wfc_any_n2\")\n if n_exp in [4, 5]:\n self.conditions.append(\"acs_wfc_any_n4\")\n if n_exp >= 6:\n self.conditions.append(\"acs_wfc_any_n6\")\n else:\n sys.exit(\"INVALID ACS DETECTOR!\")\n elif self.instrument == \"wfc3\":\n if self.detector == \"ir\":\n if self.filters.lower() in [\"g102\", \"g141\"]:\n if n_exp in [2, 3]:\n self.conditions.append(\"wfc3_ir_grism_n2\")\n if n_exp >= 4:\n self.conditions.append(\"wfc3_ir_grism_n4\")\n else:\n if n_exp in [2, 3]:\n self.conditions.append(\"wfc3_ir_any_n2\")\n if n_exp >= 4:\n self.conditions.append(\"wfc3_ir_any_n4\")\n elif self.detector == \"uvis\":\n thresh_time = Time(\"2012-11-08T02:59:15\", format='isot', scale='utc').mjd\n # Get the MJDUTC of the first exposure in the filter exposure product list. While\n # each exposure will have its own MJDUTC (the EXPSTART keyword), this is probably\n # granular enough.\n mjdutc = prod_obj.edp_list[0].mjdutc\n if mjdutc >= thresh_time:\n if n_exp in [2, 3]:\n self.conditions.append(\"wfc3_uvis_any_post_n2\")\n if n_exp in [4, 5]:\n self.conditions.append(\"wfc3_uvis_any_post_n4\")\n if n_exp >= 6:\n self.conditions.append(\"wfc3_uvis_any_post_n6\")\n else:\n if n_exp in [2, 3]:\n self.conditions.append(\"wfc3_uvis_any_pre_n2\")\n if n_exp in [4, 5]:\n self.conditions.append(\"wfc3_uvis_any_pre_n4\")\n if n_exp >= 6:\n self.conditions.append(\"wfc3_uvis_any_pre_n6\")\n else:\n sys.exit(\"INVALID WFC3 DETECTOR!\")\n else:\n sys.exit(\"INVALID HST INSTRUMENT!\")\n else: # For single-exposure products\n self.conditions = [\"single_basic\"]\n if prod_obj.is_singleton:\n self.conditions.append(\"any_n1\")", "def unit_alias(self) -> Optional[UnitTypeId]:\n return self.type_data.unit_alias", "def test_domain_and_target_type(self):\n t = OneHotEncode(3)\n assert t.domain_type == \"integer\"\n assert t.target_type == \"real\"", "def T2s(self) -> Dict[int, Optional[float]]:\n return {qs.id: qs.T2 for qs in self.qubits_specs}", "def legal_target(self):\n return choice([each for each in self.minions if not hasattr(each, 'taunt')])", "def player_tech(self):\n # type: () -> string_types\n return self._player_tech", "def UnitKind_equals(*args):\n return _libsbml.UnitKind_equals(*args)" ]
[ "0.54681575", "0.532318", "0.5206584", "0.5172237", "0.5132882", "0.5122617", "0.50264764", "0.50133854", "0.50082165", "0.4973949", "0.49399197", "0.49248987", "0.49206087", "0.4909017", "0.4907926", "0.48900557", "0.48842818", "0.48838764", "0.48517433", "0.48451227", "0.48201892", "0.47948116", "0.4789509", "0.47829986", "0.47689563", "0.47630566", "0.47418988", "0.4740551", "0.47342566", "0.4729046" ]
0.61599445
0
Returns the unit's race
def race(self) -> RACE:
    return RACE(self.type_data.proto.race)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unit(self):\n return self.unit", "def race(self, instance):\r\n return '/'.join([raza.name for raza in instance.user.profile.race.all()])", "def unit(self):\n if self._pipeline:\n try:\n #return getattr(self, self._pipeline[-1][0].name).unit\n return self._pipeline[-1].frame.unit\n except AttributeError:\n return None\n else:\n return None", "def unit(self):\n return self._unit", "def unit(self):\n return self.__unit", "def unit(self):\n return self._unit", "def unit(self):\n return self._unit", "def getScheduleUnit(self):\n schedule_unit = DPxGetDinSchedRate()\n return schedule_unit[1]", "def unit(self) -> str:", "def labor(self):\n time = (\n self.farmer.labor()\n + self.reseller.labor()\n + self.cofiring_plant.cofuel_om_work()\n - self.coal_work_lost\n )\n return display_as(time, \"hr\")", "def base_unit() -> ureg:\n return ureg.meter", "def get_organization_unit(self):\n return self.reference[REF_ORGANIZATION_UNIT][REF_VALUE]", "def RandomLoadUnit(self):\n\t\treturn self._get_attribute('randomLoadUnit')", "def race(v1, v2, g):\n if v2 <= v1:\n return None\n\n time = math.floor(3600 * g / (v2-v1))\n hours = math.floor(time / 3600)\n minutes = math.floor((time - hours*3600) / 60)\n seconds = time - hours*3600 - minutes*60\n\n return [hours, minutes, seconds]", "def get_playable_race(self, region, namespace, race_id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/playable-race/{0}', region, *[race_id], **filters)", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")", "def unit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"unit\")" ]
[ "0.6350847", "0.619878", "0.6094755", "0.6057409", "0.59604293", "0.5947612", "0.5947612", "0.5906686", "0.57367295", "0.5582007", "0.55521536", "0.55200833", "0.55150247", "0.5500381", "0.5493672", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077", "0.549077" ]
0.67661387
0
Returns the unit's current shield
def shield(self) -> Union[int, float]:
    return self.proto.shield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shield(self):\n capacity = self._getAttribute(Attribute.shieldCapacity)\n recharge = self._getAttribute(Attribute.shieldRecharge)\n em = self._getAttribute(Attribute.shieldEM)\n explosive = self._getAttribute(Attribute.shieldExplosive)\n kinetic = self._getAttribute(Attribute.shieldKinetic)\n thermal = self._getAttribute(Attribute.shieldThermal)\n\n recharge /= 1000 # milliseconds\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"recharge\": recharge,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }", "def get_weapon_holding():\n return unrealsdk.GetEngine().GamePlayers[0].Actor.Pawn.Weapon", "def get_weapon(self):\n return self.__weapon", "def weapon(self):\n return self._weapon", "def get_total_shield(self,obs):", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def get_occupant(self):\n\t\treturn self.occupant", "def shield_max(self) -> Union[int, float]:\n return self.proto.shield_max", "def shield_max(self) -> Union[int, float]:\n return self.proto.shield_max", "def get_weapon(self):\n\n return self.suggestion_set[1]", "def weapon(self):\n if not (0 <= self._weapon_i < len(self.weapons)):\n raise Exception('No weapons')\n return self.weapons[self._weapon_i]", "def get_damage():\n\n return character['Damage']", "def call_shield(ai, var, screen, ship, shields, hub):\r\n\tif not hub.s_bar < 10:\r\n\t\tif not ship.shield:\r\n\t\t\tship.shield = 1\r\n\t\t\tshield = Shield(ai, var, screen, ship, hub)\r\n\t\t\tshields.add(shield)", "def get_main_hand_equipped(self):\n\t\treturn self.equippedMainHand", "def get_current(self):\n return self.node.sdo[0x221c].phys # mA", "def get_watt(self):\n\n svc = \"urn:micasaverde-com:serviceId:EnergyMetering1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"Watts\")", "def get_unit(self):\n return self.unit", "def getMeter(self):\n return self._Meter", "def get_occupant(self):\n\t\tpass", "def get_current_health(self):\n return self.health", "def CopperMedium(self):\n\t\treturn self._get_attribute('copperMedium')", "def get_armor_equipped(self):\n\t\treturn self.equippedArmor", "def getDefense(self):\n\t\treturn(self.maneuverability * self.protection)", "def get_damage(self):\n return self.__damage", "def unit(self):\n return self._unit", "def prep_shield(self):\r\n self.shield = Group()\r\n for shield in range(self.stats.shields_left):\r\n stats_shield = ShipShield(self.ai_settings, self.screen, self.ship)\r\n stats_shield.rect.x = 20\r\n stats_shield.rect.y = 750\r\n self.shield.add(stats_shield)", "def current_humidity(self):\n return self._client.get_indoor_humidity()", "def unit(self):\n return self.__unit", "def tool_quat(self):\n return self.sim.data.get_body_xquat(self.end_effector)" ]
[ "0.7095084", "0.67565095", "0.64647067", "0.6286027", "0.6218231", "0.6174586", "0.6174586", "0.5749591", "0.57085866", "0.57085866", "0.5695347", "0.5675916", "0.564563", "0.5520833", "0.5477099", "0.5453233", "0.5435188", "0.5431757", "0.54274654", "0.5418057", "0.5381869", "0.53780895", "0.5345012", "0.5344318", "0.5341639", "0.5330957", "0.53230846", "0.53218716", "0.53010994", "0.52721536" ]
0.7600682
1
Returns the unit's max shield
def shield_max(self) -> Union[int, float]:
    return self.proto.shield_max
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shield(self) -> Union[int, float]:\n return self.proto.shield", "def shield(self) -> Union[int, float]:\n return self.proto.shield", "def getUmidadeArMax(self):\n return str(self.getWeather('umid-max')[:2]) + '%'", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def max_charge(self):\n return self.strength", "def max_pwm(self):\r\n return self._max_pwm", "def target_humidity_max(self):\n if not (hum_range := self._get_humidity_range()):\n return None\n return hum_range[1]", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value", "def max_health():\r\n max_health = max(Titan.health_titans())\r\n return max_health", "def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon", "def get_weapon_holding():\n return unrealsdk.GetEngine().GamePlayers[0].Actor.Pawn.Weapon", "def shield_bounce(shield, max_shield, damage,\n shield_bounce_zone=SHIELD_BOUNCE_ZONE):\n\n # really, shield can't become negative unless some external factors\n # hacked it into one.\n return ((damage < shield * shield_bounce_zone) and shield > 0 and\n shield or shield - damage)", "def hemt_gate_max_voltage(self):\n return self._hemt_gate_max_voltage", "def u_max(self):\n if self._u_max is None:\n return self.uv_max\n else:\n return self._u_max", "def health_max(self) -> Union[int, float]:\n return self.proto.health_max", "def health_max(self) -> Union[int, float]:\n return self.proto.health_max", "def locked_temp_max(self) -> int:\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"C\":\r\n return self.locked_temp_max_c\r\n elif self.temperature_scale == \"F\":\r\n return self.locked_temp_max_f\r\n else:\r\n return self._locked_temp_max\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_max\"))", "def max_power(self):\r\n est_max_power = self.model * self.max_pwm / 100\r\n return est_max_power", 
"def max_temp(self):\n return self.atag.dhw_max_temp", "def get_lmax_limit(self):\n\n if self.pixel == \"HEALPIX\":\n l_max_limit = 3 * self.nside - 1\n elif self.pixel == \"CAR\":\n cdelt = self.data.wcs.wcs.cdelt[1]\n l_max_limit = 360 / cdelt / 4\n return l_max_limit", "def get_max(self):\n return self.serie.max()", "def _get_maximum(self):\n return self._maximum", "def get_max(self):\n current = self\n while current.hasRight(): # This is the belief that the max has to be to the right. If you can't go right either in the begining or any more\n # if current has a right this line will be set and will keep going from line 129 to 130 until there are no more rights.\n current = current.right\n # this line returns as soon there is no more rights. breaking out of the loop.\n return current.value", "def steer_max(self):\n return self._steer_max", "def enemyrawdmg(self):\n\n enemystr = globalvalues.ai.getstatus()[3]\n # rngfactor will ensure that regular mobs won't absolutely crush you\n rngfactor = float(float(random.randint(45, 65)) / 100)\n level = (\n globalvalues.p1.getlevel()\n - globalvalues.ai.getstatus()[0]\n )\n lvlfactor = float(1 - level * 0.05)\n\n return int((enemystr) * 102 * 0.12 * rngfactor * lvlfactor)", "def __find_max_distance(self):\n return utils.find_max_distance(self.__game)", "def max(self) -> float:\n return stats.max(self)", "def get_max_velocity(self):\n return self._max_velocity" ]
[ "0.66524976", "0.66524976", "0.6641473", "0.65482044", "0.65482044", "0.6506884", "0.6457938", "0.64100444", "0.63996667", "0.63713896", "0.63391215", "0.631314", "0.6224268", "0.61657584", "0.6144013", "0.61057657", "0.6100425", "0.6100425", "0.60905296", "0.6083612", "0.60721797", "0.60697156", "0.6061939", "0.60584235", "0.6056318", "0.6054282", "0.60114485", "0.6006884", "0.596882", "0.59678346" ]
0.8254433
1
Returns the unit's current shield percentage
def shield_percentage(self) -> Union[int, float]:
    if not self.proto.shield_max:
        return 0
    return self.proto.shield / self.proto.shield_max
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percent(self):\n return self.percent", "def shield(self) -> Union[int, float]:\n return self.proto.shield", "def shield(self) -> Union[int, float]:\n return self.proto.shield", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.PotTax_percentage", "def percent(self):\r\n return self._percent", "def get_percent_wet():\n # Create an ADS1115 ADC (16-bit) instance.\n adc = Adafruit_ADS1x15.ADS1115()\n\n GAIN = 1\n DRY = 20280 # 100% Dry\n WET = 10140 # 100% Wet\n\n value = adc.read_adc(0, gain=GAIN)\n \n # print \"value: %d\" % value\n \n percent_dry = ((value - WET)*100)/(DRY-WET)\n percent_wet = 100 - percent_dry\n\n return percent_wet", "def pct(self):\n\t\treturn self.bottle.pct()", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def get_total_shield(self,obs):", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def get_health(self):\n return round(self.health)", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def getBatteryPercent(self):\n with open(\"/sys/class/power_supply/BAT0/charge_full\") as f:\n charge_full = int(f.read().replace(\"\\n\", \"\")) # The battery's full potential \n with open(\"/sys/class/power_supply/BAT0/charge_now\") as f:\n charge_now = int(f.read().replace(\"\\n\", \"\")) # The battery's charge right now \n battery_percent = int(round((charge_now/charge_full)*100))\n return battery_percent", "def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")", "def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def unit_of_measurement(self):\n return \"%\"", "def getDefense(self):\n\t\treturn(self.maneuverability * self.protection)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def getPercent(*args):", "def getPercent(*args):", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def get_percentComplete(self):\n val = self.resource.get_cdmi_sys_meta().get(\"cdmi_percentComplete\",\n \"100\")\n return val", "def offense(self):\n #return self.stats.strength + self.stats.level\n return self.stats.offense" ]
[ "0.7029887", "0.69653", "0.69653", "0.6941394", "0.6941394", "0.69091225", "0.6898005", "0.68086296", "0.6731268", "0.66439867", "0.6607218", "0.6476712", "0.64499915", "0.6420584", "0.6419708", "0.6415546", "0.63554376", "0.6292765", "0.6292196", "0.6292196", "0.627772", "0.6266082", "0.6256261", "0.62492615", "0.6246778", "0.62237483", "0.62237483", "0.62065035", "0.62017095", "0.61985946" ]
0.855793
1
Returns the unit's max energy
def energy_max(self) -> Union[int, float]:
    return self.proto.energy_max
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_Ec_max(self):\n return self.Ec_max", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def max_temp(self):\n return self.atag.dhw_max_temp", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def get_max(self):\n return self.serie.max()", "def max_power(self):\r\n est_max_power = self.model * self.max_pwm / 100\r\n return est_max_power", "def get_energy(self):\r\n return self._energy", "def max(self) -> float:\n return stats.max(self)", "def get_max_temp(self):\n self.max_temp = self.domain[1] * 2", "def max_temp(self):\n # return convert_temperature(\n # self._device.max_temp, TEMP_CELSIUS, self.hass.config.units.temperature_unit\n # )\n return self._device.max_temp", "def lambda_max(self):\n return const.b_wien / self.temperature", "def accel_max(self):\n return self._accel_max", "def get_max_cell_voltage(self): \n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? .*? (.*?) . .*? .*? . . . .*?'\n maxv = float(re.findall(pattern,summary).pop())\n return maxv", "def test_energy_max(self):\n sqw_ws = MuscatSofQW(SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._resolution_ws,\n ParameterWorkspace=self._param_ws,\n OutputWorkspace='__MuscatSofQWTest_result',\n EnergyMax=1.0)\n\n self.assertEqual(sqw_ws.getNumberHistograms(), self._sample_ws.getNumberHistograms())\n self.assertEqual(sqw_ws.getAxis(0).getUnit().unitID(), 'Energy')\n self.assertEqual(sqw_ws.getAxis(1).getUnit().unitID(), 'MomentumTransfer')\n\n x_data = sqw_ws.dataX(0)\n self.assertAlmostEqual(x_data[0], -1.0)\n self.assertAlmostEqual(x_data[-1], 1.0)\n self.assertAlmostEqual(x_data[len(x_data)/2], 0.0)\n\n self.assertEquals(sqw_ws.blocksize(), 400)", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def max_temperature(self):\n mini, maxi = ct.c_int(), ct.c_int()\n self.lib.GetTemperatureRange(ct.pointer(mini), ct.pointer(maxi))\n return maxi.value", "def energy(self):\n return self._energy", "def _get_maximum(self):\n return self._maximum", "def max_gain(self):\n return np.max(self.fr)", "def maximum_temperature(self):\n return self._maximum_temperature", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density", "def max_temp(self) -> float | None:\n try:\n return self._device.config[\"max_temp\"]\n except TypeError: # 'NoneType' object is not subscriptable\n return", "def max_flux(self):\n return np.max(self.flux)", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def get_max(self):\n return self._max", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth" ]
[ "0.7158754", "0.7100571", "0.7078389", "0.7024071", "0.70133126", "0.6982264", "0.69725335", "0.6966379", "0.6940615", "0.6939813", "0.6856558", "0.682932", "0.6829139", "0.6802222", "0.6798505", "0.6798505", "0.6795786", "0.679211", "0.6791478", "0.6741895", "0.6739708", "0.6739347", "0.6734658", "0.6729765", "0.67258954", "0.6711863", "0.67069435", "0.6705084", "0.6705084", "0.6705084" ]
0.82604754
1
Returns the unit's current energy percentage
def energy_percentage(self) -> Union[int, float]:
    if not self.proto.energy_max:
        return 0
    return self.proto.energy / self.proto.energy_max
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def total_energy(self):\n return self._total_energy", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def unit_of_measurement(self):\n return \"%\"", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def unit_of_measurement(self) -> Any:\n return PERCENTAGE", "def get_energy(self):\r\n return self._energy", "def pct(self):\n\t\treturn self.bottle.pct()", "def get_energy(self):\n return self.momentum*self.momentum/(2*self.mass)", "def percent(self):\r\n return self._percent", "def get_percent(self):\n return self.percent", "def get_percentage(self):\n return self.PotTax_percentage", "def unit_of_measurement(self):\n return '%'", "def cu_energy(self,val,units=\"1/cm\"):\n if units in self.units[\"energy\"]:\n x = conversion_facs_energy[units]\n i_val = x*val\n \n cu = self.current_units[\"energy\"] \n if cu != \"1/fs\":\n y = conversion_facs_energy[units] \n return i_val/y\n \n return i_val", "def energy(energy_name: str) -> float:\n pass", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))", "def ComputeEnergyConsumption(self):\r\n pass", "def energy(self):\n return self._energy", "def unit_of_measurement(self) -> str:\n return \"%\"", "def unit_of_measurement(self) -> str:\n return \"%\"", "def unit_of_measurement(self):\n if self._sensor == \"batteryLevel\":\n return PERCENTAGE\n return None", "def energy_pfu(self):\n return self._energy_pfu", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def energy(self) -> Union[int, float]:\n return self.proto.energy", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")" ]
[ "0.7260914", "0.715174", "0.71471816", "0.7129316", "0.71087325", "0.71087325", "0.7087947", "0.70763487", "0.7016208", "0.7008012", "0.70043814", "0.6984919", "0.6962187", "0.6925202", "0.6843054", "0.6839179", "0.6819234", "0.68084145", "0.6756037", "0.6739484", "0.67370003", "0.67328876", "0.67328876", "0.671597", "0.6694189", "0.6675571", "0.6675571", "0.66376805", "0.6621356", "0.65872425" ]
0.7790874
0
Gets the weapons of the unit
def weapons(self):
    if self._weapons:
        return self._weapons
    if hasattr(self.type_data.proto, "weapons"):
        self._weapons = self.type_data.proto.weapons
        return self._weapons
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weapons(self):\n return self._get_by_class(Weapon)", "def weapon(self):\n if not (0 <= self._weapon_i < len(self.weapons)):\n raise Exception('No weapons')\n return self.weapons[self._weapon_i]", "def get_weapon_holding():\n return unrealsdk.GetEngine().GamePlayers[0].Actor.Pawn.Weapon", "def get_weapon(self):\n return self.__weapon", "def weapon(self):\n return self._weapon", "def get_weapon(self):\n\n return self.suggestion_set[1]", "def get_top_weapons(self):\n try:\n top_weapon = self.page.find(\"div\", {\"class\": \"weapon\"}).text.split(\" \")\n\n top_weapon_informations = {\"Weapon\": top_weapon[1], \"Headshots\": top_weapon[-5],\"Bodyshot\": top_weapon[-4],\n \"Leg_shot\":top_weapon[-3], \"Kills\": top_weapon[-1]}\n\n return top_weapon_informations\n\n except Exception as e:\n print(f\"Error: {e}. Make sure if it's the correct nickname and tag. Otherwise, it might be about your account visibility, check if it's public.\")\n print(\"Otherwise, it might be about your account visibility, check if it's public. Read : https://github.com/HicaroD/Valorant-Stats\\n\")", "def fireMyWeapons(self):\n for myWeapon in self.readyWeapons:\n myWeapon.fire()\n\n self.readyWeapons = []", "def get_list_powers(self):\r\n return self._api.get_list_powers()", "def updateWeapons(self):\n self.readyWeapons = []\n self.setWeaponStatus()\n\n for myWeapon in self.activeWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.alternateTargets = []\n\n if self.amsTargets != []:\n for myWeapon in self.amsWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.amsTargets = []", "def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon", "def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n 
enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]", "def powerWeapons(self, interval, availPower):\n if self.allWeaponsPowered == 0:\n weaponList = []\n for position, myQuad in self.quads.iteritems():\n weaponIDList = []\n weaponIDList.extend(funcs.sortStringList(myQuad.weapons.keys()))\n for wID in weaponIDList:\n weaponList.append(myQuad.weapons[wID])\n\n while availPower > 0 and self.allWeaponsPowered == 0:\n toCharge = []\n toChargeAMS = []\n # go through each quadrant looking for weapons to power\n for myWeapon in weaponList:\n if myWeapon.operational == 1 and myWeapon.currentPower < myWeapon.myWeaponData.maxPower:\n if 1 == myWeapon.myWeaponData.AMS:\n toChargeAMS.append(myWeapon)\n else:\n toCharge.append(myWeapon)\n\n if len(toChargeAMS) == 0 and len(toCharge) == 0:\n self.allWeaponsPowered = 1\n return availPower\n\n #AMS are charged first and sequentially\n if len(toChargeAMS) != 0:\n if availPower !=0:\n for myW in toChargeAMS:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= availPower:\n myW.currentPower+=availPower\n availPower=0\n break\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=defecit\n\n #non-AMS weapons are charged concurrently; each gets an equal share of the available power \n if len(toCharge) != 0:\n kW=availPower/len(toCharge)\n if kW !=0:\n #print \"tT:\",len(toCharge),\"aP:\",availPower,\"kW each:\",kW\n for myW in toCharge:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= kW:\n myW.currentPower+=kW\n availPower-=kW\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=kW-defecit\n else:\n availPower=0\n\n return availPower", "def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon 
scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)", "def create_weapon(gameworld, weapon_type, game_config):\n weapon_file_path = constants.FILE_WEAPONSFILE\n\n weapon_file = jsonUtilities.read_json_file(weapon_file_path)\n\n item_fg = \"[color=ITEM_GENERIC_FG]\"\n item_bg = \"[color=ITEM_GENERIC_BG]\"\n\n for weapon in weapon_file['weapons']:\n if weapon['name'] == weapon_type:\n myweapon = ItemManager.create_base_item(gameworld=gameworld)\n # generate common item components\n itemsHelp.ItemUtilities.set_type_of_item(gameworld=gameworld, entity_id=myweapon, value='weapon')\n gameworld.add_component(myweapon, items.Material(texture='wooden'))\n itemsHelp.ItemUtilities.set_item_name(gameworld=gameworld, entity_id=myweapon, value=weapon['name'])\n itemsHelp.ItemUtilities.set_item_description(gameworld=gameworld, entity_id=myweapon, value=weapon['description'])\n itemsHelp.ItemUtilities.set_item_glyph(gameworld=gameworld, entity_id=myweapon, value=weapon['glyph'])\n itemsHelp.ItemUtilities.set_item_foreground_colour(gameworld=gameworld, entity_id=myweapon, value=item_fg)\n itemsHelp.ItemUtilities.set_item_background_colour(gameworld=gameworld, entity_id=myweapon, value=item_bg)\n itemsHelp.ItemUtilities.set_item_displayname(gameworld=gameworld, entity_id=myweapon, value=weapon['display_name'])\n\n gameworld.add_component(myweapon, items.RenderItem(istrue=True))\n gameworld.add_component(myweapon, items.Quality(level=weapon['quality_level']))\n\n # generate weapon specific components\n gameworld.add_component(myweapon, items.WeaponType(label=weapon_type))\n gameworld.add_component(myweapon, items.Spells(\n slot_one=0,\n slot_two=0,\n slot_three=0,\n slot_four=0,\n slot_five=0))\n\n gameworld.add_component(myweapon, items.Wielded(\n hands=weapon['wielded_hands'],\n true_or_false=True))\n\n gameworld.add_component(myweapon, items.Experience(current_level=11))\n\n gameworld.add_component(myweapon, items.Hallmarks(\n hallmark_slot_one='00',\n hallmark_slot_two='00'))\n\n gameworld.add_component(myweapon, items.DamageRange(ranges=weapon['damage_ranges']))\n\n return myweapon # this is the entity id for the newly created weapon", "def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int", "def get_alive_units(self):\n alive_units = []\n for unit in self.units:\n if not unit.is_alive():\n continue\n alive_units.append(unit)\n return alive_units", "def getWatts(self):\n return self.json_state.get(\"charging\").get(\"watt_power\")", "def W(self, multiplier=1):\n multiplier = str(multiplier);\n weapon_dice_count = self.Attribute_Power(\"weapon-num-dice\");\n weapon_dice = self.Attribute_Power(\"weapon-dice\");\n return \"\".join((\"(\", multiplier, \"*\", weapon_dice_count, \")d\", weapon_dice));", "def ground_weapon(self):\n if self._ground_weapon:\n return self._ground_weapon\n if self.weapons:\n self._ground_weapon = next(\n (weapon for weapon in self.weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}),\n None,\n )\n return self._ground_weapon\n return None", "def weapon_mapping(self, weapon: int) -> str:\n if weapon == 0:\n return 'shortsword'\n elif weapon == 
1:\n return 'longsword'\n elif weapon == 2:\n return 'greatsword'\n elif weapon == 3:\n return 'halberd'\n elif weapon == 4:\n return 'longbow'\n elif weapon == 5:\n return 'greataxe'\n elif weapon == 6:\n return 'crossbow-heavy'\n elif weapon == 7:\n return 'battleaxe'\n elif weapon == 8:\n return 'handaxe'\n elif weapon == 9:\n return 'light-hammer'\n elif weapon == 10:\n return 'maul'\n elif weapon == 11:\n return 'glaive'\n elif weapon == 12:\n return 'rapier'", "def get_health(self):\n self.__health = sum([i.get_health for i in self.__units])\n return self.__health", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self.self_id = allies[0].unit_type\n self_weapon_range = weapon_ranges[self.self_id]\n self_radius = unit_sizes[self.self_id] / float(2)\n self_unit_type = unit_type[self.self_id]\n self_speed = unit_speed[self.self_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, self.self_id,\n self.enemy_id]", "def get_active_units(self):\n alive_units = self.get_alive_units()\n active_units = []\n for alive_unit in alive_units:\n if not alive_unit.ready_to_attack():\n continue\n active_units.append(alive_unit)\n return active_units", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = 
self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n self_speed = unit_speed[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n enemy_speed = unit_speed[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, prev_cmd, north_bound, south_bound, west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed]", "def miner_pick(self):\n return build_weapon(PICK, self)", "def list_power_supply_units(self):\n\n doc = self.client.enumerate(uris.CIM_PowerSupply)\n\n psus = doc.find('.//s:Body/wsen:EnumerateResponse/wsman:Items',\n wsman.NS_MAP)\n\n return [self._parse_psus(psu) for psu in psus]", "def weapon_cooldown(self) -> Union[int, float]:\n if self.can_attack_ground or self.can_attack_air:\n return self.proto.weapon_cooldown\n return -1", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n 
self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type]", "def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}" ]
[ "0.81732154", "0.73420286", "0.7124047", "0.7047223", "0.69233257", "0.63363516", "0.62370163", "0.58344877", "0.5760204", "0.568579", "0.5684724", "0.5681291", "0.5652495", "0.56233495", "0.56164604", "0.5591934", "0.55885166", "0.55638725", "0.5534569", "0.55334944", "0.5471407", "0.5405386", "0.538921", "0.5378405", "0.53682935", "0.53662354", "0.53632903", "0.53596103", "0.53587097", "0.5339101" ]
0.7523198
1
How much cargo space is used (some units take up more than 1 space)
def cargo_used(self) -> Union[float, int]:
    return self.proto.cargo_space_taken
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCargoSpaceLeft(self):\n spaceused = self.cargo[\"wood\"] + self.cargo[\"coal\"] + self.cargo[\"uranium\"]\n if self.type == UNIT_TYPES.WORKER:\n return GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][\"WORKER\"] - spaceused\n else:\n return GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][\"CART\"] - spaceused", "def total_sdram_requirements(self):", "def Capacity(self) -> int:", "def get_space_used():\n fs.get_space_used()", "def cargo_space_left(self):\n return GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][self.type_str] - self.num_resources", "def get_additional_ball_capacity(self):\n return 999", "def units(self):\n pass", "def search_space_size(self):", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def memory_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"memory_per_unit\")", "def getRepairCapacity(self):\n return int(self.myDesign.getSYCRequired() * (1-(self.strength/100.0)))", "def cargo_size(self) -> Union[float, int]:\n return self.type_data.cargo_size", "def cargo_size(self) -> Union[float, int]:\n return self.type_data.cargo_size", "def capacity_used(self):\n raise NotImplementedError()", "def get_additional_ball_capacity(cls):\n return 999", "def get_total_n_cpu(self) -> int:", "def cost(self) -> float:", "def kilometres_available(self):\n return self.fuel / self.litres_per_kilometre", "def unitcost(self):\n cost = self.tablecost\n\n for component, quantity in self.components.items():\n cost += component.unitcost * quantity\n\n return cost", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def test_capacity(self, space_each_type):\n tspace = build_required_space(space_each_type, type_requirement=\"real\")\n assert tspace.cardinality == numpy.inf\n\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=2)\n space.register(dim)\n dim = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim)\n tspace = build_required_space(space, type_requirement=\"integer\")\n assert tspace.cardinality == (4**2) * (6 + 1)\n\n dim = Integer(\"yolo3\", \"uniform\", -3, 6, shape=(2, 1))\n space.register(dim)\n tspace = build_required_space(space, type_requirement=\"integer\")\n assert tspace.cardinality == (4**2) * (6 + 1) * ((6 + 1) ** (2 * 1))\n\n tspace = build_required_space(\n space, type_requirement=\"integer\", shape_requirement=\"flattened\"\n )\n assert tspace.cardinality == (4**2) * (6 + 1) * ((6 + 1) ** (2 * 1))\n\n tspace = build_required_space(\n space, type_requirement=\"integer\", dist_requirement=\"linear\"\n )\n assert tspace.cardinality == (4**2) * (6 + 1) * ((6 + 1) ** (2 * 1))", "def get_capacity():\n fs.get_capacity()", "def totalUnits(self):\n\t\treturn self.units", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def part1():\n program = read_input()\n root = 
build_filesystem(program)\n all_sizes = root.make_size_list()\n return sum(size for size in all_sizes if size <= 100000)" ]
[ "0.691869", "0.68516904", "0.68139845", "0.67872095", "0.6642728", "0.64669245", "0.6460234", "0.64431036", "0.64162153", "0.64162153", "0.64162153", "0.64162153", "0.64162153", "0.64162153", "0.64162153", "0.64082515", "0.63786423", "0.63786423", "0.63745487", "0.6364653", "0.6319589", "0.631596", "0.62985", "0.62655926", "0.6237268", "0.62359697", "0.62354624", "0.6229186", "0.6200989", "0.6200275" ]
0.7691078
0
Returns the tags of the units that are passengers
def passengers_tags(self) -> Set[int]:
    return {unit.tag for unit in self.proto.passengers}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unit(self,tag):", "def engTagging(word, accurateMode, StanfordTagger):\n tokens = nltk.tokenize.word_tokenize(word)\n if accurateMode:\n tags = StanfordTagger.tag(tokens)\n for tag in tags:\n if tag[1] == 'PERSON':\n return 1\n elif tag[1] in ['ORGANIZATION', 'GPE']:\n return 0\n return 2\n else:\n for chunk in ne_chunk(pos_tag(tokens)):\n if hasattr(chunk, 'label') and chunk.label() == 'PERSON':\n return 1\n elif hasattr(chunk, 'label') and chunk.label() in ['ORGANIZATION', 'GPE']:\n return 0\n return 2", "def getTags(self,):\n\t\treturn self.tags;", "def tags():", "def freeform_tags(self):\n return self._freeform_tags", "def get_tags(self,element):\n if element in self.element2tags.keys():\n return self.element2tags[element]\n return []", "def get_tags(self):\n return self.tags", "def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]", "def get_entities(tags):\n pass", "def tagger():", "def independent_tags(self):\n if not RerankingParser._parser_model_loaded:\n raise ValueError(\"You need to have loaded a parser model in \"\n \"order to calculate most likely tags.\")\n return Tree(self.sentrep.makeFailureTree('X')).tags()", "def _get_units(self, unit_tag):\n\n # a list that contains apartment unit's information\n unit = []\n # use a loop to list all the cells in a row \n for cell in unit_tag.find_all('td'):\n if cell.attrs: # omit the cell with nothing in it \n # look for the apartment #, however, this info is not\n # consistent across the entire webiste\n if cell['data-tid'] == 'pdpfloorplans-unit-displayText':\n unit_num = cell.get_text()\n unit.append(unit_num)\n # scrape the price of the unit\n if cell['data-tid'] == 'pdpfloorplans-unit-price':\n try:\n unit_price = cell.get_text().replace('$', '')\n # try to convert the price to float \n unit.append(float(unit_price))\n except:\n # if there's no price for this unit\n # append the list with a null value \n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-bedbath':\n try:\n # try to extract the tags that include the number\n # of bedrooms and bathrooms \n bedbath_tag = cell.find_all('span')\n bed_tag, bath_tag = bedbath_tag[0], bedbath_tag[1]\n # regular expression pattern for extracting any types\n # of numbers, including integer and floating numbers \n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n bed = re.findall(pattern, bed_tag.get_text())\n bath = re.findall(pattern, bath_tag.get_text())\n bed_unit, bath_unit = 0, 0\n if bed:\n bed_unit = bed[0]\n if bath:\n bath_unit = bath[0]\n unit.append(float(bed_unit))\n unit.append(float(bath_unit))\n except:\n # if the convertion failed, append the list\n # will two null values \n unit.append(np.nan)\n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-sqft':\n # follow the same procedure as above, but this time\n # scrape the square foot of the apartment unit\n try:\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n sqft_unit = re.findall(pattern, cell.get_text())[0]\n unit.append(float(sqft_unit))\n except:\n unit.append(np.nan)\n return unit", "def get_alive_units(self):\n alive_units = []\n for unit in self.units:\n if not unit.is_alive():\n continue\n alive_units.append(unit)\n return alive_units", "def find_unit(self, unit):\n for u in self.units:\n if u.character.name == unit.character.name:\n return u\n assert False", "async def get_pots(self):\n return await self.get_states_by_tag_prefix(\"pot\", True)", "def get_enemy_gun(self):\n return [(self.rect.x + x_pos, self.rect.y + y_pos) for x_pos, y_pos in MallFighter.GUN_POS_OFFSETS]", "def 
passengers(self) -> Set[\"PassengerUnit\"]:\n return {PassengerUnit(unit, self._game_data) for unit in self.proto.passengers}", "def get_tags(self):\n return ''", "def get_tags(self):\n return ''", "def get_own_enemy_lost_units(self) -> Tuple[Dict[UnitTypeId, List[Unit]], Dict[UnitTypeId, List[Unit]]]:\n return (self._my_lost_units, self._enemy_lost_units)", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def taggerWord(self,word):\n if(\"tagger\" in self._classes):\n return self._tagger.taggerWord(word)", "def get_tag_object(self) -> Any:\n return self.tags", "def get_target_tags(self):\n raise NotImplementedError(\"\")", "def PYSAM_WEATHER_TAG(self):\n raise NotImplementedError", "def getTags(number=None):", "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def get_tags(self):\n\n return self.tags", "def tags_with(self, word):\n return tags_with_word", "def active_tu(active):\n active = list(active)\n t_units = list({tu for gene in active for tu in gene.transcription_units})\n return t_units" ]
[ "0.6454601", "0.5695407", "0.55388343", "0.5504569", "0.5365013", "0.53606063", "0.53351146", "0.5320194", "0.531565", "0.5275182", "0.5269605", "0.5228612", "0.52207935", "0.5194548", "0.5177581", "0.51735795", "0.5171133", "0.5149033", "0.5149033", "0.5133746", "0.5129977", "0.5111874", "0.5102999", "0.5088993", "0.5085318", "0.50835603", "0.5077698", "0.5069077", "0.50588703", "0.5052476" ]
0.7001418
0
Gets the ground weapon of the unit
def ground_weapon(self):
    if self._ground_weapon:
        return self._ground_weapon
    if self.weapons:
        self._ground_weapon = next(
            (weapon for weapon in self.weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}),
            None,
        )
        return self._ground_weapon
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weapons(self):\n return self._get_by_class(Weapon)", "def get_weapon_holding():\n return unrealsdk.GetEngine().GamePlayers[0].Actor.Pawn.Weapon", "def ground_range(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return weapon.range\n return 0", "def weapons(self):\n if self._weapons:\n return self._weapons\n if hasattr(self.type_data.proto, \"weapons\"):\n self._weapons = self.type_data.proto.weapons\n return self._weapons\n return None", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def get_weapon(self):\n return self.__weapon", "def ground_range(self) -> Union[int, float]:\n return self.ground_weapon and self.ground_weapon.range", "def weapon(self):\n if not (0 <= self._weapon_i < len(self.weapons)):\n raise Exception('No weapons')\n return self.weapons[self._weapon_i]", "def weapon(self):\n return self._weapon", "def can_attack_ground(self) -> bool:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n return weapon is not None\n return False", "def can_attack_ground(self) -> bool:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}), None\n )\n return weapon is not None\n return False", "def get_enemy_gun(self):\n return [(self.rect.x + x_pos, self.rect.y + y_pos) for x_pos, y_pos in MallFighter.GUN_POS_OFFSETS]", "def get_weapon(self):\n\n return self.suggestion_set[1]", "async def play_gun(game_state) -> None:\n play_weapon(game_state, Supply.GUN)", "def ground(self, grounding):\n if self._fully_grounded:\n return self\n\n grounded = list()\n for effect in self._sub_effects:\n sub_grounds = effect.ground(grounding)\n if not sub_grounds.contains_slots():\n grounded.append(sub_grounds)\n\n return Effect(grounded)", "def get_levels(self):\n return self.levels[self.game]", "def get_top_weapons(self):\n try:\n top_weapon = self.page.find(\"div\", {\"class\": \"weapon\"}).text.split(\" \")\n\n top_weapon_informations = {\"Weapon\": top_weapon[1], \"Headshots\": top_weapon[-5],\"Bodyshot\": top_weapon[-4],\n \"Leg_shot\":top_weapon[-3], \"Kills\": top_weapon[-1]}\n\n return top_weapon_informations\n\n except Exception as e:\n print(f\"Error: {e}. Make sure if it's the correct nickname and tag. Otherwise, it might be about your account visibility, check if it's public.\")\n print(\"Otherwise, it might be about your account visibility, check if it's public. 
Read : https://github.com/HicaroD/Valorant-Stats\\n\")", "def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon", "def tower_damage(self):\n return self._tower_damage", "def get_water_level(self):\n return self.water_level", "def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}", "def get_alive_units(self):\n alive_units = []\n for unit in self.units:\n if not unit.is_alive():\n continue\n alive_units.append(unit)\n return alive_units", "def getWatts(self):\n return self.json_state.get(\"charging\").get(\"watt_power\")", "def powerWeapons(self, interval, availPower):\n if self.allWeaponsPowered == 0:\n weaponList = []\n for position, myQuad in self.quads.iteritems():\n weaponIDList = []\n weaponIDList.extend(funcs.sortStringList(myQuad.weapons.keys()))\n for wID in weaponIDList:\n weaponList.append(myQuad.weapons[wID])\n\n while availPower > 0 and self.allWeaponsPowered == 0:\n toCharge = []\n toChargeAMS = []\n # go through each quadrant looking for weapons to power\n for myWeapon in weaponList:\n if myWeapon.operational == 1 and myWeapon.currentPower < myWeapon.myWeaponData.maxPower:\n if 1 == myWeapon.myWeaponData.AMS:\n toChargeAMS.append(myWeapon)\n else:\n toCharge.append(myWeapon)\n\n if len(toChargeAMS) == 0 and len(toCharge) == 0:\n self.allWeaponsPowered = 1\n return availPower\n\n #AMS are charged first and sequentially\n if len(toChargeAMS) != 0:\n if availPower !=0:\n for myW in toChargeAMS:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= availPower:\n myW.currentPower+=availPower\n availPower=0\n break\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=defecit\n\n #non-AMS weapons are charged concurrently; each gets an equal share of the available power \n if len(toCharge) != 0:\n kW=availPower/len(toCharge)\n if kW !=0:\n #print \"tT:\",len(toCharge),\"aP:\",availPower,\"kW each:\",kW\n for myW in toCharge:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= kW:\n myW.currentPower+=kW\n availPower-=kW\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=kW-defecit\n else:\n availPower=0\n\n return availPower", "def get_damage():\n\n return character['Damage']", "def get_active_units(self):\n alive_units = self.get_alive_units()\n active_units = []\n for alive_unit in alive_units:\n if not alive_unit.ready_to_attack():\n continue\n active_units.append(alive_unit)\n return active_units", "def damage(self):\n out = (self.blurbs[self.state][\"damage\"])\n self.next_state(\"damage\")\n return out", "def air_weapon(self):\n if self._air_weapon:\n return self._air_weapon\n if self.weapons:\n self._air_weapon = next(\n (weapon for weapon in self.weapons if weapon.type in {TARGET_TYPE.Air.value, TARGET_TYPE.Any.value}),\n None,\n )\n return self._air_weapon\n return None", "def get_turbine_powers(self) -> NDArrayFloat:\n\n # Confirm calculate wake has been run\n if self.floris.state is not State.USED:\n raise RuntimeError(\n \"Can't run function `FlorisInterface.get_turbine_powers` without \"\n \"first 
running `FlorisInterface.calculate_wake`.\"\n )\n\n turbine_powers = power(\n ref_density_cp_ct=self.floris.farm.ref_density_cp_cts,\n rotor_effective_velocities=self.turbine_effective_velocities,\n power_interp=self.floris.farm.turbine_power_interps,\n turbine_type_map=self.floris.farm.turbine_type_map,\n )\n return turbine_powers" ]
[ "0.6984032", "0.68695444", "0.669433", "0.66664845", "0.6424029", "0.6411573", "0.63551617", "0.6341753", "0.6335376", "0.6181134", "0.59627587", "0.59375674", "0.5718733", "0.5498892", "0.5459464", "0.54360014", "0.54295766", "0.5428505", "0.54121685", "0.53793263", "0.53201467", "0.52710885", "0.525758", "0.5253405", "0.5227248", "0.52226186", "0.52225095", "0.51929915", "0.51772034", "0.51510936" ]
0.7385891
0
Gets the air weapons of the unit
def air_weapon(self): if self._air_weapon: return self._air_weapon if self.weapons: self._air_weapon = next( (weapon for weapon in self.weapons if weapon.type in {TARGET_TYPE.Air.value, TARGET_TYPE.Any.value}), None, ) return self._air_weapon return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weapons(self):\n return self._get_by_class(Weapon)", "def get_weapon_holding():\n return unrealsdk.GetEngine().GamePlayers[0].Actor.Pawn.Weapon", "def weapons(self):\n if self._weapons:\n return self._weapons\n if hasattr(self.type_data.proto, \"weapons\"):\n self._weapons = self.type_data.proto.weapons\n return self._weapons\n return None", "def weapon(self):\n if not (0 <= self._weapon_i < len(self.weapons)):\n raise Exception('No weapons')\n return self.weapons[self._weapon_i]", "def get_weapon(self):\n return self.__weapon", "def air_range(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Air.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return weapon.range\n return 0", "def weapon(self):\n return self._weapon", "def air_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Air.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def getArmor(self):\n return self.av", "def can_attack_air(self) -> bool:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Air.value, TARGET_TYPE.Any.value]), None\n )\n return weapon is not None\n return False", "def air_range(self) -> Union[int, float]:\n return self.air_weapon and self.air_weapon.range", "def get_armor_equipped(self):\n\t\treturn self.equippedArmor", "def air_dps(self) -> Union[int, float]:\n return self.air_weapon and (self.air_weapon.damage * self.air_weapon.attacks) / self.air_weapon.speed", "def get_top_weapons(self):\n try:\n top_weapon = self.page.find(\"div\", {\"class\": \"weapon\"}).text.split(\" \")\n\n top_weapon_informations = {\"Weapon\": top_weapon[1], \"Headshots\": top_weapon[-5],\"Bodyshot\": top_weapon[-4],\n \"Leg_shot\":top_weapon[-3], \"Kills\": top_weapon[-1]}\n\n return top_weapon_informations\n\n except Exception as e:\n print(f\"Error: {e}. Make sure if it's the correct nickname and tag. Otherwise, it might be about your account visibility, check if it's public.\")\n print(\"Otherwise, it might be about your account visibility, check if it's public. 
Read : https://github.com/HicaroD/Valorant-Stats\\n\")", "def can_attack_air(self) -> bool:\n return self.air_weapon", "def get_equipment_from_inventory(self):\n return [x for x in self.inventory if x.is_equip()]", "def get_watt(self):\n\n svc = \"urn:micasaverde-com:serviceId:EnergyMetering1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"Watts\")", "def ground_weapon(self):\n if self._ground_weapon:\n return self._ground_weapon\n if self.weapons:\n self._ground_weapon = next(\n (weapon for weapon in self.weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}),\n None,\n )\n return self._ground_weapon\n return None", "def getWatts(self):\n return self.json_state.get(\"charging\").get(\"watt_power\")", "def get_weapon(self):\n\n return self.suggestion_set[1]", "def powerWeapons(self, interval, availPower):\n if self.allWeaponsPowered == 0:\n weaponList = []\n for position, myQuad in self.quads.iteritems():\n weaponIDList = []\n weaponIDList.extend(funcs.sortStringList(myQuad.weapons.keys()))\n for wID in weaponIDList:\n weaponList.append(myQuad.weapons[wID])\n\n while availPower > 0 and self.allWeaponsPowered == 0:\n toCharge = []\n toChargeAMS = []\n # go through each quadrant looking for weapons to power\n for myWeapon in weaponList:\n if myWeapon.operational == 1 and myWeapon.currentPower < myWeapon.myWeaponData.maxPower:\n if 1 == myWeapon.myWeaponData.AMS:\n toChargeAMS.append(myWeapon)\n else:\n toCharge.append(myWeapon)\n\n if len(toChargeAMS) == 0 and len(toCharge) == 0:\n self.allWeaponsPowered = 1\n return availPower\n\n #AMS are charged first and sequentially\n if len(toChargeAMS) != 0:\n if availPower !=0:\n for myW in toChargeAMS:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= availPower:\n myW.currentPower+=availPower\n availPower=0\n break\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=defecit\n\n #non-AMS weapons are charged concurrently; each gets an equal share of the available power \n if len(toCharge) != 0:\n kW=availPower/len(toCharge)\n if kW !=0:\n #print \"tT:\",len(toCharge),\"aP:\",availPower,\"kW each:\",kW\n for myW in toCharge:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= kW:\n myW.currentPower+=kW\n availPower-=kW\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=kW-defecit\n else:\n availPower=0\n\n return availPower", "def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int", "def tower_damage(self):\n return self._tower_damage", "def get_main_hand_options(self):\n\t\toptions = []\n\t\tfor weap in self.inventoryDictionary:\n\t\t\tif isinstance(weap, Items.Weapon):\n\t\t\t\tif weap.main_hand:\n\t\t\t\t\toptions.append(weap.name)\n\n\t\treturn options", "def getWaterConditions(self):\n return self._getConditions(restrict=['CS-Eau'])", "def weapon_cooldown(self) -> Union[int, float]:\n if self.can_attack_ground or self.can_attack_air:\n return self.proto.weapon_cooldown\n return -1", "def equip_items(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/Items/EquipItems/\"))", "def get_damage():\n\n return character['Damage']", "def get_all_equipped(self):\n\t\t\n\t\treturn (self.get_main_hand_equipped(), None, self.get_armor_equipped(), None, None)", "def 
most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon" ]
[ "0.72091115", "0.6857885", "0.6791066", "0.6668672", "0.65047014", "0.6473929", "0.63382876", "0.62787205", "0.61948407", "0.6023964", "0.60232615", "0.58107126", "0.5809153", "0.5799154", "0.57667565", "0.57152504", "0.5712398", "0.5667893", "0.5666294", "0.5623758", "0.5513774", "0.5480583", "0.54029363", "0.5355122", "0.5316417", "0.5297897", "0.5289819", "0.5285624", "0.5257845", "0.5248832" ]
0.71079904
1
Checks if the unit can attack ground
def can_attack_ground(self) -> bool: if hasattr(self.type_data.proto, "weapons"): weapons = self.type_data.proto.weapons weapon = next( (weapon for weapon in weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}), None ) return weapon is not None return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_attack_ground(self) -> bool:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n return weapon is not None\n return False", "def impact(self, ground):\n return self.position[1] > ground", "def is_attack(self):\n\n return self.purpose == 'attack'", "def can_attack_world(self, world: World) -> bool:\n forces = self.context.calculate_forces(world)\n return (\n forces.space_forces <= self.max_space_force\n and forces.ground_forces <= self.max_ground_force\n )", "def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result", "def can_attack_world(self, world: World) -> bool:\n forces = self.context.calculate_forces(world)\n return (\n forces.space_forces <= self.max_space_force\n and (forces.space_forces - forces.missile_forces)\n < self.max_nonmissile_forces\n )", "def can_hit(self, target_unit):\n # If it's an air unit return false\n if isinstance(target_unit, unit.air_unit.AirUnit):\n return False\n \n # Not an air unit, return true\n return True", "def can_attack_air(self) -> bool:\n return self.air_weapon", "def is_on_ground(self):\n return bool(self.ground_sprites())", "def can_attack_world(\n self: HammerFleetBucket,\n world: World,\n ) -> bool:\n forces = self.context.calculate_forces(world)\n return forces.space_forces <= self.max_space_force", "def detect_in_bounds(self):\n creature_x, creature_y = self.creature.current_location\n if creature_x < 0 or creature_x >= self.world_width\\\n or creature_y < 0 or creature_y >= self.world_height:\n print('The creature is out of bounds!')\n return False\n return True", "def check_passing(self, tile):\n # Check if the creature can walk to the tile\n # Depends on creature and tile properties\n #cre = self.creature\n \n if self.creature:\n is_spirit = self.creature['spirit']\n else:\n is_spirit = False\n\n if is_spirit:\n passes = not tile.properties['electricity']\n else:\n passes = not tile.properties['solid']\n\n if (tile.properties['pass_small']):\n passes = self.creature['small']\n\n if tile.overtile:\n has_bridge = (tile.overtile.status == 'bridge')\n else:\n has_bridge = False\n\n if (tile.properties['water'] and not has_bridge):\n passes = (self.creature['swims'] or self.creature['flying'])\n\n if has_bridge: #tile.bridge:\n print(\"Bridge: {}\".format(tile.overtile))\n print(tile.overtile.x, tile.overtile.y)\n\n #print(passes)\n return(passes)", "def can_attack_world(self, world: World) -> bool:\n raise NotImplementedError()", "def _ensure_is_alive(self):\n if self._hit_points == 0:\n raise UnitIsDead('Unit is dead!')", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def canTile(self):\n raise RuntimeError('Not implemented')\n \n return False", "def isCombatOver(self):\n\t\treturn len(set([creature.type for creature in self.positionToCreature.values()])) <= 1", "async def attacking_logic(self):\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))", "def run_checks(self, tile_model: TileModel) -> bool:\n\n # Doge cannot fire the deck gun\n if self.player.role == PlayerRoleEnum.DOGE:\n return False\n\n if not self.player == GameStateModel.instance().players_turn:\n return False\n\n ap_deduct = 2 if self.player.role == PlayerRoleEnum.DRIVER else 
4\n\n if not TurnEvent.has_required_AP(self.player.ap, ap_deduct):\n return False\n\n # If the player is not located in the\n # same space as the engine, they cannot\n # fire the deck gun.\n engine_orient = self.engine.orientation\n if engine_orient == VehicleOrientationEnum.HORIZONTAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row and self.player.column == self.engine.column + 1\n if not on_first_spot and not on_second_spot:\n return False\n\n elif engine_orient == VehicleOrientationEnum.VERTICAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row + 1 and self.player.column == self.engine.column\n if not on_first_spot and not on_second_spot:\n return False\n\n engine_quadrant = self._determine_quadrant(self.engine.row, self.engine.column)\n tile_input_quadrant = self._determine_quadrant(tile_model.row, tile_model.column)\n # If there are players present in the\n # quadrant, the deck gun cannot be fired.\n # tile input gotta be on quadrant adjacent to engine\n if self._are_players_in_quadrant(engine_quadrant) or tile_input_quadrant != engine_quadrant:\n return False\n\n return True", "def can_act(self) -> bool:\n return self.cooldown < 1", "def can_act(self) -> bool:\n return self.cooldown < 1", "def is_attacking(self) -> bool:\n return self.orders and self.orders[0].ability.id in (\n AbilityId.ATTACK,\n AbilityId.ATTACK_ATTACK,\n AbilityId.ATTACK_ATTACKTOWARDS,\n AbilityId.ATTACK_ATTACKBARRAGE,\n AbilityId.SCAN_MOVE,\n )", "def can_attack_air(self) -> bool:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Air.value, TARGET_TYPE.Any.value]), None\n )\n return weapon is not None\n return False", "def canAct(self) -> bool:\n return self.cooldown < 1", "def is_contagious(self):\n if self.health >= 0 and self.health <= 49:\n return True\n elif self.health >= 50 and self.health <= 100:\n return False", "def attack(self, enemy: 'games.stardash.unit.Unit') -> bool:\n return self._run_on_server('attack', {\n 'enemy': enemy\n })", "def is_dead(self):\n return self.hp <= 0", "def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True", "def attack(map_, unit, targets):\n attack_target = unit.find_first_adjacent_target(map_, targets)\n if attack_target is None:\n return False\n\n attack_target.hp -= unit.attack\n\n if attack_target.hp <= 0:\n attack_target.is_dead = True\n map_.open.add((attack_target.y, attack_target.x))\n map_.matrix[attack_target.y][attack_target.x] = \".\"\n return True", "def can_flyover(self):\n return False" ]
[ "0.78913313", "0.7011123", "0.66745543", "0.6650226", "0.65617794", "0.65487546", "0.650418", "0.6458717", "0.6417536", "0.6376729", "0.6284213", "0.6278386", "0.62669563", "0.6263691", "0.62569064", "0.6137678", "0.61298555", "0.6104166", "0.6080718", "0.60655564", "0.60655564", "0.6064105", "0.6061671", "0.60428023", "0.6007382", "0.59941095", "0.5983154", "0.59821314", "0.59624577", "0.593999" ]
0.7896708
0
Returns the unit movement speed
def movement_speed(self) -> Union[int, float]: return self.type_data.proto.movement_speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speed(self):\n return sqrt(self.velocity_x ** 2 + self.velocity_y ** 2)", "def get_speed(self):\n return self._speed", "def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed", "def movementSpeedModifier(self):\n return 0", "def speed(self) -> float:\n return self._speed", "def speed(self) -> float:\n return self._speed", "def speed(self):\n return self._turtle.speed()", "def speed(self):\n return self._turtle.speed()", "def speed(self):\n return self._speed.value", "def speed(self) -> float:\n return linalg.norm(self.velocity)", "def get_speed(self):\n return self.send(self.cmd.GET_ROTATION_ACT)", "def get_speed(self):\r\n return self.__x_speed, self.__y_speed", "def get_speed(self, hero):\n vel = hero.get_velocity()\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def get_cmd_velocity(self):\n return self.gripper_io.get_signal_value(\"speed_mps\")", "def getMotorSpeed(self):\n cmd = 'E'\n vel = [-1,-1]\n out = self.getData(cmd)\n out = str(out, 'utf-8')\n if self.debug:\n print(out)\n if out[0] == 'e':\n isStart = False\n j = 0\n for i in range(len(out)):\n if isStart:\n if out[i] == ',':\n vel[j] = int(data)\n j = j + 1\n isStart = False\n else:\n data=data+out[i]\n if out[i] == ',':\n isStart = True\n data = ''\n vel[j] = int(data)\n return vel", "def calc_speed(self):\n if self.vars['step'] > 0:\n target_tensor = torch.abs(self.state - self.state_prev)\n speed = torch.max(target_tensor)\n else: # this is the first step, no calculation is possible\n speed = float('NaN')\n return speed", "def get_speed(self):\n raise NotImplementedError", "def get_speed(self):\n raise NotImplementedError", "def wind_speed(self):\r\n raise NotImplementedError", "def wind_speed(self):\n return self.flow_field.wind_speed", "def set_move_speed(cls, quad):\n\n\t\tspeed = cls.get_address_value(quad.result)\n\t\treturn speed/1000.0", "def calc_speed2(self):\n if self.vars['step'] > 0:\n target_tensor = torch.abs(self.state - self.state_prev)\n speed = torch.max(target_tensor) / self.vars['dt']\n else: # this is the first step, no calculation is possible\n speed = float('NaN')\n return speed", "def speed(self):\n return self._getAttribute(Attribute.maxVelocity)", "def speed(self) -> int:\n return self._speed", "def speed(self) -> int:\n return self._speed", "def get_speed(self):\n p = self._get_sub_text('speed')\n if not p:\n return None\n else:\n try:\n return float(p)\n except ValueError:\n return None", "def get_speed(vehicle):\n vel = vehicle.get_velocity()\n\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def speed(self) -> str:\n return self._current_speed", "def get_speed(self):\n raise NotImplementedError()", "def GetSpeed(self):\n pass" ]
[ "0.7511374", "0.7306894", "0.7306178", "0.7221197", "0.7182297", "0.7182297", "0.71699685", "0.71699685", "0.7161676", "0.71568924", "0.71368206", "0.7089084", "0.70701754", "0.7040754", "0.69985515", "0.6961326", "0.6937583", "0.6937583", "0.69204515", "0.6907069", "0.6897137", "0.68494296", "0.6820912", "0.682061", "0.682061", "0.6817393", "0.6815827", "0.6813485", "0.6789587", "0.6776184" ]
0.7891986
0
Checks if a worker (or MULE) is carrying (gold)minerals.
def is_carrying_minerals(self) -> bool: return self.has_buff(BuffId.CARRYMINERALFIELDMINERALS) or self.has_buff( BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_mineral_field(self) -> bool:\n return self.type_data.has_minerals", "def verify_miner(self):\n for transaction in self.transaction_list:\n if(transaction.verify_miner()):\n return True\n return False", "def all_enter(self):\n return self.num_enters == self.num_workers", "def CanBalanceWithWater(self):\n try:\n extra_waters = self._ExtraWaters()\n except ReactantFormulaMissingError:\n return True\n \n return extra_waters is not None", "def TryBalanceWithWater(self): \n extra_waters = self._ExtraWaters()\n if extra_waters is None:\n # cannot balance the reaction with H2O only\n return False\n if extra_waters != 0:\n self._AddCompound('C00001', extra_waters)\n self._Dedup()\n return True", "def insufficient_material(self):\n piece_set = set()\n for row in range(8):\n for col in range(8):\n piece = self.board.squares[row][col]\n if piece is None:\n continue\n # Any pawn, queen or rook means there is sufficient material\n if piece == ChessPiece.W_PAWN or piece == ChessPiece.B_PAWN or \\\n piece == ChessPiece.W_QUEEN or piece == ChessPiece.B_QUEEN or \\\n piece == ChessPiece.W_ROOK or piece == ChessPiece.B_ROOK:\n return False\n else:\n # If you have 2 bishops or 2 knights, you have sufficient material\n if piece in piece_set:\n return False\n else:\n piece_set.add(piece)\n return True", "def is_mine(self) -> bool:\n return self.proto.alliance == ALLIANCE.Self.value", "def is_spare(self):\n if self.is_strike():\n return False\n\n return (self.first_ball + self.second_ball) == 10", "async def should_handle(self):\n local_controller = self.controller\n workers_total = len(local_controller.workers)\n geysers = local_controller.extractors\n drones_in_queue = local_controller.already_pending(DRONE)\n if (\n not local_controller.close_enemies_to_base\n and local_controller.can_train(DRONE)\n and not local_controller.counter_attack_vs_flying\n ):\n if workers_total == 12 and not drones_in_queue:\n return True\n if (\n workers_total in (13, 14, 15)\n and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1\n ):\n return True\n optimal_workers = min(\n sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)\n )\n return (\n workers_total + drones_in_queue < optimal_workers\n and np.sum(\n np.array(\n [\n len(local_controller.zerglings),\n len(local_controller.hydras),\n len(local_controller.ultralisks),\n ]\n )\n * np.array([1, 2, 3])\n )\n > 15\n )\n return False", "def has_credits(self):\n return self.script or \\\n self.pencils or \\\n self.inks or \\\n self.colors or \\\n self.letters or \\\n self.editing or \\\n self.job_number", "def has_credits(self):\n return self.script or \\\n self.pencils or \\\n self.inks or \\\n self.colors or \\\n self.letters or \\\n self.editing or \\\n self.job_number", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def won(self):\n return self.mines_found == self.mines", "def isWeber(self):\n return _libsbml.Unit_isWeber(self)", "def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers", "def is_carrying_vespene(self) -> bool:\n return (\n self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS)\n or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS)\n or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG)\n )", "def 
has_waters(self):\n for frag in self.iter_waters():\n return True\n return False", "def check_loss(self):\n return POKEMON in self.get_game()", "def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return False", "def is_collecting(self) -> bool:\n return self.orders and self.orders[0].ability.id in {AbilityId.HARVEST_GATHER, AbilityId.HARVEST_RETURN}", "def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True", "def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False", "def is_gathering(self) -> bool:\n return self.orders and self.orders[0].ability.id is AbilityId.HARVEST_GATHER", "def check_for_barrierless_reaction(self):\n # Check for barrierless reaction leading to new graphs\n if self.rc_opt_system_name not in self.systems: # Skip if already done\n print(\"Running Reactive Complex Optimization\")\n print(\"Settings:\")\n print(self.settings[self.rc_opt_system_name], \"\\n\")\n self.systems, success = self.observed_readuct_call(\n 'run_opt_task', self.systems, [self.rc_key], **self.settings[self.rc_opt_system_name]\n )\n self.throw_if_not_successful(\n success,\n self.systems,\n [self.rc_opt_system_name],\n [],\n \"Reactive complex optimization failed.\\n\",\n )\n _, rc_opt_graph, _, _, rc_opt_decision_lists = \\\n self.get_graph_charges_multiplicities(self.rc_opt_system_name, sum(self.start_charges))\n\n if not masm.JsonSerialization.equal_molecules(self.start_graph, rc_opt_graph):\n return rc_opt_graph, rc_opt_decision_lists\n return None, None", "def isHeavyMetal(self):\n return self._base.isHeavyMetal()", "def is_concealed(self) -> bool:\n # return not self._exposed\n return sum(self.concealed_part.values()) == 13" ]
[ "0.62416214", "0.61482203", "0.59674853", "0.5875922", "0.5744568", "0.5655014", "0.5642399", "0.56053805", "0.5594345", "0.5547255", "0.5547255", "0.5538677", "0.5538677", "0.5538677", "0.5538677", "0.5538677", "0.5538677", "0.54845953", "0.5477522", "0.5464437", "0.54502815", "0.5428208", "0.53992283", "0.5384728", "0.5365626", "0.5347793", "0.5346233", "0.5311726", "0.530425", "0.53013366" ]
0.703138
0
Checks if a worker is carrying vespene.
def is_carrying_vespene(self) -> bool: return ( self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS) or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS) or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_enter(self):\n return self.num_enters == self.num_workers", "def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers", "def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False", "def is_vespene_geyser(self) -> bool:\n return self.type_data.has_vespene", "def isVersor(self) -> bool:\n\n Vhat = self.gradeInvol()\n Vrev = ~self\n Vinv = Vrev/(self*Vrev)[0]\n\n gpres = grades_present(Vhat*Vinv, 0.000001)\n if len(gpres) == 1:\n if gpres[0] == 0:\n if np.sum(np.abs((Vhat*Vinv).value - (Vinv*Vhat).value)) < 0.0001:\n for e in basis_vectors(self.layout).values():\n gpres = grades_present(Vhat*e*Vrev, 0.000001)\n if not (len(gpres) == 1 and gpres[0] == 1):\n return False\n gpres = grades_present(self, 0.000001)\n if len(gpres) == 1:\n return False\n else:\n return True\n return False", "def has_vacancy(self):\n return len(self.occupants) < self.capacity", "def has_receiver(self):\n return self.balance < 0", "async def should_handle(self):\n local_controller = self.controller\n workers_total = len(local_controller.workers)\n geysers = local_controller.extractors\n drones_in_queue = local_controller.already_pending(DRONE)\n if (\n not local_controller.close_enemies_to_base\n and local_controller.can_train(DRONE)\n and not local_controller.counter_attack_vs_flying\n ):\n if workers_total == 12 and not drones_in_queue:\n return True\n if (\n workers_total in (13, 14, 15)\n and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1\n ):\n return True\n optimal_workers = min(\n sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)\n )\n return (\n workers_total + drones_in_queue < optimal_workers\n and np.sum(\n np.array(\n [\n len(local_controller.zerglings),\n len(local_controller.hydras),\n len(local_controller.ultralisks),\n ]\n )\n * np.array([1, 2, 3])\n )\n > 15\n )\n return False", "def is_returning_worker(conn, workerId):\n query = \"SELECT * FROM workers WHERE worker_id = '{}'\".format(workerId)\n return len(conn.execute(query).fetchall()) != 0", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def solve_is_valid(bv: BinaryView):\n return not bv.session_data.mui_is_running", "def is_worker_thread():\n try:\n return worker_thread_data.is_worker_thread\n except AttributeError:\n return False", "def any(self) -> bool:\n return len(self.queue) > 0", "def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )", "def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True", "def solved(self):\n return GOAL_VEHICLE in self.vehicles", "def is_occupied(self, pos):\n return any([p == pos for p in self._workers.values()])", "def is_worker_registered(self, worker_id):\n return worker_id in self.registered_workers and self.registered_workers[worker_id]", "def _check_episode_start_condition(self):\n vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) >= self.episode_threshold:\n return True\n else:\n return False", "def _check_episode_end_condition(self):\n 
vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) < self.episode_threshold:\n return True\n else:\n return False", "async def check_pause(self, ip: str) -> bool:\n miner = self.miners[ip]\n return not miner.running.is_set()", "def cheapCheck(t: Ticket) -> bool:\n t_staker = stakers[t.sender]\n\n valid_Q_j = t.proof.Q_j == t_staker.address\n valid_vs = t_staker.weight() > t.proof.vs >= 1\n\n return valid_Q_j && valid_vs", "def _in_multi_worker_mode(self):\n return False", "def is_vacuum(self, tol=0.0, **kwargs):\n return self.circuit.is_vacuum(tol)", "def is_vacuum(self, tol=0.0, **kwargs):\n return self.circuit.is_vacuum(tol)", "def has_sender(self):\n return self.balance > 0", "def res_required(self):\n v = self[22]\n return (v & 0b1) != 0", "def calculate_alive(self):\n return self.lives > 0", "def check(self):\n return True", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n return self.ctx.running_calc < 2 and self.can_restart()" ]
[ "0.6346869", "0.6190996", "0.60167", "0.6014252", "0.58534354", "0.5851993", "0.57939464", "0.5719733", "0.57030445", "0.56983227", "0.5682124", "0.56527627", "0.56478727", "0.5629735", "0.5602998", "0.5526202", "0.5517705", "0.5509002", "0.5499553", "0.54875815", "0.547757", "0.5477135", "0.546933", "0.5469326", "0.5469326", "0.5431286", "0.5419831", "0.53942925", "0.53896785", "0.53766507" ]
0.66838014
0
Checks if the unit is an SCV that is currently building.
def is_constructing_scv(self) -> bool: return self.orders and self.orders[0].ability.id in { AbilityId.TERRANBUILD_ARMORY, AbilityId.TERRANBUILD_BARRACKS, AbilityId.TERRANBUILD_BUNKER, AbilityId.TERRANBUILD_COMMANDCENTER, AbilityId.TERRANBUILD_ENGINEERINGBAY, AbilityId.TERRANBUILD_FACTORY, AbilityId.TERRANBUILD_FUSIONCORE, AbilityId.TERRANBUILD_GHOSTACADEMY, AbilityId.TERRANBUILD_MISSILETURRET, AbilityId.TERRANBUILD_REFINERY, AbilityId.TERRANBUILD_SENSORTOWER, AbilityId.TERRANBUILD_STARPORT, AbilityId.TERRANBUILD_SUPPLYDEPOT, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_building(self):\n return self._is_name_type(self.BUILDING)", "def canConstructSensorStation(self, pUnit, iBuild):\r\n\t\tbValid = false\r\n\t\tpBestPlot, iBestValue = self.findBestChokepoint(pUnit.getOwner(), true) # bug fix - was trying to use non-existent iPlayer\r\n\t\tif (pBestPlot != -1):\r\n\t\t\tiX = pBestPlot.getX()\r\n\t\t\tiY = pBestPlot.getY()\r\n\r\n\t\t\tpBuildInfo = gc.getBuildInfo(iBuild)\r\n\t\t\tpImprovementInfo = gc.getImprovementInfo(pBuildInfo.getImprovement())\r\n\t\t\tpCivilization = gc.getCivilizationInfo(pUnit.getCivilizationType())\r\n\t\t\tiBuildUnit = pCivilization.getCivilizationUnits(pImprovementInfo.getUnitClassBuilt())\r\n\t\t\tpyPlayer = PyPlayer(pUnit.getOwner())\r\n\t\t\tapUnitList = pyPlayer.getUnitsOfType(iBuildUnit)\r\n\t\t\tiThreshold = 45 + len(apUnitList)\r\n\t\t\tprintd(\"canConstructSensorStation: threshold=%d, best=%d\" % (iThreshold, iBestValue))\r\n\t\t\tif (iBestValue > iThreshold):\t# was 10 with old system\t\t#What should be the cutoff for a really good value?\r\n\t\t\t\tbValid = True\r\n\t\t\telse:\r\n\t\t\t\tiX = -1\r\n\t\t\t\tiY = -1\r\n\t\treturn (bValid, iX, iY)", "def in_build(self):\n\n return self.is_valid_platform() and not self['ENVIRONMENT']", "def checkBuildStatus(self):\n pass", "def isValid(self, game):\n if self.x == None or self.y == None or self.team == None:\n return False\n\n citytile = game.map.getCell(self.x, self.y).citytile\n if citytile == None:\n return False\n \n if not citytile.canBuildUnit():\n return False\n\n # TODO handle multiple units building workers in same turn\n if game.cartUnitCapReached(self.team):\n return False\n \n return True", "def solve_is_valid(bv: BinaryView):\n return not bv.session_data.mui_is_running", "def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False", "def is_translation_unit(self):\r\n return conf.lib.clang_isTranslationUnit(self)", "def _IsBuildRunning(build_data):\n current_step = build_data.get('currentStep')\n if (current_step and current_step.get('isStarted') and\n current_step.get('results') is None):\n return True\n return False", "def is_running(self):\n data = self._poll()\n return data.get('building', False)", "def is_unit(self):\n return math.isclose(self.magnitude(), 1)", "def has_cooling_system(bpr):\n\n if bpr.hvac['type_cs'] in {'T1', 'T2', 'T3'}:\n return True\n elif bpr.hvac['type_cs'] in {'T0'}:\n return False\n else:\n raise", "def canBuild(self, game_map) -> bool:\n cell = game_map.getCellByPos(self.pos)\n if not cell.hasResource() and self.canAct() and (self.cargo[\"wood\"] + self.cargo[\"coal\"] + self.cargo[\"uranium\"]) >= GAME_CONSTANTS[\"PARAMETERS\"][\"CITY_BUILD_COST\"]:\n return True\n return False", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def doStationConstructionAI(self, pUnit, iBuild):\r\n\t\tpBuildInfo = gc.getBuildInfo(iBuild) # bug fix - was using the non-existent \"iStation\" instead of iBuild\r\n\t\tpImprovementInfo = gc.getImprovementInfo(pBuildInfo.getImprovement())\r\n\t\tpCivilization = gc.getCivilizationInfo(pUnit.getCivilizationType()) # bug fix - part of the fix for the next line\r\n\t\tpUnitInfo = gc.getUnitInfo(pCivilization.getCivilizationUnits(pImprovementInfo.getUnitClassBuilt())) # bug fix - was doing the getUnitInfo on the unit class directly\r\n\t\tpPlayer = gc.getPlayer(pUnit.getOwner())\r\n\t\tpTeam = gc.getTeam(pPlayer.getTeam())\r\n\t\t\r\n\t\tbOverride = False\r\n\t\tbValid = False\r\n\t\t\r\n\t\t#General 
construction stuff (before specific to unit)\r\n\t\tiBuildCost = gc.getBuildInfo(iBuild).getCost() * (100 + pPlayer.calculateInflationRate()) / 100\r\n\t\tif (pPlayer.getGold() > iBuildCost):\r\n\t\t\tif pTeam.isHasTech(pBuildInfo.getTechPrereq()):\r\n\t\t\t\tbValid = True\r\n\t\telse:\r\n\t\t\tpPlayer.AI_setExtraGoldTarget(250)\r\n\r\n\t\t#Unit-specific construction checks\r\n\t\tif bValid == true:\r\n\t\t\tif (pUnitInfo.isStarbase() and not pUnitInfo.isOtherStation()):\r\n\t\t\t\tbValid, iX, iY = self.canConstructStarbase(pUnit, iBuild)\r\n\t\t\telse:\r\n\t\t\t\t#For now, we only have two station types- starbases and 'other stations'- defined by the DLL\r\n\t\t\t\t#So, this code assumes we are trying to build a Sensor Station (the only 'other station')\r\n\t\t\t\t#You can define hardcoded exceptions here)\r\n\t\t\t\tbValid, iX, iY = self.canConstructSensorStation(pUnit, iBuild)\r\n\t\t\t\t\r\n\t\t#If we can build something, go ahead and do it!\r\n\t\tif bValid == true:\r\n\t\t\tif (pUnit.getX() == iX and pUnit.getY() == iY):\r\n\t\t\t\tif (pUnit.getBuildType() != iBuild):\r\n\t\t\t\t\tif (pUnit.canBuild(pUnit.plot(), iBuild, true)):\r\n\t\t\t\t\t\tbCanDo = true\r\n\t\t\t\t\t\tfor iUnitLoop in range(pUnit.plot().getNumUnits()):\r\n\t\t\t\t\t\t\tpUnitLoop = pUnit.plot().getUnit(iUnitLoop)\r\n\t\t\t\t\t\t\tif (pUnitLoop.getBuildType() == iBuild):\r\n\t\t\t\t\t\t\t\tbCanDo = false\t\r\n\t\t\t\t\t\tif (bCanDo):\r\n\t\t\t\t\t\t\tpUnit.getGroup().pushMission(MissionTypes.MISSION_BUILD, iBuild, -1, -1, false, true, MissionAITypes.MISSIONAI_BUILD, pUnit.plot(), pUnit)\r\n\t\t\t\t\t\t\tbOverride = true\r\n\r\n\t\t\t#Unit not in the right spot, send him in the right direction\r\n\t\t\telse:\r\n\t\t\t\tif (pUnit.canMoveInto(CyMap().plot(iX, iY), false, false, false)):\r\n\t\t\t\t\tpUnit.getGroup().pushMoveToMission(iX, iY)\r\n\t\t\t\t\tpUnit.finishMoves()\r\n\t\t\t\t\tbOverride = true\r\n\t\t\t\t\t\r\n\t\treturn bOverride", "def isValid(self, game):\n if self.x == None or self.y == None or self.team == None:\n return False\n \n if self.y < 0 or self.y >= game.map.height:\n return False\n if self.x < 0 or self.x >= game.map.height:\n return False\n\n citytile = game.map.getCell(self.x, self.y).citytile\n if citytile == None:\n return False\n \n if not citytile.canBuildUnit():\n return False\n\n # TODO handle multiple units building workers in same turn\n if game.workerUnitCapReached(self.team):\n return False\n \n return True", "def isWeber(self):\n return _libsbml.Unit_isWeber(self)", "def isPlayerHasBuilding(self, iPlayer, iBuilding):\n\t\tapCityList = PyPlayer(iPlayer).getCityList()\n\t\tfor pCity in apCityList:\n\t\t\tif pCity.GetCy().getNumRealBuilding(iBuilding): \n\t\t\t\treturn True\n\t\treturn False", "async def should_handle(self):\n local_controller = self.controller\n cavern = local_controller.caverns\n if local_controller.hives and not cavern:\n return False\n if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready):\n return False\n if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self):\n return False\n if cavern.ready:\n return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras)\n return not local_controller.floating_buildings_bm", "def can_build(self, game_map) -> bool:\n if self.is_cart():\n return False\n cell = game_map.get_cell_by_pos(self.pos)\n if not cell.has_resource() and cell.citytile is None and self.can_act() and self.has_enough_resources_to_build:\n return True\n return False", 
"def is_vectorvox(self):\n return bool(self.gxvox.is_vector_voxel())", "def isVolt(self):\n return _libsbml.Unit_isVolt(self)", "def isValidForSimulation(self):\n for position, myQuad in self.myDesign.quads.iteritems():\n if myQuad.components != {}:\n return 1\n return 0", "def is_ready(self) -> bool:\n return self.build_progress == 1.0", "def isCityHasBuilding(self, tCoords, iBuilding):\n\t\tplot = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif plot.isCity():\n\t\t\tif plot.getPlotCity().getNumRealBuilding(iBuilding): \n\t\t\t\treturn True\n\t\treturn False", "def is_up(self):\n data = self.vxprint()\n return self.name in data and data[self.name].STATE == \"ACTIVE\"", "def is_valid(self):\r\n return self.circuit.is_valid", "def is_smoke(self, channel=None):\n return self.get_state(channel)", "def isContinuousFramework(*args):\n return _libsbml.SBO_isContinuousFramework(*args)", "def isUnitKind(*args):\n return _libsbml.Unit_isUnitKind(*args)" ]
[ "0.61469597", "0.5953716", "0.58659333", "0.5855476", "0.5771452", "0.5711982", "0.5688464", "0.56500685", "0.5618142", "0.55776274", "0.5502253", "0.54851365", "0.5467927", "0.54370534", "0.54180664", "0.5416628", "0.54015076", "0.5363318", "0.53548175", "0.5348815", "0.5337608", "0.53307885", "0.532922", "0.5327341", "0.5317675", "0.5287663", "0.52855664", "0.5282167", "0.5277386", "0.52695644" ]
0.6780311
0